// SPDX-License-Identifier: GPL-2.0-or-later
/* PKCS#8 Private Key parser [RFC 5208].
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) "PKCS8: "fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/oid_registry.h>
#include <keys/asymmetric-subtype.h>
#include <keys/asymmetric-parser.h>
#include <crypto/public_key.h>
#include "pkcs8.asn1.h"

struct pkcs8_parse_context {
	struct public_key *pub;
	unsigned long	data;			/* Start of data */
	enum OID	last_oid;		/* Last OID encountered */
	enum OID	algo_oid;		/* Algorithm OID */
	u32		key_size;
	const void	*key;
};

/*
 * Note an OID when we find one for later processing when we know how to
 * interpret it.
 */
int pkcs8_note_OID(void *context, size_t hdrlen,
		   unsigned char tag,
		   const void *value, size_t vlen)
{
	struct pkcs8_parse_context *ctx = context;

	ctx->last_oid = look_up_OID(value, vlen);
	if (ctx->last_oid == OID__NR) {
		char buffer[50];

		sprint_oid(value, vlen, buffer, sizeof(buffer));
		pr_info("Unknown OID: [%lu] %s\n",
			(unsigned long)value - ctx->data, buffer);
	}
	return 0;
}

/*
 * Note the version number of the ASN.1 blob.
 */
int pkcs8_note_version(void *context, size_t hdrlen,
		       unsigned char tag,
		       const void *value, size_t vlen)
{
	if (vlen != 1 || ((const u8 *)value)[0] != 0) {
		pr_warn("Unsupported PKCS#8 version\n");
		return -EBADMSG;
	}
	return 0;
}

/*
 * Note the public algorithm.
 */
int pkcs8_note_algo(void *context, size_t hdrlen,
		    unsigned char tag,
		    const void *value, size_t vlen)
{
	struct pkcs8_parse_context *ctx = context;

	if (ctx->last_oid != OID_rsaEncryption)
		return -ENOPKG;

	ctx->pub->pkey_algo = "rsa";
	return 0;
}

/*
 * Note the key data of the ASN.1 blob.
 */
int pkcs8_note_key(void *context, size_t hdrlen,
		   unsigned char tag,
		   const void *value, size_t vlen)
{
	struct pkcs8_parse_context *ctx = context;

	ctx->key = value;
	ctx->key_size = vlen;
	return 0;
}

/*
 * Parse a PKCS#8 private key blob.
 */
static struct public_key *pkcs8_parse(const void *data, size_t datalen)
{
	struct pkcs8_parse_context ctx;
	struct public_key *pub;
	long ret;

	memset(&ctx, 0, sizeof(ctx));

	ret = -ENOMEM;
	ctx.pub = kzalloc(sizeof(struct public_key), GFP_KERNEL);
	if (!ctx.pub)
		goto error;

	ctx.data = (unsigned long)data;

	/* Attempt to decode the private key */
	ret = asn1_ber_decoder(&pkcs8_decoder, &ctx, data, datalen);
	if (ret < 0)
		goto error_decode;

	ret = -ENOMEM;
	pub = ctx.pub;
	pub->key = kmemdup(ctx.key, ctx.key_size, GFP_KERNEL);
	if (!pub->key)
		goto error_decode;

	pub->keylen = ctx.key_size;
	pub->key_is_private = true;
	return pub;

error_decode:
	kfree(ctx.pub);
error:
	return ERR_PTR(ret);
}

/*
 * Attempt to parse a data blob for a key as a PKCS#8 private key.
 */
static int pkcs8_key_preparse(struct key_preparsed_payload *prep)
{
	struct public_key *pub;

	pub = pkcs8_parse(prep->data, prep->datalen);
	if (IS_ERR(pub))
		return PTR_ERR(pub);

	pr_devel("Cert Key Algo: %s\n", pub->pkey_algo);
	pub->id_type = "PKCS8";

	/* We're pinning the module by being linked against it */
	__module_get(public_key_subtype.owner);
	prep->payload.data[asym_subtype] = &public_key_subtype;
	prep->payload.data[asym_key_ids] = NULL;
	prep->payload.data[asym_crypto] = pub;
	prep->payload.data[asym_auth] = NULL;
	prep->quotalen = 100;
	return 0;
}

static struct asymmetric_key_parser pkcs8_key_parser = {
	.owner	= THIS_MODULE,
	.name	= "pkcs8",
	.parse	= pkcs8_key_preparse,
};

/*
 * Module stuff
 */
static int __init pkcs8_key_init(void)
{
	return register_asymmetric_key_parser(&pkcs8_key_parser);
}

static void __exit pkcs8_key_exit(void)
{
	unregister_asymmetric_key_parser(&pkcs8_key_parser);
}

module_init(pkcs8_key_init);
module_exit(pkcs8_key_exit);

MODULE_DESCRIPTION("PKCS#8 certificate parser");
MODULE_LICENSE("GPL");
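/*
 * Userspace sketch (not part of the file above): one way a DER-encoded
 * PKCS#8 blob could be handed to the kernel's "asymmetric" key type, which
 * in turn tries registered parsers such as the "pkcs8" parser above.  This
 * assumes libkeyutils is installed and a kernel built with this parser; the
 * file name "key.pk8" and the key description are made-up examples.
 * Build with: cc example.c -lkeyutils
 */
#include <errno.h>
#include <keyutils.h>
#include <stdio.h>

int main(void)
{
	unsigned char buf[65536];
	size_t len;
	key_serial_t key;
	FILE *f;

	f = fopen("key.pk8", "rb");	/* hypothetical DER-encoded PKCS#8 file */
	if (!f) {
		perror("fopen");
		return 1;
	}
	len = fread(buf, 1, sizeof(buf), f);
	fclose(f);

	/* The asymmetric key type walks its parsers; PKCS#8 DER should match. */
	key = add_key("asymmetric", "example:pkcs8-test", buf, len,
		      KEY_SPEC_USER_KEYRING);
	if (key < 0) {
		perror("add_key");
		return 1;
	}
	printf("key id: %d\n", (int)key);
	return 0;
}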
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
*/ #include <linux/fs.h> #include <linux/filelock.h> #include <linux/miscdevice.h> #include <linux/poll.h> #include <linux/dlm.h> #include <linux/dlm_plock.h> #include <linux/slab.h> #include <trace/events/dlm.h> #include "dlm_internal.h" #include "lockspace.h" static DEFINE_SPINLOCK(ops_lock); static LIST_HEAD(send_list); static LIST_HEAD(recv_list); static DECLARE_WAIT_QUEUE_HEAD(send_wq); static DECLARE_WAIT_QUEUE_HEAD(recv_wq); struct plock_async_data { void *fl; void *file; struct file_lock flc; int (*callback)(struct file_lock *fl, int result); }; struct plock_op { struct list_head list; int done; struct dlm_plock_info info; /* if set indicates async handling */ struct plock_async_data *data; }; static inline void set_version(struct dlm_plock_info *info) { info->version[0] = DLM_PLOCK_VERSION_MAJOR; info->version[1] = DLM_PLOCK_VERSION_MINOR; info->version[2] = DLM_PLOCK_VERSION_PATCH; } static struct plock_op *plock_lookup_waiter(const struct dlm_plock_info *info) { struct plock_op *op = NULL, *iter; list_for_each_entry(iter, &recv_list, list) { if (iter->info.fsid == info->fsid && iter->info.number == info->number && iter->info.owner == info->owner && iter->info.pid == info->pid && iter->info.start == info->start && iter->info.end == info->end && iter->info.ex == info->ex && iter->info.wait) { op = iter; break; } } return op; } static int check_version(struct dlm_plock_info *info) { if ((DLM_PLOCK_VERSION_MAJOR != info->version[0]) || (DLM_PLOCK_VERSION_MINOR < info->version[1])) { log_print("plock device version mismatch: " "kernel (%u.%u.%u), user (%u.%u.%u)", DLM_PLOCK_VERSION_MAJOR, DLM_PLOCK_VERSION_MINOR, DLM_PLOCK_VERSION_PATCH, info->version[0], info->version[1], info->version[2]); return -EINVAL; } return 0; } static void dlm_release_plock_op(struct plock_op *op) { kfree(op->data); kfree(op); } static void send_op(struct plock_op *op) { set_version(&op->info); spin_lock(&ops_lock); list_add_tail(&op->list, &send_list); spin_unlock(&ops_lock); wake_up(&send_wq); } static int do_lock_cancel(const struct dlm_plock_info *orig_info) { struct plock_op *op; int rv; op = kzalloc(sizeof(*op), GFP_NOFS); if (!op) return -ENOMEM; op->info = *orig_info; op->info.optype = DLM_PLOCK_OP_CANCEL; op->info.wait = 0; send_op(op); wait_event(recv_wq, (op->done != 0)); rv = op->info.rv; dlm_release_plock_op(op); return rv; } int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file, int cmd, struct file_lock *fl) { struct plock_async_data *op_data; struct dlm_ls *ls; struct plock_op *op; int rv; ls = dlm_find_lockspace_local(lockspace); if (!ls) return -EINVAL; op = kzalloc(sizeof(*op), GFP_NOFS); if (!op) { rv = -ENOMEM; goto out; } op->info.optype = DLM_PLOCK_OP_LOCK; op->info.pid = fl->c.flc_pid; op->info.ex = lock_is_write(fl); op->info.wait = !!(fl->c.flc_flags & FL_SLEEP); op->info.fsid = ls->ls_global_id; op->info.number = number; op->info.start = fl->fl_start; op->info.end = fl->fl_end; op->info.owner = (__u64)(long) fl->c.flc_owner; /* async handling */ if (fl->fl_lmops && fl->fl_lmops->lm_grant) { op_data = kzalloc(sizeof(*op_data), GFP_NOFS); if (!op_data) { dlm_release_plock_op(op); rv = -ENOMEM; goto out; } op_data->callback = fl->fl_lmops->lm_grant; locks_init_lock(&op_data->flc); locks_copy_lock(&op_data->flc, fl); op_data->fl = fl; op_data->file = file; op->data = op_data; send_op(op); rv = FILE_LOCK_DEFERRED; goto out; } send_op(op); if (op->info.wait) { rv = wait_event_interruptible(recv_wq, (op->done != 0)); if (rv == -ERESTARTSYS) { spin_lock(&ops_lock); 
/* recheck under ops_lock if we got a done != 0, * if so this interrupt case should be ignored */ if (op->done != 0) { spin_unlock(&ops_lock); goto do_lock_wait; } spin_unlock(&ops_lock); rv = do_lock_cancel(&op->info); switch (rv) { case 0: /* waiter was deleted in user space, answer will never come * remove original request. The original request must be * on recv_list because the answer of do_lock_cancel() * synchronized it. */ spin_lock(&ops_lock); list_del(&op->list); spin_unlock(&ops_lock); rv = -EINTR; break; case -ENOENT: /* cancellation wasn't successful but op should be done */ fallthrough; default: /* internal error doing cancel we need to wait */ goto wait; } log_debug(ls, "%s: wait interrupted %x %llx pid %d", __func__, ls->ls_global_id, (unsigned long long)number, op->info.pid); dlm_release_plock_op(op); goto out; } } else { wait: wait_event(recv_wq, (op->done != 0)); } do_lock_wait: WARN_ON(!list_empty(&op->list)); rv = op->info.rv; if (!rv) { if (locks_lock_file_wait(file, fl) < 0) log_error(ls, "dlm_posix_lock: vfs lock error %llx", (unsigned long long)number); } dlm_release_plock_op(op); out: dlm_put_lockspace(ls); return rv; } EXPORT_SYMBOL_GPL(dlm_posix_lock); /* Returns failure iff a successful lock operation should be canceled */ static int dlm_plock_callback(struct plock_op *op) { struct plock_async_data *op_data = op->data; struct file *file; struct file_lock *fl; struct file_lock *flc; int (*notify)(struct file_lock *fl, int result) = NULL; int rv = 0; WARN_ON(!list_empty(&op->list)); /* check if the following 2 are still valid or make a copy */ file = op_data->file; flc = &op_data->flc; fl = op_data->fl; notify = op_data->callback; if (op->info.rv) { notify(fl, op->info.rv); goto out; } /* got fs lock; bookkeep locally as well: */ flc->c.flc_flags &= ~FL_SLEEP; if (posix_lock_file(file, flc, NULL)) { /* * This can only happen in the case of kmalloc() failure. * The filesystem's own lock is the authoritative lock, * so a failure to get the lock locally is not a disaster. * As long as the fs cannot reliably cancel locks (especially * in a low-memory situation), we're better off ignoring * this failure than trying to recover. 
*/ log_print("dlm_plock_callback: vfs lock error %llx file %p fl %p", (unsigned long long)op->info.number, file, fl); } rv = notify(fl, 0); if (rv) { /* XXX: We need to cancel the fs lock here: */ log_print("%s: lock granted after lock request failed; dangling lock!", __func__); goto out; } out: dlm_release_plock_op(op); return rv; } int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file, struct file_lock *fl) { struct dlm_ls *ls; struct plock_op *op; int rv; unsigned char saved_flags = fl->c.flc_flags; ls = dlm_find_lockspace_local(lockspace); if (!ls) return -EINVAL; op = kzalloc(sizeof(*op), GFP_NOFS); if (!op) { rv = -ENOMEM; goto out; } /* cause the vfs unlock to return ENOENT if lock is not found */ fl->c.flc_flags |= FL_EXISTS; rv = locks_lock_file_wait(file, fl); if (rv == -ENOENT) { rv = 0; goto out_free; } if (rv < 0) { log_error(ls, "dlm_posix_unlock: vfs unlock error %d %llx", rv, (unsigned long long)number); } op->info.optype = DLM_PLOCK_OP_UNLOCK; op->info.pid = fl->c.flc_pid; op->info.fsid = ls->ls_global_id; op->info.number = number; op->info.start = fl->fl_start; op->info.end = fl->fl_end; op->info.owner = (__u64)(long) fl->c.flc_owner; if (fl->c.flc_flags & FL_CLOSE) { op->info.flags |= DLM_PLOCK_FL_CLOSE; send_op(op); rv = 0; goto out; } send_op(op); wait_event(recv_wq, (op->done != 0)); WARN_ON(!list_empty(&op->list)); rv = op->info.rv; if (rv == -ENOENT) rv = 0; out_free: dlm_release_plock_op(op); out: dlm_put_lockspace(ls); fl->c.flc_flags = saved_flags; return rv; } EXPORT_SYMBOL_GPL(dlm_posix_unlock); /* * NOTE: This implementation can only handle async lock requests as nfs * do it. It cannot handle cancellation of a pending lock request sitting * in wait_event(), but for now only nfs is the only user local kernel * user. */ int dlm_posix_cancel(dlm_lockspace_t *lockspace, u64 number, struct file *file, struct file_lock *fl) { struct dlm_plock_info info; struct plock_op *op; struct dlm_ls *ls; int rv; /* this only works for async request for now and nfs is the only * kernel user right now. */ if (WARN_ON_ONCE(!fl->fl_lmops || !fl->fl_lmops->lm_grant)) return -EOPNOTSUPP; ls = dlm_find_lockspace_local(lockspace); if (!ls) return -EINVAL; memset(&info, 0, sizeof(info)); info.pid = fl->c.flc_pid; info.ex = lock_is_write(fl); info.fsid = ls->ls_global_id; dlm_put_lockspace(ls); info.number = number; info.start = fl->fl_start; info.end = fl->fl_end; info.owner = (__u64)(long) fl->c.flc_owner; rv = do_lock_cancel(&info); switch (rv) { case 0: spin_lock(&ops_lock); /* lock request to cancel must be on recv_list because * do_lock_cancel() synchronizes it. */ op = plock_lookup_waiter(&info); if (WARN_ON_ONCE(!op)) { spin_unlock(&ops_lock); rv = -ENOLCK; break; } list_del(&op->list); spin_unlock(&ops_lock); WARN_ON(op->info.optype != DLM_PLOCK_OP_LOCK); op->data->callback(op->data->fl, -EINTR); dlm_release_plock_op(op); rv = -EINTR; break; case -ENOENT: /* if cancel wasn't successful we probably were to late * or it was a non-blocking lock request, so just unlock it. 
*/ rv = dlm_posix_unlock(lockspace, number, file, fl); break; default: break; } return rv; } EXPORT_SYMBOL_GPL(dlm_posix_cancel); int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file, struct file_lock *fl) { struct dlm_ls *ls; struct plock_op *op; int rv; ls = dlm_find_lockspace_local(lockspace); if (!ls) return -EINVAL; op = kzalloc(sizeof(*op), GFP_NOFS); if (!op) { rv = -ENOMEM; goto out; } op->info.optype = DLM_PLOCK_OP_GET; op->info.pid = fl->c.flc_pid; op->info.ex = lock_is_write(fl); op->info.fsid = ls->ls_global_id; op->info.number = number; op->info.start = fl->fl_start; op->info.end = fl->fl_end; op->info.owner = (__u64)(long) fl->c.flc_owner; send_op(op); wait_event(recv_wq, (op->done != 0)); WARN_ON(!list_empty(&op->list)); /* info.rv from userspace is 1 for conflict, 0 for no-conflict, -ENOENT if there are no locks on the file */ rv = op->info.rv; fl->c.flc_type = F_UNLCK; if (rv == -ENOENT) rv = 0; else if (rv > 0) { locks_init_lock(fl); fl->c.flc_type = (op->info.ex) ? F_WRLCK : F_RDLCK; fl->c.flc_flags = FL_POSIX; fl->c.flc_pid = op->info.pid; if (op->info.nodeid != dlm_our_nodeid()) fl->c.flc_pid = -fl->c.flc_pid; fl->fl_start = op->info.start; fl->fl_end = op->info.end; rv = 0; } dlm_release_plock_op(op); out: dlm_put_lockspace(ls); return rv; } EXPORT_SYMBOL_GPL(dlm_posix_get); /* a read copies out one plock request from the send list */ static ssize_t dev_read(struct file *file, char __user *u, size_t count, loff_t *ppos) { struct dlm_plock_info info; struct plock_op *op = NULL; if (count < sizeof(info)) return -EINVAL; spin_lock(&ops_lock); if (!list_empty(&send_list)) { op = list_first_entry(&send_list, struct plock_op, list); if (op->info.flags & DLM_PLOCK_FL_CLOSE) list_del(&op->list); else list_move_tail(&op->list, &recv_list); memcpy(&info, &op->info, sizeof(info)); } spin_unlock(&ops_lock); if (!op) return -EAGAIN; trace_dlm_plock_read(&info); /* there is no need to get a reply from userspace for unlocks that were generated by the vfs cleaning up for a close (the process did not make an unlock call). */ if (op->info.flags & DLM_PLOCK_FL_CLOSE) dlm_release_plock_op(op); if (copy_to_user(u, &info, sizeof(info))) return -EFAULT; return sizeof(info); } /* a write copies in one plock result that should match a plock_op on the recv list */ static ssize_t dev_write(struct file *file, const char __user *u, size_t count, loff_t *ppos) { struct plock_op *op = NULL, *iter; struct dlm_plock_info info; int do_callback = 0; if (count != sizeof(info)) return -EINVAL; if (copy_from_user(&info, u, sizeof(info))) return -EFAULT; trace_dlm_plock_write(&info); if (check_version(&info)) return -EINVAL; /* * The results for waiting ops (SETLKW) can be returned in any * order, so match all fields to find the op. The results for * non-waiting ops are returned in the order that they were sent * to userspace, so match the result with the first non-waiting op. */ spin_lock(&ops_lock); if (info.wait) { op = plock_lookup_waiter(&info); } else { list_for_each_entry(iter, &recv_list, list) { if (!iter->info.wait && iter->info.fsid == info.fsid) { op = iter; break; } } } if (op) { /* Sanity check that op and info match. 
*/ if (info.wait) WARN_ON(op->info.optype != DLM_PLOCK_OP_LOCK); else WARN_ON(op->info.number != info.number || op->info.owner != info.owner || op->info.optype != info.optype); list_del_init(&op->list); memcpy(&op->info, &info, sizeof(info)); if (op->data) do_callback = 1; else op->done = 1; } spin_unlock(&ops_lock); if (op) { if (do_callback) dlm_plock_callback(op); else wake_up(&recv_wq); } else pr_debug("%s: no op %x %llx", __func__, info.fsid, (unsigned long long)info.number); return count; } static __poll_t dev_poll(struct file *file, poll_table *wait) { __poll_t mask = 0; poll_wait(file, &send_wq, wait); spin_lock(&ops_lock); if (!list_empty(&send_list)) mask = EPOLLIN | EPOLLRDNORM; spin_unlock(&ops_lock); return mask; } static const struct file_operations dev_fops = { .read = dev_read, .write = dev_write, .poll = dev_poll, .owner = THIS_MODULE, .llseek = noop_llseek, }; static struct miscdevice plock_dev_misc = { .minor = MISC_DYNAMIC_MINOR, .name = DLM_PLOCK_MISC_NAME, .fops = &dev_fops }; int dlm_plock_init(void) { int rv; rv = misc_register(&plock_dev_misc); if (rv) log_print("dlm_plock_init: misc_register failed %d", rv); return rv; } void dlm_plock_exit(void) { misc_deregister(&plock_dev_misc); WARN_ON(!list_empty(&send_list)); WARN_ON(!list_empty(&recv_list)); }
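/*
 * Caller-side sketch (not part of the file above): how a cluster
 * filesystem's ->lock file operation might dispatch POSIX lock requests to
 * the dlm_posix_* helpers implemented above, loosely modelled on existing
 * users such as gfs2.  The lockspace handle and per-inode lock number are
 * filesystem-specific; my_fs_lockspace() and my_fs_lock_number() below are
 * hypothetical placeholders for whatever the filesystem provides.
 */
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>

/* hypothetical helpers the filesystem would supply */
extern dlm_lockspace_t *my_fs_lockspace(struct super_block *sb);
extern u64 my_fs_lock_number(const struct inode *inode);

static int my_fs_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	dlm_lockspace_t *ls = my_fs_lockspace(inode->i_sb);
	u64 number = my_fs_lock_number(inode);

	/* only POSIX (fcntl-style) locks go through the plock path */
	if (!(fl->c.flc_flags & FL_POSIX))
		return -ENOLCK;

	if (IS_GETLK(cmd))
		return dlm_posix_get(ls, number, file, fl);
	if (lock_is_unlock(fl))
		return dlm_posix_unlock(ls, number, file, fl);
	return dlm_posix_lock(ls, number, file, cmd, fl);
}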
// SPDX-License-Identifier: GPL-2.0-only // Copyright (c) 2020, Nikolay Aleksandrov <nikolay@cumulusnetworks.com> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/rtnetlink.h> #include <linux/slab.h> #include <net/ip_tunnels.h> #include "br_private.h" #include "br_private_tunnel.h" static bool __vlan_tun_put(struct sk_buff *skb, const struct net_bridge_vlan *v) { __be32 tid = tunnel_id_to_key32(v->tinfo.tunnel_id); struct nlattr *nest; if (!v->tinfo.tunnel_dst) return true; nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY_TUNNEL_INFO); if (!nest) return false; if
(nla_put_u32(skb, BRIDGE_VLANDB_TINFO_ID, be32_to_cpu(tid))) { nla_nest_cancel(skb, nest); return false; } nla_nest_end(skb, nest); return true; } static bool __vlan_tun_can_enter_range(const struct net_bridge_vlan *v_curr, const struct net_bridge_vlan *range_end) { return (!v_curr->tinfo.tunnel_dst && !range_end->tinfo.tunnel_dst) || vlan_tunid_inrange(v_curr, range_end); } /* check if the options' state of v_curr allow it to enter the range */ bool br_vlan_opts_eq_range(const struct net_bridge_vlan *v_curr, const struct net_bridge_vlan *range_end) { u8 range_mc_rtr = br_vlan_multicast_router(range_end); u8 curr_mc_rtr = br_vlan_multicast_router(v_curr); return v_curr->state == range_end->state && __vlan_tun_can_enter_range(v_curr, range_end) && curr_mc_rtr == range_mc_rtr; } bool br_vlan_opts_fill(struct sk_buff *skb, const struct net_bridge_vlan *v, const struct net_bridge_port *p) { if (nla_put_u8(skb, BRIDGE_VLANDB_ENTRY_STATE, br_vlan_get_state(v)) || !__vlan_tun_put(skb, v) || nla_put_u8(skb, BRIDGE_VLANDB_ENTRY_NEIGH_SUPPRESS, !!(v->priv_flags & BR_VLFLAG_NEIGH_SUPPRESS_ENABLED))) return false; #ifdef CONFIG_BRIDGE_IGMP_SNOOPING if (nla_put_u8(skb, BRIDGE_VLANDB_ENTRY_MCAST_ROUTER, br_vlan_multicast_router(v))) return false; if (p && !br_multicast_port_ctx_vlan_disabled(&v->port_mcast_ctx) && (nla_put_u32(skb, BRIDGE_VLANDB_ENTRY_MCAST_N_GROUPS, br_multicast_ngroups_get(&v->port_mcast_ctx)) || nla_put_u32(skb, BRIDGE_VLANDB_ENTRY_MCAST_MAX_GROUPS, br_multicast_ngroups_get_max(&v->port_mcast_ctx)))) return false; #endif return true; } size_t br_vlan_opts_nl_size(void) { return nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_ENTRY_STATE */ + nla_total_size(0) /* BRIDGE_VLANDB_ENTRY_TUNNEL_INFO */ + nla_total_size(sizeof(u32)) /* BRIDGE_VLANDB_TINFO_ID */ #ifdef CONFIG_BRIDGE_IGMP_SNOOPING + nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_ENTRY_MCAST_ROUTER */ + nla_total_size(sizeof(u32)) /* BRIDGE_VLANDB_ENTRY_MCAST_N_GROUPS */ + nla_total_size(sizeof(u32)) /* BRIDGE_VLANDB_ENTRY_MCAST_MAX_GROUPS */ #endif + nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_ENTRY_NEIGH_SUPPRESS */ + 0; } static int br_vlan_modify_state(struct net_bridge_vlan_group *vg, struct net_bridge_vlan *v, u8 state, bool *changed, struct netlink_ext_ack *extack) { struct net_bridge *br; ASSERT_RTNL(); if (state > BR_STATE_BLOCKING) { NL_SET_ERR_MSG_MOD(extack, "Invalid vlan state"); return -EINVAL; } if (br_vlan_is_brentry(v)) br = v->br; else br = v->port->br; if (br->stp_enabled == BR_KERNEL_STP) { NL_SET_ERR_MSG_MOD(extack, "Can't modify vlan state when using kernel STP"); return -EBUSY; } if (br_opt_get(br, BROPT_MST_ENABLED)) { NL_SET_ERR_MSG_MOD(extack, "Can't modify vlan state directly when MST is enabled"); return -EBUSY; } if (v->state == state) return 0; if (v->vid == br_get_pvid(vg)) br_vlan_set_pvid_state(vg, state); br_vlan_set_state(v, state); *changed = true; return 0; } static const struct nla_policy br_vlandb_tinfo_pol[BRIDGE_VLANDB_TINFO_MAX + 1] = { [BRIDGE_VLANDB_TINFO_ID] = { .type = NLA_U32 }, [BRIDGE_VLANDB_TINFO_CMD] = { .type = NLA_U32 }, }; static int br_vlan_modify_tunnel(const struct net_bridge_port *p, struct net_bridge_vlan *v, struct nlattr **tb, bool *changed, struct netlink_ext_ack *extack) { struct nlattr *tun_tb[BRIDGE_VLANDB_TINFO_MAX + 1], *attr; struct bridge_vlan_info *vinfo; u32 tun_id = 0; int cmd, err; if (!p) { NL_SET_ERR_MSG_MOD(extack, "Can't modify tunnel mapping of non-port vlans"); return -EINVAL; } if (!(p->flags & BR_VLAN_TUNNEL)) { NL_SET_ERR_MSG_MOD(extack, "Port doesn't 
have tunnel flag set"); return -EINVAL; } attr = tb[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO]; err = nla_parse_nested(tun_tb, BRIDGE_VLANDB_TINFO_MAX, attr, br_vlandb_tinfo_pol, extack); if (err) return err; if (!tun_tb[BRIDGE_VLANDB_TINFO_CMD]) { NL_SET_ERR_MSG_MOD(extack, "Missing tunnel command attribute"); return -ENOENT; } cmd = nla_get_u32(tun_tb[BRIDGE_VLANDB_TINFO_CMD]); switch (cmd) { case RTM_SETLINK: if (!tun_tb[BRIDGE_VLANDB_TINFO_ID]) { NL_SET_ERR_MSG_MOD(extack, "Missing tunnel id attribute"); return -ENOENT; } /* when working on vlan ranges this is the starting tunnel id */ tun_id = nla_get_u32(tun_tb[BRIDGE_VLANDB_TINFO_ID]); /* vlan info attr is guaranteed by br_vlan_rtm_process_one */ vinfo = nla_data(tb[BRIDGE_VLANDB_ENTRY_INFO]); /* tunnel ids are mapped to each vlan in increasing order, * the starting vlan is in BRIDGE_VLANDB_ENTRY_INFO and v is the * current vlan, so we compute: tun_id + v - vinfo->vid */ tun_id += v->vid - vinfo->vid; break; case RTM_DELLINK: break; default: NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel command"); return -EINVAL; } return br_vlan_tunnel_info(p, cmd, v->vid, tun_id, changed); } static int br_vlan_process_one_opts(const struct net_bridge *br, const struct net_bridge_port *p, struct net_bridge_vlan_group *vg, struct net_bridge_vlan *v, struct nlattr **tb, bool *changed, struct netlink_ext_ack *extack) { int err; *changed = false; if (tb[BRIDGE_VLANDB_ENTRY_STATE]) { u8 state = nla_get_u8(tb[BRIDGE_VLANDB_ENTRY_STATE]); err = br_vlan_modify_state(vg, v, state, changed, extack); if (err) return err; } if (tb[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO]) { err = br_vlan_modify_tunnel(p, v, tb, changed, extack); if (err) return err; } #ifdef CONFIG_BRIDGE_IGMP_SNOOPING if (tb[BRIDGE_VLANDB_ENTRY_MCAST_ROUTER]) { u8 val; val = nla_get_u8(tb[BRIDGE_VLANDB_ENTRY_MCAST_ROUTER]); err = br_multicast_set_vlan_router(v, val); if (err) return err; *changed = true; } if (tb[BRIDGE_VLANDB_ENTRY_MCAST_MAX_GROUPS]) { u32 val; if (!p) { NL_SET_ERR_MSG_MOD(extack, "Can't set mcast_max_groups for non-port vlans"); return -EINVAL; } if (br_multicast_port_ctx_vlan_disabled(&v->port_mcast_ctx)) { NL_SET_ERR_MSG_MOD(extack, "Multicast snooping disabled on this VLAN"); return -EINVAL; } val = nla_get_u32(tb[BRIDGE_VLANDB_ENTRY_MCAST_MAX_GROUPS]); br_multicast_ngroups_set_max(&v->port_mcast_ctx, val); *changed = true; } #endif if (tb[BRIDGE_VLANDB_ENTRY_NEIGH_SUPPRESS]) { bool enabled = v->priv_flags & BR_VLFLAG_NEIGH_SUPPRESS_ENABLED; bool val = nla_get_u8(tb[BRIDGE_VLANDB_ENTRY_NEIGH_SUPPRESS]); if (!p) { NL_SET_ERR_MSG_MOD(extack, "Can't set neigh_suppress for non-port vlans"); return -EINVAL; } if (val != enabled) { v->priv_flags ^= BR_VLFLAG_NEIGH_SUPPRESS_ENABLED; *changed = true; } } return 0; } int br_vlan_process_options(const struct net_bridge *br, const struct net_bridge_port *p, struct net_bridge_vlan *range_start, struct net_bridge_vlan *range_end, struct nlattr **tb, struct netlink_ext_ack *extack) { struct net_bridge_vlan *v, *curr_start = NULL, *curr_end = NULL; struct net_bridge_vlan_group *vg; int vid, err = 0; u16 pvid; if (p) vg = nbp_vlan_group(p); else vg = br_vlan_group(br); if (!range_start || !br_vlan_should_use(range_start)) { NL_SET_ERR_MSG_MOD(extack, "Vlan range start doesn't exist, can't process options"); return -ENOENT; } if (!range_end || !br_vlan_should_use(range_end)) { NL_SET_ERR_MSG_MOD(extack, "Vlan range end doesn't exist, can't process options"); return -ENOENT; } pvid = br_get_pvid(vg); for (vid = range_start->vid; vid <= range_end->vid; 
vid++) { bool changed = false; v = br_vlan_find(vg, vid); if (!v || !br_vlan_should_use(v)) { NL_SET_ERR_MSG_MOD(extack, "Vlan in range doesn't exist, can't process options"); err = -ENOENT; break; } err = br_vlan_process_one_opts(br, p, vg, v, tb, &changed, extack); if (err) break; if (changed) { /* vlan options changed, check for range */ if (!curr_start) { curr_start = v; curr_end = v; continue; } if (v->vid == pvid || !br_vlan_can_enter_range(v, curr_end)) { br_vlan_notify(br, p, curr_start->vid, curr_end->vid, RTM_NEWVLAN); curr_start = v; } curr_end = v; } else { /* nothing changed and nothing to notify yet */ if (!curr_start) continue; br_vlan_notify(br, p, curr_start->vid, curr_end->vid, RTM_NEWVLAN); curr_start = NULL; curr_end = NULL; } } if (curr_start) br_vlan_notify(br, p, curr_start->vid, curr_end->vid, RTM_NEWVLAN); return err; } bool br_vlan_global_opts_can_enter_range(const struct net_bridge_vlan *v_curr, const struct net_bridge_vlan *r_end) { return v_curr->vid - r_end->vid == 1 && v_curr->msti == r_end->msti && ((v_curr->priv_flags ^ r_end->priv_flags) & BR_VLFLAG_GLOBAL_MCAST_ENABLED) == 0 && br_multicast_ctx_options_equal(&v_curr->br_mcast_ctx, &r_end->br_mcast_ctx); } bool br_vlan_global_opts_fill(struct sk_buff *skb, u16 vid, u16 vid_range, const struct net_bridge_vlan *v_opts) { struct nlattr *nest2 __maybe_unused; u64 clockval __maybe_unused; struct nlattr *nest; nest = nla_nest_start(skb, BRIDGE_VLANDB_GLOBAL_OPTIONS); if (!nest) return false; if (nla_put_u16(skb, BRIDGE_VLANDB_GOPTS_ID, vid)) goto out_err; if (vid_range && vid < vid_range && nla_put_u16(skb, BRIDGE_VLANDB_GOPTS_RANGE, vid_range)) goto out_err; #ifdef CONFIG_BRIDGE_IGMP_SNOOPING if (nla_put_u8(skb, BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING, !!(v_opts->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED)) || nla_put_u8(skb, BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION, v_opts->br_mcast_ctx.multicast_igmp_version) || nla_put_u32(skb, BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT, v_opts->br_mcast_ctx.multicast_last_member_count) || nla_put_u32(skb, BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT, v_opts->br_mcast_ctx.multicast_startup_query_count) || nla_put_u8(skb, BRIDGE_VLANDB_GOPTS_MCAST_QUERIER, v_opts->br_mcast_ctx.multicast_querier) || br_multicast_dump_querier_state(skb, &v_opts->br_mcast_ctx, BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_STATE)) goto out_err; clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_last_member_interval); if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL, clockval, BRIDGE_VLANDB_GOPTS_PAD)) goto out_err; clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_membership_interval); if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL, clockval, BRIDGE_VLANDB_GOPTS_PAD)) goto out_err; clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_querier_interval); if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL, clockval, BRIDGE_VLANDB_GOPTS_PAD)) goto out_err; clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_query_interval); if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL, clockval, BRIDGE_VLANDB_GOPTS_PAD)) goto out_err; clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_query_response_interval); if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL, clockval, BRIDGE_VLANDB_GOPTS_PAD)) goto out_err; clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_startup_query_interval); if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL, clockval, 
BRIDGE_VLANDB_GOPTS_PAD)) goto out_err; if (br_rports_have_mc_router(&v_opts->br_mcast_ctx)) { nest2 = nla_nest_start(skb, BRIDGE_VLANDB_GOPTS_MCAST_ROUTER_PORTS); if (!nest2) goto out_err; rcu_read_lock(); if (br_rports_fill_info(skb, &v_opts->br_mcast_ctx)) { rcu_read_unlock(); nla_nest_cancel(skb, nest2); goto out_err; } rcu_read_unlock(); nla_nest_end(skb, nest2); } #if IS_ENABLED(CONFIG_IPV6) if (nla_put_u8(skb, BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION, v_opts->br_mcast_ctx.multicast_mld_version)) goto out_err; #endif #endif if (nla_put_u16(skb, BRIDGE_VLANDB_GOPTS_MSTI, v_opts->msti)) goto out_err; nla_nest_end(skb, nest); return true; out_err: nla_nest_cancel(skb, nest); return false; } static size_t rtnl_vlan_global_opts_nlmsg_size(const struct net_bridge_vlan *v) { return NLMSG_ALIGN(sizeof(struct br_vlan_msg)) + nla_total_size(0) /* BRIDGE_VLANDB_GLOBAL_OPTIONS */ + nla_total_size(sizeof(u16)) /* BRIDGE_VLANDB_GOPTS_ID */ #ifdef CONFIG_BRIDGE_IGMP_SNOOPING + nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING */ + nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION */ + nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION */ + nla_total_size(sizeof(u32)) /* BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT */ + nla_total_size(sizeof(u32)) /* BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT */ + nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL */ + nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL */ + nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL */ + nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL */ + nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL */ + nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL */ + nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_GOPTS_MCAST_QUERIER */ + br_multicast_querier_state_size() /* BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_STATE */ + nla_total_size(0) /* BRIDGE_VLANDB_GOPTS_MCAST_ROUTER_PORTS */ + br_rports_size(&v->br_mcast_ctx) /* BRIDGE_VLANDB_GOPTS_MCAST_ROUTER_PORTS */ #endif + nla_total_size(sizeof(u16)) /* BRIDGE_VLANDB_GOPTS_MSTI */ + nla_total_size(sizeof(u16)); /* BRIDGE_VLANDB_GOPTS_RANGE */ } static void br_vlan_global_opts_notify(const struct net_bridge *br, u16 vid, u16 vid_range) { struct net_bridge_vlan *v; struct br_vlan_msg *bvm; struct nlmsghdr *nlh; struct sk_buff *skb; int err = -ENOBUFS; /* right now notifications are done only with rtnl held */ ASSERT_RTNL(); /* need to find the vlan due to flags/options */ v = br_vlan_find(br_vlan_group(br), vid); if (!v) return; skb = nlmsg_new(rtnl_vlan_global_opts_nlmsg_size(v), GFP_KERNEL); if (!skb) goto out_err; err = -EMSGSIZE; nlh = nlmsg_put(skb, 0, 0, RTM_NEWVLAN, sizeof(*bvm), 0); if (!nlh) goto out_err; bvm = nlmsg_data(nlh); memset(bvm, 0, sizeof(*bvm)); bvm->family = AF_BRIDGE; bvm->ifindex = br->dev->ifindex; if (!br_vlan_global_opts_fill(skb, vid, vid_range, v)) goto out_err; nlmsg_end(skb, nlh); rtnl_notify(skb, dev_net(br->dev), 0, RTNLGRP_BRVLAN, NULL, GFP_KERNEL); return; out_err: rtnl_set_sk_err(dev_net(br->dev), RTNLGRP_BRVLAN, err); kfree_skb(skb); } static int br_vlan_process_global_one_opts(const struct net_bridge *br, struct net_bridge_vlan_group *vg, struct net_bridge_vlan *v, struct nlattr **tb, bool *changed, struct netlink_ext_ack *extack) { int err __maybe_unused; *changed = false; #ifdef CONFIG_BRIDGE_IGMP_SNOOPING if (tb[BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING]) { u8 mc_snooping; mc_snooping = 
nla_get_u8(tb[BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING]); if (br_multicast_toggle_global_vlan(v, !!mc_snooping)) *changed = true; } if (tb[BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION]) { u8 ver; ver = nla_get_u8(tb[BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION]); err = br_multicast_set_igmp_version(&v->br_mcast_ctx, ver); if (err) return err; *changed = true; } if (tb[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT]) { u32 cnt; cnt = nla_get_u32(tb[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT]); v->br_mcast_ctx.multicast_last_member_count = cnt; *changed = true; } if (tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT]) { u32 cnt; cnt = nla_get_u32(tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT]); v->br_mcast_ctx.multicast_startup_query_count = cnt; *changed = true; } if (tb[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL]) { u64 val; val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL]); v->br_mcast_ctx.multicast_last_member_interval = clock_t_to_jiffies(val); *changed = true; } if (tb[BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL]) { u64 val; val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL]); v->br_mcast_ctx.multicast_membership_interval = clock_t_to_jiffies(val); *changed = true; } if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL]) { u64 val; val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL]); v->br_mcast_ctx.multicast_querier_interval = clock_t_to_jiffies(val); *changed = true; } if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL]) { u64 val; val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL]); br_multicast_set_query_intvl(&v->br_mcast_ctx, val); *changed = true; } if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL]) { u64 val; val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL]); v->br_mcast_ctx.multicast_query_response_interval = clock_t_to_jiffies(val); *changed = true; } if (tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL]) { u64 val; val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL]); br_multicast_set_startup_query_intvl(&v->br_mcast_ctx, val); *changed = true; } if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER]) { u8 val; val = nla_get_u8(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER]); err = br_multicast_set_querier(&v->br_mcast_ctx, val); if (err) return err; *changed = true; } #if IS_ENABLED(CONFIG_IPV6) if (tb[BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION]) { u8 ver; ver = nla_get_u8(tb[BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION]); err = br_multicast_set_mld_version(&v->br_mcast_ctx, ver); if (err) return err; *changed = true; } #endif #endif if (tb[BRIDGE_VLANDB_GOPTS_MSTI]) { u16 msti; msti = nla_get_u16(tb[BRIDGE_VLANDB_GOPTS_MSTI]); err = br_mst_vlan_set_msti(v, msti); if (err) return err; *changed = true; } return 0; } static const struct nla_policy br_vlan_db_gpol[BRIDGE_VLANDB_GOPTS_MAX + 1] = { [BRIDGE_VLANDB_GOPTS_ID] = { .type = NLA_U16 }, [BRIDGE_VLANDB_GOPTS_RANGE] = { .type = NLA_U16 }, [BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING] = { .type = NLA_U8 }, [BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION] = { .type = NLA_U8 }, [BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL] = { .type = NLA_U64 }, [BRIDGE_VLANDB_GOPTS_MCAST_QUERIER] = { .type = NLA_U8 }, [BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION] = { .type = NLA_U8 }, [BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT] = { .type = NLA_U32 }, [BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT] = { .type = NLA_U32 }, [BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL] = { .type = NLA_U64 }, [BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL] = { .type = NLA_U64 }, [BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL] = { .type = NLA_U64 }, 
[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 }, [BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 }, [BRIDGE_VLANDB_GOPTS_MSTI] = NLA_POLICY_MAX(NLA_U16, VLAN_N_VID - 1), }; int br_vlan_rtm_process_global_options(struct net_device *dev, const struct nlattr *attr, int cmd, struct netlink_ext_ack *extack) { struct net_bridge_vlan *v, *curr_start = NULL, *curr_end = NULL; struct nlattr *tb[BRIDGE_VLANDB_GOPTS_MAX + 1]; struct net_bridge_vlan_group *vg; u16 vid, vid_range = 0; struct net_bridge *br; int err = 0; if (cmd != RTM_NEWVLAN) { NL_SET_ERR_MSG_MOD(extack, "Global vlan options support only set operation"); return -EINVAL; } if (!netif_is_bridge_master(dev)) { NL_SET_ERR_MSG_MOD(extack, "Global vlan options can only be set on bridge device"); return -EINVAL; } br = netdev_priv(dev); vg = br_vlan_group(br); if (WARN_ON(!vg)) return -ENODEV; err = nla_parse_nested(tb, BRIDGE_VLANDB_GOPTS_MAX, attr, br_vlan_db_gpol, extack); if (err) return err; if (!tb[BRIDGE_VLANDB_GOPTS_ID]) { NL_SET_ERR_MSG_MOD(extack, "Missing vlan entry id"); return -EINVAL; } vid = nla_get_u16(tb[BRIDGE_VLANDB_GOPTS_ID]); if (!br_vlan_valid_id(vid, extack)) return -EINVAL; if (tb[BRIDGE_VLANDB_GOPTS_RANGE]) { vid_range = nla_get_u16(tb[BRIDGE_VLANDB_GOPTS_RANGE]); if (!br_vlan_valid_id(vid_range, extack)) return -EINVAL; if (vid >= vid_range) { NL_SET_ERR_MSG_MOD(extack, "End vlan id is less than or equal to start vlan id"); return -EINVAL; } } else { vid_range = vid; } for (; vid <= vid_range; vid++) { bool changed = false; v = br_vlan_find(vg, vid); if (!v) { NL_SET_ERR_MSG_MOD(extack, "Vlan in range doesn't exist, can't process global options"); err = -ENOENT; break; } err = br_vlan_process_global_one_opts(br, vg, v, tb, &changed, extack); if (err) break; if (changed) { /* vlan options changed, check for range */ if (!curr_start) { curr_start = v; curr_end = v; continue; } if (!br_vlan_global_opts_can_enter_range(v, curr_end)) { br_vlan_global_opts_notify(br, curr_start->vid, curr_end->vid); curr_start = v; } curr_end = v; } else { /* nothing changed and nothing to notify yet */ if (!curr_start) continue; br_vlan_global_opts_notify(br, curr_start->vid, curr_end->vid); curr_start = NULL; curr_end = NULL; } } if (curr_start) br_vlan_global_opts_notify(br, curr_start->vid, curr_end->vid); return err; }
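/*
 * Standalone illustration (not part of the file above): the per-VLAN tunnel
 * id mapping used in br_vlan_modify_tunnel(), where each VLAN in a range
 * gets the starting tunnel id plus its offset from the range start
 * (tun_id += v->vid - vinfo->vid).  Userspace demo with made-up numbers.
 */
#include <stdio.h>

static unsigned int map_tunid(unsigned int base_tunid,
			      unsigned short range_start_vid,
			      unsigned short vid)
{
	/* mirrors: tun_id + (current vid - starting vid of the range) */
	return base_tunid + (vid - range_start_vid);
}

int main(void)
{
	unsigned short vid;

	/* e.g. VLANs 100-104 configured with starting tunnel id 1000 */
	for (vid = 100; vid <= 104; vid++)
		printf("vlan %u -> tunnel id %u\n", vid,
		       map_tunid(1000, 100, vid));
	return 0;
}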
// SPDX-License-Identifier: GPL-2.0 /* * drivers/base/power/runtime.c - Helper functions for device runtime PM * * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
* Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu> */ #include <linux/sched/mm.h> #include <linux/ktime.h> #include <linux/hrtimer.h> #include <linux/export.h> #include <linux/pm_runtime.h> #include <linux/pm_wakeirq.h> #include <linux/rculist.h> #include <trace/events/rpm.h> #include "../base.h" #include "power.h" typedef int (*pm_callback_t)(struct device *); static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset) { pm_callback_t cb; const struct dev_pm_ops *ops; if (dev->pm_domain) ops = &dev->pm_domain->ops; else if (dev->type && dev->type->pm) ops = dev->type->pm; else if (dev->class && dev->class->pm) ops = dev->class->pm; else if (dev->bus && dev->bus->pm) ops = dev->bus->pm; else ops = NULL; if (ops) cb = *(pm_callback_t *)((void *)ops + cb_offset); else cb = NULL; if (!cb && dev->driver && dev->driver->pm) cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset); return cb; } #define RPM_GET_CALLBACK(dev, callback) \ __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback)) static int rpm_resume(struct device *dev, int rpmflags); static int rpm_suspend(struct device *dev, int rpmflags); /** * update_pm_runtime_accounting - Update the time accounting of power states * @dev: Device to update the accounting for * * In order to be able to have time accounting of the various power states * (as used by programs such as PowerTOP to show the effectiveness of runtime * PM), we need to track the time spent in each state. * update_pm_runtime_accounting must be called each time before the * runtime_status field is updated, to account the time in the old state * correctly. */ static void update_pm_runtime_accounting(struct device *dev) { u64 now, last, delta; if (dev->power.disable_depth > 0) return; last = dev->power.accounting_timestamp; now = ktime_get_mono_fast_ns(); dev->power.accounting_timestamp = now; /* * Because ktime_get_mono_fast_ns() is not monotonic during * timekeeping updates, ensure that 'now' is after the last saved * timesptamp. */ if (now < last) return; delta = now - last; if (dev->power.runtime_status == RPM_SUSPENDED) dev->power.suspended_time += delta; else dev->power.active_time += delta; } static void __update_runtime_status(struct device *dev, enum rpm_status status) { update_pm_runtime_accounting(dev); trace_rpm_status(dev, status); dev->power.runtime_status = status; } static u64 rpm_get_accounted_time(struct device *dev, bool suspended) { u64 time; unsigned long flags; spin_lock_irqsave(&dev->power.lock, flags); update_pm_runtime_accounting(dev); time = suspended ? dev->power.suspended_time : dev->power.active_time; spin_unlock_irqrestore(&dev->power.lock, flags); return time; } u64 pm_runtime_active_time(struct device *dev) { return rpm_get_accounted_time(dev, false); } u64 pm_runtime_suspended_time(struct device *dev) { return rpm_get_accounted_time(dev, true); } EXPORT_SYMBOL_GPL(pm_runtime_suspended_time); /** * pm_runtime_deactivate_timer - Deactivate given device's suspend timer. * @dev: Device to handle. */ static void pm_runtime_deactivate_timer(struct device *dev) { if (dev->power.timer_expires > 0) { hrtimer_try_to_cancel(&dev->power.suspend_timer); dev->power.timer_expires = 0; } } /** * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests. * @dev: Device to handle. */ static void pm_runtime_cancel_pending(struct device *dev) { pm_runtime_deactivate_timer(dev); /* * In case there's a request pending, make sure its work function will * return without doing anything. 
*/ dev->power.request = RPM_REQ_NONE; } /* * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time. * @dev: Device to handle. * * Compute the autosuspend-delay expiration time based on the device's * power.last_busy time. If the delay has already expired or is disabled * (negative) or the power.use_autosuspend flag isn't set, return 0. * Otherwise return the expiration time in nanoseconds (adjusted to be nonzero). * * This function may be called either with or without dev->power.lock held. * Either way it can be racy, since power.last_busy may be updated at any time. */ u64 pm_runtime_autosuspend_expiration(struct device *dev) { int autosuspend_delay; u64 expires; if (!dev->power.use_autosuspend) return 0; autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay); if (autosuspend_delay < 0) return 0; expires = READ_ONCE(dev->power.last_busy); expires += (u64)autosuspend_delay * NSEC_PER_MSEC; if (expires > ktime_get_mono_fast_ns()) return expires; /* Expires in the future */ return 0; } EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration); static int dev_memalloc_noio(struct device *dev, void *data) { return dev->power.memalloc_noio; } /* * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag. * @dev: Device to handle. * @enable: True for setting the flag and False for clearing the flag. * * Set the flag for all devices in the path from the device to the * root device in the device tree if @enable is true, otherwise clear * the flag for devices in the path whose siblings don't set the flag. * * The function should only be called by block device, or network * device driver for solving the deadlock problem during runtime * resume/suspend: * * If memory allocation with GFP_KERNEL is called inside runtime * resume/suspend callback of any one of its ancestors(or the * block device itself), the deadlock may be triggered inside the * memory allocation since it might not complete until the block * device becomes active and the involed page I/O finishes. The * situation is pointed out first by Alan Stern. Network device * are involved in iSCSI kind of situation. * * The lock of dev_hotplug_mutex is held in the function for handling * hotplug race because pm_runtime_set_memalloc_noio() may be called * in async probe(). * * The function should be called between device_add() and device_del() * on the affected device(block/network device). */ void pm_runtime_set_memalloc_noio(struct device *dev, bool enable) { static DEFINE_MUTEX(dev_hotplug_mutex); mutex_lock(&dev_hotplug_mutex); for (;;) { bool enabled; /* hold power lock since bitfield is not SMP-safe. */ spin_lock_irq(&dev->power.lock); enabled = dev->power.memalloc_noio; dev->power.memalloc_noio = enable; spin_unlock_irq(&dev->power.lock); /* * not need to enable ancestors any more if the device * has been enabled. */ if (enabled && enable) break; dev = dev->parent; /* * clear flag of the parent device only if all the * children don't set the flag because ancestor's * flag was set by any one of the descendants. */ if (!dev || (!enable && device_for_each_child(dev, NULL, dev_memalloc_noio))) break; } mutex_unlock(&dev_hotplug_mutex); } EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio); /** * rpm_check_suspend_allowed - Test whether a device may be suspended. * @dev: Device to test. 
*/ static int rpm_check_suspend_allowed(struct device *dev) { int retval = 0; if (dev->power.runtime_error) retval = -EINVAL; else if (dev->power.disable_depth > 0) retval = -EACCES; else if (atomic_read(&dev->power.usage_count)) retval = -EAGAIN; else if (!dev->power.ignore_children && atomic_read(&dev->power.child_count)) retval = -EBUSY; /* Pending resume requests take precedence over suspends. */ else if ((dev->power.deferred_resume && dev->power.runtime_status == RPM_SUSPENDING) || (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME)) retval = -EAGAIN; else if (__dev_pm_qos_resume_latency(dev) == 0) retval = -EPERM; else if (dev->power.runtime_status == RPM_SUSPENDED) retval = 1; return retval; } static int rpm_get_suppliers(struct device *dev) { struct device_link *link; list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, device_links_read_lock_held()) { int retval; if (!(link->flags & DL_FLAG_PM_RUNTIME)) continue; retval = pm_runtime_get_sync(link->supplier); /* Ignore suppliers with disabled runtime PM. */ if (retval < 0 && retval != -EACCES) { pm_runtime_put_noidle(link->supplier); return retval; } refcount_inc(&link->rpm_active); } return 0; } /** * pm_runtime_release_supplier - Drop references to device link's supplier. * @link: Target device link. * * Drop all runtime PM references associated with @link to its supplier device. */ void pm_runtime_release_supplier(struct device_link *link) { struct device *supplier = link->supplier; /* * The additional power.usage_count check is a safety net in case * the rpm_active refcount becomes saturated, in which case * refcount_dec_not_one() would return true forever, but it is not * strictly necessary. */ while (refcount_dec_not_one(&link->rpm_active) && atomic_read(&supplier->power.usage_count) > 0) pm_runtime_put_noidle(supplier); } static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend) { struct device_link *link; list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, device_links_read_lock_held()) { pm_runtime_release_supplier(link); if (try_to_suspend) pm_request_idle(link->supplier); } } static void rpm_put_suppliers(struct device *dev) { __rpm_put_suppliers(dev, true); } static void rpm_suspend_suppliers(struct device *dev) { struct device_link *link; int idx = device_links_read_lock(); list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, device_links_read_lock_held()) pm_request_idle(link->supplier); device_links_read_unlock(idx); } /** * __rpm_callback - Run a given runtime PM callback for a given device. * @cb: Runtime PM callback to run. * @dev: Device to run the callback for. */ static int __rpm_callback(int (*cb)(struct device *), struct device *dev) __releases(&dev->power.lock) __acquires(&dev->power.lock) { int retval = 0, idx; bool use_links = dev->power.links_count > 0; if (dev->power.irq_safe) { spin_unlock(&dev->power.lock); } else { spin_unlock_irq(&dev->power.lock); /* * Resume suppliers if necessary. * * The device's runtime PM status cannot change until this * routine returns, so it is safe to read the status outside of * the lock. 
*/ if (use_links && dev->power.runtime_status == RPM_RESUMING) { idx = device_links_read_lock(); retval = rpm_get_suppliers(dev); if (retval) { rpm_put_suppliers(dev); goto fail; } device_links_read_unlock(idx); } } if (cb) retval = cb(dev); if (dev->power.irq_safe) { spin_lock(&dev->power.lock); } else { /* * If the device is suspending and the callback has returned * success, drop the usage counters of the suppliers that have * been reference counted on its resume. * * Do that if resume fails too. */ if (use_links && ((dev->power.runtime_status == RPM_SUSPENDING && !retval) || (dev->power.runtime_status == RPM_RESUMING && retval))) { idx = device_links_read_lock(); __rpm_put_suppliers(dev, false); fail: device_links_read_unlock(idx); } spin_lock_irq(&dev->power.lock); } return retval; } /** * rpm_callback - Run a given runtime PM callback for a given device. * @cb: Runtime PM callback to run. * @dev: Device to run the callback for. */ static int rpm_callback(int (*cb)(struct device *), struct device *dev) { int retval; if (dev->power.memalloc_noio) { unsigned int noio_flag; /* * Deadlock might be caused if memory allocation with * GFP_KERNEL happens inside runtime_suspend and * runtime_resume callbacks of one block device's * ancestor or the block device itself. Network * device might be thought as part of iSCSI block * device, so network device and its ancestor should * be marked as memalloc_noio too. */ noio_flag = memalloc_noio_save(); retval = __rpm_callback(cb, dev); memalloc_noio_restore(noio_flag); } else { retval = __rpm_callback(cb, dev); } dev->power.runtime_error = retval; return retval != -EACCES ? retval : -EIO; } /** * rpm_idle - Notify device bus type if the device can be suspended. * @dev: Device to notify the bus type about. * @rpmflags: Flag bits. * * Check if the device's runtime PM status allows it to be suspended. If * another idle notification has been started earlier, return immediately. If * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise * run the ->runtime_idle() callback directly. If the ->runtime_idle callback * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag. * * This function must be called under dev->power.lock with interrupts disabled. */ static int rpm_idle(struct device *dev, int rpmflags) { int (*callback)(struct device *); int retval; trace_rpm_idle(dev, rpmflags); retval = rpm_check_suspend_allowed(dev); if (retval < 0) ; /* Conditions are wrong. */ /* Idle notifications are allowed only in the RPM_ACTIVE state. */ else if (dev->power.runtime_status != RPM_ACTIVE) retval = -EAGAIN; /* * Any pending request other than an idle notification takes * precedence over us, except that the timer may be running. */ else if (dev->power.request_pending && dev->power.request > RPM_REQ_IDLE) retval = -EAGAIN; /* Act as though RPM_NOWAIT is always set. */ else if (dev->power.idle_notification) retval = -EINPROGRESS; if (retval) goto out; /* Pending requests need to be canceled. */ dev->power.request = RPM_REQ_NONE; callback = RPM_GET_CALLBACK(dev, runtime_idle); /* If no callback assume success. */ if (!callback || dev->power.no_callbacks) goto out; /* Carry out an asynchronous or a synchronous idle notification. 
*/ if (rpmflags & RPM_ASYNC) { dev->power.request = RPM_REQ_IDLE; if (!dev->power.request_pending) { dev->power.request_pending = true; queue_work(pm_wq, &dev->power.work); } trace_rpm_return_int(dev, _THIS_IP_, 0); return 0; } dev->power.idle_notification = true; if (dev->power.irq_safe) spin_unlock(&dev->power.lock); else spin_unlock_irq(&dev->power.lock); retval = callback(dev); if (dev->power.irq_safe) spin_lock(&dev->power.lock); else spin_lock_irq(&dev->power.lock); dev->power.idle_notification = false; wake_up_all(&dev->power.wait_queue); out: trace_rpm_return_int(dev, _THIS_IP_, retval); return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO); } /** * rpm_suspend - Carry out runtime suspend of given device. * @dev: Device to suspend. * @rpmflags: Flag bits. * * Check if the device's runtime PM status allows it to be suspended. * Cancel a pending idle notification, autosuspend or suspend. If * another suspend has been started earlier, either return immediately * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC * flags. If the RPM_ASYNC flag is set then queue a suspend request; * otherwise run the ->runtime_suspend() callback directly. When * ->runtime_suspend succeeded, if a deferred resume was requested while * the callback was running then carry it out, otherwise send an idle * notification for its parent (if the suspend succeeded and both * ignore_children of parent->power and irq_safe of dev->power are not set). * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO * flag is set and the next autosuspend-delay expiration time is in the * future, schedule another autosuspend attempt. * * This function must be called under dev->power.lock with interrupts disabled. */ static int rpm_suspend(struct device *dev, int rpmflags) __releases(&dev->power.lock) __acquires(&dev->power.lock) { int (*callback)(struct device *); struct device *parent = NULL; int retval; trace_rpm_suspend(dev, rpmflags); repeat: retval = rpm_check_suspend_allowed(dev); if (retval < 0) goto out; /* Conditions are wrong. */ /* Synchronous suspends are not allowed in the RPM_RESUMING state. */ if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC)) retval = -EAGAIN; if (retval) goto out; /* If the autosuspend_delay time hasn't expired yet, reschedule. */ if ((rpmflags & RPM_AUTO) && dev->power.runtime_status != RPM_SUSPENDING) { u64 expires = pm_runtime_autosuspend_expiration(dev); if (expires != 0) { /* Pending requests need to be canceled. */ dev->power.request = RPM_REQ_NONE; /* * Optimization: If the timer is already running and is * set to expire at or before the autosuspend delay, * avoid the overhead of resetting it. Just let it * expire; pm_suspend_timer_fn() will take care of the * rest. */ if (!(dev->power.timer_expires && dev->power.timer_expires <= expires)) { /* * We add a slack of 25% to gather wakeups * without sacrificing the granularity. */ u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) * (NSEC_PER_MSEC >> 2); dev->power.timer_expires = expires; hrtimer_start_range_ns(&dev->power.suspend_timer, ns_to_ktime(expires), slack, HRTIMER_MODE_ABS); } dev->power.timer_autosuspends = 1; goto out; } } /* Other scheduled or pending requests need to be canceled. 
*/ pm_runtime_cancel_pending(dev); if (dev->power.runtime_status == RPM_SUSPENDING) { DEFINE_WAIT(wait); if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) { retval = -EINPROGRESS; goto out; } if (dev->power.irq_safe) { spin_unlock(&dev->power.lock); cpu_relax(); spin_lock(&dev->power.lock); goto repeat; } /* Wait for the other suspend running in parallel with us. */ for (;;) { prepare_to_wait(&dev->power.wait_queue, &wait, TASK_UNINTERRUPTIBLE); if (dev->power.runtime_status != RPM_SUSPENDING) break; spin_unlock_irq(&dev->power.lock); schedule(); spin_lock_irq(&dev->power.lock); } finish_wait(&dev->power.wait_queue, &wait); goto repeat; } if (dev->power.no_callbacks) goto no_callback; /* Assume success. */ /* Carry out an asynchronous or a synchronous suspend. */ if (rpmflags & RPM_ASYNC) { dev->power.request = (rpmflags & RPM_AUTO) ? RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND; if (!dev->power.request_pending) { dev->power.request_pending = true; queue_work(pm_wq, &dev->power.work); } goto out; } __update_runtime_status(dev, RPM_SUSPENDING); callback = RPM_GET_CALLBACK(dev, runtime_suspend); dev_pm_enable_wake_irq_check(dev, true); retval = rpm_callback(callback, dev); if (retval) goto fail; dev_pm_enable_wake_irq_complete(dev); no_callback: __update_runtime_status(dev, RPM_SUSPENDED); pm_runtime_deactivate_timer(dev); if (dev->parent) { parent = dev->parent; atomic_add_unless(&parent->power.child_count, -1, 0); } wake_up_all(&dev->power.wait_queue); if (dev->power.deferred_resume) { dev->power.deferred_resume = false; rpm_resume(dev, 0); retval = -EAGAIN; goto out; } if (dev->power.irq_safe) goto out; /* Maybe the parent is now able to suspend. */ if (parent && !parent->power.ignore_children) { spin_unlock(&dev->power.lock); spin_lock(&parent->power.lock); rpm_idle(parent, RPM_ASYNC); spin_unlock(&parent->power.lock); spin_lock(&dev->power.lock); } /* Maybe the suppliers are now able to suspend. */ if (dev->power.links_count > 0) { spin_unlock_irq(&dev->power.lock); rpm_suspend_suppliers(dev); spin_lock_irq(&dev->power.lock); } out: trace_rpm_return_int(dev, _THIS_IP_, retval); return retval; fail: dev_pm_disable_wake_irq_check(dev, true); __update_runtime_status(dev, RPM_ACTIVE); dev->power.deferred_resume = false; wake_up_all(&dev->power.wait_queue); if (retval == -EAGAIN || retval == -EBUSY) { dev->power.runtime_error = 0; /* * If the callback routine failed an autosuspend, and * if the last_busy time has been updated so that there * is a new autosuspend expiration time, automatically * reschedule another autosuspend. */ if ((rpmflags & RPM_AUTO) && pm_runtime_autosuspend_expiration(dev) != 0) goto repeat; } else { pm_runtime_cancel_pending(dev); } goto out; } /** * rpm_resume - Carry out runtime resume of given device. * @dev: Device to resume. * @rpmflags: Flag bits. * * Check if the device's runtime PM status allows it to be resumed. Cancel * any scheduled or pending requests. If another resume has been started * earlier, either return immediately or wait for it to finish, depending on the * RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in * parallel with this function, either tell the other process to resume after * suspending (deferred_resume) or wait for it to finish. If the RPM_ASYNC * flag is set then queue a resume request; otherwise run the * ->runtime_resume() callback directly. Queue an idle notification for the * device if the resume succeeded. * * This function must be called under dev->power.lock with interrupts disabled. 
*/ static int rpm_resume(struct device *dev, int rpmflags) __releases(&dev->power.lock) __acquires(&dev->power.lock) { int (*callback)(struct device *); struct device *parent = NULL; int retval = 0; trace_rpm_resume(dev, rpmflags); repeat: if (dev->power.runtime_error) { retval = -EINVAL; } else if (dev->power.disable_depth > 0) { if (dev->power.runtime_status == RPM_ACTIVE && dev->power.last_status == RPM_ACTIVE) retval = 1; else retval = -EACCES; } if (retval) goto out; /* * Other scheduled or pending requests need to be canceled. Small * optimization: If an autosuspend timer is running, leave it running * rather than cancelling it now only to restart it again in the near * future. */ dev->power.request = RPM_REQ_NONE; if (!dev->power.timer_autosuspends) pm_runtime_deactivate_timer(dev); if (dev->power.runtime_status == RPM_ACTIVE) { retval = 1; goto out; } if (dev->power.runtime_status == RPM_RESUMING || dev->power.runtime_status == RPM_SUSPENDING) { DEFINE_WAIT(wait); if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) { if (dev->power.runtime_status == RPM_SUSPENDING) { dev->power.deferred_resume = true; if (rpmflags & RPM_NOWAIT) retval = -EINPROGRESS; } else { retval = -EINPROGRESS; } goto out; } if (dev->power.irq_safe) { spin_unlock(&dev->power.lock); cpu_relax(); spin_lock(&dev->power.lock); goto repeat; } /* Wait for the operation carried out in parallel with us. */ for (;;) { prepare_to_wait(&dev->power.wait_queue, &wait, TASK_UNINTERRUPTIBLE); if (dev->power.runtime_status != RPM_RESUMING && dev->power.runtime_status != RPM_SUSPENDING) break; spin_unlock_irq(&dev->power.lock); schedule(); spin_lock_irq(&dev->power.lock); } finish_wait(&dev->power.wait_queue, &wait); goto repeat; } /* * See if we can skip waking up the parent. This is safe only if * power.no_callbacks is set, because otherwise we don't know whether * the resume will actually succeed. */ if (dev->power.no_callbacks && !parent && dev->parent) { spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING); if (dev->parent->power.disable_depth > 0 || dev->parent->power.ignore_children || dev->parent->power.runtime_status == RPM_ACTIVE) { atomic_inc(&dev->parent->power.child_count); spin_unlock(&dev->parent->power.lock); retval = 1; goto no_callback; /* Assume success. */ } spin_unlock(&dev->parent->power.lock); } /* Carry out an asynchronous or a synchronous resume. */ if (rpmflags & RPM_ASYNC) { dev->power.request = RPM_REQ_RESUME; if (!dev->power.request_pending) { dev->power.request_pending = true; queue_work(pm_wq, &dev->power.work); } retval = 0; goto out; } if (!parent && dev->parent) { /* * Increment the parent's usage counter and resume it if * necessary. Not needed if dev is irq-safe; then the * parent is permanently resumed. */ parent = dev->parent; if (dev->power.irq_safe) goto skip_parent; spin_unlock(&dev->power.lock); pm_runtime_get_noresume(parent); spin_lock(&parent->power.lock); /* * Resume the parent if it has runtime PM enabled and not been * set to ignore its children. */ if (!parent->power.disable_depth && !parent->power.ignore_children) { rpm_resume(parent, 0); if (parent->power.runtime_status != RPM_ACTIVE) retval = -EBUSY; } spin_unlock(&parent->power.lock); spin_lock(&dev->power.lock); if (retval) goto out; goto repeat; } skip_parent: if (dev->power.no_callbacks) goto no_callback; /* Assume success. 
*/ __update_runtime_status(dev, RPM_RESUMING); callback = RPM_GET_CALLBACK(dev, runtime_resume); dev_pm_disable_wake_irq_check(dev, false); retval = rpm_callback(callback, dev); if (retval) { __update_runtime_status(dev, RPM_SUSPENDED); pm_runtime_cancel_pending(dev); dev_pm_enable_wake_irq_check(dev, false); } else { no_callback: __update_runtime_status(dev, RPM_ACTIVE); pm_runtime_mark_last_busy(dev); if (parent) atomic_inc(&parent->power.child_count); } wake_up_all(&dev->power.wait_queue); if (retval >= 0) rpm_idle(dev, RPM_ASYNC); out: if (parent && !dev->power.irq_safe) { spin_unlock_irq(&dev->power.lock); pm_runtime_put(parent); spin_lock_irq(&dev->power.lock); } trace_rpm_return_int(dev, _THIS_IP_, retval); return retval; } /** * pm_runtime_work - Universal runtime PM work function. * @work: Work structure used for scheduling the execution of this function. * * Use @work to get the device object the work is to be done for, determine what * is to be done and execute the appropriate runtime PM function. */ static void pm_runtime_work(struct work_struct *work) { struct device *dev = container_of(work, struct device, power.work); enum rpm_request req; spin_lock_irq(&dev->power.lock); if (!dev->power.request_pending) goto out; req = dev->power.request; dev->power.request = RPM_REQ_NONE; dev->power.request_pending = false; switch (req) { case RPM_REQ_NONE: break; case RPM_REQ_IDLE: rpm_idle(dev, RPM_NOWAIT); break; case RPM_REQ_SUSPEND: rpm_suspend(dev, RPM_NOWAIT); break; case RPM_REQ_AUTOSUSPEND: rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO); break; case RPM_REQ_RESUME: rpm_resume(dev, RPM_NOWAIT); break; } out: spin_unlock_irq(&dev->power.lock); } /** * pm_suspend_timer_fn - Timer function for pm_schedule_suspend(). * @timer: hrtimer used by pm_schedule_suspend(). * * Check if the time is right and queue a suspend request. */ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer) { struct device *dev = container_of(timer, struct device, power.suspend_timer); unsigned long flags; u64 expires; spin_lock_irqsave(&dev->power.lock, flags); expires = dev->power.timer_expires; /* * If 'expires' is after the current time, we've been called * too early. */ if (expires > 0 && expires < ktime_get_mono_fast_ns()) { dev->power.timer_expires = 0; rpm_suspend(dev, dev->power.timer_autosuspends ? (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC); } spin_unlock_irqrestore(&dev->power.lock, flags); return HRTIMER_NORESTART; } /** * pm_schedule_suspend - Set up a timer to submit a suspend request in future. * @dev: Device to suspend. * @delay: Time to wait before submitting a suspend request, in milliseconds. */ int pm_schedule_suspend(struct device *dev, unsigned int delay) { unsigned long flags; u64 expires; int retval; spin_lock_irqsave(&dev->power.lock, flags); if (!delay) { retval = rpm_suspend(dev, RPM_ASYNC); goto out; } retval = rpm_check_suspend_allowed(dev); if (retval) goto out; /* Other scheduled or pending requests need to be canceled. 
*/ pm_runtime_cancel_pending(dev); expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC; dev->power.timer_expires = expires; dev->power.timer_autosuspends = 0; hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS); out: spin_unlock_irqrestore(&dev->power.lock, flags); return retval; } EXPORT_SYMBOL_GPL(pm_schedule_suspend); static int rpm_drop_usage_count(struct device *dev) { int ret; ret = atomic_sub_return(1, &dev->power.usage_count); if (ret >= 0) return ret; /* * Because rpm_resume() does not check the usage counter, it will resume * the device even if the usage counter is 0 or negative, so it is * sufficient to increment the usage counter here to reverse the change * made above. */ atomic_inc(&dev->power.usage_count); dev_warn(dev, "Runtime PM usage count underflow!\n"); return -EINVAL; } /** * __pm_runtime_idle - Entry point for runtime idle operations. * @dev: Device to send idle notification for. * @rpmflags: Flag bits. * * If the RPM_GET_PUT flag is set, decrement the device's usage count and * return immediately if it is larger than zero (if it becomes negative, log a * warning, increment it, and return an error). Then carry out an idle * notification, either synchronous or asynchronous. * * This routine may be called in atomic context if the RPM_ASYNC flag is set, * or if pm_runtime_irq_safe() has been called. */ int __pm_runtime_idle(struct device *dev, int rpmflags) { unsigned long flags; int retval; if (rpmflags & RPM_GET_PUT) { retval = rpm_drop_usage_count(dev); if (retval < 0) { return retval; } else if (retval > 0) { trace_rpm_usage(dev, rpmflags); return 0; } } might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); spin_lock_irqsave(&dev->power.lock, flags); retval = rpm_idle(dev, rpmflags); spin_unlock_irqrestore(&dev->power.lock, flags); return retval; } EXPORT_SYMBOL_GPL(__pm_runtime_idle); /** * __pm_runtime_suspend - Entry point for runtime put/suspend operations. * @dev: Device to suspend. * @rpmflags: Flag bits. * * If the RPM_GET_PUT flag is set, decrement the device's usage count and * return immediately if it is larger than zero (if it becomes negative, log a * warning, increment it, and return an error). Then carry out a suspend, * either synchronous or asynchronous. * * This routine may be called in atomic context if the RPM_ASYNC flag is set, * or if pm_runtime_irq_safe() has been called. */ int __pm_runtime_suspend(struct device *dev, int rpmflags) { unsigned long flags; int retval; if (rpmflags & RPM_GET_PUT) { retval = rpm_drop_usage_count(dev); if (retval < 0) { return retval; } else if (retval > 0) { trace_rpm_usage(dev, rpmflags); return 0; } } might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); spin_lock_irqsave(&dev->power.lock, flags); retval = rpm_suspend(dev, rpmflags); spin_unlock_irqrestore(&dev->power.lock, flags); return retval; } EXPORT_SYMBOL_GPL(__pm_runtime_suspend); /** * __pm_runtime_resume - Entry point for runtime resume operations. * @dev: Device to resume. * @rpmflags: Flag bits. * * If the RPM_GET_PUT flag is set, increment the device's usage count. Then * carry out a resume, either synchronous or asynchronous. * * This routine may be called in atomic context if the RPM_ASYNC flag is set, * or if pm_runtime_irq_safe() has been called. 
*/ int __pm_runtime_resume(struct device *dev, int rpmflags) { unsigned long flags; int retval; might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe && dev->power.runtime_status != RPM_ACTIVE); if (rpmflags & RPM_GET_PUT) atomic_inc(&dev->power.usage_count); spin_lock_irqsave(&dev->power.lock, flags); retval = rpm_resume(dev, rpmflags); spin_unlock_irqrestore(&dev->power.lock, flags); return retval; } EXPORT_SYMBOL_GPL(__pm_runtime_resume); /** * pm_runtime_get_conditional - Conditionally bump up device usage counter. * @dev: Device to handle. * @ign_usage_count: Whether or not to look at the current usage counter value. * * Return -EINVAL if runtime PM is disabled for @dev. * * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either * @ign_usage_count is %true or the runtime PM usage counter of @dev is not * zero, increment the usage counter of @dev and return 1. Otherwise, return 0 * without changing the usage counter. * * If @ign_usage_count is %true, this function can be used to prevent suspending * the device when its runtime PM status is %RPM_ACTIVE. * * If @ign_usage_count is %false, this function can be used to prevent * suspending the device when both its runtime PM status is %RPM_ACTIVE and its * runtime PM usage counter is not zero. * * The caller is responsible for decrementing the runtime PM usage counter of * @dev after this function has returned a positive value for it. */ static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count) { unsigned long flags; int retval; spin_lock_irqsave(&dev->power.lock, flags); if (dev->power.disable_depth > 0) { retval = -EINVAL; } else if (dev->power.runtime_status != RPM_ACTIVE) { retval = 0; } else if (ign_usage_count) { retval = 1; atomic_inc(&dev->power.usage_count); } else { retval = atomic_inc_not_zero(&dev->power.usage_count); } trace_rpm_usage(dev, 0); spin_unlock_irqrestore(&dev->power.lock, flags); return retval; } /** * pm_runtime_get_if_active - Bump up runtime PM usage counter if the device is * in active state * @dev: Target device. * * Increment the runtime PM usage counter of @dev if its runtime PM status is * %RPM_ACTIVE, in which case it returns 1. If the device is in a different * state, 0 is returned. -EINVAL is returned if runtime PM is disabled for the * device, in which case also the usage_count will remain unmodified. */ int pm_runtime_get_if_active(struct device *dev) { return pm_runtime_get_conditional(dev, true); } EXPORT_SYMBOL_GPL(pm_runtime_get_if_active); /** * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter. * @dev: Target device. * * Increment the runtime PM usage counter of @dev if its runtime PM status is * %RPM_ACTIVE and its runtime PM usage counter is greater than 0, in which case * it returns 1. If the device is in a different state or its usage_count is 0, * 0 is returned. -EINVAL is returned if runtime PM is disabled for the device, * in which case also the usage_count will remain unmodified. */ int pm_runtime_get_if_in_use(struct device *dev) { return pm_runtime_get_conditional(dev, false); } EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use); /** * __pm_runtime_set_status - Set runtime PM status of a device. * @dev: Device to handle. * @status: New runtime PM status of the device. * * If runtime PM of the device is disabled or its power.runtime_error field is * different from zero, the status may be changed either to RPM_ACTIVE, or to * RPM_SUSPENDED, as long as that reflects the actual state of the device. 
* However, if the device has a parent and the parent is not active, and the * parent's power.ignore_children flag is unset, the device's status cannot be * set to RPM_ACTIVE, so -EBUSY is returned in that case. * * If successful, __pm_runtime_set_status() clears the power.runtime_error field * and the device parent's counter of unsuspended children is modified to * reflect the new status. If the new status is RPM_SUSPENDED, an idle * notification request for the parent is submitted. * * If @dev has any suppliers (as reflected by device links to them), and @status * is RPM_ACTIVE, they will be activated upfront and if the activation of one * of them fails, the status of @dev will be changed to RPM_SUSPENDED (instead * of the @status value) and the suppliers will be deacticated on exit. The * error returned by the failing supplier activation will be returned in that * case. */ int __pm_runtime_set_status(struct device *dev, unsigned int status) { struct device *parent = dev->parent; bool notify_parent = false; unsigned long flags; int error = 0; if (status != RPM_ACTIVE && status != RPM_SUSPENDED) return -EINVAL; spin_lock_irqsave(&dev->power.lock, flags); /* * Prevent PM-runtime from being enabled for the device or return an * error if it is enabled already and working. */ if (dev->power.runtime_error || dev->power.disable_depth) dev->power.disable_depth++; else error = -EAGAIN; spin_unlock_irqrestore(&dev->power.lock, flags); if (error) return error; /* * If the new status is RPM_ACTIVE, the suppliers can be activated * upfront regardless of the current status, because next time * rpm_put_suppliers() runs, the rpm_active refcounts of the links * involved will be dropped down to one anyway. */ if (status == RPM_ACTIVE) { int idx = device_links_read_lock(); error = rpm_get_suppliers(dev); if (error) status = RPM_SUSPENDED; device_links_read_unlock(idx); } spin_lock_irqsave(&dev->power.lock, flags); if (dev->power.runtime_status == status || !parent) goto out_set; if (status == RPM_SUSPENDED) { atomic_add_unless(&parent->power.child_count, -1, 0); notify_parent = !parent->power.ignore_children; } else { spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING); /* * It is invalid to put an active child under a parent that is * not active, has runtime PM enabled and the * 'power.ignore_children' flag unset. */ if (!parent->power.disable_depth && !parent->power.ignore_children && parent->power.runtime_status != RPM_ACTIVE) { dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n", dev_name(dev), dev_name(parent)); error = -EBUSY; } else if (dev->power.runtime_status == RPM_SUSPENDED) { atomic_inc(&parent->power.child_count); } spin_unlock(&parent->power.lock); if (error) { status = RPM_SUSPENDED; goto out; } } out_set: __update_runtime_status(dev, status); if (!error) dev->power.runtime_error = 0; out: spin_unlock_irqrestore(&dev->power.lock, flags); if (notify_parent) pm_request_idle(parent); if (status == RPM_SUSPENDED) { int idx = device_links_read_lock(); rpm_put_suppliers(dev); device_links_read_unlock(idx); } pm_runtime_enable(dev); return error; } EXPORT_SYMBOL_GPL(__pm_runtime_set_status); /** * __pm_runtime_barrier - Cancel pending requests and wait for completions. * @dev: Device to handle. * * Flush all pending requests for the device from pm_wq and wait for all * runtime PM operations involving the device in progress to complete. * * Should be called under dev->power.lock with interrupts disabled. 
*/ static void __pm_runtime_barrier(struct device *dev) { pm_runtime_deactivate_timer(dev); if (dev->power.request_pending) { dev->power.request = RPM_REQ_NONE; spin_unlock_irq(&dev->power.lock); cancel_work_sync(&dev->power.work); spin_lock_irq(&dev->power.lock); dev->power.request_pending = false; } if (dev->power.runtime_status == RPM_SUSPENDING || dev->power.runtime_status == RPM_RESUMING || dev->power.idle_notification) { DEFINE_WAIT(wait); /* Suspend, wake-up or idle notification in progress. */ for (;;) { prepare_to_wait(&dev->power.wait_queue, &wait, TASK_UNINTERRUPTIBLE); if (dev->power.runtime_status != RPM_SUSPENDING && dev->power.runtime_status != RPM_RESUMING && !dev->power.idle_notification) break; spin_unlock_irq(&dev->power.lock); schedule(); spin_lock_irq(&dev->power.lock); } finish_wait(&dev->power.wait_queue, &wait); } } /** * pm_runtime_barrier - Flush pending requests and wait for completions. * @dev: Device to handle. * * Prevent the device from being suspended by incrementing its usage counter and * if there's a pending resume request for the device, wake the device up. * Next, make sure that all pending requests for the device have been flushed * from pm_wq and wait for all runtime PM operations involving the device in * progress to complete. * * Return value: * 1, if there was a resume request pending and the device had to be woken up, * 0, otherwise */ int pm_runtime_barrier(struct device *dev) { int retval = 0; pm_runtime_get_noresume(dev); spin_lock_irq(&dev->power.lock); if (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME) { rpm_resume(dev, 0); retval = 1; } __pm_runtime_barrier(dev); spin_unlock_irq(&dev->power.lock); pm_runtime_put_noidle(dev); return retval; } EXPORT_SYMBOL_GPL(pm_runtime_barrier); /** * __pm_runtime_disable - Disable runtime PM of a device. * @dev: Device to handle. * @check_resume: If set, check if there's a resume request for the device. * * Increment power.disable_depth for the device and if it was zero previously, * cancel all pending runtime PM requests for the device and wait for all * operations in progress to complete. The device can be either active or * suspended after its runtime PM has been disabled. * * If @check_resume is set and there's a resume request pending when * __pm_runtime_disable() is called and power.disable_depth is zero, the * function will wake up the device before disabling its runtime PM. */ void __pm_runtime_disable(struct device *dev, bool check_resume) { spin_lock_irq(&dev->power.lock); if (dev->power.disable_depth > 0) { dev->power.disable_depth++; goto out; } /* * Wake up the device if there's a resume request pending, because that * means there probably is some I/O to process and disabling runtime PM * shouldn't prevent the device from processing the I/O. */ if (check_resume && dev->power.request_pending && dev->power.request == RPM_REQ_RESUME) { /* * Prevent suspends and idle notifications from being carried * out after we have woken up the device. */ pm_runtime_get_noresume(dev); rpm_resume(dev, 0); pm_runtime_put_noidle(dev); } /* Update time accounting before disabling PM-runtime. */ update_pm_runtime_accounting(dev); if (!dev->power.disable_depth++) { __pm_runtime_barrier(dev); dev->power.last_status = dev->power.runtime_status; } out: spin_unlock_irq(&dev->power.lock); } EXPORT_SYMBOL_GPL(__pm_runtime_disable); /** * pm_runtime_enable - Enable runtime PM of a device. * @dev: Device to handle. 
*/ void pm_runtime_enable(struct device *dev) { unsigned long flags; spin_lock_irqsave(&dev->power.lock, flags); if (!dev->power.disable_depth) { dev_warn(dev, "Unbalanced %s!\n", __func__); goto out; } if (--dev->power.disable_depth > 0) goto out; dev->power.last_status = RPM_INVALID; dev->power.accounting_timestamp = ktime_get_mono_fast_ns(); if (dev->power.runtime_status == RPM_SUSPENDED && !dev->power.ignore_children && atomic_read(&dev->power.child_count) > 0) dev_warn(dev, "Enabling runtime PM for inactive device with active children\n"); out: spin_unlock_irqrestore(&dev->power.lock, flags); } EXPORT_SYMBOL_GPL(pm_runtime_enable); static void pm_runtime_disable_action(void *data) { pm_runtime_dont_use_autosuspend(data); pm_runtime_disable(data); } /** * devm_pm_runtime_enable - devres-enabled version of pm_runtime_enable. * * NOTE: this will also handle calling pm_runtime_dont_use_autosuspend() for * you at driver exit time if needed. * * @dev: Device to handle. */ int devm_pm_runtime_enable(struct device *dev) { pm_runtime_enable(dev); return devm_add_action_or_reset(dev, pm_runtime_disable_action, dev); } EXPORT_SYMBOL_GPL(devm_pm_runtime_enable); /** * pm_runtime_forbid - Block runtime PM of a device. * @dev: Device to handle. * * Increase the device's usage count and clear its power.runtime_auto flag, * so that it cannot be suspended at run time until pm_runtime_allow() is called * for it. */ void pm_runtime_forbid(struct device *dev) { spin_lock_irq(&dev->power.lock); if (!dev->power.runtime_auto) goto out; dev->power.runtime_auto = false; atomic_inc(&dev->power.usage_count); rpm_resume(dev, 0); out: spin_unlock_irq(&dev->power.lock); } EXPORT_SYMBOL_GPL(pm_runtime_forbid); /** * pm_runtime_allow - Unblock runtime PM of a device. * @dev: Device to handle. * * Decrease the device's usage count and set its power.runtime_auto flag. */ void pm_runtime_allow(struct device *dev) { int ret; spin_lock_irq(&dev->power.lock); if (dev->power.runtime_auto) goto out; dev->power.runtime_auto = true; ret = rpm_drop_usage_count(dev); if (ret == 0) rpm_idle(dev, RPM_AUTO | RPM_ASYNC); else if (ret > 0) trace_rpm_usage(dev, RPM_AUTO | RPM_ASYNC); out: spin_unlock_irq(&dev->power.lock); } EXPORT_SYMBOL_GPL(pm_runtime_allow); /** * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device. * @dev: Device to handle. * * Set the power.no_callbacks flag, which tells the PM core that this * device is power-managed through its parent and has no runtime PM * callbacks of its own. The runtime sysfs attributes will be removed. */ void pm_runtime_no_callbacks(struct device *dev) { spin_lock_irq(&dev->power.lock); dev->power.no_callbacks = 1; spin_unlock_irq(&dev->power.lock); if (device_is_registered(dev)) rpm_sysfs_remove(dev); } EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks); /** * pm_runtime_irq_safe - Leave interrupts disabled during callbacks. * @dev: Device to handle * * Set the power.irq_safe flag, which tells the PM core that the * ->runtime_suspend() and ->runtime_resume() callbacks for this device should * always be invoked with the spinlock held and interrupts disabled. It also * causes the parent's usage counter to be permanently incremented, preventing * the parent from runtime suspending -- otherwise an irq-safe child might have * to wait for a non-irq-safe parent. 
*/ void pm_runtime_irq_safe(struct device *dev) { if (dev->parent) pm_runtime_get_sync(dev->parent); spin_lock_irq(&dev->power.lock); dev->power.irq_safe = 1; spin_unlock_irq(&dev->power.lock); } EXPORT_SYMBOL_GPL(pm_runtime_irq_safe); /** * update_autosuspend - Handle a change to a device's autosuspend settings. * @dev: Device to handle. * @old_delay: The former autosuspend_delay value. * @old_use: The former use_autosuspend value. * * Prevent runtime suspend if the new delay is negative and use_autosuspend is * set; otherwise allow it. Send an idle notification if suspends are allowed. * * This function must be called under dev->power.lock with interrupts disabled. */ static void update_autosuspend(struct device *dev, int old_delay, int old_use) { int delay = dev->power.autosuspend_delay; /* Should runtime suspend be prevented now? */ if (dev->power.use_autosuspend && delay < 0) { /* If it used to be allowed then prevent it. */ if (!old_use || old_delay >= 0) { atomic_inc(&dev->power.usage_count); rpm_resume(dev, 0); } else { trace_rpm_usage(dev, 0); } } /* Runtime suspend should be allowed now. */ else { /* If it used to be prevented then allow it. */ if (old_use && old_delay < 0) atomic_dec(&dev->power.usage_count); /* Maybe we can autosuspend now. */ rpm_idle(dev, RPM_AUTO); } } /** * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value. * @dev: Device to handle. * @delay: Value of the new delay in milliseconds. * * Set the device's power.autosuspend_delay value. If it changes to negative * and the power.use_autosuspend flag is set, prevent runtime suspends. If it * changes the other way, allow runtime suspends. */ void pm_runtime_set_autosuspend_delay(struct device *dev, int delay) { int old_delay, old_use; spin_lock_irq(&dev->power.lock); old_delay = dev->power.autosuspend_delay; old_use = dev->power.use_autosuspend; dev->power.autosuspend_delay = delay; update_autosuspend(dev, old_delay, old_use); spin_unlock_irq(&dev->power.lock); } EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay); /** * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag. * @dev: Device to handle. * @use: New value for use_autosuspend. * * Set the device's power.use_autosuspend flag, and allow or prevent runtime * suspends as needed. */ void __pm_runtime_use_autosuspend(struct device *dev, bool use) { int old_delay, old_use; spin_lock_irq(&dev->power.lock); old_delay = dev->power.autosuspend_delay; old_use = dev->power.use_autosuspend; dev->power.use_autosuspend = use; update_autosuspend(dev, old_delay, old_use); spin_unlock_irq(&dev->power.lock); } EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend); /** * pm_runtime_init - Initialize runtime PM fields in given device object. * @dev: Device object to initialize. 
*/ void pm_runtime_init(struct device *dev) { dev->power.runtime_status = RPM_SUSPENDED; dev->power.last_status = RPM_INVALID; dev->power.idle_notification = false; dev->power.disable_depth = 1; atomic_set(&dev->power.usage_count, 0); dev->power.runtime_error = 0; atomic_set(&dev->power.child_count, 0); pm_suspend_ignore_children(dev, false); dev->power.runtime_auto = true; dev->power.request_pending = false; dev->power.request = RPM_REQ_NONE; dev->power.deferred_resume = false; dev->power.needs_force_resume = 0; INIT_WORK(&dev->power.work, pm_runtime_work); dev->power.timer_expires = 0; hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); dev->power.suspend_timer.function = pm_suspend_timer_fn; init_waitqueue_head(&dev->power.wait_queue); } /** * pm_runtime_reinit - Re-initialize runtime PM fields in given device object. * @dev: Device object to re-initialize. */ void pm_runtime_reinit(struct device *dev) { if (!pm_runtime_enabled(dev)) { if (dev->power.runtime_status == RPM_ACTIVE) pm_runtime_set_suspended(dev); if (dev->power.irq_safe) { spin_lock_irq(&dev->power.lock); dev->power.irq_safe = 0; spin_unlock_irq(&dev->power.lock); if (dev->parent) pm_runtime_put(dev->parent); } } } /** * pm_runtime_remove - Prepare for removing a device from device hierarchy. * @dev: Device object being removed from device hierarchy. */ void pm_runtime_remove(struct device *dev) { __pm_runtime_disable(dev, false); pm_runtime_reinit(dev); } /** * pm_runtime_get_suppliers - Resume and reference-count supplier devices. * @dev: Consumer device. */ void pm_runtime_get_suppliers(struct device *dev) { struct device_link *link; int idx; idx = device_links_read_lock(); list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, device_links_read_lock_held()) if (link->flags & DL_FLAG_PM_RUNTIME) { link->supplier_preactivated = true; pm_runtime_get_sync(link->supplier); } device_links_read_unlock(idx); } /** * pm_runtime_put_suppliers - Drop references to supplier devices. * @dev: Consumer device. */ void pm_runtime_put_suppliers(struct device *dev) { struct device_link *link; int idx; idx = device_links_read_lock(); list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, device_links_read_lock_held()) if (link->supplier_preactivated) { link->supplier_preactivated = false; pm_runtime_put(link->supplier); } device_links_read_unlock(idx); } void pm_runtime_new_link(struct device *dev) { spin_lock_irq(&dev->power.lock); dev->power.links_count++; spin_unlock_irq(&dev->power.lock); } static void pm_runtime_drop_link_count(struct device *dev) { spin_lock_irq(&dev->power.lock); WARN_ON(dev->power.links_count == 0); dev->power.links_count--; spin_unlock_irq(&dev->power.lock); } /** * pm_runtime_drop_link - Prepare for device link removal. * @link: Device link going away. * * Drop the link count of the consumer end of @link and decrement the supplier * device's runtime PM usage counter as many times as needed to drop all of the * PM runtime reference to it from the consumer. */ void pm_runtime_drop_link(struct device_link *link) { if (!(link->flags & DL_FLAG_PM_RUNTIME)) return; pm_runtime_drop_link_count(link->consumer); pm_runtime_release_supplier(link); pm_request_idle(link->supplier); } static bool pm_runtime_need_not_resume(struct device *dev) { return atomic_read(&dev->power.usage_count) <= 1 && (atomic_read(&dev->power.child_count) == 0 || dev->power.ignore_children); } /** * pm_runtime_force_suspend - Force a device into suspend state if needed. * @dev: Device to suspend. 
* * Disable runtime PM so we safely can check the device's runtime PM status and * if it is active, invoke its ->runtime_suspend callback to suspend it and * change its runtime PM status field to RPM_SUSPENDED. Also, if the device's * usage and children counters don't indicate that the device was in use before * the system-wide transition under way, decrement its parent's children counter * (if there is a parent). Keep runtime PM disabled to preserve the state * unless we encounter errors. * * Typically this function may be invoked from a system suspend callback to make * sure the device is put into low power state and it should only be used during * system-wide PM transitions to sleep states. It assumes that the analogous * pm_runtime_force_resume() will be used to resume the device. * * Do not use with DPM_FLAG_SMART_SUSPEND as this can lead to an inconsistent * state where this function has called the ->runtime_suspend callback but the * PM core marks the driver as runtime active. */ int pm_runtime_force_suspend(struct device *dev) { int (*callback)(struct device *); int ret; pm_runtime_disable(dev); if (pm_runtime_status_suspended(dev)) return 0; callback = RPM_GET_CALLBACK(dev, runtime_suspend); dev_pm_enable_wake_irq_check(dev, true); ret = callback ? callback(dev) : 0; if (ret) goto err; dev_pm_enable_wake_irq_complete(dev); /* * If the device can stay in suspend after the system-wide transition * to the working state that will follow, drop the children counter of * its parent, but set its status to RPM_SUSPENDED anyway in case this * function will be called again for it in the meantime. */ if (pm_runtime_need_not_resume(dev)) { pm_runtime_set_suspended(dev); } else { __update_runtime_status(dev, RPM_SUSPENDED); dev->power.needs_force_resume = 1; } return 0; err: dev_pm_disable_wake_irq_check(dev, true); pm_runtime_enable(dev); return ret; } EXPORT_SYMBOL_GPL(pm_runtime_force_suspend); /** * pm_runtime_force_resume - Force a device into resume state if needed. * @dev: Device to resume. * * Prior invoking this function we expect the user to have brought the device * into low power state by a call to pm_runtime_force_suspend(). Here we reverse * those actions and bring the device into full power, if it is expected to be * used on system resume. In the other case, we defer the resume to be managed * via runtime PM. * * Typically this function may be invoked from a system resume callback. */ int pm_runtime_force_resume(struct device *dev) { int (*callback)(struct device *); int ret = 0; if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume) goto out; /* * The value of the parent's children counter is correct already, so * just update the status of the device. */ __update_runtime_status(dev, RPM_ACTIVE); callback = RPM_GET_CALLBACK(dev, runtime_resume); dev_pm_disable_wake_irq_check(dev, false); ret = callback ? callback(dev) : 0; if (ret) { pm_runtime_set_suspended(dev); dev_pm_enable_wake_irq_check(dev, false); goto out; } pm_runtime_mark_last_busy(dev); out: dev->power.needs_force_resume = 0; pm_runtime_enable(dev); return ret; } EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
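/*
 * Minimal usage sketch (not part of runtime.c itself): how a consumer driver
 * typically pairs the helpers above. The foo_* names and the 2000 ms delay
 * are hypothetical; the pm_runtime_* calls are the real helpers implemented
 * in this file and declared in include/linux/pm_runtime.h.
 */
#include <linux/device.h>
#include <linux/pm_runtime.h>

static int foo_probe_pm_setup(struct device *dev)
{
	/* Let the device autosuspend 2 s after the last piece of I/O. */
	pm_runtime_set_autosuspend_delay(dev, 2000);
	pm_runtime_use_autosuspend(dev);

	/* Balance the disable_depth = 1 set by pm_runtime_init(). */
	pm_runtime_enable(dev);
	return 0;
}

static int foo_do_io(struct device *dev)
{
	int ret;

	/* Bump the usage count and resume synchronously if needed. */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* ... hardware access happens here; the device is RPM_ACTIVE ... */

	/* Refresh last_busy so the autosuspend timer restarts from now. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}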
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/slab.h>
#include <linux/fs_struct.h>
#include "internal.h"

/*
 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
 * It can block.
 */
void set_fs_root(struct fs_struct *fs, const struct path *path)
{
	struct path old_root;

	path_get(path);
	spin_lock(&fs->lock);
	write_seqcount_begin(&fs->seq);
	old_root = fs->root;
	fs->root = *path;
	write_seqcount_end(&fs->seq);
	spin_unlock(&fs->lock);
	if (old_root.dentry)
		path_put(&old_root);
}

/*
 * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
 * It can block.
 */
void set_fs_pwd(struct fs_struct *fs, const struct path *path)
{
	struct path old_pwd;

	path_get(path);
	spin_lock(&fs->lock);
	write_seqcount_begin(&fs->seq);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	write_seqcount_end(&fs->seq);
	spin_unlock(&fs->lock);

	if (old_pwd.dentry)
		path_put(&old_pwd);
}

static inline int replace_path(struct path *p, const struct path *old, const struct path *new)
{
	if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
		return 0;
	*p = *new;
	return 1;
}

void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
{
	struct task_struct *g, *p;
	struct fs_struct *fs;
	int count = 0;

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		task_lock(p);
		fs = p->fs;
		if (fs) {
			int hits = 0;
			spin_lock(&fs->lock);
			write_seqcount_begin(&fs->seq);
			hits += replace_path(&fs->root, old_root, new_root);
			hits += replace_path(&fs->pwd, old_root, new_root);
			write_seqcount_end(&fs->seq);
			while (hits--) {
				count++;
				path_get(new_root);
			}
			spin_unlock(&fs->lock);
		}
		task_unlock(p);
	}
	read_unlock(&tasklist_lock);
	while (count--)
		path_put(old_root);
}

void free_fs_struct(struct fs_struct *fs)
{
	path_put(&fs->root);
	path_put(&fs->pwd);
	kmem_cache_free(fs_cachep, fs);
}

void exit_fs(struct task_struct *tsk)
{
	struct fs_struct *fs = tsk->fs;

	if (fs) {
		int kill;
		task_lock(tsk);
		spin_lock(&fs->lock);
		tsk->fs = NULL;
		kill = !--fs->users;
		spin_unlock(&fs->lock);
		task_unlock(tsk);
		if (kill)
			free_fs_struct(fs);
	}
}

struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
	/* We don't need to lock fs - think why ;-) */
	if (fs) {
		fs->users = 1;
		fs->in_exec = 0;
		spin_lock_init(&fs->lock);
		seqcount_spinlock_init(&fs->seq, &fs->lock);
		fs->umask = old->umask;

		spin_lock(&old->lock);
		fs->root = old->root;
		path_get(&fs->root);
		fs->pwd = old->pwd;
		path_get(&fs->pwd);
		spin_unlock(&old->lock);
	}
	return fs;
}

int unshare_fs_struct(void)
{
	struct fs_struct *fs = current->fs;
	struct fs_struct *new_fs = copy_fs_struct(fs);
	int kill;

	if (!new_fs)
		return -ENOMEM;

	task_lock(current);
	spin_lock(&fs->lock);
	kill = !--fs->users;
	current->fs = new_fs;
	spin_unlock(&fs->lock);
	task_unlock(current);

	if (kill)
		free_fs_struct(fs);

	return 0;
}
EXPORT_SYMBOL_GPL(unshare_fs_struct);

int current_umask(void)
{
	return current->fs->umask;
}
EXPORT_SYMBOL(current_umask);

/* to be mentioned only in INIT_TASK */
struct fs_struct init_fs = {
	.users		= 1,
	.lock		= __SPIN_LOCK_UNLOCKED(init_fs.lock),
	.seq		= SEQCNT_SPINLOCK_ZERO(init_fs.seq, &init_fs.lock),
	.umask		= 0022,
};
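/*
 * Minimal usage sketch (not part of fs_struct.c itself): set_fs_pwd() is the
 * primitive behind chdir()-style operations. This loosely follows what
 * ksys_chdir() in fs/open.c does; my_chdir() is a hypothetical name used only
 * for illustration.
 */
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/sched.h>

static long my_chdir(const char __user *filename)
{
	struct path path;
	int error;

	/* Resolve the name; the result must be a directory to become pwd. */
	error = user_path_at(AT_FDCWD, filename,
			     LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path);
	if (error)
		return error;

	/* May the caller enter this directory? */
	error = path_permission(&path, MAY_EXEC | MAY_CHDIR);
	if (!error)
		set_fs_pwd(current->fs, &path);	/* takes its own reference */

	path_put(&path);	/* drop the lookup reference either way */
	return error;
}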
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _ASM_X86_GSSEG_H
#define _ASM_X86_GSSEG_H

#include <linux/types.h>

#include <asm/asm.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/processor.h>
#include <asm/nops.h>

#ifdef CONFIG_X86_64

extern asmlinkage void asm_load_gs_index(u16 selector);

/* Replace with "lkgs %di" once binutils support LKGS instruction */
#define LKGS_DI _ASM_BYTES(0xf2,0x0f,0x00,0xf7)

static inline void native_lkgs(unsigned int selector)
{
	u16 sel = selector;

	asm_inline volatile("1: " LKGS_DI
			    _ASM_EXTABLE_TYPE_REG(1b, 1b, EX_TYPE_ZERO_REG, %k[sel])
			    : [sel] "+D" (sel));
}

static inline void native_load_gs_index(unsigned int selector)
{
	if (cpu_feature_enabled(X86_FEATURE_LKGS)) {
		native_lkgs(selector);
	} else {
		unsigned long flags;

		local_irq_save(flags);
		asm_load_gs_index(selector);
		local_irq_restore(flags);
	}
}

#endif /* CONFIG_X86_64 */

static inline void __init lkgs_init(void)
{
#ifdef CONFIG_PARAVIRT_XXL
#ifdef CONFIG_X86_64
	if (cpu_feature_enabled(X86_FEATURE_LKGS))
		pv_ops.cpu.load_gs_index = native_lkgs;
#endif
#endif
}

#ifndef CONFIG_PARAVIRT_XXL

static inline void load_gs_index(unsigned int selector)
{
#ifdef CONFIG_X86_64
	native_load_gs_index(selector);
#else
	loadsegment(gs, selector);
#endif
}

#endif /* CONFIG_PARAVIRT_XXL */

#endif /* _ASM_X86_GSSEG_H */
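/*
 * Minimal usage sketch (not part of gsseg.h itself): arch code calls
 * load_gs_index() when it has to change the GS selector without disturbing
 * the kernel's active GSBASE, e.g. when restoring user segment state.
 * restore_user_gs() is a hypothetical name; real callers live in places such
 * as arch/x86/kernel/process_64.c.
 */
#include <asm/gsseg.h>

static void restore_user_gs(u16 user_gs_sel)
{
	/*
	 * On LKGS-capable CPUs this becomes the single LKGS instruction,
	 * which writes the selector and places the descriptor base in the
	 * inactive (user) GS base, so the IRQ-off path around
	 * asm_load_gs_index() is not needed. Otherwise it falls back to
	 * asm_load_gs_index() with interrupts disabled, as shown above.
	 */
	load_gs_index(user_gs_sel);
}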
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 Forwarding Information Base: semantics.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/netlink.h>
#include <linux/hash.h>
#include <linux/nospec.h>

#include <net/arp.h>
#include <net/inet_dscp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/nexthop.h>
#include <net/netlink.h>
#include <net/rtnh.h>
#include <net/lwtunnel.h>
#include <net/fib_notifier.h>
#include <net/addrconf.h>

#include "fib_lookup.h"

static DEFINE_SPINLOCK(fib_info_lock);
static struct hlist_head *fib_info_hash;
static struct hlist_head *fib_info_laddrhash;
static unsigned int fib_info_hash_size;
static unsigned int fib_info_hash_bits;
static unsigned int fib_info_cnt;

#define DEVINDEX_HASHBITS 8
#define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS)

static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];

/* for_nexthops and change_nexthops are only used when a nexthop object
 * is not set in a fib_info. The logic within can reference fib_nh.
*/ #ifdef CONFIG_IP_ROUTE_MULTIPATH #define for_nexthops(fi) { \ int nhsel; const struct fib_nh *nh; \ for (nhsel = 0, nh = (fi)->fib_nh; \ nhsel < fib_info_num_path((fi)); \ nh++, nhsel++) #define change_nexthops(fi) { \ int nhsel; struct fib_nh *nexthop_nh; \ for (nhsel = 0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \ nhsel < fib_info_num_path((fi)); \ nexthop_nh++, nhsel++) #else /* CONFIG_IP_ROUTE_MULTIPATH */ /* Hope, that gcc will optimize it to get rid of dummy loop */ #define for_nexthops(fi) { \ int nhsel; const struct fib_nh *nh = (fi)->fib_nh; \ for (nhsel = 0; nhsel < 1; nhsel++) #define change_nexthops(fi) { \ int nhsel; \ struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \ for (nhsel = 0; nhsel < 1; nhsel++) #endif /* CONFIG_IP_ROUTE_MULTIPATH */ #define endfor_nexthops(fi) } const struct fib_prop fib_props[RTN_MAX + 1] = { [RTN_UNSPEC] = { .error = 0, .scope = RT_SCOPE_NOWHERE, }, [RTN_UNICAST] = { .error = 0, .scope = RT_SCOPE_UNIVERSE, }, [RTN_LOCAL] = { .error = 0, .scope = RT_SCOPE_HOST, }, [RTN_BROADCAST] = { .error = 0, .scope = RT_SCOPE_LINK, }, [RTN_ANYCAST] = { .error = 0, .scope = RT_SCOPE_LINK, }, [RTN_MULTICAST] = { .error = 0, .scope = RT_SCOPE_UNIVERSE, }, [RTN_BLACKHOLE] = { .error = -EINVAL, .scope = RT_SCOPE_UNIVERSE, }, [RTN_UNREACHABLE] = { .error = -EHOSTUNREACH, .scope = RT_SCOPE_UNIVERSE, }, [RTN_PROHIBIT] = { .error = -EACCES, .scope = RT_SCOPE_UNIVERSE, }, [RTN_THROW] = { .error = -EAGAIN, .scope = RT_SCOPE_UNIVERSE, }, [RTN_NAT] = { .error = -EINVAL, .scope = RT_SCOPE_NOWHERE, }, [RTN_XRESOLVE] = { .error = -EINVAL, .scope = RT_SCOPE_NOWHERE, }, }; static void rt_fibinfo_free(struct rtable __rcu **rtp) { struct rtable *rt = rcu_dereference_protected(*rtp, 1); if (!rt) return; /* Not even needed : RCU_INIT_POINTER(*rtp, NULL); * because we waited an RCU grace period before calling * free_fib_info_rcu() */ dst_dev_put(&rt->dst); dst_release_immediate(&rt->dst); } static void free_nh_exceptions(struct fib_nh_common *nhc) { struct fnhe_hash_bucket *hash; int i; hash = rcu_dereference_protected(nhc->nhc_exceptions, 1); if (!hash) return; for (i = 0; i < FNHE_HASH_SIZE; i++) { struct fib_nh_exception *fnhe; fnhe = rcu_dereference_protected(hash[i].chain, 1); while (fnhe) { struct fib_nh_exception *next; next = rcu_dereference_protected(fnhe->fnhe_next, 1); rt_fibinfo_free(&fnhe->fnhe_rth_input); rt_fibinfo_free(&fnhe->fnhe_rth_output); kfree(fnhe); fnhe = next; } } kfree(hash); } static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp) { int cpu; if (!rtp) return; for_each_possible_cpu(cpu) { struct rtable *rt; rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1); if (rt) { dst_dev_put(&rt->dst); dst_release_immediate(&rt->dst); } } free_percpu(rtp); } void fib_nh_common_release(struct fib_nh_common *nhc) { netdev_put(nhc->nhc_dev, &nhc->nhc_dev_tracker); lwtstate_put(nhc->nhc_lwtstate); rt_fibinfo_free_cpus(nhc->nhc_pcpu_rth_output); rt_fibinfo_free(&nhc->nhc_rth_input); free_nh_exceptions(nhc); } EXPORT_SYMBOL_GPL(fib_nh_common_release); void fib_nh_release(struct net *net, struct fib_nh *fib_nh) { #ifdef CONFIG_IP_ROUTE_CLASSID if (fib_nh->nh_tclassid) atomic_dec(&net->ipv4.fib_num_tclassid_users); #endif fib_nh_common_release(&fib_nh->nh_common); } /* Release a nexthop info record */ static void free_fib_info_rcu(struct rcu_head *head) { struct fib_info *fi = container_of(head, struct fib_info, rcu); if (fi->nh) { nexthop_put(fi->nh); } else { change_nexthops(fi) { fib_nh_release(fi->fib_net, nexthop_nh); } 
endfor_nexthops(fi); } ip_fib_metrics_put(fi->fib_metrics); kfree(fi); } void free_fib_info(struct fib_info *fi) { if (fi->fib_dead == 0) { pr_warn("Freeing alive fib_info %p\n", fi); return; } call_rcu(&fi->rcu, free_fib_info_rcu); } EXPORT_SYMBOL_GPL(free_fib_info); void fib_release_info(struct fib_info *fi) { spin_lock_bh(&fib_info_lock); if (fi && refcount_dec_and_test(&fi->fib_treeref)) { hlist_del(&fi->fib_hash); /* Paired with READ_ONCE() in fib_create_info(). */ WRITE_ONCE(fib_info_cnt, fib_info_cnt - 1); if (fi->fib_prefsrc) hlist_del(&fi->fib_lhash); if (fi->nh) { list_del(&fi->nh_list); } else { change_nexthops(fi) { if (!nexthop_nh->fib_nh_dev) continue; hlist_del(&nexthop_nh->nh_hash); } endfor_nexthops(fi) } /* Paired with READ_ONCE() from fib_table_lookup() */ WRITE_ONCE(fi->fib_dead, 1); fib_info_put(fi); } spin_unlock_bh(&fib_info_lock); } static inline int nh_comp(struct fib_info *fi, struct fib_info *ofi) { const struct fib_nh *onh; if (fi->nh || ofi->nh) return nexthop_cmp(fi->nh, ofi->nh) ? 0 : -1; if (ofi->fib_nhs == 0) return 0; for_nexthops(fi) { onh = fib_info_nh(ofi, nhsel); if (nh->fib_nh_oif != onh->fib_nh_oif || nh->fib_nh_gw_family != onh->fib_nh_gw_family || nh->fib_nh_scope != onh->fib_nh_scope || #ifdef CONFIG_IP_ROUTE_MULTIPATH nh->fib_nh_weight != onh->fib_nh_weight || #endif #ifdef CONFIG_IP_ROUTE_CLASSID nh->nh_tclassid != onh->nh_tclassid || #endif lwtunnel_cmp_encap(nh->fib_nh_lws, onh->fib_nh_lws) || ((nh->fib_nh_flags ^ onh->fib_nh_flags) & ~RTNH_COMPARE_MASK)) return -1; if (nh->fib_nh_gw_family == AF_INET && nh->fib_nh_gw4 != onh->fib_nh_gw4) return -1; if (nh->fib_nh_gw_family == AF_INET6 && ipv6_addr_cmp(&nh->fib_nh_gw6, &onh->fib_nh_gw6)) return -1; } endfor_nexthops(fi); return 0; } static inline unsigned int fib_devindex_hashfn(unsigned int val) { return hash_32(val, DEVINDEX_HASHBITS); } static struct hlist_head * fib_info_devhash_bucket(const struct net_device *dev) { u32 val = net_hash_mix(dev_net(dev)) ^ dev->ifindex; return &fib_info_devhash[fib_devindex_hashfn(val)]; } static unsigned int fib_info_hashfn_1(int init_val, u8 protocol, u8 scope, u32 prefsrc, u32 priority) { unsigned int val = init_val; val ^= (protocol << 8) | scope; val ^= prefsrc; val ^= priority; return val; } static unsigned int fib_info_hashfn_result(unsigned int val) { unsigned int mask = (fib_info_hash_size - 1); return (val ^ (val >> 7) ^ (val >> 12)) & mask; } static inline unsigned int fib_info_hashfn(struct fib_info *fi) { unsigned int val; val = fib_info_hashfn_1(fi->fib_nhs, fi->fib_protocol, fi->fib_scope, (__force u32)fi->fib_prefsrc, fi->fib_priority); if (fi->nh) { val ^= fib_devindex_hashfn(fi->nh->id); } else { for_nexthops(fi) { val ^= fib_devindex_hashfn(nh->fib_nh_oif); } endfor_nexthops(fi) } return fib_info_hashfn_result(val); } /* no metrics, only nexthop id */ static struct fib_info *fib_find_info_nh(struct net *net, const struct fib_config *cfg) { struct hlist_head *head; struct fib_info *fi; unsigned int hash; hash = fib_info_hashfn_1(fib_devindex_hashfn(cfg->fc_nh_id), cfg->fc_protocol, cfg->fc_scope, (__force u32)cfg->fc_prefsrc, cfg->fc_priority); hash = fib_info_hashfn_result(hash); head = &fib_info_hash[hash]; hlist_for_each_entry(fi, head, fib_hash) { if (!net_eq(fi->fib_net, net)) continue; if (!fi->nh || fi->nh->id != cfg->fc_nh_id) continue; if (cfg->fc_protocol == fi->fib_protocol && cfg->fc_scope == fi->fib_scope && cfg->fc_prefsrc == fi->fib_prefsrc && cfg->fc_priority == fi->fib_priority && cfg->fc_type == fi->fib_type && 
cfg->fc_table == fi->fib_tb_id && !((cfg->fc_flags ^ fi->fib_flags) & ~RTNH_COMPARE_MASK)) return fi; } return NULL; } static struct fib_info *fib_find_info(struct fib_info *nfi) { struct hlist_head *head; struct fib_info *fi; unsigned int hash; hash = fib_info_hashfn(nfi); head = &fib_info_hash[hash]; hlist_for_each_entry(fi, head, fib_hash) { if (!net_eq(fi->fib_net, nfi->fib_net)) continue; if (fi->fib_nhs != nfi->fib_nhs) continue; if (nfi->fib_protocol == fi->fib_protocol && nfi->fib_scope == fi->fib_scope && nfi->fib_prefsrc == fi->fib_prefsrc && nfi->fib_priority == fi->fib_priority && nfi->fib_type == fi->fib_type && nfi->fib_tb_id == fi->fib_tb_id && memcmp(nfi->fib_metrics, fi->fib_metrics, sizeof(u32) * RTAX_MAX) == 0 && !((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_COMPARE_MASK) && nh_comp(fi, nfi) == 0) return fi; } return NULL; } /* Check, that the gateway is already configured. * Used only by redirect accept routine. */ int ip_fib_check_default(__be32 gw, struct net_device *dev) { struct hlist_head *head; struct fib_nh *nh; spin_lock(&fib_info_lock); head = fib_info_devhash_bucket(dev); hlist_for_each_entry(nh, head, nh_hash) { if (nh->fib_nh_dev == dev && nh->fib_nh_gw4 == gw && !(nh->fib_nh_flags & RTNH_F_DEAD)) { spin_unlock(&fib_info_lock); return 0; } } spin_unlock(&fib_info_lock); return -1; } size_t fib_nlmsg_size(struct fib_info *fi) { size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg)) + nla_total_size(4) /* RTA_TABLE */ + nla_total_size(4) /* RTA_DST */ + nla_total_size(4) /* RTA_PRIORITY */ + nla_total_size(4) /* RTA_PREFSRC */ + nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */ unsigned int nhs = fib_info_num_path(fi); /* space for nested metrics */ payload += nla_total_size((RTAX_MAX * nla_total_size(4))); if (fi->nh) payload += nla_total_size(4); /* RTA_NH_ID */ if (nhs) { size_t nh_encapsize = 0; /* Also handles the special case nhs == 1 */ /* each nexthop is packed in an attribute */ size_t nhsize = nla_total_size(sizeof(struct rtnexthop)); unsigned int i; /* may contain flow and gateway attribute */ nhsize += 2 * nla_total_size(4); /* grab encap info */ for (i = 0; i < fib_info_num_path(fi); i++) { struct fib_nh_common *nhc = fib_info_nhc(fi, i); if (nhc->nhc_lwtstate) { /* RTA_ENCAP_TYPE */ nh_encapsize += lwtunnel_get_encap_size( nhc->nhc_lwtstate); /* RTA_ENCAP */ nh_encapsize += nla_total_size(2); } } /* all nexthops are packed in a nested attribute */ payload += nla_total_size((nhs * nhsize) + nh_encapsize); } return payload; } void rtmsg_fib(int event, __be32 key, struct fib_alias *fa, int dst_len, u32 tb_id, const struct nl_info *info, unsigned int nlm_flags) { struct fib_rt_info fri; struct sk_buff *skb; u32 seq = info->nlh ? 
info->nlh->nlmsg_seq : 0; int err = -ENOBUFS; skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL); if (!skb) goto errout; fri.fi = fa->fa_info; fri.tb_id = tb_id; fri.dst = key; fri.dst_len = dst_len; fri.dscp = fa->fa_dscp; fri.type = fa->fa_type; fri.offload = READ_ONCE(fa->offload); fri.trap = READ_ONCE(fa->trap); fri.offload_failed = READ_ONCE(fa->offload_failed); err = fib_dump_info(skb, info->portid, seq, event, &fri, nlm_flags); if (err < 0) { /* -EMSGSIZE implies BUG in fib_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_IPV4_ROUTE, info->nlh, GFP_KERNEL); return; errout: if (err < 0) rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err); } static int fib_detect_death(struct fib_info *fi, int order, struct fib_info **last_resort, int *last_idx, int dflt) { const struct fib_nh_common *nhc = fib_info_nhc(fi, 0); struct neighbour *n; int state = NUD_NONE; if (likely(nhc->nhc_gw_family == AF_INET)) n = neigh_lookup(&arp_tbl, &nhc->nhc_gw.ipv4, nhc->nhc_dev); else if (nhc->nhc_gw_family == AF_INET6) n = neigh_lookup(ipv6_stub->nd_tbl, &nhc->nhc_gw.ipv6, nhc->nhc_dev); else n = NULL; if (n) { state = READ_ONCE(n->nud_state); neigh_release(n); } else { return 0; } if (state == NUD_REACHABLE) return 0; if ((state & NUD_VALID) && order != dflt) return 0; if ((state & NUD_VALID) || (*last_idx < 0 && order > dflt && state != NUD_INCOMPLETE)) { *last_resort = fi; *last_idx = order; } return 1; } int fib_nh_common_init(struct net *net, struct fib_nh_common *nhc, struct nlattr *encap, u16 encap_type, void *cfg, gfp_t gfp_flags, struct netlink_ext_ack *extack) { int err; nhc->nhc_pcpu_rth_output = alloc_percpu_gfp(struct rtable __rcu *, gfp_flags); if (!nhc->nhc_pcpu_rth_output) return -ENOMEM; if (encap) { struct lwtunnel_state *lwtstate; if (encap_type == LWTUNNEL_ENCAP_NONE) { NL_SET_ERR_MSG(extack, "LWT encap type not specified"); err = -EINVAL; goto lwt_failure; } err = lwtunnel_build_state(net, encap_type, encap, nhc->nhc_family, cfg, &lwtstate, extack); if (err) goto lwt_failure; nhc->nhc_lwtstate = lwtstate_get(lwtstate); } return 0; lwt_failure: rt_fibinfo_free_cpus(nhc->nhc_pcpu_rth_output); nhc->nhc_pcpu_rth_output = NULL; return err; } EXPORT_SYMBOL_GPL(fib_nh_common_init); int fib_nh_init(struct net *net, struct fib_nh *nh, struct fib_config *cfg, int nh_weight, struct netlink_ext_ack *extack) { int err; nh->fib_nh_family = AF_INET; err = fib_nh_common_init(net, &nh->nh_common, cfg->fc_encap, cfg->fc_encap_type, cfg, GFP_KERNEL, extack); if (err) return err; nh->fib_nh_oif = cfg->fc_oif; nh->fib_nh_gw_family = cfg->fc_gw_family; if (cfg->fc_gw_family == AF_INET) nh->fib_nh_gw4 = cfg->fc_gw4; else if (cfg->fc_gw_family == AF_INET6) nh->fib_nh_gw6 = cfg->fc_gw6; nh->fib_nh_flags = cfg->fc_flags; #ifdef CONFIG_IP_ROUTE_CLASSID nh->nh_tclassid = cfg->fc_flow; if (nh->nh_tclassid) atomic_inc(&net->ipv4.fib_num_tclassid_users); #endif #ifdef CONFIG_IP_ROUTE_MULTIPATH nh->fib_nh_weight = nh_weight; #endif return 0; } #ifdef CONFIG_IP_ROUTE_MULTIPATH static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining, struct netlink_ext_ack *extack) { int nhs = 0; while (rtnh_ok(rtnh, remaining)) { nhs++; rtnh = rtnh_next(rtnh, &remaining); } /* leftover implies invalid nexthop configuration, discard it */ if (remaining > 0) { NL_SET_ERR_MSG(extack, "Invalid nexthop configuration - extra data after nexthops"); nhs = 0; } return nhs; } static int fib_gw_from_attr(__be32 *gw, struct nlattr *nla, struct 
netlink_ext_ack *extack) { if (nla_len(nla) < sizeof(*gw)) { NL_SET_ERR_MSG(extack, "Invalid IPv4 address in RTA_GATEWAY"); return -EINVAL; } *gw = nla_get_in_addr(nla); return 0; } /* only called when fib_nh is integrated into fib_info */ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh, int remaining, struct fib_config *cfg, struct netlink_ext_ack *extack) { struct net *net = fi->fib_net; struct fib_config fib_cfg; struct fib_nh *nh; int ret; change_nexthops(fi) { int attrlen; memset(&fib_cfg, 0, sizeof(fib_cfg)); if (!rtnh_ok(rtnh, remaining)) { NL_SET_ERR_MSG(extack, "Invalid nexthop configuration - extra data after nexthop"); return -EINVAL; } if (rtnh->rtnh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)) { NL_SET_ERR_MSG(extack, "Invalid flags for nexthop - can not contain DEAD or LINKDOWN"); return -EINVAL; } fib_cfg.fc_flags = (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags; fib_cfg.fc_oif = rtnh->rtnh_ifindex; attrlen = rtnh_attrlen(rtnh); if (attrlen > 0) { struct nlattr *nla, *nlav, *attrs = rtnh_attrs(rtnh); nla = nla_find(attrs, attrlen, RTA_GATEWAY); nlav = nla_find(attrs, attrlen, RTA_VIA); if (nla && nlav) { NL_SET_ERR_MSG(extack, "Nexthop configuration can not contain both GATEWAY and VIA"); return -EINVAL; } if (nla) { ret = fib_gw_from_attr(&fib_cfg.fc_gw4, nla, extack); if (ret) goto errout; if (fib_cfg.fc_gw4) fib_cfg.fc_gw_family = AF_INET; } else if (nlav) { ret = fib_gw_from_via(&fib_cfg, nlav, extack); if (ret) goto errout; } nla = nla_find(attrs, attrlen, RTA_FLOW); if (nla) { if (nla_len(nla) < sizeof(u32)) { NL_SET_ERR_MSG(extack, "Invalid RTA_FLOW"); return -EINVAL; } fib_cfg.fc_flow = nla_get_u32(nla); } fib_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP); /* RTA_ENCAP_TYPE length checked in * lwtunnel_valid_encap_type_attr */ nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE); if (nla) fib_cfg.fc_encap_type = nla_get_u16(nla); } ret = fib_nh_init(net, nexthop_nh, &fib_cfg, rtnh->rtnh_hops + 1, extack); if (ret) goto errout; rtnh = rtnh_next(rtnh, &remaining); } endfor_nexthops(fi); ret = -EINVAL; nh = fib_info_nh(fi, 0); if (cfg->fc_oif && nh->fib_nh_oif != cfg->fc_oif) { NL_SET_ERR_MSG(extack, "Nexthop device index does not match RTA_OIF"); goto errout; } if (cfg->fc_gw_family) { if (cfg->fc_gw_family != nh->fib_nh_gw_family || (cfg->fc_gw_family == AF_INET && nh->fib_nh_gw4 != cfg->fc_gw4) || (cfg->fc_gw_family == AF_INET6 && ipv6_addr_cmp(&nh->fib_nh_gw6, &cfg->fc_gw6))) { NL_SET_ERR_MSG(extack, "Nexthop gateway does not match RTA_GATEWAY or RTA_VIA"); goto errout; } } #ifdef CONFIG_IP_ROUTE_CLASSID if (cfg->fc_flow && nh->nh_tclassid != cfg->fc_flow) { NL_SET_ERR_MSG(extack, "Nexthop class id does not match RTA_FLOW"); goto errout; } #endif ret = 0; errout: return ret; } /* only called when fib_nh is integrated into fib_info */ static void fib_rebalance(struct fib_info *fi) { int total; int w; if (fib_info_num_path(fi) < 2) return; total = 0; for_nexthops(fi) { if (nh->fib_nh_flags & RTNH_F_DEAD) continue; if (ip_ignore_linkdown(nh->fib_nh_dev) && nh->fib_nh_flags & RTNH_F_LINKDOWN) continue; total += nh->fib_nh_weight; } endfor_nexthops(fi); w = 0; change_nexthops(fi) { int upper_bound; if (nexthop_nh->fib_nh_flags & RTNH_F_DEAD) { upper_bound = -1; } else if (ip_ignore_linkdown(nexthop_nh->fib_nh_dev) && nexthop_nh->fib_nh_flags & RTNH_F_LINKDOWN) { upper_bound = -1; } else { w += nexthop_nh->fib_nh_weight; upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1; } atomic_set(&nexthop_nh->fib_nh_upper_bound, upper_bound); } 
endfor_nexthops(fi); } #else /* CONFIG_IP_ROUTE_MULTIPATH */ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh, int remaining, struct fib_config *cfg, struct netlink_ext_ack *extack) { NL_SET_ERR_MSG(extack, "Multipath support not enabled in kernel"); return -EINVAL; } #define fib_rebalance(fi) do { } while (0) #endif /* CONFIG_IP_ROUTE_MULTIPATH */ static int fib_encap_match(struct net *net, u16 encap_type, struct nlattr *encap, const struct fib_nh *nh, const struct fib_config *cfg, struct netlink_ext_ack *extack) { struct lwtunnel_state *lwtstate; int ret, result = 0; if (encap_type == LWTUNNEL_ENCAP_NONE) return 0; ret = lwtunnel_build_state(net, encap_type, encap, AF_INET, cfg, &lwtstate, extack); if (!ret) { result = lwtunnel_cmp_encap(lwtstate, nh->fib_nh_lws); lwtstate_free(lwtstate); } return result; } int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi, struct netlink_ext_ack *extack) { #ifdef CONFIG_IP_ROUTE_MULTIPATH struct rtnexthop *rtnh; int remaining; #endif if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority) return 1; if (cfg->fc_nh_id) { if (fi->nh && cfg->fc_nh_id == fi->nh->id) return 0; return 1; } if (fi->nh) { if (cfg->fc_oif || cfg->fc_gw_family || cfg->fc_mp) return 1; return 0; } if (cfg->fc_oif || cfg->fc_gw_family) { struct fib_nh *nh; nh = fib_info_nh(fi, 0); if (cfg->fc_encap) { if (fib_encap_match(net, cfg->fc_encap_type, cfg->fc_encap, nh, cfg, extack)) return 1; } #ifdef CONFIG_IP_ROUTE_CLASSID if (cfg->fc_flow && cfg->fc_flow != nh->nh_tclassid) return 1; #endif if ((cfg->fc_oif && cfg->fc_oif != nh->fib_nh_oif) || (cfg->fc_gw_family && cfg->fc_gw_family != nh->fib_nh_gw_family)) return 1; if (cfg->fc_gw_family == AF_INET && cfg->fc_gw4 != nh->fib_nh_gw4) return 1; if (cfg->fc_gw_family == AF_INET6 && ipv6_addr_cmp(&cfg->fc_gw6, &nh->fib_nh_gw6)) return 1; return 0; } #ifdef CONFIG_IP_ROUTE_MULTIPATH if (!cfg->fc_mp) return 0; rtnh = cfg->fc_mp; remaining = cfg->fc_mp_len; for_nexthops(fi) { int attrlen; if (!rtnh_ok(rtnh, remaining)) return -EINVAL; if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->fib_nh_oif) return 1; attrlen = rtnh_attrlen(rtnh); if (attrlen > 0) { struct nlattr *nla, *nlav, *attrs = rtnh_attrs(rtnh); int err; nla = nla_find(attrs, attrlen, RTA_GATEWAY); nlav = nla_find(attrs, attrlen, RTA_VIA); if (nla && nlav) { NL_SET_ERR_MSG(extack, "Nexthop configuration can not contain both GATEWAY and VIA"); return -EINVAL; } if (nla) { __be32 gw; err = fib_gw_from_attr(&gw, nla, extack); if (err) return err; if (nh->fib_nh_gw_family != AF_INET || gw != nh->fib_nh_gw4) return 1; } else if (nlav) { struct fib_config cfg2; err = fib_gw_from_via(&cfg2, nlav, extack); if (err) return err; switch (nh->fib_nh_gw_family) { case AF_INET: if (cfg2.fc_gw_family != AF_INET || cfg2.fc_gw4 != nh->fib_nh_gw4) return 1; break; case AF_INET6: if (cfg2.fc_gw_family != AF_INET6 || ipv6_addr_cmp(&cfg2.fc_gw6, &nh->fib_nh_gw6)) return 1; break; } } #ifdef CONFIG_IP_ROUTE_CLASSID nla = nla_find(attrs, attrlen, RTA_FLOW); if (nla) { if (nla_len(nla) < sizeof(u32)) { NL_SET_ERR_MSG(extack, "Invalid RTA_FLOW"); return -EINVAL; } if (nla_get_u32(nla) != nh->nh_tclassid) return 1; } #endif } rtnh = rtnh_next(rtnh, &remaining); } endfor_nexthops(fi); #endif return 0; } bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi) { struct nlattr *nla; int remaining; if (!cfg->fc_mx) return true; nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) { int type = nla_type(nla); u32 fi_val, val; if (!type) 
continue; if (type > RTAX_MAX) return false; type = array_index_nospec(type, RTAX_MAX + 1); if (type == RTAX_CC_ALGO) { char tmp[TCP_CA_NAME_MAX]; bool ecn_ca = false; nla_strscpy(tmp, nla, sizeof(tmp)); val = tcp_ca_get_key_by_name(fi->fib_net, tmp, &ecn_ca); } else { if (nla_len(nla) != sizeof(u32)) return false; val = nla_get_u32(nla); } fi_val = fi->fib_metrics->metrics[type - 1]; if (type == RTAX_FEATURES) fi_val &= ~DST_FEATURE_ECN_CA; if (fi_val != val) return false; } return true; } static int fib_check_nh_v6_gw(struct net *net, struct fib_nh *nh, u32 table, struct netlink_ext_ack *extack) { struct fib6_config cfg = { .fc_table = table, .fc_flags = nh->fib_nh_flags | RTF_GATEWAY, .fc_ifindex = nh->fib_nh_oif, .fc_gateway = nh->fib_nh_gw6, }; struct fib6_nh fib6_nh = {}; int err; err = ipv6_stub->fib6_nh_init(net, &fib6_nh, &cfg, GFP_KERNEL, extack); if (!err) { nh->fib_nh_dev = fib6_nh.fib_nh_dev; netdev_hold(nh->fib_nh_dev, &nh->fib_nh_dev_tracker, GFP_KERNEL); nh->fib_nh_oif = nh->fib_nh_dev->ifindex; nh->fib_nh_scope = RT_SCOPE_LINK; ipv6_stub->fib6_nh_release(&fib6_nh); } return err; } /* * Picture * ------- * * Semantics of nexthop is very messy by historical reasons. * We have to take into account, that: * a) gateway can be actually local interface address, * so that gatewayed route is direct. * b) gateway must be on-link address, possibly * described not by an ifaddr, but also by a direct route. * c) If both gateway and interface are specified, they should not * contradict. * d) If we use tunnel routes, gateway could be not on-link. * * Attempt to reconcile all of these (alas, self-contradictory) conditions * results in pretty ugly and hairy code with obscure logic. * * I chose to generalized it instead, so that the size * of code does not increase practically, but it becomes * much more general. * Every prefix is assigned a "scope" value: "host" is local address, * "link" is direct route, * [ ... "site" ... "interior" ... ] * and "universe" is true gateway route with global meaning. * * Every prefix refers to a set of "nexthop"s (gw, oif), * where gw must have narrower scope. This recursion stops * when gw has LOCAL scope or if "nexthop" is declared ONLINK, * which means that gw is forced to be on link. * * Code is still hairy, but now it is apparently logically * consistent and very flexible. F.e. as by-product it allows * to co-exists in peace independent exterior and interior * routing processes. * * Normally it looks as following. 
* * {universe prefix} -> (gw, oif) [scope link] * | * |-> {link prefix} -> (gw, oif) [scope local] * | * |-> {local prefix} (terminal node) */ static int fib_check_nh_v4_gw(struct net *net, struct fib_nh *nh, u32 table, u8 scope, struct netlink_ext_ack *extack) { struct net_device *dev; struct fib_result res; int err = 0; if (nh->fib_nh_flags & RTNH_F_ONLINK) { unsigned int addr_type; if (scope >= RT_SCOPE_LINK) { NL_SET_ERR_MSG(extack, "Nexthop has invalid scope"); return -EINVAL; } dev = __dev_get_by_index(net, nh->fib_nh_oif); if (!dev) { NL_SET_ERR_MSG(extack, "Nexthop device required for onlink"); return -ENODEV; } if (!(dev->flags & IFF_UP)) { NL_SET_ERR_MSG(extack, "Nexthop device is not up"); return -ENETDOWN; } addr_type = inet_addr_type_dev_table(net, dev, nh->fib_nh_gw4); if (addr_type != RTN_UNICAST) { NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway"); return -EINVAL; } if (!netif_carrier_ok(dev)) nh->fib_nh_flags |= RTNH_F_LINKDOWN; nh->fib_nh_dev = dev; netdev_hold(dev, &nh->fib_nh_dev_tracker, GFP_ATOMIC); nh->fib_nh_scope = RT_SCOPE_LINK; return 0; } rcu_read_lock(); { struct fib_table *tbl = NULL; struct flowi4 fl4 = { .daddr = nh->fib_nh_gw4, .flowi4_scope = scope + 1, .flowi4_oif = nh->fib_nh_oif, .flowi4_iif = LOOPBACK_IFINDEX, }; /* It is not necessary, but requires a bit of thinking */ if (fl4.flowi4_scope < RT_SCOPE_LINK) fl4.flowi4_scope = RT_SCOPE_LINK; if (table && table != RT_TABLE_MAIN) tbl = fib_get_table(net, table); if (tbl) err = fib_table_lookup(tbl, &fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE | FIB_LOOKUP_NOREF); /* on error or if no table given do full lookup. This * is needed for example when nexthops are in the local * table rather than the given table */ if (!tbl || err) { err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE); } if (err) { NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway"); goto out; } } err = -EINVAL; if (res.type != RTN_UNICAST && res.type != RTN_LOCAL) { NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway"); goto out; } nh->fib_nh_scope = res.scope; nh->fib_nh_oif = FIB_RES_OIF(res); nh->fib_nh_dev = dev = FIB_RES_DEV(res); if (!dev) { NL_SET_ERR_MSG(extack, "No egress device for nexthop gateway"); goto out; } netdev_hold(dev, &nh->fib_nh_dev_tracker, GFP_ATOMIC); if (!netif_carrier_ok(dev)) nh->fib_nh_flags |= RTNH_F_LINKDOWN; err = (dev->flags & IFF_UP) ? 
0 : -ENETDOWN; out: rcu_read_unlock(); return err; } static int fib_check_nh_nongw(struct net *net, struct fib_nh *nh, struct netlink_ext_ack *extack) { struct in_device *in_dev; int err; if (nh->fib_nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK)) { NL_SET_ERR_MSG(extack, "Invalid flags for nexthop - PERVASIVE and ONLINK can not be set"); return -EINVAL; } rcu_read_lock(); err = -ENODEV; in_dev = inetdev_by_index(net, nh->fib_nh_oif); if (!in_dev) goto out; err = -ENETDOWN; if (!(in_dev->dev->flags & IFF_UP)) { NL_SET_ERR_MSG(extack, "Device for nexthop is not up"); goto out; } nh->fib_nh_dev = in_dev->dev; netdev_hold(nh->fib_nh_dev, &nh->fib_nh_dev_tracker, GFP_ATOMIC); nh->fib_nh_scope = RT_SCOPE_HOST; if (!netif_carrier_ok(nh->fib_nh_dev)) nh->fib_nh_flags |= RTNH_F_LINKDOWN; err = 0; out: rcu_read_unlock(); return err; } int fib_check_nh(struct net *net, struct fib_nh *nh, u32 table, u8 scope, struct netlink_ext_ack *extack) { int err; if (nh->fib_nh_gw_family == AF_INET) err = fib_check_nh_v4_gw(net, nh, table, scope, extack); else if (nh->fib_nh_gw_family == AF_INET6) err = fib_check_nh_v6_gw(net, nh, table, extack); else err = fib_check_nh_nongw(net, nh, extack); return err; } static struct hlist_head * fib_info_laddrhash_bucket(const struct net *net, __be32 val) { u32 slot = hash_32(net_hash_mix(net) ^ (__force u32)val, fib_info_hash_bits); return &fib_info_laddrhash[slot]; } static void fib_info_hash_move(struct hlist_head *new_info_hash, struct hlist_head *new_laddrhash, unsigned int new_size) { struct hlist_head *old_info_hash, *old_laddrhash; unsigned int old_size = fib_info_hash_size; unsigned int i; spin_lock_bh(&fib_info_lock); old_info_hash = fib_info_hash; old_laddrhash = fib_info_laddrhash; fib_info_hash_size = new_size; fib_info_hash_bits = ilog2(new_size); for (i = 0; i < old_size; i++) { struct hlist_head *head = &fib_info_hash[i]; struct hlist_node *n; struct fib_info *fi; hlist_for_each_entry_safe(fi, n, head, fib_hash) { struct hlist_head *dest; unsigned int new_hash; new_hash = fib_info_hashfn(fi); dest = &new_info_hash[new_hash]; hlist_add_head(&fi->fib_hash, dest); } } fib_info_hash = new_info_hash; fib_info_laddrhash = new_laddrhash; for (i = 0; i < old_size; i++) { struct hlist_head *lhead = &old_laddrhash[i]; struct hlist_node *n; struct fib_info *fi; hlist_for_each_entry_safe(fi, n, lhead, fib_lhash) { struct hlist_head *ldest; ldest = fib_info_laddrhash_bucket(fi->fib_net, fi->fib_prefsrc); hlist_add_head(&fi->fib_lhash, ldest); } } spin_unlock_bh(&fib_info_lock); kvfree(old_info_hash); kvfree(old_laddrhash); } __be32 fib_info_update_nhc_saddr(struct net *net, struct fib_nh_common *nhc, unsigned char scope) { struct fib_nh *nh; __be32 saddr; if (nhc->nhc_family != AF_INET) return inet_select_addr(nhc->nhc_dev, 0, scope); nh = container_of(nhc, struct fib_nh, nh_common); saddr = inet_select_addr(nh->fib_nh_dev, nh->fib_nh_gw4, scope); WRITE_ONCE(nh->nh_saddr, saddr); WRITE_ONCE(nh->nh_saddr_genid, atomic_read(&net->ipv4.dev_addr_genid)); return saddr; } __be32 fib_result_prefsrc(struct net *net, struct fib_result *res) { struct fib_nh_common *nhc = res->nhc; if (res->fi->fib_prefsrc) return res->fi->fib_prefsrc; if (nhc->nhc_family == AF_INET) { struct fib_nh *nh; nh = container_of(nhc, struct fib_nh, nh_common); if (READ_ONCE(nh->nh_saddr_genid) == atomic_read(&net->ipv4.dev_addr_genid)) return READ_ONCE(nh->nh_saddr); } return fib_info_update_nhc_saddr(net, nhc, res->fi->fib_scope); } static bool fib_valid_prefsrc(struct fib_config *cfg, __be32 fib_prefsrc) 
{ if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst || fib_prefsrc != cfg->fc_dst) { u32 tb_id = cfg->fc_table; int rc; if (tb_id == RT_TABLE_MAIN) tb_id = RT_TABLE_LOCAL; rc = inet_addr_type_table(cfg->fc_nlinfo.nl_net, fib_prefsrc, tb_id); if (rc != RTN_LOCAL && tb_id != RT_TABLE_LOCAL) { rc = inet_addr_type_table(cfg->fc_nlinfo.nl_net, fib_prefsrc, RT_TABLE_LOCAL); } if (rc != RTN_LOCAL) return false; } return true; } struct fib_info *fib_create_info(struct fib_config *cfg, struct netlink_ext_ack *extack) { int err; struct fib_info *fi = NULL; struct nexthop *nh = NULL; struct fib_info *ofi; int nhs = 1; struct net *net = cfg->fc_nlinfo.nl_net; if (cfg->fc_type > RTN_MAX) goto err_inval; /* Fast check to catch the most weird cases */ if (fib_props[cfg->fc_type].scope > cfg->fc_scope) { NL_SET_ERR_MSG(extack, "Invalid scope"); goto err_inval; } if (cfg->fc_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)) { NL_SET_ERR_MSG(extack, "Invalid rtm_flags - can not contain DEAD or LINKDOWN"); goto err_inval; } if (cfg->fc_nh_id) { if (!cfg->fc_mx) { fi = fib_find_info_nh(net, cfg); if (fi) { refcount_inc(&fi->fib_treeref); return fi; } } nh = nexthop_find_by_id(net, cfg->fc_nh_id); if (!nh) { NL_SET_ERR_MSG(extack, "Nexthop id does not exist"); goto err_inval; } nhs = 0; } #ifdef CONFIG_IP_ROUTE_MULTIPATH if (cfg->fc_mp) { nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len, extack); if (nhs == 0) goto err_inval; } #endif err = -ENOBUFS; /* Paired with WRITE_ONCE() in fib_release_info() */ if (READ_ONCE(fib_info_cnt) >= fib_info_hash_size) { unsigned int new_size = fib_info_hash_size << 1; struct hlist_head *new_info_hash; struct hlist_head *new_laddrhash; size_t bytes; if (!new_size) new_size = 16; bytes = (size_t)new_size * sizeof(struct hlist_head *); new_info_hash = kvzalloc(bytes, GFP_KERNEL); new_laddrhash = kvzalloc(bytes, GFP_KERNEL); if (!new_info_hash || !new_laddrhash) { kvfree(new_info_hash); kvfree(new_laddrhash); } else { fib_info_hash_move(new_info_hash, new_laddrhash, new_size); } if (!fib_info_hash_size) goto failure; } fi = kzalloc(struct_size(fi, fib_nh, nhs), GFP_KERNEL); if (!fi) goto failure; fi->fib_metrics = ip_fib_metrics_init(fi->fib_net, cfg->fc_mx, cfg->fc_mx_len, extack); if (IS_ERR(fi->fib_metrics)) { err = PTR_ERR(fi->fib_metrics); kfree(fi); return ERR_PTR(err); } fi->fib_net = net; fi->fib_protocol = cfg->fc_protocol; fi->fib_scope = cfg->fc_scope; fi->fib_flags = cfg->fc_flags; fi->fib_priority = cfg->fc_priority; fi->fib_prefsrc = cfg->fc_prefsrc; fi->fib_type = cfg->fc_type; fi->fib_tb_id = cfg->fc_table; fi->fib_nhs = nhs; if (nh) { if (!nexthop_get(nh)) { NL_SET_ERR_MSG(extack, "Nexthop has been deleted"); err = -EINVAL; } else { err = 0; fi->nh = nh; } } else { change_nexthops(fi) { nexthop_nh->nh_parent = fi; } endfor_nexthops(fi) if (cfg->fc_mp) err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg, extack); else err = fib_nh_init(net, fi->fib_nh, cfg, 1, extack); } if (err != 0) goto failure; if (fib_props[cfg->fc_type].error) { if (cfg->fc_gw_family || cfg->fc_oif || cfg->fc_mp) { NL_SET_ERR_MSG(extack, "Gateway, device and multipath can not be specified for this route type"); goto err_inval; } goto link_it; } else { switch (cfg->fc_type) { case RTN_UNICAST: case RTN_LOCAL: case RTN_BROADCAST: case RTN_ANYCAST: case RTN_MULTICAST: break; default: NL_SET_ERR_MSG(extack, "Invalid route type"); goto err_inval; } } if (cfg->fc_scope > RT_SCOPE_HOST) { NL_SET_ERR_MSG(extack, "Invalid scope"); goto err_inval; } if (fi->nh) { err = fib_check_nexthop(fi->nh, cfg->fc_scope, 
extack); if (err) goto failure; } else if (cfg->fc_scope == RT_SCOPE_HOST) { struct fib_nh *nh = fi->fib_nh; /* Local address is added. */ if (nhs != 1) { NL_SET_ERR_MSG(extack, "Route with host scope can not have multiple nexthops"); goto err_inval; } if (nh->fib_nh_gw_family) { NL_SET_ERR_MSG(extack, "Route with host scope can not have a gateway"); goto err_inval; } nh->fib_nh_scope = RT_SCOPE_NOWHERE; nh->fib_nh_dev = dev_get_by_index(net, nh->fib_nh_oif); err = -ENODEV; if (!nh->fib_nh_dev) goto failure; netdev_tracker_alloc(nh->fib_nh_dev, &nh->fib_nh_dev_tracker, GFP_KERNEL); } else { int linkdown = 0; change_nexthops(fi) { err = fib_check_nh(cfg->fc_nlinfo.nl_net, nexthop_nh, cfg->fc_table, cfg->fc_scope, extack); if (err != 0) goto failure; if (nexthop_nh->fib_nh_flags & RTNH_F_LINKDOWN) linkdown++; } endfor_nexthops(fi) if (linkdown == fi->fib_nhs) fi->fib_flags |= RTNH_F_LINKDOWN; } if (fi->fib_prefsrc && !fib_valid_prefsrc(cfg, fi->fib_prefsrc)) { NL_SET_ERR_MSG(extack, "Invalid prefsrc address"); goto err_inval; } if (!fi->nh) { change_nexthops(fi) { fib_info_update_nhc_saddr(net, &nexthop_nh->nh_common, fi->fib_scope); if (nexthop_nh->fib_nh_gw_family == AF_INET6) fi->fib_nh_is_v6 = true; } endfor_nexthops(fi) fib_rebalance(fi); } link_it: ofi = fib_find_info(fi); if (ofi) { /* fib_table_lookup() should not see @fi yet. */ fi->fib_dead = 1; free_fib_info(fi); refcount_inc(&ofi->fib_treeref); return ofi; } refcount_set(&fi->fib_treeref, 1); refcount_set(&fi->fib_clntref, 1); spin_lock_bh(&fib_info_lock); fib_info_cnt++; hlist_add_head(&fi->fib_hash, &fib_info_hash[fib_info_hashfn(fi)]); if (fi->fib_prefsrc) { struct hlist_head *head; head = fib_info_laddrhash_bucket(net, fi->fib_prefsrc); hlist_add_head(&fi->fib_lhash, head); } if (fi->nh) { list_add(&fi->nh_list, &nh->fi_list); } else { change_nexthops(fi) { struct hlist_head *head; if (!nexthop_nh->fib_nh_dev) continue; head = fib_info_devhash_bucket(nexthop_nh->fib_nh_dev); hlist_add_head(&nexthop_nh->nh_hash, head); } endfor_nexthops(fi) } spin_unlock_bh(&fib_info_lock); return fi; err_inval: err = -EINVAL; failure: if (fi) { /* fib_table_lookup() should not see @fi yet. 
*/ fi->fib_dead = 1; free_fib_info(fi); } return ERR_PTR(err); } int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nhc, u8 rt_family, unsigned char *flags, bool skip_oif) { if (nhc->nhc_flags & RTNH_F_DEAD) *flags |= RTNH_F_DEAD; if (nhc->nhc_flags & RTNH_F_LINKDOWN) { *flags |= RTNH_F_LINKDOWN; rcu_read_lock(); switch (nhc->nhc_family) { case AF_INET: if (ip_ignore_linkdown(nhc->nhc_dev)) *flags |= RTNH_F_DEAD; break; case AF_INET6: if (ip6_ignore_linkdown(nhc->nhc_dev)) *flags |= RTNH_F_DEAD; break; } rcu_read_unlock(); } switch (nhc->nhc_gw_family) { case AF_INET: if (nla_put_in_addr(skb, RTA_GATEWAY, nhc->nhc_gw.ipv4)) goto nla_put_failure; break; case AF_INET6: /* if gateway family does not match nexthop family * gateway is encoded as RTA_VIA */ if (rt_family != nhc->nhc_gw_family) { int alen = sizeof(struct in6_addr); struct nlattr *nla; struct rtvia *via; nla = nla_reserve(skb, RTA_VIA, alen + 2); if (!nla) goto nla_put_failure; via = nla_data(nla); via->rtvia_family = AF_INET6; memcpy(via->rtvia_addr, &nhc->nhc_gw.ipv6, alen); } else if (nla_put_in6_addr(skb, RTA_GATEWAY, &nhc->nhc_gw.ipv6) < 0) { goto nla_put_failure; } break; } *flags |= (nhc->nhc_flags & (RTNH_F_ONLINK | RTNH_F_OFFLOAD | RTNH_F_TRAP)); if (!skip_oif && nhc->nhc_dev && nla_put_u32(skb, RTA_OIF, nhc->nhc_dev->ifindex)) goto nla_put_failure; if (nhc->nhc_lwtstate && lwtunnel_fill_encap(skb, nhc->nhc_lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; } EXPORT_SYMBOL_GPL(fib_nexthop_info); #if IS_ENABLED(CONFIG_IP_ROUTE_MULTIPATH) || IS_ENABLED(CONFIG_IPV6) int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc, int nh_weight, u8 rt_family, u32 nh_tclassid) { const struct net_device *dev = nhc->nhc_dev; struct rtnexthop *rtnh; unsigned char flags = 0; rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh)); if (!rtnh) goto nla_put_failure; rtnh->rtnh_hops = nh_weight - 1; rtnh->rtnh_ifindex = dev ? 
dev->ifindex : 0; if (fib_nexthop_info(skb, nhc, rt_family, &flags, true) < 0) goto nla_put_failure; rtnh->rtnh_flags = flags; if (nh_tclassid && nla_put_u32(skb, RTA_FLOW, nh_tclassid)) goto nla_put_failure; /* length of rtnetlink header + attributes */ rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh; return 0; nla_put_failure: return -EMSGSIZE; } EXPORT_SYMBOL_GPL(fib_add_nexthop); #endif #ifdef CONFIG_IP_ROUTE_MULTIPATH static int fib_add_multipath(struct sk_buff *skb, struct fib_info *fi) { struct nlattr *mp; mp = nla_nest_start_noflag(skb, RTA_MULTIPATH); if (!mp) goto nla_put_failure; if (unlikely(fi->nh)) { if (nexthop_mpath_fill_node(skb, fi->nh, AF_INET) < 0) goto nla_put_failure; goto mp_end; } for_nexthops(fi) { u32 nh_tclassid = 0; #ifdef CONFIG_IP_ROUTE_CLASSID nh_tclassid = nh->nh_tclassid; #endif if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight, AF_INET, nh_tclassid) < 0) goto nla_put_failure; } endfor_nexthops(fi); mp_end: nla_nest_end(skb, mp); return 0; nla_put_failure: return -EMSGSIZE; } #else static int fib_add_multipath(struct sk_buff *skb, struct fib_info *fi) { return 0; } #endif int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event, const struct fib_rt_info *fri, unsigned int flags) { unsigned int nhs = fib_info_num_path(fri->fi); struct fib_info *fi = fri->fi; u32 tb_id = fri->tb_id; struct nlmsghdr *nlh; struct rtmsg *rtm; nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags); if (!nlh) return -EMSGSIZE; rtm = nlmsg_data(nlh); rtm->rtm_family = AF_INET; rtm->rtm_dst_len = fri->dst_len; rtm->rtm_src_len = 0; rtm->rtm_tos = inet_dscp_to_dsfield(fri->dscp); if (tb_id < 256) rtm->rtm_table = tb_id; else rtm->rtm_table = RT_TABLE_COMPAT; if (nla_put_u32(skb, RTA_TABLE, tb_id)) goto nla_put_failure; rtm->rtm_type = fri->type; rtm->rtm_flags = fi->fib_flags; rtm->rtm_scope = fi->fib_scope; rtm->rtm_protocol = fi->fib_protocol; if (rtm->rtm_dst_len && nla_put_in_addr(skb, RTA_DST, fri->dst)) goto nla_put_failure; if (fi->fib_priority && nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority)) goto nla_put_failure; if (rtnetlink_put_metrics(skb, fi->fib_metrics->metrics) < 0) goto nla_put_failure; if (fi->fib_prefsrc && nla_put_in_addr(skb, RTA_PREFSRC, fi->fib_prefsrc)) goto nla_put_failure; if (fi->nh) { if (nla_put_u32(skb, RTA_NH_ID, fi->nh->id)) goto nla_put_failure; if (nexthop_is_blackhole(fi->nh)) rtm->rtm_type = RTN_BLACKHOLE; if (!READ_ONCE(fi->fib_net->ipv4.sysctl_nexthop_compat_mode)) goto offload; } if (nhs == 1) { const struct fib_nh_common *nhc = fib_info_nhc(fi, 0); unsigned char flags = 0; if (fib_nexthop_info(skb, nhc, AF_INET, &flags, false) < 0) goto nla_put_failure; rtm->rtm_flags = flags; #ifdef CONFIG_IP_ROUTE_CLASSID if (nhc->nhc_family == AF_INET) { struct fib_nh *nh; nh = container_of(nhc, struct fib_nh, nh_common); if (nh->nh_tclassid && nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid)) goto nla_put_failure; } #endif } else { if (fib_add_multipath(skb, fi) < 0) goto nla_put_failure; } offload: if (fri->offload) rtm->rtm_flags |= RTM_F_OFFLOAD; if (fri->trap) rtm->rtm_flags |= RTM_F_TRAP; if (fri->offload_failed) rtm->rtm_flags |= RTM_F_OFFLOAD_FAILED; nlmsg_end(skb, nlh); return 0; nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } /* * Update FIB if: * - local address disappeared -> we must delete all the entries * referring to it. * - device went down -> we must shutdown all nexthops going via it. */ int fib_sync_down_addr(struct net_device *dev, __be32 local) { int tb_id = l3mdev_fib_table(dev) ? 
: RT_TABLE_MAIN; struct net *net = dev_net(dev); struct hlist_head *head; struct fib_info *fi; int ret = 0; if (!fib_info_laddrhash || local == 0) return 0; head = fib_info_laddrhash_bucket(net, local); hlist_for_each_entry(fi, head, fib_lhash) { if (!net_eq(fi->fib_net, net) || fi->fib_tb_id != tb_id) continue; if (fi->fib_prefsrc == local) { fi->fib_flags |= RTNH_F_DEAD; fi->pfsrc_removed = true; ret++; } } return ret; } static int call_fib_nh_notifiers(struct fib_nh *nh, enum fib_event_type event_type) { bool ignore_link_down = ip_ignore_linkdown(nh->fib_nh_dev); struct fib_nh_notifier_info info = { .fib_nh = nh, }; switch (event_type) { case FIB_EVENT_NH_ADD: if (nh->fib_nh_flags & RTNH_F_DEAD) break; if (ignore_link_down && nh->fib_nh_flags & RTNH_F_LINKDOWN) break; return call_fib4_notifiers(dev_net(nh->fib_nh_dev), event_type, &info.info); case FIB_EVENT_NH_DEL: if ((ignore_link_down && nh->fib_nh_flags & RTNH_F_LINKDOWN) || (nh->fib_nh_flags & RTNH_F_DEAD)) return call_fib4_notifiers(dev_net(nh->fib_nh_dev), event_type, &info.info); break; default: break; } return NOTIFY_DONE; } /* Update the PMTU of exceptions when: * - the new MTU of the first hop becomes smaller than the PMTU * - the old MTU was the same as the PMTU, and it limited discovery of * larger MTUs on the path. With that limit raised, we can now * discover larger MTUs * A special case is locked exceptions, for which the PMTU is smaller * than the minimal accepted PMTU: * - if the new MTU is greater than the PMTU, don't make any change * - otherwise, unlock and set PMTU */ void fib_nhc_update_mtu(struct fib_nh_common *nhc, u32 new, u32 orig) { struct fnhe_hash_bucket *bucket; int i; bucket = rcu_dereference_protected(nhc->nhc_exceptions, 1); if (!bucket) return; for (i = 0; i < FNHE_HASH_SIZE; i++) { struct fib_nh_exception *fnhe; for (fnhe = rcu_dereference_protected(bucket[i].chain, 1); fnhe; fnhe = rcu_dereference_protected(fnhe->fnhe_next, 1)) { if (fnhe->fnhe_mtu_locked) { if (new <= fnhe->fnhe_pmtu) { fnhe->fnhe_pmtu = new; fnhe->fnhe_mtu_locked = false; } } else if (new < fnhe->fnhe_pmtu || orig == fnhe->fnhe_pmtu) { fnhe->fnhe_pmtu = new; } } } } void fib_sync_mtu(struct net_device *dev, u32 orig_mtu) { struct hlist_head *head = fib_info_devhash_bucket(dev); struct fib_nh *nh; hlist_for_each_entry(nh, head, nh_hash) { if (nh->fib_nh_dev == dev) fib_nhc_update_mtu(&nh->nh_common, dev->mtu, orig_mtu); } } /* Event force Flags Description * NETDEV_CHANGE 0 LINKDOWN Carrier OFF, not for scope host * NETDEV_DOWN 0 LINKDOWN|DEAD Link down, not for scope host * NETDEV_DOWN 1 LINKDOWN|DEAD Last address removed * NETDEV_UNREGISTER 1 LINKDOWN|DEAD Device removed * * only used when fib_nh is built into fib_info */ int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force) { struct hlist_head *head = fib_info_devhash_bucket(dev); struct fib_info *prev_fi = NULL; int scope = RT_SCOPE_NOWHERE; struct fib_nh *nh; int ret = 0; if (force) scope = -1; hlist_for_each_entry(nh, head, nh_hash) { struct fib_info *fi = nh->nh_parent; int dead; BUG_ON(!fi->fib_nhs); if (nh->fib_nh_dev != dev || fi == prev_fi) continue; prev_fi = fi; dead = 0; change_nexthops(fi) { if (nexthop_nh->fib_nh_flags & RTNH_F_DEAD) dead++; else if (nexthop_nh->fib_nh_dev == dev && nexthop_nh->fib_nh_scope != scope) { switch (event) { case NETDEV_DOWN: case NETDEV_UNREGISTER: nexthop_nh->fib_nh_flags |= RTNH_F_DEAD; fallthrough; case NETDEV_CHANGE: nexthop_nh->fib_nh_flags |= RTNH_F_LINKDOWN; break; } call_fib_nh_notifiers(nexthop_nh, 
FIB_EVENT_NH_DEL); dead++; } #ifdef CONFIG_IP_ROUTE_MULTIPATH if (event == NETDEV_UNREGISTER && nexthop_nh->fib_nh_dev == dev) { dead = fi->fib_nhs; break; } #endif } endfor_nexthops(fi) if (dead == fi->fib_nhs) { switch (event) { case NETDEV_DOWN: case NETDEV_UNREGISTER: fi->fib_flags |= RTNH_F_DEAD; fallthrough; case NETDEV_CHANGE: fi->fib_flags |= RTNH_F_LINKDOWN; break; } ret++; } fib_rebalance(fi); } return ret; } /* Must be invoked inside of an RCU protected region. */ static void fib_select_default(const struct flowi4 *flp, struct fib_result *res) { struct fib_info *fi = NULL, *last_resort = NULL; struct hlist_head *fa_head = res->fa_head; struct fib_table *tb = res->table; u8 slen = 32 - res->prefixlen; int order = -1, last_idx = -1; struct fib_alias *fa, *fa1 = NULL; u32 last_prio = res->fi->fib_priority; dscp_t last_dscp = 0; hlist_for_each_entry_rcu(fa, fa_head, fa_list) { struct fib_info *next_fi = fa->fa_info; struct fib_nh_common *nhc; if (fa->fa_slen != slen) continue; if (fa->fa_dscp && fa->fa_dscp != inet_dsfield_to_dscp(flp->flowi4_tos)) continue; if (fa->tb_id != tb->tb_id) continue; if (next_fi->fib_priority > last_prio && fa->fa_dscp == last_dscp) { if (last_dscp) continue; break; } if (next_fi->fib_flags & RTNH_F_DEAD) continue; last_dscp = fa->fa_dscp; last_prio = next_fi->fib_priority; if (next_fi->fib_scope != res->scope || fa->fa_type != RTN_UNICAST) continue; nhc = fib_info_nhc(next_fi, 0); if (!nhc->nhc_gw_family || nhc->nhc_scope != RT_SCOPE_LINK) continue; fib_alias_accessed(fa); if (!fi) { if (next_fi != res->fi) break; fa1 = fa; } else if (!fib_detect_death(fi, order, &last_resort, &last_idx, fa1->fa_default)) { fib_result_assign(res, fi); fa1->fa_default = order; goto out; } fi = next_fi; order++; } if (order <= 0 || !fi) { if (fa1) fa1->fa_default = -1; goto out; } if (!fib_detect_death(fi, order, &last_resort, &last_idx, fa1->fa_default)) { fib_result_assign(res, fi); fa1->fa_default = order; goto out; } if (last_idx >= 0) fib_result_assign(res, last_resort); fa1->fa_default = last_idx; out: return; } /* * Dead device goes up. We wake up dead nexthops. * It takes sense only on multipath routes. 
* * only used when fib_nh is built into fib_info */ int fib_sync_up(struct net_device *dev, unsigned char nh_flags) { struct fib_info *prev_fi; struct hlist_head *head; struct fib_nh *nh; int ret; if (!(dev->flags & IFF_UP)) return 0; if (nh_flags & RTNH_F_DEAD) { unsigned int flags = dev_get_flags(dev); if (flags & (IFF_RUNNING | IFF_LOWER_UP)) nh_flags |= RTNH_F_LINKDOWN; } prev_fi = NULL; head = fib_info_devhash_bucket(dev); ret = 0; hlist_for_each_entry(nh, head, nh_hash) { struct fib_info *fi = nh->nh_parent; int alive; BUG_ON(!fi->fib_nhs); if (nh->fib_nh_dev != dev || fi == prev_fi) continue; prev_fi = fi; alive = 0; change_nexthops(fi) { if (!(nexthop_nh->fib_nh_flags & nh_flags)) { alive++; continue; } if (!nexthop_nh->fib_nh_dev || !(nexthop_nh->fib_nh_dev->flags & IFF_UP)) continue; if (nexthop_nh->fib_nh_dev != dev || !__in_dev_get_rtnl(dev)) continue; alive++; nexthop_nh->fib_nh_flags &= ~nh_flags; call_fib_nh_notifiers(nexthop_nh, FIB_EVENT_NH_ADD); } endfor_nexthops(fi) if (alive > 0) { fi->fib_flags &= ~nh_flags; ret++; } fib_rebalance(fi); } return ret; } #ifdef CONFIG_IP_ROUTE_MULTIPATH static bool fib_good_nh(const struct fib_nh *nh) { int state = NUD_REACHABLE; if (nh->fib_nh_scope == RT_SCOPE_LINK) { struct neighbour *n; rcu_read_lock(); if (likely(nh->fib_nh_gw_family == AF_INET)) n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev, (__force u32)nh->fib_nh_gw4); else if (nh->fib_nh_gw_family == AF_INET6) n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6); else n = NULL; if (n) state = READ_ONCE(n->nud_state); rcu_read_unlock(); } return !!(state & NUD_VALID); } void fib_select_multipath(struct fib_result *res, int hash) { struct fib_info *fi = res->fi; struct net *net = fi->fib_net; bool first = false; if (unlikely(res->fi->nh)) { nexthop_path_fib_result(res, hash); return; } change_nexthops(fi) { if (READ_ONCE(net->ipv4.sysctl_fib_multipath_use_neigh)) { if (!fib_good_nh(nexthop_nh)) continue; if (!first) { res->nh_sel = nhsel; res->nhc = &nexthop_nh->nh_common; first = true; } } if (hash > atomic_read(&nexthop_nh->fib_nh_upper_bound)) continue; res->nh_sel = nhsel; res->nhc = &nexthop_nh->nh_common; return; } endfor_nexthops(fi); } #endif void fib_select_path(struct net *net, struct fib_result *res, struct flowi4 *fl4, const struct sk_buff *skb) { if (fl4->flowi4_oif) goto check_saddr; #ifdef CONFIG_IP_ROUTE_MULTIPATH if (fib_info_num_path(res->fi) > 1) { int h = fib_multipath_hash(net, fl4, skb, NULL); fib_select_multipath(res, h); } else #endif if (!res->prefixlen && res->table->tb_num_default > 1 && res->type == RTN_UNICAST) fib_select_default(fl4, res); check_saddr: if (!fl4->saddr) fl4->saddr = fib_result_prefsrc(net, res); }
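/*
 * Hedged sketch, not part of fib_semantics.c: a small user-space model of
 * the multipath threshold math used by fib_rebalance() above. For each
 * usable nexthop the kernel stores
 * DIV_ROUND_CLOSEST_ULL((u64)cumulative_weight << 31, total_weight) - 1
 * in fib_nh_upper_bound (and -1 for dead/linkdown nexthops), and
 * fib_select_multipath() picks the first nexthop whose bound is >= the
 * 31-bit flow hash. The weights below are illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t weight[] = { 1, 2, 1 };	/* example nexthop weights */
	const size_t n = sizeof(weight) / sizeof(weight[0]);
	uint64_t total = 0, w = 0;
	size_t i;

	for (i = 0; i < n; i++)
		total += weight[i];

	for (i = 0; i < n; i++) {
		w += weight[i];
		/* same rounding as DIV_ROUND_CLOSEST_ULL(w << 31, total) - 1 */
		long long upper = (long long)(((w << 31) + total / 2) / total) - 1;

		printf("nexthop %zu: upper_bound = %lld\n", i, upper);
	}
	return 0;	/* prints 536870911, 1610612735, 2147483647 for 1:2:1 */
}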
// SPDX-License-Identifier: GPL-2.0-or-later /* * sch_plug.c Queue traffic until an explicit release command * * There are two ways to use this qdisc: * 1. A simple "instantaneous" plug/unplug operation, by issuing an alternating * sequence of TCQ_PLUG_BUFFER & TCQ_PLUG_RELEASE_INDEFINITE commands. * * 2. For network output buffering (a.k.a. output commit) functionality. * The output commit property is commonly used by applications using checkpoint * based fault-tolerance to ensure that the checkpoint from which a system * is being restored is consistent w.r.t. the outside world. * * Consider, e.g., Remus - a Virtual Machine checkpointing system, * wherein a VM is checkpointed, say every 50ms. The checkpoint is replicated * asynchronously to the backup host, while the VM continues executing the * next epoch speculatively. * * The following is a typical sequence of output buffer operations: * 1. At epoch i, start_buffer(i) * 2. At end of epoch i (i.e. after 50ms): * 2.1 Stop VM and take checkpoint(i). * 2.2 start_buffer(i+1) and resume VM * 3. While speculatively executing epoch(i+1), asynchronously replicate * checkpoint(i) to the backup host. * 4. When checkpoint_ack(i) is received from the backup, release_buffer(i) * Thus, this Qdisc would receive the following sequence of commands: * TCQ_PLUG_BUFFER (epoch i) * .. TCQ_PLUG_BUFFER (epoch i+1) * ....TCQ_PLUG_RELEASE_ONE (epoch i) * ......TCQ_PLUG_BUFFER (epoch i+2) * ........ */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <net/pkt_sched.h> /* * State of the queue, when used for network output buffering: * * plug(i+1) plug(i) head * ------------------+--------------------+----------------> * | | * | | * pkts_current_epoch| pkts_last_epoch |pkts_to_release * ----------------->|<--------+--------->|+---------------> * v v * */ struct plug_sched_data { /* If true, the dequeue function releases all packets * from head to end of the queue. The queue turns into * a pass-through queue for newly arriving packets. */ bool unplug_indefinite; bool throttled; /* Queue limit in bytes */ u32 limit; /* Number of packets (output) from the current speculatively * executing epoch. */ u32 pkts_current_epoch; /* Number of packets corresponding to the recently finished * epoch. These will be released when we receive a * TCQ_PLUG_RELEASE_ONE command. This command is typically * issued after committing a checkpoint at the target. */ u32 pkts_last_epoch; /* * Number of packets from the head of the queue that can * be released (committed checkpoint).
*/ u32 pkts_to_release; }; static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct plug_sched_data *q = qdisc_priv(sch); if (likely(sch->qstats.backlog + skb->len <= q->limit)) { if (!q->unplug_indefinite) q->pkts_current_epoch++; return qdisc_enqueue_tail(skb, sch); } return qdisc_drop(skb, sch, to_free); } static struct sk_buff *plug_dequeue(struct Qdisc *sch) { struct plug_sched_data *q = qdisc_priv(sch); if (q->throttled) return NULL; if (!q->unplug_indefinite) { if (!q->pkts_to_release) { /* No more packets to dequeue. Block the queue * and wait for the next release command. */ q->throttled = true; return NULL; } q->pkts_to_release--; } return qdisc_dequeue_head(sch); } static int plug_init(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { struct plug_sched_data *q = qdisc_priv(sch); q->pkts_current_epoch = 0; q->pkts_last_epoch = 0; q->pkts_to_release = 0; q->unplug_indefinite = false; if (opt == NULL) { q->limit = qdisc_dev(sch)->tx_queue_len * psched_mtu(qdisc_dev(sch)); } else { struct tc_plug_qopt *ctl = nla_data(opt); if (nla_len(opt) < sizeof(*ctl)) return -EINVAL; q->limit = ctl->limit; } q->throttled = true; return 0; } /* Receives 4 types of messages: * TCQ_PLUG_BUFFER: Inset a plug into the queue and * buffer any incoming packets * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head * to beginning of the next plug. * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue. * Stop buffering packets until the next TCQ_PLUG_BUFFER * command is received (just act as a pass-thru queue). * TCQ_PLUG_LIMIT: Increase/decrease queue size */ static int plug_change(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { struct plug_sched_data *q = qdisc_priv(sch); struct tc_plug_qopt *msg; msg = nla_data(opt); if (nla_len(opt) < sizeof(*msg)) return -EINVAL; switch (msg->action) { case TCQ_PLUG_BUFFER: /* Save size of the current buffer */ q->pkts_last_epoch = q->pkts_current_epoch; q->pkts_current_epoch = 0; if (q->unplug_indefinite) q->throttled = true; q->unplug_indefinite = false; break; case TCQ_PLUG_RELEASE_ONE: /* Add packets from the last complete buffer to the * packets to be released set. */ q->pkts_to_release += q->pkts_last_epoch; q->pkts_last_epoch = 0; q->throttled = false; netif_schedule_queue(sch->dev_queue); break; case TCQ_PLUG_RELEASE_INDEFINITE: q->unplug_indefinite = true; q->pkts_to_release = 0; q->pkts_last_epoch = 0; q->pkts_current_epoch = 0; q->throttled = false; netif_schedule_queue(sch->dev_queue); break; case TCQ_PLUG_LIMIT: /* Limit is supplied in bytes */ q->limit = msg->limit; break; default: return -EINVAL; } return 0; } static struct Qdisc_ops plug_qdisc_ops __read_mostly = { .id = "plug", .priv_size = sizeof(struct plug_sched_data), .enqueue = plug_enqueue, .dequeue = plug_dequeue, .peek = qdisc_peek_dequeued, .init = plug_init, .change = plug_change, .reset = qdisc_reset_queue, .owner = THIS_MODULE, }; MODULE_ALIAS_NET_SCH("plug"); static int __init plug_module_init(void) { return register_qdisc(&plug_qdisc_ops); } static void __exit plug_module_exit(void) { unregister_qdisc(&plug_qdisc_ops); } module_init(plug_module_init) module_exit(plug_module_exit) MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Qdisc to plug and unplug traffic via netlink control");
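/*
 * Illustrative sketch (not part of sch_plug.c above): a minimal userspace
 * model of the plug qdisc's epoch bookkeeping, assuming the command sequence
 * described in the header comment (one TCQ_PLUG_BUFFER per epoch, then
 * TCQ_PLUG_RELEASE_ONE once the matching checkpoint is acknowledged).  The
 * struct and function names below are hypothetical.
 */
#include <stdio.h>

struct plug_model {
	unsigned int pkts_current_epoch;
	unsigned int pkts_last_epoch;
	unsigned int pkts_to_release;
};

/* A packet enqueued while the current epoch is being buffered. */
static void model_enqueue(struct plug_model *m)
{
	m->pkts_current_epoch++;
}

/* TCQ_PLUG_BUFFER: start a new epoch; the previous one becomes "last". */
static void model_plug_buffer(struct plug_model *m)
{
	m->pkts_last_epoch = m->pkts_current_epoch;
	m->pkts_current_epoch = 0;
}

/* TCQ_PLUG_RELEASE_ONE: the last complete epoch may now leave the queue. */
static void model_release_one(struct plug_model *m)
{
	m->pkts_to_release += m->pkts_last_epoch;
	m->pkts_last_epoch = 0;
}

int main(void)
{
	struct plug_model m = { 0, 0, 0 };
	int i;

	for (i = 0; i < 3; i++)		/* epoch i: 3 packets buffered */
		model_enqueue(&m);
	model_plug_buffer(&m);		/* checkpoint(i) taken, epoch i+1 starts */
	for (i = 0; i < 2; i++)		/* epoch i+1: 2 packets buffered */
		model_enqueue(&m);
	model_release_one(&m);		/* checkpoint_ack(i): release epoch i */

	printf("to_release=%u last=%u current=%u\n",
	       m.pkts_to_release, m.pkts_last_epoch, m.pkts_current_epoch);
	return 0;
}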
// SPDX-License-Identifier: GPL-2.0 /* * Block rq-qos policy for assigning an I/O priority class to requests. * * Using an rq-qos policy for assigning the I/O priority class has two advantages * over using the ioprio_set() system call: * * - This policy is cgroup based so it has all the advantages of cgroups. * - While ioprio_set() does not affect page cache writeback I/O, this rq-qos * controller affects page cache writeback I/O for filesystems that support * associating a cgroup with writeback I/O. See also * Documentation/admin-guide/cgroup-v2.rst. */ #include <linux/blk-mq.h> #include <linux/blk_types.h> #include <linux/kernel.h> #include <linux/module.h> #include "blk-cgroup.h" #include "blk-ioprio.h" #include "blk-rq-qos.h" /** * enum prio_policy - I/O priority class policy. * @POLICY_NO_CHANGE: (default) do not modify the I/O priority class. * @POLICY_PROMOTE_TO_RT: modify requests that are not IOPRIO_CLASS_RT into * IOPRIO_CLASS_RT. * @POLICY_RESTRICT_TO_BE: modify IOPRIO_CLASS_NONE and IOPRIO_CLASS_RT into * IOPRIO_CLASS_BE. * @POLICY_ALL_TO_IDLE: change the I/O priority class into IOPRIO_CLASS_IDLE. * @POLICY_NONE_TO_RT: an alias for POLICY_PROMOTE_TO_RT. * * See also <linux/ioprio.h>. */ enum prio_policy { POLICY_NO_CHANGE = 0, POLICY_PROMOTE_TO_RT = 1, POLICY_RESTRICT_TO_BE = 2, POLICY_ALL_TO_IDLE = 3, POLICY_NONE_TO_RT = 4, }; static const char *policy_name[] = { [POLICY_NO_CHANGE] = "no-change", [POLICY_PROMOTE_TO_RT] = "promote-to-rt", [POLICY_RESTRICT_TO_BE] = "restrict-to-be", [POLICY_ALL_TO_IDLE] = "idle", [POLICY_NONE_TO_RT] = "none-to-rt", }; static struct blkcg_policy ioprio_policy; /** * struct ioprio_blkg - Per (cgroup, request queue) data. * @pd: blkg_policy_data structure. */ struct ioprio_blkg { struct blkg_policy_data pd; }; /** * struct ioprio_blkcg - Per cgroup data. * @cpd: blkcg_policy_data structure. * @prio_policy: One of the IOPRIO_CLASS_* values. See also <linux/ioprio.h>. */ struct ioprio_blkcg { struct blkcg_policy_data cpd; enum prio_policy prio_policy; }; static inline struct ioprio_blkg *pd_to_ioprio(struct blkg_policy_data *pd) { return pd ?
container_of(pd, struct ioprio_blkg, pd) : NULL; } static struct ioprio_blkcg *blkcg_to_ioprio_blkcg(struct blkcg *blkcg) { return container_of(blkcg_to_cpd(blkcg, &ioprio_policy), struct ioprio_blkcg, cpd); } static struct ioprio_blkcg * ioprio_blkcg_from_css(struct cgroup_subsys_state *css) { return blkcg_to_ioprio_blkcg(css_to_blkcg(css)); } static struct ioprio_blkcg *ioprio_blkcg_from_bio(struct bio *bio) { struct blkg_policy_data *pd = blkg_to_pd(bio->bi_blkg, &ioprio_policy); if (!pd) return NULL; return blkcg_to_ioprio_blkcg(pd->blkg->blkcg); } static int ioprio_show_prio_policy(struct seq_file *sf, void *v) { struct ioprio_blkcg *blkcg = ioprio_blkcg_from_css(seq_css(sf)); seq_printf(sf, "%s\n", policy_name[blkcg->prio_policy]); return 0; } static ssize_t ioprio_set_prio_policy(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { struct ioprio_blkcg *blkcg = ioprio_blkcg_from_css(of_css(of)); int ret; if (off != 0) return -EIO; /* kernfs_fop_write_iter() terminates 'buf' with '\0'. */ ret = sysfs_match_string(policy_name, buf); if (ret < 0) return ret; blkcg->prio_policy = ret; return nbytes; } static struct blkg_policy_data * ioprio_alloc_pd(struct gendisk *disk, struct blkcg *blkcg, gfp_t gfp) { struct ioprio_blkg *ioprio_blkg; ioprio_blkg = kzalloc(sizeof(*ioprio_blkg), gfp); if (!ioprio_blkg) return NULL; return &ioprio_blkg->pd; } static void ioprio_free_pd(struct blkg_policy_data *pd) { struct ioprio_blkg *ioprio_blkg = pd_to_ioprio(pd); kfree(ioprio_blkg); } static struct blkcg_policy_data *ioprio_alloc_cpd(gfp_t gfp) { struct ioprio_blkcg *blkcg; blkcg = kzalloc(sizeof(*blkcg), gfp); if (!blkcg) return NULL; blkcg->prio_policy = POLICY_NO_CHANGE; return &blkcg->cpd; } static void ioprio_free_cpd(struct blkcg_policy_data *cpd) { struct ioprio_blkcg *blkcg = container_of(cpd, typeof(*blkcg), cpd); kfree(blkcg); } #define IOPRIO_ATTRS \ { \ .name = "prio.class", \ .seq_show = ioprio_show_prio_policy, \ .write = ioprio_set_prio_policy, \ }, \ { } /* sentinel */ /* cgroup v2 attributes */ static struct cftype ioprio_files[] = { IOPRIO_ATTRS }; /* cgroup v1 attributes */ static struct cftype ioprio_legacy_files[] = { IOPRIO_ATTRS }; static struct blkcg_policy ioprio_policy = { .dfl_cftypes = ioprio_files, .legacy_cftypes = ioprio_legacy_files, .cpd_alloc_fn = ioprio_alloc_cpd, .cpd_free_fn = ioprio_free_cpd, .pd_alloc_fn = ioprio_alloc_pd, .pd_free_fn = ioprio_free_pd, }; void blkcg_set_ioprio(struct bio *bio) { struct ioprio_blkcg *blkcg = ioprio_blkcg_from_bio(bio); u16 prio; if (!blkcg || blkcg->prio_policy == POLICY_NO_CHANGE) return; if (blkcg->prio_policy == POLICY_PROMOTE_TO_RT || blkcg->prio_policy == POLICY_NONE_TO_RT) { /* * For RT threads, the default priority level is 4 because * task_nice is 0. By promoting non-RT io-priority to RT-class * and default level 4, those requests that are already * RT-class but need a higher io-priority can use ioprio_set() * to achieve this. */ if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) != IOPRIO_CLASS_RT) bio->bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 4); return; } /* * Except for IOPRIO_CLASS_NONE, higher I/O priority numbers * correspond to a lower priority. Hence, the max_t() below selects * the lower priority of bi_ioprio and the cgroup I/O priority class. * If the bio I/O priority equals IOPRIO_CLASS_NONE, the cgroup I/O * priority is assigned to the bio. 
*/ prio = max_t(u16, bio->bi_ioprio, IOPRIO_PRIO_VALUE(blkcg->prio_policy, 0)); if (prio > bio->bi_ioprio) bio->bi_ioprio = prio; } void blk_ioprio_exit(struct gendisk *disk) { blkcg_deactivate_policy(disk, &ioprio_policy); } int blk_ioprio_init(struct gendisk *disk) { return blkcg_activate_policy(disk, &ioprio_policy); } static int __init ioprio_init(void) { return blkcg_policy_register(&ioprio_policy); } static void __exit ioprio_exit(void) { blkcg_policy_unregister(&ioprio_policy); } module_init(ioprio_init); module_exit(ioprio_exit);
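/*
 * Illustrative sketch (not part of blk-ioprio.c above): why the max_t() in
 * blkcg_set_ioprio() picks the more restrictive I/O priority.  The class is
 * stored in the upper bits of the priority value, so comparing whole values
 * compares classes first.  The macros below are local stand-ins modeled on
 * <linux/ioprio.h> (class shift of 13, NONE=0 < RT=1 < BE=2 < IDLE=3); the
 * EX_* names and ex_apply_policy() are made up for this standalone example.
 */
#include <stdio.h>

#define EX_IOPRIO_CLASS_SHIFT	13
#define EX_IOPRIO_PRIO_VALUE(class, data) \
	(((class) << EX_IOPRIO_CLASS_SHIFT) | (data))
#define EX_IOPRIO_PRIO_CLASS(ioprio)	((ioprio) >> EX_IOPRIO_CLASS_SHIFT)

enum { EX_CLASS_NONE, EX_CLASS_RT, EX_CLASS_BE, EX_CLASS_IDLE };

/* Combine a bio priority with a cgroup policy the way the rq-qos policy does. */
static unsigned short ex_apply_policy(unsigned short bio_prio, int policy_class)
{
	unsigned short policy_prio = EX_IOPRIO_PRIO_VALUE(policy_class, 0);

	/* Higher numbers mean lower priority, so take the maximum. */
	return bio_prio > policy_prio ? bio_prio : policy_prio;
}

int main(void)
{
	/* A best-effort level-2 bio under an "idle" cgroup becomes idle... */
	unsigned short be2 = EX_IOPRIO_PRIO_VALUE(EX_CLASS_BE, 2);
	/* ...while an already idle bio is not promoted by "restrict-to-be". */
	unsigned short idle = EX_IOPRIO_PRIO_VALUE(EX_CLASS_IDLE, 0);

	printf("BE,2 + idle policy    -> class %d\n",
	       EX_IOPRIO_PRIO_CLASS(ex_apply_policy(be2, EX_CLASS_IDLE)));
	printf("IDLE + restrict-to-be -> class %d\n",
	       EX_IOPRIO_PRIO_CLASS(ex_apply_policy(idle, EX_CLASS_BE)));
	return 0;
}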
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk> */ #include <linux/mm.h> #include <linux/swap.h> #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/uio.h> #include <linux/iocontext.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/mempool.h> #include <linux/workqueue.h> #include <linux/cgroup.h> #include <linux/highmem.h> #include <linux/blk-crypto.h> #include <linux/xarray.h> #include <trace/events/block.h> #include "blk.h" #include "blk-rq-qos.h" #include "blk-cgroup.h" #define ALLOC_CACHE_THRESHOLD 16 #define ALLOC_CACHE_MAX 256 struct bio_alloc_cache { struct bio *free_list; struct bio *free_list_irq; unsigned int nr; unsigned int nr_irq; }; static struct biovec_slab { int nr_vecs; char *name; struct kmem_cache *slab; } bvec_slabs[] __read_mostly = { { .nr_vecs = 16, .name = "biovec-16" }, { .nr_vecs = 64, .name = "biovec-64" }, { .nr_vecs = 128, .name = "biovec-128" }, { .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" }, }; static struct biovec_slab *biovec_slab(unsigned short nr_vecs) { switch (nr_vecs) { /* smaller bios use inline vecs */ case 5 ... 16: return &bvec_slabs[0]; case 17 ... 64: return &bvec_slabs[1]; case 65 ... 128: return &bvec_slabs[2]; case 129 ... BIO_MAX_VECS: return &bvec_slabs[3]; default: BUG(); return NULL; } } /* * fs_bio_set is the bio_set containing bio and iovec memory pools used by * IO code that does not need private memory pools.
*/ struct bio_set fs_bio_set; EXPORT_SYMBOL(fs_bio_set); /* * Our slab pool management */ struct bio_slab { struct kmem_cache *slab; unsigned int slab_ref; unsigned int slab_size; char name[8]; }; static DEFINE_MUTEX(bio_slab_lock); static DEFINE_XARRAY(bio_slabs); static struct bio_slab *create_bio_slab(unsigned int size) { struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL); if (!bslab) return NULL; snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size); bslab->slab = kmem_cache_create(bslab->name, size, ARCH_KMALLOC_MINALIGN, SLAB_HWCACHE_ALIGN | SLAB_TYPESAFE_BY_RCU, NULL); if (!bslab->slab) goto fail_alloc_slab; bslab->slab_ref = 1; bslab->slab_size = size; if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL))) return bslab; kmem_cache_destroy(bslab->slab); fail_alloc_slab: kfree(bslab); return NULL; } static inline unsigned int bs_bio_slab_size(struct bio_set *bs) { return bs->front_pad + sizeof(struct bio) + bs->back_pad; } static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs) { unsigned int size = bs_bio_slab_size(bs); struct bio_slab *bslab; mutex_lock(&bio_slab_lock); bslab = xa_load(&bio_slabs, size); if (bslab) bslab->slab_ref++; else bslab = create_bio_slab(size); mutex_unlock(&bio_slab_lock); if (bslab) return bslab->slab; return NULL; } static void bio_put_slab(struct bio_set *bs) { struct bio_slab *bslab = NULL; unsigned int slab_size = bs_bio_slab_size(bs); mutex_lock(&bio_slab_lock); bslab = xa_load(&bio_slabs, slab_size); if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n")) goto out; WARN_ON_ONCE(bslab->slab != bs->bio_slab); WARN_ON(!bslab->slab_ref); if (--bslab->slab_ref) goto out; xa_erase(&bio_slabs, slab_size); kmem_cache_destroy(bslab->slab); kfree(bslab); out: mutex_unlock(&bio_slab_lock); } void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs) { BUG_ON(nr_vecs > BIO_MAX_VECS); if (nr_vecs == BIO_MAX_VECS) mempool_free(bv, pool); else if (nr_vecs > BIO_INLINE_VECS) kmem_cache_free(biovec_slab(nr_vecs)->slab, bv); } /* * Make the first allocation restricted and don't dump info on allocation * failures, since we'll fall back to the mempool in case of failure. */ static inline gfp_t bvec_alloc_gfp(gfp_t gfp) { return (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN; } struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs, gfp_t gfp_mask) { struct biovec_slab *bvs = biovec_slab(*nr_vecs); if (WARN_ON_ONCE(!bvs)) return NULL; /* * Upgrade the nr_vecs request to take full advantage of the allocation. * We also rely on this in the bvec_free path. */ *nr_vecs = bvs->nr_vecs; /* * Try a slab allocation first for all smaller allocations. If that * fails and __GFP_DIRECT_RECLAIM is set retry with the mempool. * The mempool is sized to handle up to BIO_MAX_VECS entries. 
*/ if (*nr_vecs < BIO_MAX_VECS) { struct bio_vec *bvl; bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask)); if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM)) return bvl; *nr_vecs = BIO_MAX_VECS; } return mempool_alloc(pool, gfp_mask); } void bio_uninit(struct bio *bio) { #ifdef CONFIG_BLK_CGROUP if (bio->bi_blkg) { blkg_put(bio->bi_blkg); bio->bi_blkg = NULL; } #endif if (bio_integrity(bio)) bio_integrity_free(bio); bio_crypt_free_ctx(bio); } EXPORT_SYMBOL(bio_uninit); static void bio_free(struct bio *bio) { struct bio_set *bs = bio->bi_pool; void *p = bio; WARN_ON_ONCE(!bs); bio_uninit(bio); bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs); mempool_free(p - bs->front_pad, &bs->bio_pool); } /* * Users of this function have their own bio allocation. Subsequently, * they must remember to pair any call to bio_init() with bio_uninit() * when IO has completed, or when the bio is released. */ void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table, unsigned short max_vecs, blk_opf_t opf) { bio->bi_next = NULL; bio->bi_bdev = bdev; bio->bi_opf = opf; bio->bi_flags = 0; bio->bi_ioprio = 0; bio->bi_write_hint = 0; bio->bi_status = 0; bio->bi_iter.bi_sector = 0; bio->bi_iter.bi_size = 0; bio->bi_iter.bi_idx = 0; bio->bi_iter.bi_bvec_done = 0; bio->bi_end_io = NULL; bio->bi_private = NULL; #ifdef CONFIG_BLK_CGROUP bio->bi_blkg = NULL; bio->bi_issue.value = 0; if (bdev) bio_associate_blkg(bio); #ifdef CONFIG_BLK_CGROUP_IOCOST bio->bi_iocost_cost = 0; #endif #endif #ifdef CONFIG_BLK_INLINE_ENCRYPTION bio->bi_crypt_context = NULL; #endif #ifdef CONFIG_BLK_DEV_INTEGRITY bio->bi_integrity = NULL; #endif bio->bi_vcnt = 0; atomic_set(&bio->__bi_remaining, 1); atomic_set(&bio->__bi_cnt, 1); bio->bi_cookie = BLK_QC_T_NONE; bio->bi_max_vecs = max_vecs; bio->bi_io_vec = table; bio->bi_pool = NULL; } EXPORT_SYMBOL(bio_init); /** * bio_reset - reinitialize a bio * @bio: bio to reset * @bdev: block device to use the bio for * @opf: operation and flags for bio * * Description: * After calling bio_reset(), @bio will be in the same state as a freshly * allocated bio returned bio bio_alloc_bioset() - the only fields that are * preserved are the ones that are initialized by bio_alloc_bioset(). See * comment in struct bio. */ void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf) { bio_uninit(bio); memset(bio, 0, BIO_RESET_BYTES); atomic_set(&bio->__bi_remaining, 1); bio->bi_bdev = bdev; if (bio->bi_bdev) bio_associate_blkg(bio); bio->bi_opf = opf; } EXPORT_SYMBOL(bio_reset); static struct bio *__bio_chain_endio(struct bio *bio) { struct bio *parent = bio->bi_private; if (bio->bi_status && !parent->bi_status) parent->bi_status = bio->bi_status; bio_put(bio); return parent; } static void bio_chain_endio(struct bio *bio) { bio_endio(__bio_chain_endio(bio)); } /** * bio_chain - chain bio completions * @bio: the target bio * @parent: the parent bio of @bio * * The caller won't have a bi_end_io called when @bio completes - instead, * @parent's bi_end_io won't be called until both @parent and @bio have * completed; the chained bio will also be freed when it completes. * * The caller must not set bi_private or bi_end_io in @bio. 
*/ void bio_chain(struct bio *bio, struct bio *parent) { BUG_ON(bio->bi_private || bio->bi_end_io); bio->bi_private = parent; bio->bi_end_io = bio_chain_endio; bio_inc_remaining(parent); } EXPORT_SYMBOL(bio_chain); struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev, unsigned int nr_pages, blk_opf_t opf, gfp_t gfp) { struct bio *new = bio_alloc(bdev, nr_pages, opf, gfp); if (bio) { bio_chain(bio, new); submit_bio(bio); } return new; } EXPORT_SYMBOL_GPL(blk_next_bio); static void bio_alloc_rescue(struct work_struct *work) { struct bio_set *bs = container_of(work, struct bio_set, rescue_work); struct bio *bio; while (1) { spin_lock(&bs->rescue_lock); bio = bio_list_pop(&bs->rescue_list); spin_unlock(&bs->rescue_lock); if (!bio) break; submit_bio_noacct(bio); } } static void punt_bios_to_rescuer(struct bio_set *bs) { struct bio_list punt, nopunt; struct bio *bio; if (WARN_ON_ONCE(!bs->rescue_workqueue)) return; /* * In order to guarantee forward progress we must punt only bios that * were allocated from this bio_set; otherwise, if there was a bio on * there for a stacking driver higher up in the stack, processing it * could require allocating bios from this bio_set, and doing that from * our own rescuer would be bad. * * Since bio lists are singly linked, pop them all instead of trying to * remove from the middle of the list: */ bio_list_init(&punt); bio_list_init(&nopunt); while ((bio = bio_list_pop(&current->bio_list[0]))) bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); current->bio_list[0] = nopunt; bio_list_init(&nopunt); while ((bio = bio_list_pop(&current->bio_list[1]))) bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); current->bio_list[1] = nopunt; spin_lock(&bs->rescue_lock); bio_list_merge(&bs->rescue_list, &punt); spin_unlock(&bs->rescue_lock); queue_work(bs->rescue_workqueue, &bs->rescue_work); } static void bio_alloc_irq_cache_splice(struct bio_alloc_cache *cache) { unsigned long flags; /* cache->free_list must be empty */ if (WARN_ON_ONCE(cache->free_list)) return; local_irq_save(flags); cache->free_list = cache->free_list_irq; cache->free_list_irq = NULL; cache->nr += cache->nr_irq; cache->nr_irq = 0; local_irq_restore(flags); } static struct bio *bio_alloc_percpu_cache(struct block_device *bdev, unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp, struct bio_set *bs) { struct bio_alloc_cache *cache; struct bio *bio; cache = per_cpu_ptr(bs->cache, get_cpu()); if (!cache->free_list) { if (READ_ONCE(cache->nr_irq) >= ALLOC_CACHE_THRESHOLD) bio_alloc_irq_cache_splice(cache); if (!cache->free_list) { put_cpu(); return NULL; } } bio = cache->free_list; cache->free_list = bio->bi_next; cache->nr--; put_cpu(); bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs, opf); bio->bi_pool = bs; return bio; } /** * bio_alloc_bioset - allocate a bio for I/O * @bdev: block device to allocate the bio for (can be %NULL) * @nr_vecs: number of bvecs to pre-allocate * @opf: operation and flags for bio * @gfp_mask: the GFP_* mask given to the slab allocator * @bs: the bio_set to allocate from. * * Allocate a bio from the mempools in @bs. * * If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to * allocate a bio. This is due to the mempool guarantees. To make this work, * callers must never allocate more than 1 bio at a time from the general pool. * Callers that need to allocate more than 1 bio must always submit the * previously allocated bio for IO before attempting to allocate a new one. 
* Failure to do so can cause deadlocks under memory pressure. * * Note that when running under submit_bio_noacct() (i.e. any block driver), * bios are not submitted until after you return - see the code in * submit_bio_noacct() that converts recursion into iteration, to prevent * stack overflows. * * This would normally mean allocating multiple bios under submit_bio_noacct() * would be susceptible to deadlocks, but we have * deadlock avoidance code that resubmits any blocked bios from a rescuer * thread. * * However, we do not guarantee forward progress for allocations from other * mempools. Doing multiple allocations from the same mempool under * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad * for per bio allocations. * * Returns: Pointer to new bio on success, NULL on failure. */ struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask, struct bio_set *bs) { gfp_t saved_gfp = gfp_mask; struct bio *bio; void *p; /* should not use nobvec bioset for nr_vecs > 0 */ if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0)) return NULL; if (opf & REQ_ALLOC_CACHE) { if (bs->cache && nr_vecs <= BIO_INLINE_VECS) { bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf, gfp_mask, bs); if (bio) return bio; /* * No cached bio available, bio returned below marked with * REQ_ALLOC_CACHE to particpate in per-cpu alloc cache. */ } else { opf &= ~REQ_ALLOC_CACHE; } } /* * submit_bio_noacct() converts recursion to iteration; this means if * we're running beneath it, any bios we allocate and submit will not be * submitted (and thus freed) until after we return. * * This exposes us to a potential deadlock if we allocate multiple bios * from the same bio_set() while running underneath submit_bio_noacct(). * If we were to allocate multiple bios (say a stacking block driver * that was splitting bios), we would deadlock if we exhausted the * mempool's reserve. * * We solve this, and guarantee forward progress, with a rescuer * workqueue per bio_set. If we go to allocate and there are bios on * current->bio_list, we first try the allocation without * __GFP_DIRECT_RECLAIM; if that fails, we punt those bios we would be * blocking to the rescuer workqueue before we retry with the original * gfp_flags. */ if (current->bio_list && (!bio_list_empty(&current->bio_list[0]) || !bio_list_empty(&current->bio_list[1])) && bs->rescue_workqueue) gfp_mask &= ~__GFP_DIRECT_RECLAIM; p = mempool_alloc(&bs->bio_pool, gfp_mask); if (!p && gfp_mask != saved_gfp) { punt_bios_to_rescuer(bs); gfp_mask = saved_gfp; p = mempool_alloc(&bs->bio_pool, gfp_mask); } if (unlikely(!p)) return NULL; if (!mempool_is_saturated(&bs->bio_pool)) opf &= ~REQ_ALLOC_CACHE; bio = p + bs->front_pad; if (nr_vecs > BIO_INLINE_VECS) { struct bio_vec *bvl = NULL; bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask); if (!bvl && gfp_mask != saved_gfp) { punt_bios_to_rescuer(bs); gfp_mask = saved_gfp; bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask); } if (unlikely(!bvl)) goto err_free; bio_init(bio, bdev, bvl, nr_vecs, opf); } else if (nr_vecs) { bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf); } else { bio_init(bio, bdev, NULL, 0, opf); } bio->bi_pool = bs; return bio; err_free: mempool_free(p, &bs->bio_pool); return NULL; } EXPORT_SYMBOL(bio_alloc_bioset); /** * bio_kmalloc - kmalloc a bio * @nr_vecs: number of bio_vecs to allocate * @gfp_mask: the GFP_* mask given to the slab allocator * * Use kmalloc to allocate a bio (including bvecs). 
The bio must be initialized * using bio_init() before use. To free a bio returned from this function use * kfree() after calling bio_uninit(). A bio returned from this function can * be reused by calling bio_uninit() before calling bio_init() again. * * Note that unlike bio_alloc() or bio_alloc_bioset() allocations from this * function are not backed by a mempool can fail. Do not use this function * for allocations in the file system I/O path. * * Returns: Pointer to new bio on success, NULL on failure. */ struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask) { struct bio *bio; if (nr_vecs > UIO_MAXIOV) return NULL; return kmalloc(struct_size(bio, bi_inline_vecs, nr_vecs), gfp_mask); } EXPORT_SYMBOL(bio_kmalloc); void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start) { struct bio_vec bv; struct bvec_iter iter; __bio_for_each_segment(bv, bio, iter, start) memzero_bvec(&bv); } EXPORT_SYMBOL(zero_fill_bio_iter); /** * bio_truncate - truncate the bio to small size of @new_size * @bio: the bio to be truncated * @new_size: new size for truncating the bio * * Description: * Truncate the bio to new size of @new_size. If bio_op(bio) is * REQ_OP_READ, zero the truncated part. This function should only * be used for handling corner cases, such as bio eod. */ static void bio_truncate(struct bio *bio, unsigned new_size) { struct bio_vec bv; struct bvec_iter iter; unsigned int done = 0; bool truncated = false; if (new_size >= bio->bi_iter.bi_size) return; if (bio_op(bio) != REQ_OP_READ) goto exit; bio_for_each_segment(bv, bio, iter) { if (done + bv.bv_len > new_size) { unsigned offset; if (!truncated) offset = new_size - done; else offset = 0; zero_user(bv.bv_page, bv.bv_offset + offset, bv.bv_len - offset); truncated = true; } done += bv.bv_len; } exit: /* * Don't touch bvec table here and make it really immutable, since * fs bio user has to retrieve all pages via bio_for_each_segment_all * in its .end_bio() callback. * * It is enough to truncate bio by updating .bi_size since we can make * correct bvec with the updated .bi_size for drivers. */ bio->bi_iter.bi_size = new_size; } /** * guard_bio_eod - truncate a BIO to fit the block device * @bio: bio to truncate * * This allows us to do IO even on the odd last sectors of a device, even if the * block size is some multiple of the physical sector size. * * We'll just truncate the bio to the size of the device, and clear the end of * the buffer head manually. Truly out-of-range accesses will turn into actual * I/O errors, this only handles the "we need to be able to do I/O at the final * sector" case. */ void guard_bio_eod(struct bio *bio) { sector_t maxsector = bdev_nr_sectors(bio->bi_bdev); if (!maxsector) return; /* * If the *whole* IO is past the end of the device, * let it through, and the IO layer will turn it into * an EIO. 
*/ if (unlikely(bio->bi_iter.bi_sector >= maxsector)) return; maxsector -= bio->bi_iter.bi_sector; if (likely((bio->bi_iter.bi_size >> 9) <= maxsector)) return; bio_truncate(bio, maxsector << 9); } static int __bio_alloc_cache_prune(struct bio_alloc_cache *cache, unsigned int nr) { unsigned int i = 0; struct bio *bio; while ((bio = cache->free_list) != NULL) { cache->free_list = bio->bi_next; cache->nr--; bio_free(bio); if (++i == nr) break; } return i; } static void bio_alloc_cache_prune(struct bio_alloc_cache *cache, unsigned int nr) { nr -= __bio_alloc_cache_prune(cache, nr); if (!READ_ONCE(cache->free_list)) { bio_alloc_irq_cache_splice(cache); __bio_alloc_cache_prune(cache, nr); } } static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node) { struct bio_set *bs; bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead); if (bs->cache) { struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu); bio_alloc_cache_prune(cache, -1U); } return 0; } static void bio_alloc_cache_destroy(struct bio_set *bs) { int cpu; if (!bs->cache) return; cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead); for_each_possible_cpu(cpu) { struct bio_alloc_cache *cache; cache = per_cpu_ptr(bs->cache, cpu); bio_alloc_cache_prune(cache, -1U); } free_percpu(bs->cache); bs->cache = NULL; } static inline void bio_put_percpu_cache(struct bio *bio) { struct bio_alloc_cache *cache; cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu()); if (READ_ONCE(cache->nr_irq) + cache->nr > ALLOC_CACHE_MAX) goto out_free; if (in_task()) { bio_uninit(bio); bio->bi_next = cache->free_list; /* Not necessary but helps not to iopoll already freed bios */ bio->bi_bdev = NULL; cache->free_list = bio; cache->nr++; } else if (in_hardirq()) { lockdep_assert_irqs_disabled(); bio_uninit(bio); bio->bi_next = cache->free_list_irq; cache->free_list_irq = bio; cache->nr_irq++; } else { goto out_free; } put_cpu(); return; out_free: put_cpu(); bio_free(bio); } /** * bio_put - release a reference to a bio * @bio: bio to release reference to * * Description: * Put a reference to a &struct bio, either one you have gotten with * bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it. **/ void bio_put(struct bio *bio) { if (unlikely(bio_flagged(bio, BIO_REFFED))) { BUG_ON(!atomic_read(&bio->__bi_cnt)); if (!atomic_dec_and_test(&bio->__bi_cnt)) return; } if (bio->bi_opf & REQ_ALLOC_CACHE) bio_put_percpu_cache(bio); else bio_free(bio); } EXPORT_SYMBOL(bio_put); static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp) { bio_set_flag(bio, BIO_CLONED); bio->bi_ioprio = bio_src->bi_ioprio; bio->bi_write_hint = bio_src->bi_write_hint; bio->bi_iter = bio_src->bi_iter; if (bio->bi_bdev) { if (bio->bi_bdev == bio_src->bi_bdev && bio_flagged(bio_src, BIO_REMAPPED)) bio_set_flag(bio, BIO_REMAPPED); bio_clone_blkg_association(bio, bio_src); } if (bio_crypt_clone(bio, bio_src, gfp) < 0) return -ENOMEM; if (bio_integrity(bio_src) && bio_integrity_clone(bio, bio_src, gfp) < 0) return -ENOMEM; return 0; } /** * bio_alloc_clone - clone a bio that shares the original bio's biovec * @bdev: block_device to clone onto * @bio_src: bio to clone from * @gfp: allocation priority * @bs: bio_set to allocate from * * Allocate a new bio that is a clone of @bio_src. The caller owns the returned * bio, but not the actual data it points to. * * The caller must ensure that the return bio is not freed before @bio_src. 
*/ struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src, gfp_t gfp, struct bio_set *bs) { struct bio *bio; bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs); if (!bio) return NULL; if (__bio_clone(bio, bio_src, gfp) < 0) { bio_put(bio); return NULL; } bio->bi_io_vec = bio_src->bi_io_vec; return bio; } EXPORT_SYMBOL(bio_alloc_clone); /** * bio_init_clone - clone a bio that shares the original bio's biovec * @bdev: block_device to clone onto * @bio: bio to clone into * @bio_src: bio to clone from * @gfp: allocation priority * * Initialize a new bio in caller provided memory that is a clone of @bio_src. * The caller owns the returned bio, but not the actual data it points to. * * The caller must ensure that @bio_src is not freed before @bio. */ int bio_init_clone(struct block_device *bdev, struct bio *bio, struct bio *bio_src, gfp_t gfp) { int ret; bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf); ret = __bio_clone(bio, bio_src, gfp); if (ret) bio_uninit(bio); return ret; } EXPORT_SYMBOL(bio_init_clone); /** * bio_full - check if the bio is full * @bio: bio to check * @len: length of one segment to be added * * Return true if @bio is full and one segment with @len bytes can't be * added to the bio, otherwise return false */ static inline bool bio_full(struct bio *bio, unsigned len) { if (bio->bi_vcnt >= bio->bi_max_vecs) return true; if (bio->bi_iter.bi_size > UINT_MAX - len) return true; return false; } static bool bvec_try_merge_page(struct bio_vec *bv, struct page *page, unsigned int len, unsigned int off, bool *same_page) { size_t bv_end = bv->bv_offset + bv->bv_len; phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1; phys_addr_t page_addr = page_to_phys(page); if (vec_end_addr + 1 != page_addr + off) return false; if (xen_domain() && !xen_biovec_phys_mergeable(bv, page)) return false; if (!zone_device_pages_have_same_pgmap(bv->bv_page, page)) return false; *same_page = ((vec_end_addr & PAGE_MASK) == page_addr); if (!*same_page) { if (IS_ENABLED(CONFIG_KMSAN)) return false; if (bv->bv_page + bv_end / PAGE_SIZE != page + off / PAGE_SIZE) return false; } bv->bv_len += len; return true; } /* * Try to merge a page into a segment, while obeying the hardware segment * size limit. This is not for normal read/write bios, but for passthrough * or Zone Append operations that we can't split. */ bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv, struct page *page, unsigned len, unsigned offset, bool *same_page) { unsigned long mask = queue_segment_boundary(q); phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset; phys_addr_t addr2 = page_to_phys(page) + offset + len - 1; if ((addr1 | mask) != (addr2 | mask)) return false; if (len > queue_max_segment_size(q) - bv->bv_len) return false; return bvec_try_merge_page(bv, page, len, offset, same_page); } /** * bio_add_hw_page - attempt to add a page to a bio with hw constraints * @q: the target queue * @bio: destination bio * @page: page to add * @len: vec entry length * @offset: vec entry offset * @max_sectors: maximum number of sectors that can be added * @same_page: return if the segment has been merged inside the same page * * Add a page to a bio while respecting the hardware max_sectors, max_segment * and gap limitations. 
*/ int bio_add_hw_page(struct request_queue *q, struct bio *bio, struct page *page, unsigned int len, unsigned int offset, unsigned int max_sectors, bool *same_page) { unsigned int max_size = max_sectors << SECTOR_SHIFT; if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) return 0; len = min3(len, max_size, queue_max_segment_size(q)); if (len > max_size - bio->bi_iter.bi_size) return 0; if (bio->bi_vcnt > 0) { struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; if (bvec_try_merge_hw_page(q, bv, page, len, offset, same_page)) { bio->bi_iter.bi_size += len; return len; } if (bio->bi_vcnt >= min(bio->bi_max_vecs, queue_max_segments(q))) return 0; /* * If the queue doesn't support SG gaps and adding this segment * would create a gap, disallow it. */ if (bvec_gap_to_prev(&q->limits, bv, offset)) return 0; } bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, offset); bio->bi_vcnt++; bio->bi_iter.bi_size += len; return len; } /** * bio_add_pc_page - attempt to add page to passthrough bio * @q: the target queue * @bio: destination bio * @page: page to add * @len: vec entry length * @offset: vec entry offset * * Attempt to add a page to the bio_vec maplist. This can fail for a * number of reasons, such as the bio being full or target block device * limitations. The target block device must allow bio's up to PAGE_SIZE, * so it is always possible to add a single page to an empty bio. * * This should only be used by passthrough bios. */ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page, unsigned int len, unsigned int offset) { bool same_page = false; return bio_add_hw_page(q, bio, page, len, offset, queue_max_hw_sectors(q), &same_page); } EXPORT_SYMBOL(bio_add_pc_page); /** * bio_add_zone_append_page - attempt to add page to zone-append bio * @bio: destination bio * @page: page to add * @len: vec entry length * @offset: vec entry offset * * Attempt to add a page to the bio_vec maplist of a bio that will be submitted * for a zone-append request. This can fail for a number of reasons, such as the * bio being full or the target block device is not a zoned block device or * other limitations of the target block device. The target block device must * allow bio's up to PAGE_SIZE, so it is always possible to add a single page * to an empty bio. * * Returns: number of bytes added to the bio, or 0 in case of a failure. */ int bio_add_zone_append_page(struct bio *bio, struct page *page, unsigned int len, unsigned int offset) { struct request_queue *q = bdev_get_queue(bio->bi_bdev); bool same_page = false; if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND)) return 0; if (WARN_ON_ONCE(!bdev_is_zoned(bio->bi_bdev))) return 0; return bio_add_hw_page(q, bio, page, len, offset, queue_max_zone_append_sectors(q), &same_page); } EXPORT_SYMBOL_GPL(bio_add_zone_append_page); /** * __bio_add_page - add page(s) to a bio in a new segment * @bio: destination bio * @page: start page to add * @len: length of the data to add, may cross pages * @off: offset of the data relative to @page, may cross pages * * Add the data at @page + @off to @bio as a new bvec. The caller must ensure * that @bio has space for another bvec. 
*/ void __bio_add_page(struct bio *bio, struct page *page, unsigned int len, unsigned int off) { WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); WARN_ON_ONCE(bio_full(bio, len)); bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, off); bio->bi_iter.bi_size += len; bio->bi_vcnt++; } EXPORT_SYMBOL_GPL(__bio_add_page); /** * bio_add_page - attempt to add page(s) to bio * @bio: destination bio * @page: start page to add * @len: vec entry length, may cross pages * @offset: vec entry offset relative to @page, may cross pages * * Attempt to add page(s) to the bio_vec maplist. This will only fail * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio. */ int bio_add_page(struct bio *bio, struct page *page, unsigned int len, unsigned int offset) { bool same_page = false; if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) return 0; if (bio->bi_iter.bi_size > UINT_MAX - len) return 0; if (bio->bi_vcnt > 0 && bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1], page, len, offset, &same_page)) { bio->bi_iter.bi_size += len; return len; } if (bio->bi_vcnt >= bio->bi_max_vecs) return 0; __bio_add_page(bio, page, len, offset); return len; } EXPORT_SYMBOL(bio_add_page); void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len, size_t off) { WARN_ON_ONCE(len > UINT_MAX); WARN_ON_ONCE(off > UINT_MAX); __bio_add_page(bio, &folio->page, len, off); } /** * bio_add_folio - Attempt to add part of a folio to a bio. * @bio: BIO to add to. * @folio: Folio to add. * @len: How many bytes from the folio to add. * @off: First byte in this folio to add. * * Filesystems that use folios can call this function instead of calling * bio_add_page() for each page in the folio. If @off is bigger than * PAGE_SIZE, this function can create a bio_vec that starts in a page * after the bv_page. BIOs do not support folios that are 4GiB or larger. * * Return: Whether the addition was successful. 
*/ bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len, size_t off) { if (len > UINT_MAX || off > UINT_MAX) return false; return bio_add_page(bio, &folio->page, len, off) > 0; } EXPORT_SYMBOL(bio_add_folio); void __bio_release_pages(struct bio *bio, bool mark_dirty) { struct folio_iter fi; bio_for_each_folio_all(fi, bio) { struct page *page; size_t nr_pages; if (mark_dirty) { folio_lock(fi.folio); folio_mark_dirty(fi.folio); folio_unlock(fi.folio); } page = folio_page(fi.folio, fi.offset / PAGE_SIZE); nr_pages = (fi.offset + fi.length - 1) / PAGE_SIZE - fi.offset / PAGE_SIZE + 1; do { bio_release_page(bio, page++); } while (--nr_pages != 0); } } EXPORT_SYMBOL_GPL(__bio_release_pages); void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter) { size_t size = iov_iter_count(iter); WARN_ON_ONCE(bio->bi_max_vecs); if (bio_op(bio) == REQ_OP_ZONE_APPEND) { struct request_queue *q = bdev_get_queue(bio->bi_bdev); size_t max_sectors = queue_max_zone_append_sectors(q); size = min(size, max_sectors << SECTOR_SHIFT); } bio->bi_vcnt = iter->nr_segs; bio->bi_io_vec = (struct bio_vec *)iter->bvec; bio->bi_iter.bi_bvec_done = iter->iov_offset; bio->bi_iter.bi_size = size; bio_set_flag(bio, BIO_CLONED); } static int bio_iov_add_page(struct bio *bio, struct page *page, unsigned int len, unsigned int offset) { bool same_page = false; if (WARN_ON_ONCE(bio->bi_iter.bi_size > UINT_MAX - len)) return -EIO; if (bio->bi_vcnt > 0 && bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1], page, len, offset, &same_page)) { bio->bi_iter.bi_size += len; if (same_page) bio_release_page(bio, page); return 0; } __bio_add_page(bio, page, len, offset); return 0; } static int bio_iov_add_zone_append_page(struct bio *bio, struct page *page, unsigned int len, unsigned int offset) { struct request_queue *q = bdev_get_queue(bio->bi_bdev); bool same_page = false; if (bio_add_hw_page(q, bio, page, len, offset, queue_max_zone_append_sectors(q), &same_page) != len) return -EINVAL; if (same_page) bio_release_page(bio, page); return 0; } #define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *)) /** * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio * @bio: bio to add pages to * @iter: iov iterator describing the region to be mapped * * Extracts pages from *iter and appends them to @bio's bvec array. The pages * will have to be cleaned up in the way indicated by the BIO_PAGE_PINNED flag. * For a multi-segment *iter, this function only adds pages from the next * non-empty segment of the iov iterator. */ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) { iov_iter_extraction_t extraction_flags = 0; unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt; unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt; struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt; struct page **pages = (struct page **)bv; ssize_t size, left; unsigned len, i = 0; size_t offset; int ret = 0; /* * Move page array up in the allocated memory for the bio vecs as far as * possible so that we can start filling biovecs from the beginning * without overwriting the temporary page array. */ BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2); pages += entries_left * (PAGE_PTRS_PER_BVEC - 1); if (bio->bi_bdev && blk_queue_pci_p2pdma(bio->bi_bdev->bd_disk->queue)) extraction_flags |= ITER_ALLOW_P2PDMA; /* * Each segment in the iov is required to be a block size multiple. 
* However, we may not be able to get the entire segment if it spans * more pages than bi_max_vecs allows, so we have to ALIGN_DOWN the * result to ensure the bio's total size is correct. The remainder of * the iov data will be picked up in the next bio iteration. */ size = iov_iter_extract_pages(iter, &pages, UINT_MAX - bio->bi_iter.bi_size, nr_pages, extraction_flags, &offset); if (unlikely(size <= 0)) return size ? size : -EFAULT; nr_pages = DIV_ROUND_UP(offset + size, PAGE_SIZE); if (bio->bi_bdev) { size_t trim = size & (bdev_logical_block_size(bio->bi_bdev) - 1); iov_iter_revert(iter, trim); size -= trim; } if (unlikely(!size)) { ret = -EFAULT; goto out; } for (left = size, i = 0; left > 0; left -= len, i++) { struct page *page = pages[i]; len = min_t(size_t, PAGE_SIZE - offset, left); if (bio_op(bio) == REQ_OP_ZONE_APPEND) { ret = bio_iov_add_zone_append_page(bio, page, len, offset); if (ret) break; } else bio_iov_add_page(bio, page, len, offset); offset = 0; } iov_iter_revert(iter, left); out: while (i < nr_pages) bio_release_page(bio, pages[i++]); return ret; } /** * bio_iov_iter_get_pages - add user or kernel pages to a bio * @bio: bio to add pages to * @iter: iov iterator describing the region to be added * * This takes either an iterator pointing to user memory, or one pointing to * kernel pages (BVEC iterator). If we're adding user pages, we pin them and * map them into the kernel. On IO completion, the caller should put those * pages. For bvec based iterators bio_iov_iter_get_pages() uses the provided * bvecs rather than copying them. Hence anyone issuing kiocb based IO needs * to ensure the bvecs and pages stay referenced until the submitted I/O is * completed by a call to ->ki_complete() or returns with an error other than * -EIOCBQUEUED. The caller needs to check if the bio is flagged BIO_NO_PAGE_REF * on IO completion. If it isn't, then pages should be released. * * The function tries, but does not guarantee, to pin as many pages as * fit into the bio, or are requested in @iter, whatever is smaller. If * MM encounters an error pinning the requested pages, it stops. Error * is returned only if 0 pages could be pinned. */ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) { int ret = 0; if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) return -EIO; if (iov_iter_is_bvec(iter)) { bio_iov_bvec_set(bio, iter); iov_iter_advance(iter, bio->bi_iter.bi_size); return 0; } if (iov_iter_extract_will_pin(iter)) bio_set_flag(bio, BIO_PAGE_PINNED); do { ret = __bio_iov_iter_get_pages(bio, iter); } while (!ret && iov_iter_count(iter) && !bio_full(bio, 0)); return bio->bi_vcnt ? 0 : ret; } EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages); static void submit_bio_wait_endio(struct bio *bio) { complete(bio->bi_private); } /** * submit_bio_wait - submit a bio, and wait until it completes * @bio: The &struct bio which describes the I/O * * Simple wrapper around submit_bio(). Returns 0 on success, or the error from * bio_endio() on failure. * * WARNING: Unlike to how submit_bio() is usually used, this function does not * result in bio reference to be consumed. The caller must drop the reference * on his own. 
*/ int submit_bio_wait(struct bio *bio) { DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_bdev->bd_disk->lockdep_map); bio->bi_private = &done; bio->bi_end_io = submit_bio_wait_endio; bio->bi_opf |= REQ_SYNC; submit_bio(bio); blk_wait_io(&done); return blk_status_to_errno(bio->bi_status); } EXPORT_SYMBOL(submit_bio_wait); void __bio_advance(struct bio *bio, unsigned bytes) { if (bio_integrity(bio)) bio_integrity_advance(bio, bytes); bio_crypt_advance(bio, bytes); bio_advance_iter(bio, &bio->bi_iter, bytes); } EXPORT_SYMBOL(__bio_advance); void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, struct bio *src, struct bvec_iter *src_iter) { while (src_iter->bi_size && dst_iter->bi_size) { struct bio_vec src_bv = bio_iter_iovec(src, *src_iter); struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter); unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len); void *src_buf = bvec_kmap_local(&src_bv); void *dst_buf = bvec_kmap_local(&dst_bv); memcpy(dst_buf, src_buf, bytes); kunmap_local(dst_buf); kunmap_local(src_buf); bio_advance_iter_single(src, src_iter, bytes); bio_advance_iter_single(dst, dst_iter, bytes); } } EXPORT_SYMBOL(bio_copy_data_iter); /** * bio_copy_data - copy contents of data buffers from one bio to another * @src: source bio * @dst: destination bio * * Stops when it reaches the end of either @src or @dst - that is, copies * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios). */ void bio_copy_data(struct bio *dst, struct bio *src) { struct bvec_iter src_iter = src->bi_iter; struct bvec_iter dst_iter = dst->bi_iter; bio_copy_data_iter(dst, &dst_iter, src, &src_iter); } EXPORT_SYMBOL(bio_copy_data); void bio_free_pages(struct bio *bio) { struct bio_vec *bvec; struct bvec_iter_all iter_all; bio_for_each_segment_all(bvec, bio, iter_all) __free_page(bvec->bv_page); } EXPORT_SYMBOL(bio_free_pages); /* * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions * for performing direct-IO in BIOs. * * The problem is that we cannot run folio_mark_dirty() from interrupt context * because the required locks are not interrupt-safe. So what we can do is to * mark the pages dirty _before_ performing IO. And in interrupt context, * check that the pages are still dirty. If so, fine. If not, redirty them * in process context. * * Note that this code is very hard to test under normal circumstances because * direct-io pins the pages with get_user_pages(). This makes * is_page_cache_freeable return false, and the VM will not clean the pages. * But other code (eg, flusher threads) could clean the pages if they are mapped * pagecache. * * Simply disabling the call to bio_set_pages_dirty() is a good way to test the * deferred bio dirtying paths. */ /* * bio_set_pages_dirty() will mark all the bio's pages as dirty. */ void bio_set_pages_dirty(struct bio *bio) { struct folio_iter fi; bio_for_each_folio_all(fi, bio) { folio_lock(fi.folio); folio_mark_dirty(fi.folio); folio_unlock(fi.folio); } } EXPORT_SYMBOL_GPL(bio_set_pages_dirty); /* * bio_check_pages_dirty() will check that all the BIO's pages are still dirty. * If they are, then fine. If, however, some pages are clean then they must * have been written out during the direct-IO read. So we take another ref on * the BIO and re-dirty the pages in process context. * * It is expected that bio_check_pages_dirty() will wholly own the BIO from * here on. It will unpin each page and will run one bio_put() against the * BIO. 
*/ static void bio_dirty_fn(struct work_struct *work); static DECLARE_WORK(bio_dirty_work, bio_dirty_fn); static DEFINE_SPINLOCK(bio_dirty_lock); static struct bio *bio_dirty_list; /* * This runs in process context */ static void bio_dirty_fn(struct work_struct *work) { struct bio *bio, *next; spin_lock_irq(&bio_dirty_lock); next = bio_dirty_list; bio_dirty_list = NULL; spin_unlock_irq(&bio_dirty_lock); while ((bio = next) != NULL) { next = bio->bi_private; bio_release_pages(bio, true); bio_put(bio); } } void bio_check_pages_dirty(struct bio *bio) { struct folio_iter fi; unsigned long flags; bio_for_each_folio_all(fi, bio) { if (!folio_test_dirty(fi.folio)) goto defer; } bio_release_pages(bio, false); bio_put(bio); return; defer: spin_lock_irqsave(&bio_dirty_lock, flags); bio->bi_private = bio_dirty_list; bio_dirty_list = bio; spin_unlock_irqrestore(&bio_dirty_lock, flags); schedule_work(&bio_dirty_work); } EXPORT_SYMBOL_GPL(bio_check_pages_dirty); static inline bool bio_remaining_done(struct bio *bio) { /* * If we're not chaining, then ->__bi_remaining is always 1 and * we always end io on the first invocation. */ if (!bio_flagged(bio, BIO_CHAIN)) return true; BUG_ON(atomic_read(&bio->__bi_remaining) <= 0); if (atomic_dec_and_test(&bio->__bi_remaining)) { bio_clear_flag(bio, BIO_CHAIN); return true; } return false; } /** * bio_endio - end I/O on a bio * @bio: bio * * Description: * bio_endio() will end I/O on the whole bio. bio_endio() is the preferred * way to end I/O on a bio. No one should call bi_end_io() directly on a * bio unless they own it and thus know that it has an end_io function. * * bio_endio() can be called several times on a bio that has been chained * using bio_chain(). The ->bi_end_io() function will only be called the * last time. **/ void bio_endio(struct bio *bio) { again: if (!bio_remaining_done(bio)) return; if (!bio_integrity_endio(bio)) return; rq_qos_done_bio(bio); if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) { trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio); bio_clear_flag(bio, BIO_TRACE_COMPLETION); } /* * Need to have a real endio function for chained bios, otherwise * various corner cases will break (like stacking block devices that * save/restore bi_end_io) - however, we want to avoid unbounded * recursion and blowing the stack. Tail call optimization would * handle this, but compiling with frame pointers also disables * gcc's sibling call optimization. */ if (bio->bi_end_io == bio_chain_endio) { bio = __bio_chain_endio(bio); goto again; } blk_throtl_bio_endio(bio); /* release cgroup info */ bio_uninit(bio); if (bio->bi_end_io) bio->bi_end_io(bio); } EXPORT_SYMBOL(bio_endio); /** * bio_split - split a bio * @bio: bio to split * @sectors: number of sectors to split from the front of @bio * @gfp: gfp mask * @bs: bio set to allocate from * * Allocates and returns a new bio which represents @sectors from the start of * @bio, and updates @bio to represent the remaining sectors. * * Unless this is a discard request the newly allocated bio will point * to @bio's bi_io_vec. It is the caller's responsibility to ensure that * neither @bio nor @bs are freed before the split bio. 
*/ struct bio *bio_split(struct bio *bio, int sectors, gfp_t gfp, struct bio_set *bs) { struct bio *split; BUG_ON(sectors <= 0); BUG_ON(sectors >= bio_sectors(bio)); /* Zone append commands cannot be split */ if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND)) return NULL; split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs); if (!split) return NULL; split->bi_iter.bi_size = sectors << 9; if (bio_integrity(split)) bio_integrity_trim(split); bio_advance(bio, split->bi_iter.bi_size); if (bio_flagged(bio, BIO_TRACE_COMPLETION)) bio_set_flag(split, BIO_TRACE_COMPLETION); return split; } EXPORT_SYMBOL(bio_split); /** * bio_trim - trim a bio * @bio: bio to trim * @offset: number of sectors to trim from the front of @bio * @size: size we want to trim @bio to, in sectors * * This function is typically used for bios that are cloned and submitted * to the underlying device in parts. */ void bio_trim(struct bio *bio, sector_t offset, sector_t size) { if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS || offset + size > bio_sectors(bio))) return; size <<= 9; if (offset == 0 && size == bio->bi_iter.bi_size) return; bio_advance(bio, offset << 9); bio->bi_iter.bi_size = size; if (bio_integrity(bio)) bio_integrity_trim(bio); } EXPORT_SYMBOL_GPL(bio_trim); /* * create memory pools for biovec's in a bio_set. * use the global biovec slabs created for general use. */ int biovec_init_pool(mempool_t *pool, int pool_entries) { struct biovec_slab *bp = bvec_slabs + ARRAY_SIZE(bvec_slabs) - 1; return mempool_init_slab_pool(pool, pool_entries, bp->slab); } /* * bioset_exit - exit a bioset initialized with bioset_init() * * May be called on a zeroed but uninitialized bioset (i.e. allocated with * kzalloc()). */ void bioset_exit(struct bio_set *bs) { bio_alloc_cache_destroy(bs); if (bs->rescue_workqueue) destroy_workqueue(bs->rescue_workqueue); bs->rescue_workqueue = NULL; mempool_exit(&bs->bio_pool); mempool_exit(&bs->bvec_pool); bioset_integrity_free(bs); if (bs->bio_slab) bio_put_slab(bs); bs->bio_slab = NULL; } EXPORT_SYMBOL(bioset_exit); /** * bioset_init - Initialize a bio_set * @bs: pool to initialize * @pool_size: Number of bio and bio_vecs to cache in the mempool * @front_pad: Number of bytes to allocate in front of the returned bio * @flags: Flags to modify behavior, currently %BIOSET_NEED_BVECS * and %BIOSET_NEED_RESCUER * * Description: * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller * to ask for a number of bytes to be allocated in front of the bio. * Front pad allocation is useful for embedding the bio inside * another structure, to avoid allocating extra data to go with the bio. * Note that the bio must be embedded at the END of that structure always, * or things will break badly. * If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated * for allocating iovecs. This pool is not needed e.g. for bio_init_clone(). * If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used * to dispatch queued requests when the mempool runs out of space. 
* */ int bioset_init(struct bio_set *bs, unsigned int pool_size, unsigned int front_pad, int flags) { bs->front_pad = front_pad; if (flags & BIOSET_NEED_BVECS) bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec); else bs->back_pad = 0; spin_lock_init(&bs->rescue_lock); bio_list_init(&bs->rescue_list); INIT_WORK(&bs->rescue_work, bio_alloc_rescue); bs->bio_slab = bio_find_or_create_slab(bs); if (!bs->bio_slab) return -ENOMEM; if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab)) goto bad; if ((flags & BIOSET_NEED_BVECS) && biovec_init_pool(&bs->bvec_pool, pool_size)) goto bad; if (flags & BIOSET_NEED_RESCUER) { bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0); if (!bs->rescue_workqueue) goto bad; } if (flags & BIOSET_PERCPU_CACHE) { bs->cache = alloc_percpu(struct bio_alloc_cache); if (!bs->cache) goto bad; cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead); } return 0; bad: bioset_exit(bs); return -ENOMEM; } EXPORT_SYMBOL(bioset_init); static int __init init_bio(void) { int i; BUILD_BUG_ON(BIO_FLAG_LAST > 8 * sizeof_field(struct bio, bi_flags)); bio_integrity_init(); for (i = 0; i < ARRAY_SIZE(bvec_slabs); i++) { struct biovec_slab *bvs = bvec_slabs + i; bvs->slab = kmem_cache_create(bvs->name, bvs->nr_vecs * sizeof(struct bio_vec), 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); } cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead", NULL, bio_cpu_dead); if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE)) panic("bio: can't allocate bios\n"); if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE)) panic("bio: can't create integrity pool\n"); return 0; } subsys_initcall(init_bio);
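The bio helpers above are normally driven by filesystems and block drivers. As a rough illustration only, here is a minimal, hedged sketch (not from this file) of a synchronous single-sector read built on bio_alloc(), __bio_add_page() and submit_bio_wait(); the function name, the caller-supplied bdev/page, and the modern bio_alloc(bdev, nr_vecs, opf, gfp) signature are assumptions.

/*
 * Illustrative sketch only (not part of block/bio.c): read one sector
 * synchronously. Assumes the caller owns @bdev and @page and that the
 * modern bio_alloc(bdev, nr_vecs, opf, gfp) signature is available.
 */
static int example_read_sector(struct block_device *bdev, sector_t sector,
			       struct page *page)
{
	struct bio *bio;
	int ret;

	bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_sector = sector;
	__bio_add_page(bio, page, SECTOR_SIZE, 0);

	/* Blocks until completion; see the submit_bio_wait() warning above. */
	ret = submit_bio_wait(bio);

	bio_put(bio);
	return ret;
}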
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Cryptographic API.
 *
 * SHA-3, as specified in
 * https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
 *
 * SHA-3 code by Jeff Garzik <jeff@garzik.org>
 *              Ard Biesheuvel <ard.biesheuvel@linaro.org>
 */
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <crypto/sha3.h>
#include <asm/unaligned.h>

/*
 * On some 32-bit architectures (h8300), GCC ends up using
 * over 1 KB of stack if we inline the round calculation into the loop
 * in keccakf(). On the other hand, on 64-bit architectures with plenty
 * of [64-bit wide] general purpose registers, not inlining it severely
 * hurts performance. So let's use 64-bitness as a heuristic to decide
 * whether to inline or not.
*/ #ifdef CONFIG_64BIT #define SHA3_INLINE inline #else #define SHA3_INLINE noinline #endif #define KECCAK_ROUNDS 24 static const u64 keccakf_rndc[24] = { 0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL, 0x8000000080008000ULL, 0x000000000000808bULL, 0x0000000080000001ULL, 0x8000000080008081ULL, 0x8000000000008009ULL, 0x000000000000008aULL, 0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000aULL, 0x000000008000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL, 0x8000000000008003ULL, 0x8000000000008002ULL, 0x8000000000000080ULL, 0x000000000000800aULL, 0x800000008000000aULL, 0x8000000080008081ULL, 0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL }; /* update the state with given number of rounds */ static SHA3_INLINE void keccakf_round(u64 st[25]) { u64 t[5], tt, bc[5]; /* Theta */ bc[0] = st[0] ^ st[5] ^ st[10] ^ st[15] ^ st[20]; bc[1] = st[1] ^ st[6] ^ st[11] ^ st[16] ^ st[21]; bc[2] = st[2] ^ st[7] ^ st[12] ^ st[17] ^ st[22]; bc[3] = st[3] ^ st[8] ^ st[13] ^ st[18] ^ st[23]; bc[4] = st[4] ^ st[9] ^ st[14] ^ st[19] ^ st[24]; t[0] = bc[4] ^ rol64(bc[1], 1); t[1] = bc[0] ^ rol64(bc[2], 1); t[2] = bc[1] ^ rol64(bc[3], 1); t[3] = bc[2] ^ rol64(bc[4], 1); t[4] = bc[3] ^ rol64(bc[0], 1); st[0] ^= t[0]; /* Rho Pi */ tt = st[1]; st[ 1] = rol64(st[ 6] ^ t[1], 44); st[ 6] = rol64(st[ 9] ^ t[4], 20); st[ 9] = rol64(st[22] ^ t[2], 61); st[22] = rol64(st[14] ^ t[4], 39); st[14] = rol64(st[20] ^ t[0], 18); st[20] = rol64(st[ 2] ^ t[2], 62); st[ 2] = rol64(st[12] ^ t[2], 43); st[12] = rol64(st[13] ^ t[3], 25); st[13] = rol64(st[19] ^ t[4], 8); st[19] = rol64(st[23] ^ t[3], 56); st[23] = rol64(st[15] ^ t[0], 41); st[15] = rol64(st[ 4] ^ t[4], 27); st[ 4] = rol64(st[24] ^ t[4], 14); st[24] = rol64(st[21] ^ t[1], 2); st[21] = rol64(st[ 8] ^ t[3], 55); st[ 8] = rol64(st[16] ^ t[1], 45); st[16] = rol64(st[ 5] ^ t[0], 36); st[ 5] = rol64(st[ 3] ^ t[3], 28); st[ 3] = rol64(st[18] ^ t[3], 21); st[18] = rol64(st[17] ^ t[2], 15); st[17] = rol64(st[11] ^ t[1], 10); st[11] = rol64(st[ 7] ^ t[2], 6); st[ 7] = rol64(st[10] ^ t[0], 3); st[10] = rol64( tt ^ t[1], 1); /* Chi */ bc[ 0] = ~st[ 1] & st[ 2]; bc[ 1] = ~st[ 2] & st[ 3]; bc[ 2] = ~st[ 3] & st[ 4]; bc[ 3] = ~st[ 4] & st[ 0]; bc[ 4] = ~st[ 0] & st[ 1]; st[ 0] ^= bc[ 0]; st[ 1] ^= bc[ 1]; st[ 2] ^= bc[ 2]; st[ 3] ^= bc[ 3]; st[ 4] ^= bc[ 4]; bc[ 0] = ~st[ 6] & st[ 7]; bc[ 1] = ~st[ 7] & st[ 8]; bc[ 2] = ~st[ 8] & st[ 9]; bc[ 3] = ~st[ 9] & st[ 5]; bc[ 4] = ~st[ 5] & st[ 6]; st[ 5] ^= bc[ 0]; st[ 6] ^= bc[ 1]; st[ 7] ^= bc[ 2]; st[ 8] ^= bc[ 3]; st[ 9] ^= bc[ 4]; bc[ 0] = ~st[11] & st[12]; bc[ 1] = ~st[12] & st[13]; bc[ 2] = ~st[13] & st[14]; bc[ 3] = ~st[14] & st[10]; bc[ 4] = ~st[10] & st[11]; st[10] ^= bc[ 0]; st[11] ^= bc[ 1]; st[12] ^= bc[ 2]; st[13] ^= bc[ 3]; st[14] ^= bc[ 4]; bc[ 0] = ~st[16] & st[17]; bc[ 1] = ~st[17] & st[18]; bc[ 2] = ~st[18] & st[19]; bc[ 3] = ~st[19] & st[15]; bc[ 4] = ~st[15] & st[16]; st[15] ^= bc[ 0]; st[16] ^= bc[ 1]; st[17] ^= bc[ 2]; st[18] ^= bc[ 3]; st[19] ^= bc[ 4]; bc[ 0] = ~st[21] & st[22]; bc[ 1] = ~st[22] & st[23]; bc[ 2] = ~st[23] & st[24]; bc[ 3] = ~st[24] & st[20]; bc[ 4] = ~st[20] & st[21]; st[20] ^= bc[ 0]; st[21] ^= bc[ 1]; st[22] ^= bc[ 2]; st[23] ^= bc[ 3]; st[24] ^= bc[ 4]; } static void keccakf(u64 st[25]) { int round; for (round = 0; round < KECCAK_ROUNDS; round++) { keccakf_round(st); /* Iota */ st[0] ^= keccakf_rndc[round]; } } int crypto_sha3_init(struct shash_desc *desc) { struct sha3_state *sctx = shash_desc_ctx(desc); unsigned int digest_size = 
crypto_shash_digestsize(desc->tfm); sctx->rsiz = 200 - 2 * digest_size; sctx->rsizw = sctx->rsiz / 8; sctx->partial = 0; memset(sctx->st, 0, sizeof(sctx->st)); return 0; } EXPORT_SYMBOL(crypto_sha3_init); int crypto_sha3_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct sha3_state *sctx = shash_desc_ctx(desc); unsigned int done; const u8 *src; done = 0; src = data; if ((sctx->partial + len) > (sctx->rsiz - 1)) { if (sctx->partial) { done = -sctx->partial; memcpy(sctx->buf + sctx->partial, data, done + sctx->rsiz); src = sctx->buf; } do { unsigned int i; for (i = 0; i < sctx->rsizw; i++) sctx->st[i] ^= get_unaligned_le64(src + 8 * i); keccakf(sctx->st); done += sctx->rsiz; src = data + done; } while (done + (sctx->rsiz - 1) < len); sctx->partial = 0; } memcpy(sctx->buf + sctx->partial, src, len - done); sctx->partial += (len - done); return 0; } EXPORT_SYMBOL(crypto_sha3_update); int crypto_sha3_final(struct shash_desc *desc, u8 *out) { struct sha3_state *sctx = shash_desc_ctx(desc); unsigned int i, inlen = sctx->partial; unsigned int digest_size = crypto_shash_digestsize(desc->tfm); __le64 *digest = (__le64 *)out; sctx->buf[inlen++] = 0x06; memset(sctx->buf + inlen, 0, sctx->rsiz - inlen); sctx->buf[sctx->rsiz - 1] |= 0x80; for (i = 0; i < sctx->rsizw; i++) sctx->st[i] ^= get_unaligned_le64(sctx->buf + 8 * i); keccakf(sctx->st); for (i = 0; i < digest_size / 8; i++) put_unaligned_le64(sctx->st[i], digest++); if (digest_size & 4) put_unaligned_le32(sctx->st[i], (__le32 *)digest); memset(sctx, 0, sizeof(*sctx)); return 0; } EXPORT_SYMBOL(crypto_sha3_final); static struct shash_alg algs[] = { { .digestsize = SHA3_224_DIGEST_SIZE, .init = crypto_sha3_init, .update = crypto_sha3_update, .final = crypto_sha3_final, .descsize = sizeof(struct sha3_state), .base.cra_name = "sha3-224", .base.cra_driver_name = "sha3-224-generic", .base.cra_blocksize = SHA3_224_BLOCK_SIZE, .base.cra_module = THIS_MODULE, }, { .digestsize = SHA3_256_DIGEST_SIZE, .init = crypto_sha3_init, .update = crypto_sha3_update, .final = crypto_sha3_final, .descsize = sizeof(struct sha3_state), .base.cra_name = "sha3-256", .base.cra_driver_name = "sha3-256-generic", .base.cra_blocksize = SHA3_256_BLOCK_SIZE, .base.cra_module = THIS_MODULE, }, { .digestsize = SHA3_384_DIGEST_SIZE, .init = crypto_sha3_init, .update = crypto_sha3_update, .final = crypto_sha3_final, .descsize = sizeof(struct sha3_state), .base.cra_name = "sha3-384", .base.cra_driver_name = "sha3-384-generic", .base.cra_blocksize = SHA3_384_BLOCK_SIZE, .base.cra_module = THIS_MODULE, }, { .digestsize = SHA3_512_DIGEST_SIZE, .init = crypto_sha3_init, .update = crypto_sha3_update, .final = crypto_sha3_final, .descsize = sizeof(struct sha3_state), .base.cra_name = "sha3-512", .base.cra_driver_name = "sha3-512-generic", .base.cra_blocksize = SHA3_512_BLOCK_SIZE, .base.cra_module = THIS_MODULE, } }; static int __init sha3_generic_mod_init(void) { return crypto_register_shashes(algs, ARRAY_SIZE(algs)); } static void __exit sha3_generic_mod_fini(void) { crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); } subsys_initcall(sha3_generic_mod_init); module_exit(sha3_generic_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA-3 Secure Hash Algorithm"); MODULE_ALIAS_CRYPTO("sha3-224"); MODULE_ALIAS_CRYPTO("sha3-224-generic"); MODULE_ALIAS_CRYPTO("sha3-256"); MODULE_ALIAS_CRYPTO("sha3-256-generic"); MODULE_ALIAS_CRYPTO("sha3-384"); MODULE_ALIAS_CRYPTO("sha3-384-generic"); MODULE_ALIAS_CRYPTO("sha3-512"); MODULE_ALIAS_CRYPTO("sha3-512-generic");
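For context, the generic SHA-3 implementations above are reached through the regular shash API. A minimal, hedged sketch of a one-shot SHA3-256 digest follows; the function name is illustrative and <crypto/hash.h> plus <crypto/sha3.h> are assumed to be included.

/*
 * Illustrative sketch only (not part of sha3_generic.c): one-shot
 * SHA3-256 digest via the shash API the algorithms above register with.
 */
static int example_sha3_256_digest(const u8 *data, unsigned int len,
				   u8 out[SHA3_256_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int ret;

	tfm = crypto_alloc_shash("sha3-256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* init/update/final in one call; the shash_desc is handled internally. */
	ret = crypto_shash_tfm_digest(tfm, data, len, out);

	crypto_free_shash(tfm);
	return ret;
}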
/*
 * DRBG based on NIST SP800-90A
 *
 * Copyright Stephan Mueller <smueller@chronox.de>, 2014
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, and the entire permission notice in its entirety,
 *    including the disclaimer of warranties.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * ALTERNATIVELY, this product may be distributed under the terms of
 * the GNU General Public License, in which case the provisions of the GPL are
 * required INSTEAD OF the above restrictions. (This clause is
 * necessary due to a potential bad interaction between the GPL and
 * the restrictions contained in a BSD-style copyright.)
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
 * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#ifndef _DRBG_H
#define _DRBG_H

#include <linux/random.h>
#include <linux/scatterlist.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/slab.h>
#include <crypto/internal/rng.h>
#include <crypto/rng.h>
#include <linux/fips.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/workqueue.h>

/*
 * Concatenation Helper and string operation helper
 *
 * SP800-90A requires the concatenation of different data. To avoid copying
 * buffers around or allocate additional memory, the following data structure
 * is used to point to the original memory with its size.
In addition, it * is used to build a linked list. The linked list defines the concatenation * of individual buffers. The order of memory block referenced in that * linked list determines the order of concatenation. */ struct drbg_string { const unsigned char *buf; size_t len; struct list_head list; }; static inline void drbg_string_fill(struct drbg_string *string, const unsigned char *buf, size_t len) { string->buf = buf; string->len = len; INIT_LIST_HEAD(&string->list); } struct drbg_state; typedef uint32_t drbg_flag_t; struct drbg_core { drbg_flag_t flags; /* flags for the cipher */ __u8 statelen; /* maximum state length */ __u8 blocklen_bytes; /* block size of output in bytes */ char cra_name[CRYPTO_MAX_ALG_NAME]; /* mapping to kernel crypto API */ /* kernel crypto API backend cipher name */ char backend_cra_name[CRYPTO_MAX_ALG_NAME]; }; struct drbg_state_ops { int (*update)(struct drbg_state *drbg, struct list_head *seed, int reseed); int (*generate)(struct drbg_state *drbg, unsigned char *buf, unsigned int buflen, struct list_head *addtl); int (*crypto_init)(struct drbg_state *drbg); int (*crypto_fini)(struct drbg_state *drbg); }; struct drbg_test_data { struct drbg_string *testentropy; /* TEST PARAMETER: test entropy */ }; enum drbg_seed_state { DRBG_SEED_STATE_UNSEEDED, DRBG_SEED_STATE_PARTIAL, /* Seeded with !rng_is_initialized() */ DRBG_SEED_STATE_FULL, }; struct drbg_state { struct mutex drbg_mutex; /* lock around DRBG */ unsigned char *V; /* internal state 10.1.1.1 1a) */ unsigned char *Vbuf; /* hash: static value 10.1.1.1 1b) hmac / ctr: key */ unsigned char *C; unsigned char *Cbuf; /* Number of RNG requests since last reseed -- 10.1.1.1 1c) */ size_t reseed_ctr; size_t reseed_threshold; /* some memory the DRBG can use for its operation */ unsigned char *scratchpad; unsigned char *scratchpadbuf; void *priv_data; /* Cipher handle */ struct crypto_skcipher *ctr_handle; /* CTR mode cipher handle */ struct skcipher_request *ctr_req; /* CTR mode request handle */ __u8 *outscratchpadbuf; /* CTR mode output scratchpad */ __u8 *outscratchpad; /* CTR mode aligned outbuf */ struct crypto_wait ctr_wait; /* CTR mode async wait obj */ struct scatterlist sg_in, sg_out; /* CTR mode SGLs */ enum drbg_seed_state seeded; /* DRBG fully seeded? */ unsigned long last_seed_time; bool pr; /* Prediction resistance enabled? */ bool fips_primed; /* Continuous test primed? */ unsigned char *prev; /* FIPS 140-2 continuous test value */ struct crypto_rng *jent; const struct drbg_state_ops *d_ops; const struct drbg_core *core; struct drbg_string test_data; }; static inline __u8 drbg_statelen(struct drbg_state *drbg) { if (drbg && drbg->core) return drbg->core->statelen; return 0; } static inline __u8 drbg_blocklen(struct drbg_state *drbg) { if (drbg && drbg->core) return drbg->core->blocklen_bytes; return 0; } static inline __u8 drbg_keylen(struct drbg_state *drbg) { if (drbg && drbg->core) return (drbg->core->statelen - drbg->core->blocklen_bytes); return 0; } static inline size_t drbg_max_request_bytes(struct drbg_state *drbg) { /* SP800-90A requires the limit 2**19 bits, but we return bytes */ return (1 << 16); } static inline size_t drbg_max_addtl(struct drbg_state *drbg) { /* SP800-90A requires 2**35 bytes additional info str / pers str */ #if (__BITS_PER_LONG == 32) /* * SP800-90A allows smaller maximum numbers to be returned -- we * return SIZE_MAX - 1 to allow the verification of the enforcement * of this value in drbg_healthcheck_sanity. 
*/ return (SIZE_MAX - 1); #else return (1UL<<35); #endif } static inline size_t drbg_max_requests(struct drbg_state *drbg) { /* SP800-90A requires 2**48 maximum requests before reseeding */ return (1<<20); } /* * This is a wrapper to the kernel crypto API function of * crypto_rng_generate() to allow the caller to provide additional data. * * @drng DRBG handle -- see crypto_rng_get_bytes * @outbuf output buffer -- see crypto_rng_get_bytes * @outlen length of output buffer -- see crypto_rng_get_bytes * @addtl_input additional information string input buffer * @addtllen length of additional information string buffer * * return * see crypto_rng_get_bytes */ static inline int crypto_drbg_get_bytes_addtl(struct crypto_rng *drng, unsigned char *outbuf, unsigned int outlen, struct drbg_string *addtl) { return crypto_rng_generate(drng, addtl->buf, addtl->len, outbuf, outlen); } /* * TEST code * * This is a wrapper to the kernel crypto API function of * crypto_rng_generate() to allow the caller to provide additional data and * allow furnishing of test_data * * @drng DRBG handle -- see crypto_rng_get_bytes * @outbuf output buffer -- see crypto_rng_get_bytes * @outlen length of output buffer -- see crypto_rng_get_bytes * @addtl_input additional information string input buffer * @addtllen length of additional information string buffer * @test_data filled test data * * return * see crypto_rng_get_bytes */ static inline int crypto_drbg_get_bytes_addtl_test(struct crypto_rng *drng, unsigned char *outbuf, unsigned int outlen, struct drbg_string *addtl, struct drbg_test_data *test_data) { crypto_rng_set_entropy(drng, test_data->testentropy->buf, test_data->testentropy->len); return crypto_rng_generate(drng, addtl->buf, addtl->len, outbuf, outlen); } /* * TEST code * * This is a wrapper to the kernel crypto API function of * crypto_rng_reset() to allow the caller to provide test_data * * @drng DRBG handle -- see crypto_rng_reset * @pers personalization string input buffer * @perslen length of additional information string buffer * @test_data filled test data * * return * see crypto_rng_reset */ static inline int crypto_drbg_reset_test(struct crypto_rng *drng, struct drbg_string *pers, struct drbg_test_data *test_data) { crypto_rng_set_entropy(drng, test_data->testentropy->buf, test_data->testentropy->len); return crypto_rng_reset(drng, pers->buf, pers->len); } /* DRBG type flags */ #define DRBG_CTR ((drbg_flag_t)1<<0) #define DRBG_HMAC ((drbg_flag_t)1<<1) #define DRBG_HASH ((drbg_flag_t)1<<2) #define DRBG_TYPE_MASK (DRBG_CTR | DRBG_HMAC | DRBG_HASH) /* DRBG strength flags */ #define DRBG_STRENGTH128 ((drbg_flag_t)1<<3) #define DRBG_STRENGTH192 ((drbg_flag_t)1<<4) #define DRBG_STRENGTH256 ((drbg_flag_t)1<<5) #define DRBG_STRENGTH_MASK (DRBG_STRENGTH128 | DRBG_STRENGTH192 | \ DRBG_STRENGTH256) enum drbg_prefixes { DRBG_PREFIX0 = 0x00, DRBG_PREFIX1, DRBG_PREFIX2, DRBG_PREFIX3 }; #endif /* _DRBG_H */
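The helpers in this header are consumed through the kernel RNG API. As a hedged sketch only (the DRBG name "drbg_nopr_hmac_sha256" and all sizes are illustrative, and the instance must be instantiated via crypto_rng_reset() before use), additional input can be passed with drbg_string_fill() and crypto_drbg_get_bytes_addtl() as shown below.

/*
 * Illustrative sketch only (not part of drbg.h): generate random bytes
 * from a DRBG with an additional-information string, using the
 * drbg_string helpers defined above.
 */
static int example_drbg_generate(u8 *out, unsigned int outlen)
{
	static const unsigned char addtl_data[] = "example-additional-input";
	struct drbg_string addtl;
	struct crypto_rng *drng;
	int ret;

	drng = crypto_alloc_rng("drbg_nopr_hmac_sha256", 0, 0);
	if (IS_ERR(drng))
		return PTR_ERR(drng);

	/* Instantiate/seed from kernel entropy; no personalization string. */
	ret = crypto_rng_reset(drng, NULL, 0);
	if (ret)
		goto out;

	/* Concatenation helper from above: references addtl_data, no copy. */
	drbg_string_fill(&addtl, addtl_data, sizeof(addtl_data) - 1);
	ret = crypto_drbg_get_bytes_addtl(drng, out, outlen, &addtl);

out:
	crypto_free_rng(drng);
	return ret;
}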
/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/rbtree.h>
#include <linux/module.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

#include "drm_internal.h"

MODULE_IMPORT_NS(DMA_BUF);

/**
 * DOC: overview and lifetime rules
 *
 * Similar to GEM global names, PRIME file descriptors are also used to share
 * buffer objects across processes. They offer additional security: as file
 * descriptors must be explicitly sent over UNIX domain sockets to be shared
 * between applications, they can't be guessed like the globally unique GEM
 * names.
 *
 * Drivers that support the PRIME API implement the drm_gem_object_funcs.export
 * and &drm_driver.gem_prime_import hooks. &dma_buf_ops implementations for
 * drivers are all individually exported for drivers which need to overwrite
 * or reimplement some of them.
 *
 * Reference Counting for GEM Drivers
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * On the export the &dma_buf holds a reference to the exported buffer object,
 * usually a &drm_gem_object. It takes this reference in the PRIME_HANDLE_TO_FD
 * IOCTL, when it first calls &drm_gem_object_funcs.export
 * and stores the exporting GEM object in the &dma_buf.priv field. This
 * reference needs to be released when the final reference to the &dma_buf
 * itself is dropped and its &dma_buf_ops.release function is called.
For * GEM-based drivers, the &dma_buf should be exported using * drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release(). * * Thus the chain of references always flows in one direction, avoiding loops: * importing GEM object -> dma-buf -> exported GEM bo. A further complication * are the lookup caches for import and export. These are required to guarantee * that any given object will always have only one unique userspace handle. This * is required to allow userspace to detect duplicated imports, since some GEM * drivers do fail command submissions if a given buffer object is listed more * than once. These import and export caches in &drm_prime_file_private only * retain a weak reference, which is cleaned up when the corresponding object is * released. * * Self-importing: If userspace is using PRIME as a replacement for flink then * it will get a fd->handle request for a GEM object that it created. Drivers * should detect this situation and return back the underlying object from the * dma-buf private. For GEM based drivers this is handled in * drm_gem_prime_import() already. */ struct drm_prime_member { struct dma_buf *dma_buf; uint32_t handle; struct rb_node dmabuf_rb; struct rb_node handle_rb; }; static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle) { struct drm_prime_member *member; struct rb_node **p, *rb; member = kmalloc(sizeof(*member), GFP_KERNEL); if (!member) return -ENOMEM; get_dma_buf(dma_buf); member->dma_buf = dma_buf; member->handle = handle; rb = NULL; p = &prime_fpriv->dmabufs.rb_node; while (*p) { struct drm_prime_member *pos; rb = *p; pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb); if (dma_buf > pos->dma_buf) p = &rb->rb_right; else p = &rb->rb_left; } rb_link_node(&member->dmabuf_rb, rb, p); rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs); rb = NULL; p = &prime_fpriv->handles.rb_node; while (*p) { struct drm_prime_member *pos; rb = *p; pos = rb_entry(rb, struct drm_prime_member, handle_rb); if (handle > pos->handle) p = &rb->rb_right; else p = &rb->rb_left; } rb_link_node(&member->handle_rb, rb, p); rb_insert_color(&member->handle_rb, &prime_fpriv->handles); return 0; } static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv, uint32_t handle) { struct rb_node *rb; rb = prime_fpriv->handles.rb_node; while (rb) { struct drm_prime_member *member; member = rb_entry(rb, struct drm_prime_member, handle_rb); if (member->handle == handle) return member->dma_buf; else if (member->handle < handle) rb = rb->rb_right; else rb = rb->rb_left; } return NULL; } static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle) { struct rb_node *rb; rb = prime_fpriv->dmabufs.rb_node; while (rb) { struct drm_prime_member *member; member = rb_entry(rb, struct drm_prime_member, dmabuf_rb); if (member->dma_buf == dma_buf) { *handle = member->handle; return 0; } else if (member->dma_buf < dma_buf) { rb = rb->rb_right; } else { rb = rb->rb_left; } } return -ENOENT; } void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, uint32_t handle) { struct rb_node *rb; mutex_lock(&prime_fpriv->lock); rb = prime_fpriv->handles.rb_node; while (rb) { struct drm_prime_member *member; member = rb_entry(rb, struct drm_prime_member, handle_rb); if (member->handle == handle) { rb_erase(&member->handle_rb, &prime_fpriv->handles); rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs); 
dma_buf_put(member->dma_buf); kfree(member); break; } else if (member->handle < handle) { rb = rb->rb_right; } else { rb = rb->rb_left; } } mutex_unlock(&prime_fpriv->lock); } void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv) { mutex_init(&prime_fpriv->lock); prime_fpriv->dmabufs = RB_ROOT; prime_fpriv->handles = RB_ROOT; } void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv) { /* by now drm_gem_release should've made sure the list is empty */ WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs)); } /** * drm_gem_dmabuf_export - &dma_buf export implementation for GEM * @dev: parent device for the exported dmabuf * @exp_info: the export information used by dma_buf_export() * * This wraps dma_buf_export() for use by generic GEM drivers that are using * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take * a reference to the &drm_device and the exported &drm_gem_object (stored in * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release(). * * Returns the new dmabuf. */ struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev, struct dma_buf_export_info *exp_info) { struct drm_gem_object *obj = exp_info->priv; struct dma_buf *dma_buf; dma_buf = dma_buf_export(exp_info); if (IS_ERR(dma_buf)) return dma_buf; drm_dev_get(dev); drm_gem_object_get(obj); dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping; return dma_buf; } EXPORT_SYMBOL(drm_gem_dmabuf_export); /** * drm_gem_dmabuf_release - &dma_buf release implementation for GEM * @dma_buf: buffer to be released * * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers * must use this in their &dma_buf_ops structure as the release callback. * drm_gem_dmabuf_release() should be used in conjunction with * drm_gem_dmabuf_export(). */ void drm_gem_dmabuf_release(struct dma_buf *dma_buf) { struct drm_gem_object *obj = dma_buf->priv; struct drm_device *dev = obj->dev; /* drop the reference on the export fd holds */ drm_gem_object_put(obj); drm_dev_put(dev); } EXPORT_SYMBOL(drm_gem_dmabuf_release); /** * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers * @dev: drm_device to import into * @file_priv: drm file-private structure * @prime_fd: fd id of the dma-buf which should be imported * @handle: pointer to storage for the handle of the imported buffer object * * This is the PRIME import function which must be used mandatorily by GEM * drivers to ensure correct lifetime management of the underlying GEM object. * The actual importing of GEM object from the dma-buf is done through the * &drm_driver.gem_prime_import driver callback. * * Returns 0 on success or a negative error code on failure. */ int drm_gem_prime_fd_to_handle(struct drm_device *dev, struct drm_file *file_priv, int prime_fd, uint32_t *handle) { struct dma_buf *dma_buf; struct drm_gem_object *obj; int ret; dma_buf = dma_buf_get(prime_fd); if (IS_ERR(dma_buf)) return PTR_ERR(dma_buf); mutex_lock(&file_priv->prime.lock); ret = drm_prime_lookup_buf_handle(&file_priv->prime, dma_buf, handle); if (ret == 0) goto out_put; /* never seen this one, need to import */ mutex_lock(&dev->object_name_lock); if (dev->driver->gem_prime_import) obj = dev->driver->gem_prime_import(dev, dma_buf); else obj = drm_gem_prime_import(dev, dma_buf); if (IS_ERR(obj)) { ret = PTR_ERR(obj); goto out_unlock; } if (obj->dma_buf) { WARN_ON(obj->dma_buf != dma_buf); } else { obj->dma_buf = dma_buf; get_dma_buf(dma_buf); } /* _handle_create_tail unconditionally unlocks dev->object_name_lock. 
*/ ret = drm_gem_handle_create_tail(file_priv, obj, handle); drm_gem_object_put(obj); if (ret) goto out_put; ret = drm_prime_add_buf_handle(&file_priv->prime, dma_buf, *handle); mutex_unlock(&file_priv->prime.lock); if (ret) goto fail; dma_buf_put(dma_buf); return 0; fail: /* hmm, if driver attached, we are relying on the free-object path * to detach.. which seems ok.. */ drm_gem_handle_delete(file_priv, *handle); dma_buf_put(dma_buf); return ret; out_unlock: mutex_unlock(&dev->object_name_lock); out_put: mutex_unlock(&file_priv->prime.lock); dma_buf_put(dma_buf); return ret; } EXPORT_SYMBOL(drm_gem_prime_fd_to_handle); int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_prime_handle *args = data; if (dev->driver->prime_fd_to_handle) { return dev->driver->prime_fd_to_handle(dev, file_priv, args->fd, &args->handle); } return drm_gem_prime_fd_to_handle(dev, file_priv, args->fd, &args->handle); } static struct dma_buf *export_and_register_object(struct drm_device *dev, struct drm_gem_object *obj, uint32_t flags) { struct dma_buf *dmabuf; /* prevent races with concurrent gem_close. */ if (obj->handle_count == 0) { dmabuf = ERR_PTR(-ENOENT); return dmabuf; } if (obj->funcs && obj->funcs->export) dmabuf = obj->funcs->export(obj, flags); else dmabuf = drm_gem_prime_export(obj, flags); if (IS_ERR(dmabuf)) { /* normally the created dma-buf takes ownership of the ref, * but if that fails then drop the ref */ return dmabuf; } /* * Note that callers do not need to clean up the export cache * since the check for obj->handle_count guarantees that someone * will clean it up. */ obj->dma_buf = dmabuf; get_dma_buf(obj->dma_buf); return dmabuf; } /** * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers * @dev: dev to export the buffer from * @file_priv: drm file-private structure * @handle: buffer handle to export * @flags: flags like DRM_CLOEXEC * @prime_fd: pointer to storage for the fd id of the create dma-buf * * This is the PRIME export function which must be used mandatorily by GEM * drivers to ensure correct lifetime management of the underlying GEM object. * The actual exporting from GEM object to a dma-buf is done through the * &drm_gem_object_funcs.export callback. */ int drm_gem_prime_handle_to_fd(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, uint32_t flags, int *prime_fd) { struct drm_gem_object *obj; int ret = 0; struct dma_buf *dmabuf; mutex_lock(&file_priv->prime.lock); obj = drm_gem_object_lookup(file_priv, handle); if (!obj) { ret = -ENOENT; goto out_unlock; } dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle); if (dmabuf) { get_dma_buf(dmabuf); goto out_have_handle; } mutex_lock(&dev->object_name_lock); /* re-export the original imported object */ if (obj->import_attach) { dmabuf = obj->import_attach->dmabuf; get_dma_buf(dmabuf); goto out_have_obj; } if (obj->dma_buf) { get_dma_buf(obj->dma_buf); dmabuf = obj->dma_buf; goto out_have_obj; } dmabuf = export_and_register_object(dev, obj, flags); if (IS_ERR(dmabuf)) { /* normally the created dma-buf takes ownership of the ref, * but if that fails then drop the ref */ ret = PTR_ERR(dmabuf); mutex_unlock(&dev->object_name_lock); goto out; } out_have_obj: /* * If we've exported this buffer then cheat and add it to the import list * so we get the correct handle back. We must do this under the * protection of dev->object_name_lock to ensure that a racing gem close * ioctl doesn't miss to remove this buffer handle from the cache. 
*/ ret = drm_prime_add_buf_handle(&file_priv->prime, dmabuf, handle); mutex_unlock(&dev->object_name_lock); if (ret) goto fail_put_dmabuf; out_have_handle: ret = dma_buf_fd(dmabuf, flags); /* * We must _not_ remove the buffer from the handle cache since the newly * created dma buf is already linked in the global obj->dma_buf pointer, * and that is invariant as long as a userspace gem handle exists. * Closing the handle will clean out the cache anyway, so we don't leak. */ if (ret < 0) { goto fail_put_dmabuf; } else { *prime_fd = ret; ret = 0; } goto out; fail_put_dmabuf: dma_buf_put(dmabuf); out: drm_gem_object_put(obj); out_unlock: mutex_unlock(&file_priv->prime.lock); return ret; } EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_prime_handle *args = data; /* check flags are valid */ if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR)) return -EINVAL; if (dev->driver->prime_handle_to_fd) { return dev->driver->prime_handle_to_fd(dev, file_priv, args->handle, args->flags, &args->fd); } return drm_gem_prime_handle_to_fd(dev, file_priv, args->handle, args->flags, &args->fd); } /** * DOC: PRIME Helpers * * Drivers can implement &drm_gem_object_funcs.export and * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper * functions drm_gem_prime_export() and drm_gem_prime_import(). These functions * implement dma-buf support in terms of some lower-level helpers, which are * again exported for drivers to use individually: * * Exporting buffers * ~~~~~~~~~~~~~~~~~ * * Optional pinning of buffers is handled at dma-buf attach and detach time in * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which relies on * &drm_gem_object_funcs.get_sg_table. If &drm_gem_object_funcs.get_sg_table is * unimplemented, exports into another device are rejected. * * For kernel-internal access there's drm_gem_dmabuf_vmap() and * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by * drm_gem_dmabuf_mmap(). * * Note that these export helpers can only be used if the underlying backing * storage is fully coherent and either permanently pinned, or it is safe to pin * it indefinitely. * * FIXME: The underlying helper functions are named rather inconsistently. * * Importing buffers * ~~~~~~~~~~~~~~~~~ * * Importing dma-bufs using drm_gem_prime_import() relies on * &drm_driver.gem_prime_import_sg_table. * * Note that similarly to the export helpers this permanently pins the * underlying backing storage. Which is ok for scanout, but is not the best * option for sharing lots of buffers for rendering. */ /** * drm_gem_map_attach - dma_buf attach implementation for GEM * @dma_buf: buffer to attach device to * @attach: buffer attachment data * * Calls &drm_gem_object_funcs.pin for device specific handling. This can be * used as the &dma_buf_ops.attach callback. Must be used together with * drm_gem_map_detach(). * * Returns 0 on success, negative error code on failure. */ int drm_gem_map_attach(struct dma_buf *dma_buf, struct dma_buf_attachment *attach) { struct drm_gem_object *obj = dma_buf->priv; /* * drm_gem_map_dma_buf() requires obj->get_sg_table(), but drivers * that implement their own ->map_dma_buf() do not. 
*/ if (dma_buf->ops->map_dma_buf == drm_gem_map_dma_buf && !obj->funcs->get_sg_table) return -ENOSYS; return drm_gem_pin(obj); } EXPORT_SYMBOL(drm_gem_map_attach); /** * drm_gem_map_detach - dma_buf detach implementation for GEM * @dma_buf: buffer to detach from * @attach: attachment to be detached * * Calls &drm_gem_object_funcs.pin for device specific handling. Cleans up * &dma_buf_attachment from drm_gem_map_attach(). This can be used as the * &dma_buf_ops.detach callback. */ void drm_gem_map_detach(struct dma_buf *dma_buf, struct dma_buf_attachment *attach) { struct drm_gem_object *obj = dma_buf->priv; drm_gem_unpin(obj); } EXPORT_SYMBOL(drm_gem_map_detach); /** * drm_gem_map_dma_buf - map_dma_buf implementation for GEM * @attach: attachment whose scatterlist is to be returned * @dir: direction of DMA transfer * * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together * with drm_gem_unmap_dma_buf(). * * Returns:sg_table containing the scatterlist to be returned; returns ERR_PTR * on error. May return -EINTR if it is interrupted by a signal. */ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, enum dma_data_direction dir) { struct drm_gem_object *obj = attach->dmabuf->priv; struct sg_table *sgt; int ret; if (WARN_ON(dir == DMA_NONE)) return ERR_PTR(-EINVAL); if (WARN_ON(!obj->funcs->get_sg_table)) return ERR_PTR(-ENOSYS); sgt = obj->funcs->get_sg_table(obj); if (IS_ERR(sgt)) return sgt; ret = dma_map_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC); if (ret) { sg_free_table(sgt); kfree(sgt); sgt = ERR_PTR(ret); } return sgt; } EXPORT_SYMBOL(drm_gem_map_dma_buf); /** * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM * @attach: attachment to unmap buffer from * @sgt: scatterlist info of the buffer to unmap * @dir: direction of DMA transfer * * This can be used as the &dma_buf_ops.unmap_dma_buf callback. */ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, struct sg_table *sgt, enum dma_data_direction dir) { if (!sgt) return; dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC); sg_free_table(sgt); kfree(sgt); } EXPORT_SYMBOL(drm_gem_unmap_dma_buf); /** * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM * @dma_buf: buffer to be mapped * @map: the virtual address of the buffer * * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling. * The kernel virtual address is returned in map. * * Returns 0 on success or a negative errno code otherwise. */ int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map) { struct drm_gem_object *obj = dma_buf->priv; return drm_gem_vmap(obj, map); } EXPORT_SYMBOL(drm_gem_dmabuf_vmap); /** * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM * @dma_buf: buffer to be unmapped * @map: the virtual address of the buffer * * Releases a kernel virtual mapping. This can be used as the * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for device specific handling. 
*/ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map) { struct drm_gem_object *obj = dma_buf->priv; drm_gem_vunmap(obj, map); } EXPORT_SYMBOL(drm_gem_dmabuf_vunmap); /** * drm_gem_prime_mmap - PRIME mmap function for GEM drivers * @obj: GEM object * @vma: Virtual address range * * This function sets up a userspace mapping for PRIME exported buffers using * the same codepath that is used for regular GEM buffer mapping on the DRM fd. * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is * called to set up the mapping. */ int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) { struct drm_file *priv; struct file *fil; int ret; /* Add the fake offset */ vma->vm_pgoff += drm_vma_node_start(&obj->vma_node); if (obj->funcs && obj->funcs->mmap) { vma->vm_ops = obj->funcs->vm_ops; drm_gem_object_get(obj); ret = obj->funcs->mmap(obj, vma); if (ret) { drm_gem_object_put(obj); return ret; } vma->vm_private_data = obj; return 0; } priv = kzalloc(sizeof(*priv), GFP_KERNEL); fil = kzalloc(sizeof(*fil), GFP_KERNEL); if (!priv || !fil) { ret = -ENOMEM; goto out; } /* Used by drm_gem_mmap() to lookup the GEM object */ priv->minor = obj->dev->primary; fil->private_data = priv; ret = drm_vma_node_allow(&obj->vma_node, priv); if (ret) goto out; ret = obj->dev->driver->fops->mmap(fil, vma); drm_vma_node_revoke(&obj->vma_node, priv); out: kfree(priv); kfree(fil); return ret; } EXPORT_SYMBOL(drm_gem_prime_mmap); /** * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM * @dma_buf: buffer to be mapped * @vma: virtual address range * * Provides memory mapping for the buffer. This can be used as the * &dma_buf_ops.mmap callback. It just forwards to drm_gem_prime_mmap(). * * Returns 0 on success or a negative error code on failure. */ int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma) { struct drm_gem_object *obj = dma_buf->priv; return drm_gem_prime_mmap(obj, vma); } EXPORT_SYMBOL(drm_gem_dmabuf_mmap); static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { .cache_sgt_mapping = true, .attach = drm_gem_map_attach, .detach = drm_gem_map_detach, .map_dma_buf = drm_gem_map_dma_buf, .unmap_dma_buf = drm_gem_unmap_dma_buf, .release = drm_gem_dmabuf_release, .mmap = drm_gem_dmabuf_mmap, .vmap = drm_gem_dmabuf_vmap, .vunmap = drm_gem_dmabuf_vunmap, }; /** * drm_prime_pages_to_sg - converts a page array into an sg list * @dev: DRM device * @pages: pointer to the array of page pointers to convert * @nr_pages: length of the page vector * * This helper creates an sg table object from a set of pages * the driver is responsible for mapping the pages into the * importers address space for use with dma_buf itself. * * This is useful for implementing &drm_gem_object_funcs.get_sg_table. 
*/ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev, struct page **pages, unsigned int nr_pages) { struct sg_table *sg; size_t max_segment = 0; int err; sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL); if (!sg) return ERR_PTR(-ENOMEM); if (dev) max_segment = dma_max_mapping_size(dev->dev); if (max_segment == 0) max_segment = UINT_MAX; err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0, (unsigned long)nr_pages << PAGE_SHIFT, max_segment, GFP_KERNEL); if (err) { kfree(sg); sg = ERR_PTR(err); } return sg; } EXPORT_SYMBOL(drm_prime_pages_to_sg); /** * drm_prime_get_contiguous_size - returns the contiguous size of the buffer * @sgt: sg_table describing the buffer to check * * This helper calculates the contiguous size in the DMA address space * of the buffer described by the provided sg_table. * * This is useful for implementing * &drm_gem_object_funcs.gem_prime_import_sg_table. */ unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt) { dma_addr_t expected = sg_dma_address(sgt->sgl); struct scatterlist *sg; unsigned long size = 0; int i; for_each_sgtable_dma_sg(sgt, sg, i) { unsigned int len = sg_dma_len(sg); if (!len) break; if (sg_dma_address(sg) != expected) break; expected += len; size += len; } return size; } EXPORT_SYMBOL(drm_prime_get_contiguous_size); /** * drm_gem_prime_export - helper library implementation of the export callback * @obj: GEM object to export * @flags: flags like DRM_CLOEXEC and DRM_RDWR * * This is the implementation of the &drm_gem_object_funcs.export functions for GEM drivers * using the PRIME helpers. It is used as the default in * drm_gem_prime_handle_to_fd(). */ struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj, int flags) { struct drm_device *dev = obj->dev; struct dma_buf_export_info exp_info = { .exp_name = KBUILD_MODNAME, /* white lie for debug */ .owner = dev->driver->fops->owner, .ops = &drm_gem_prime_dmabuf_ops, .size = obj->size, .flags = flags, .priv = obj, .resv = obj->resv, }; return drm_gem_dmabuf_export(dev, &exp_info); } EXPORT_SYMBOL(drm_gem_prime_export); /** * drm_gem_prime_import_dev - core implementation of the import callback * @dev: drm_device to import into * @dma_buf: dma-buf object to import * @attach_dev: struct device to dma_buf attach * * This is the core of drm_gem_prime_import(). It's designed to be called by * drivers who want to use a different device structure than &drm_device.dev for * attaching via dma_buf. This function calls * &drm_driver.gem_prime_import_sg_table internally. * * Drivers must arrange to call drm_prime_gem_destroy() from their * &drm_gem_object_funcs.free hook when using this function. */ struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev, struct dma_buf *dma_buf, struct device *attach_dev) { struct dma_buf_attachment *attach; struct sg_table *sgt; struct drm_gem_object *obj; int ret; if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) { obj = dma_buf->priv; if (obj->dev == dev) { /* * Importing dmabuf exported from our own gem increases * refcount on gem itself instead of f_count of dmabuf. 
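 * The attach-and-map path further below is therefore only taken for
 * dma-bufs that were exported by some other device or driver.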
*/ drm_gem_object_get(obj); return obj; } } if (!dev->driver->gem_prime_import_sg_table) return ERR_PTR(-EINVAL); attach = dma_buf_attach(dma_buf, attach_dev); if (IS_ERR(attach)) return ERR_CAST(attach); get_dma_buf(dma_buf); sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL); if (IS_ERR(sgt)) { ret = PTR_ERR(sgt); goto fail_detach; } obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt); if (IS_ERR(obj)) { ret = PTR_ERR(obj); goto fail_unmap; } obj->import_attach = attach; obj->resv = dma_buf->resv; return obj; fail_unmap: dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL); fail_detach: dma_buf_detach(dma_buf, attach); dma_buf_put(dma_buf); return ERR_PTR(ret); } EXPORT_SYMBOL(drm_gem_prime_import_dev); /** * drm_gem_prime_import - helper library implementation of the import callback * @dev: drm_device to import into * @dma_buf: dma-buf object to import * * This is the implementation of the gem_prime_import functions for GEM drivers * using the PRIME helpers. Drivers can use this as their * &drm_driver.gem_prime_import implementation. It is used as the default * implementation in drm_gem_prime_fd_to_handle(). * * Drivers must arrange to call drm_prime_gem_destroy() from their * &drm_gem_object_funcs.free hook when using this function. */ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf) { return drm_gem_prime_import_dev(dev, dma_buf, dev->dev); } EXPORT_SYMBOL(drm_gem_prime_import); /** * drm_prime_sg_to_page_array - convert an sg table into a page array * @sgt: scatter-gather table to convert * @pages: array of page pointers to store the pages in * @max_entries: size of the passed-in array * * Exports an sg table into an array of pages. * * This function is deprecated and its use is strongly discouraged. * The page array is only useful for page faults and those can corrupt fields * in the struct page if they are not handled by the exporting driver. */ int __deprecated drm_prime_sg_to_page_array(struct sg_table *sgt, struct page **pages, int max_entries) { struct sg_page_iter page_iter; struct page **p = pages; for_each_sgtable_page(sgt, &page_iter, 0) { if (WARN_ON(p - pages >= max_entries)) return -1; *p++ = sg_page_iter_page(&page_iter); } return 0; } EXPORT_SYMBOL(drm_prime_sg_to_page_array); /** * drm_prime_sg_to_dma_addr_array - convert an sg table into a dma addr array * @sgt: scatter-gather table to convert * @addrs: array to store the dma bus address of each page * @max_entries: size of the passed-in array * * Exports an sg table into an array of addresses. * * Drivers should use this in their &drm_driver.gem_prime_import_sg_table * implementation. */ int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs, int max_entries) { struct sg_dma_page_iter dma_iter; dma_addr_t *a = addrs; for_each_sgtable_dma_page(sgt, &dma_iter, 0) { if (WARN_ON(a - addrs >= max_entries)) return -1; *a++ = sg_page_iter_dma_address(&dma_iter); } return 0; } EXPORT_SYMBOL(drm_prime_sg_to_dma_addr_array); /** * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object * @obj: GEM object which was created from a dma-buf * @sg: the sg-table which was pinned at import time * * This is the cleanup function which GEM drivers need to call when they use * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs.
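 *
 * Minimal sketch of a driver's &drm_gem_object_funcs.free hook (illustrative;
 * "my_obj->sgt" is a hypothetical field where the driver stored the sg-table
 * returned at import time):
 *
 *	if (obj->import_attach)
 *		drm_prime_gem_destroy(obj, my_obj->sgt);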
*/ void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg) { struct dma_buf_attachment *attach; struct dma_buf *dma_buf; attach = obj->import_attach; if (sg) dma_buf_unmap_attachment_unlocked(attach, sg, DMA_BIDIRECTIONAL); dma_buf = attach->dmabuf; dma_buf_detach(attach->dmabuf, attach); /* remove the reference */ dma_buf_put(dma_buf); } EXPORT_SYMBOL(drm_prime_gem_destroy);
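/*
 * Putting it together (illustrative sketch only, not part of this file): a GEM
 * driver typically wires the helpers above into its object functions and
 * driver structure roughly as follows; all my_driver_* names are hypothetical.
 *
 *	static const struct drm_gem_object_funcs my_driver_gem_funcs = {
 *		.free = my_driver_gem_free,		// calls drm_prime_gem_destroy() for imports
 *		.get_sg_table = my_driver_get_sg_table,	// e.g. built with drm_prime_pages_to_sg()
 *		.vmap = my_driver_vmap,
 *		.vunmap = my_driver_vunmap,
 *	};
 *
 *	static const struct drm_driver my_driver = {
 *		...
 *		.gem_prime_import = drm_gem_prime_import,
 *		.gem_prime_import_sg_table = my_driver_import_sg_table,
 *		...
 *	};
 */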
// SPDX-License-Identifier: GPL-2.0-or-later /* * cx2341x - generic code for cx23415/6/8 based devices * * Copyright (C) 2006 Hans Verkuil <hverkuil@xs4all.nl> */ #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/videodev2.h> #include <media/tuner.h> #include <media/drv-intf/cx2341x.h> #include <media/v4l2-common.h> MODULE_DESCRIPTION("cx23415/6/8 driver"); MODULE_AUTHOR("Hans Verkuil"); MODULE_LICENSE("GPL"); static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Debug level (0-1)"); /********************** COMMON CODE *********************/ /* definitions for audio properties bits 29-28 */ #define CX2341X_AUDIO_ENCODING_METHOD_MPEG 0 #define CX2341X_AUDIO_ENCODING_METHOD_AC3 1 #define CX2341X_AUDIO_ENCODING_METHOD_LPCM 2 static const char *cx2341x_get_name(u32 id) { switch (id) { case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: return "Spatial Filter Mode"; case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER: return "Spatial Filter"; case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE: return "Spatial Luma Filter Type"; case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE: return "Spatial Chroma Filter Type"; case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE: return "Temporal Filter Mode"; case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER: return "Temporal Filter"; case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE: return "Median Filter Type"; case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP: return "Median Luma Filter Maximum"; case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM: return "Median Luma Filter Minimum"; case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP: return "Median Chroma Filter Maximum"; case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM: return "Median Chroma Filter Minimum"; case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS: return "Insert Navigation Packets"; } return NULL; } static const char **cx2341x_get_menu(u32 id) { static const char *cx2341x_video_spatial_filter_mode_menu[] = { "Manual", "Auto", NULL }; static const char *cx2341x_video_luma_spatial_filter_type_menu[] = { "Off", "1D Horizontal", "1D Vertical", "2D H/V Separable", "2D Symmetric non-separable", NULL }; static const char *cx2341x_video_chroma_spatial_filter_type_menu[] = { "Off", "1D Horizontal", NULL }; static const char *cx2341x_video_temporal_filter_mode_menu[] = { "Manual", "Auto", NULL }; static const char *cx2341x_video_median_filter_type_menu[] = { "Off", "Horizontal", "Vertical", "Horizontal/Vertical", "Diagonal", NULL }; switch (id) { case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: return cx2341x_video_spatial_filter_mode_menu; case
V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE: return cx2341x_video_luma_spatial_filter_type_menu; case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE: return cx2341x_video_chroma_spatial_filter_type_menu; case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE: return cx2341x_video_temporal_filter_mode_menu; case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE: return cx2341x_video_median_filter_type_menu; } return NULL; } static void cx2341x_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type, s32 *min, s32 *max, s32 *step, s32 *def, u32 *flags) { *name = cx2341x_get_name(id); *flags = 0; switch (id) { case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE: case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE: case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE: case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE: *type = V4L2_CTRL_TYPE_MENU; *min = 0; *step = 0; break; case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS: *type = V4L2_CTRL_TYPE_BOOLEAN; *min = 0; *max = *step = 1; break; default: *type = V4L2_CTRL_TYPE_INTEGER; break; } switch (id) { case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE: case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE: *flags |= V4L2_CTRL_FLAG_UPDATE; break; case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER: case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER: case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP: case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM: case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP: case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM: *flags |= V4L2_CTRL_FLAG_SLIDER; break; case V4L2_CID_MPEG_VIDEO_ENCODING: *flags |= V4L2_CTRL_FLAG_READ_ONLY; break; } } /********************** OLD CODE *********************/ /* Must be sorted from low to high control ID! 
*/ const u32 cx2341x_mpeg_ctrls[] = { V4L2_CID_CODEC_CLASS, V4L2_CID_MPEG_STREAM_TYPE, V4L2_CID_MPEG_STREAM_VBI_FMT, V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ, V4L2_CID_MPEG_AUDIO_ENCODING, V4L2_CID_MPEG_AUDIO_L2_BITRATE, V4L2_CID_MPEG_AUDIO_MODE, V4L2_CID_MPEG_AUDIO_MODE_EXTENSION, V4L2_CID_MPEG_AUDIO_EMPHASIS, V4L2_CID_MPEG_AUDIO_CRC, V4L2_CID_MPEG_AUDIO_MUTE, V4L2_CID_MPEG_AUDIO_AC3_BITRATE, V4L2_CID_MPEG_VIDEO_ENCODING, V4L2_CID_MPEG_VIDEO_ASPECT, V4L2_CID_MPEG_VIDEO_B_FRAMES, V4L2_CID_MPEG_VIDEO_GOP_SIZE, V4L2_CID_MPEG_VIDEO_GOP_CLOSURE, V4L2_CID_MPEG_VIDEO_BITRATE_MODE, V4L2_CID_MPEG_VIDEO_BITRATE, V4L2_CID_MPEG_VIDEO_BITRATE_PEAK, V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION, V4L2_CID_MPEG_VIDEO_MUTE, V4L2_CID_MPEG_VIDEO_MUTE_YUV, V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE, V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER, V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE, V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE, V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE, V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER, V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE, V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM, V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP, V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM, V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP, V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS, 0 }; EXPORT_SYMBOL(cx2341x_mpeg_ctrls); static const struct cx2341x_mpeg_params default_params = { /* misc */ .capabilities = 0, .port = CX2341X_PORT_MEMORY, .width = 720, .height = 480, .is_50hz = 0, /* stream */ .stream_type = V4L2_MPEG_STREAM_TYPE_MPEG2_PS, .stream_vbi_fmt = V4L2_MPEG_STREAM_VBI_FMT_NONE, .stream_insert_nav_packets = 0, /* audio */ .audio_sampling_freq = V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000, .audio_encoding = V4L2_MPEG_AUDIO_ENCODING_LAYER_2, .audio_l2_bitrate = V4L2_MPEG_AUDIO_L2_BITRATE_224K, .audio_ac3_bitrate = V4L2_MPEG_AUDIO_AC3_BITRATE_224K, .audio_mode = V4L2_MPEG_AUDIO_MODE_STEREO, .audio_mode_extension = V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4, .audio_emphasis = V4L2_MPEG_AUDIO_EMPHASIS_NONE, .audio_crc = V4L2_MPEG_AUDIO_CRC_NONE, .audio_mute = 0, /* video */ .video_encoding = V4L2_MPEG_VIDEO_ENCODING_MPEG_2, .video_aspect = V4L2_MPEG_VIDEO_ASPECT_4x3, .video_b_frames = 2, .video_gop_size = 12, .video_gop_closure = 1, .video_bitrate_mode = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR, .video_bitrate = 6000000, .video_bitrate_peak = 8000000, .video_temporal_decimation = 0, .video_mute = 0, .video_mute_yuv = 0x008080, /* YCbCr value for black */ /* encoding filters */ .video_spatial_filter_mode = V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL, .video_spatial_filter = 0, .video_luma_spatial_filter_type = V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR, .video_chroma_spatial_filter_type = V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR, .video_temporal_filter_mode = V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL, .video_temporal_filter = 8, .video_median_filter_type = V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF, .video_luma_median_filter_top = 255, .video_luma_median_filter_bottom = 0, .video_chroma_median_filter_top = 255, .video_chroma_median_filter_bottom = 0, }; /* Map the control ID to the correct field in the cx2341x_mpeg_params struct. Return -EINVAL if the ID is unknown, else return 0. 
*/ static int cx2341x_get_ctrl(const struct cx2341x_mpeg_params *params, struct v4l2_ext_control *ctrl) { switch (ctrl->id) { case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: ctrl->value = params->audio_sampling_freq; break; case V4L2_CID_MPEG_AUDIO_ENCODING: ctrl->value = params->audio_encoding; break; case V4L2_CID_MPEG_AUDIO_L2_BITRATE: ctrl->value = params->audio_l2_bitrate; break; case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: ctrl->value = params->audio_ac3_bitrate; break; case V4L2_CID_MPEG_AUDIO_MODE: ctrl->value = params->audio_mode; break; case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: ctrl->value = params->audio_mode_extension; break; case V4L2_CID_MPEG_AUDIO_EMPHASIS: ctrl->value = params->audio_emphasis; break; case V4L2_CID_MPEG_AUDIO_CRC: ctrl->value = params->audio_crc; break; case V4L2_CID_MPEG_AUDIO_MUTE: ctrl->value = params->audio_mute; break; case V4L2_CID_MPEG_VIDEO_ENCODING: ctrl->value = params->video_encoding; break; case V4L2_CID_MPEG_VIDEO_ASPECT: ctrl->value = params->video_aspect; break; case V4L2_CID_MPEG_VIDEO_B_FRAMES: ctrl->value = params->video_b_frames; break; case V4L2_CID_MPEG_VIDEO_GOP_SIZE: ctrl->value = params->video_gop_size; break; case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: ctrl->value = params->video_gop_closure; break; case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: ctrl->value = params->video_bitrate_mode; break; case V4L2_CID_MPEG_VIDEO_BITRATE: ctrl->value = params->video_bitrate; break; case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: ctrl->value = params->video_bitrate_peak; break; case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: ctrl->value = params->video_temporal_decimation; break; case V4L2_CID_MPEG_VIDEO_MUTE: ctrl->value = params->video_mute; break; case V4L2_CID_MPEG_VIDEO_MUTE_YUV: ctrl->value = params->video_mute_yuv; break; case V4L2_CID_MPEG_STREAM_TYPE: ctrl->value = params->stream_type; break; case V4L2_CID_MPEG_STREAM_VBI_FMT: ctrl->value = params->stream_vbi_fmt; break; case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: ctrl->value = params->video_spatial_filter_mode; break; case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER: ctrl->value = params->video_spatial_filter; break; case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE: ctrl->value = params->video_luma_spatial_filter_type; break; case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE: ctrl->value = params->video_chroma_spatial_filter_type; break; case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE: ctrl->value = params->video_temporal_filter_mode; break; case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER: ctrl->value = params->video_temporal_filter; break; case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE: ctrl->value = params->video_median_filter_type; break; case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP: ctrl->value = params->video_luma_median_filter_top; break; case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM: ctrl->value = params->video_luma_median_filter_bottom; break; case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP: ctrl->value = params->video_chroma_median_filter_top; break; case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM: ctrl->value = params->video_chroma_median_filter_bottom; break; case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS: ctrl->value = params->stream_insert_nav_packets; break; default: return -EINVAL; } return 0; } /* Map the control ID to the correct field in the cx2341x_mpeg_params struct. Return -EINVAL if the ID is unknown, else return 0. 
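   Typical call path (for reference): a bridge driver's VIDIOC_S_EXT_CTRLS
   handler calls cx2341x_ext_ctrls() further below, which validates each
   control with cx2341x_ctrl_query() and v4l2_ctrl_check() before applying
   it here via cx2341x_set_ctrl().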
*/ static int cx2341x_set_ctrl(struct cx2341x_mpeg_params *params, int busy, struct v4l2_ext_control *ctrl) { switch (ctrl->id) { case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: if (busy) return -EBUSY; params->audio_sampling_freq = ctrl->value; break; case V4L2_CID_MPEG_AUDIO_ENCODING: if (busy) return -EBUSY; if (params->capabilities & CX2341X_CAP_HAS_AC3) if (ctrl->value != V4L2_MPEG_AUDIO_ENCODING_LAYER_2 && ctrl->value != V4L2_MPEG_AUDIO_ENCODING_AC3) return -ERANGE; params->audio_encoding = ctrl->value; break; case V4L2_CID_MPEG_AUDIO_L2_BITRATE: if (busy) return -EBUSY; params->audio_l2_bitrate = ctrl->value; break; case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: if (busy) return -EBUSY; if (!(params->capabilities & CX2341X_CAP_HAS_AC3)) return -EINVAL; params->audio_ac3_bitrate = ctrl->value; break; case V4L2_CID_MPEG_AUDIO_MODE: params->audio_mode = ctrl->value; break; case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: params->audio_mode_extension = ctrl->value; break; case V4L2_CID_MPEG_AUDIO_EMPHASIS: params->audio_emphasis = ctrl->value; break; case V4L2_CID_MPEG_AUDIO_CRC: params->audio_crc = ctrl->value; break; case V4L2_CID_MPEG_AUDIO_MUTE: params->audio_mute = ctrl->value; break; case V4L2_CID_MPEG_VIDEO_ASPECT: params->video_aspect = ctrl->value; break; case V4L2_CID_MPEG_VIDEO_B_FRAMES: { int b = ctrl->value + 1; int gop = params->video_gop_size; params->video_b_frames = ctrl->value; params->video_gop_size = b * ((gop + b - 1) / b); /* Max GOP size = 34 */ while (params->video_gop_size > 34) params->video_gop_size -= b; break; } case V4L2_CID_MPEG_VIDEO_GOP_SIZE: { int b = params->video_b_frames + 1; int gop = ctrl->value; params->video_gop_size = b * ((gop + b - 1) / b); /* Max GOP size = 34 */ while (params->video_gop_size > 34) params->video_gop_size -= b; ctrl->value = params->video_gop_size; break; } case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: params->video_gop_closure = ctrl->value; break; case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: if (busy) return -EBUSY; /* MPEG-1 only allows CBR */ if (params->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1 && ctrl->value != V4L2_MPEG_VIDEO_BITRATE_MODE_CBR) return -EINVAL; params->video_bitrate_mode = ctrl->value; break; case V4L2_CID_MPEG_VIDEO_BITRATE: if (busy) return -EBUSY; params->video_bitrate = ctrl->value; break; case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: if (busy) return -EBUSY; params->video_bitrate_peak = ctrl->value; break; case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: params->video_temporal_decimation = ctrl->value; break; case V4L2_CID_MPEG_VIDEO_MUTE: params->video_mute = (ctrl->value != 0); break; case V4L2_CID_MPEG_VIDEO_MUTE_YUV: params->video_mute_yuv = ctrl->value; break; case V4L2_CID_MPEG_STREAM_TYPE: if (busy) return -EBUSY; params->stream_type = ctrl->value; params->video_encoding = (params->stream_type == V4L2_MPEG_STREAM_TYPE_MPEG1_SS || params->stream_type == V4L2_MPEG_STREAM_TYPE_MPEG1_VCD) ? 
V4L2_MPEG_VIDEO_ENCODING_MPEG_1 : V4L2_MPEG_VIDEO_ENCODING_MPEG_2; if (params->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1) /* MPEG-1 implies CBR */ params->video_bitrate_mode = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR; break; case V4L2_CID_MPEG_STREAM_VBI_FMT: params->stream_vbi_fmt = ctrl->value; break; case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: params->video_spatial_filter_mode = ctrl->value; break; case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER: params->video_spatial_filter = ctrl->value; break; case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE: params->video_luma_spatial_filter_type = ctrl->value; break; case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE: params->video_chroma_spatial_filter_type = ctrl->value; break; case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE: params->video_temporal_filter_mode = ctrl->value; break; case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER: params->video_temporal_filter = ctrl->value; break; case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE: params->video_median_filter_type = ctrl->value; break; case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP: params->video_luma_median_filter_top = ctrl->value; break; case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM: params->video_luma_median_filter_bottom = ctrl->value; break; case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP: params->video_chroma_median_filter_top = ctrl->value; break; case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM: params->video_chroma_median_filter_bottom = ctrl->value; break; case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS: params->stream_insert_nav_packets = ctrl->value; break; default: return -EINVAL; } return 0; } static int cx2341x_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 step, s32 def) { const char *name; switch (qctrl->id) { /* MPEG controls */ case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER: case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE: case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE: case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE: case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER: case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE: case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP: case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM: case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP: case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM: case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS: cx2341x_ctrl_fill(qctrl->id, &name, &qctrl->type, &min, &max, &step, &def, &qctrl->flags); qctrl->minimum = min; qctrl->maximum = max; qctrl->step = step; qctrl->default_value = def; qctrl->reserved[0] = qctrl->reserved[1] = 0; strscpy(qctrl->name, name, sizeof(qctrl->name)); return 0; default: return v4l2_ctrl_query_fill(qctrl, min, max, step, def); } } int cx2341x_ctrl_query(const struct cx2341x_mpeg_params *params, struct v4l2_queryctrl *qctrl) { int err; switch (qctrl->id) { case V4L2_CID_CODEC_CLASS: return v4l2_ctrl_query_fill(qctrl, 0, 0, 0, 0); case V4L2_CID_MPEG_STREAM_TYPE: return v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_STREAM_TYPE_MPEG2_PS, V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD, 1, V4L2_MPEG_STREAM_TYPE_MPEG2_PS); case V4L2_CID_MPEG_STREAM_VBI_FMT: if (params->capabilities & CX2341X_CAP_HAS_SLICED_VBI) return v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_STREAM_VBI_FMT_NONE, V4L2_MPEG_STREAM_VBI_FMT_IVTV, 1, V4L2_MPEG_STREAM_VBI_FMT_NONE); return cx2341x_ctrl_query_fill(qctrl, 
V4L2_MPEG_STREAM_VBI_FMT_NONE, V4L2_MPEG_STREAM_VBI_FMT_NONE, 1, default_params.stream_vbi_fmt); case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: return v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100, V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000, 1, V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000); case V4L2_CID_MPEG_AUDIO_ENCODING: if (params->capabilities & CX2341X_CAP_HAS_AC3) { /* * The state of L2 & AC3 bitrate controls can change * when this control changes, but v4l2_ctrl_query_fill() * already sets V4L2_CTRL_FLAG_UPDATE for * V4L2_CID_MPEG_AUDIO_ENCODING, so we don't here. */ return v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_AUDIO_ENCODING_LAYER_2, V4L2_MPEG_AUDIO_ENCODING_AC3, 1, default_params.audio_encoding); } return v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_AUDIO_ENCODING_LAYER_2, V4L2_MPEG_AUDIO_ENCODING_LAYER_2, 1, default_params.audio_encoding); case V4L2_CID_MPEG_AUDIO_L2_BITRATE: err = v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_AUDIO_L2_BITRATE_192K, V4L2_MPEG_AUDIO_L2_BITRATE_384K, 1, default_params.audio_l2_bitrate); if (err) return err; if (params->capabilities & CX2341X_CAP_HAS_AC3 && params->audio_encoding != V4L2_MPEG_AUDIO_ENCODING_LAYER_2) qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; return 0; case V4L2_CID_MPEG_AUDIO_MODE: return v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_AUDIO_MODE_STEREO, V4L2_MPEG_AUDIO_MODE_MONO, 1, V4L2_MPEG_AUDIO_MODE_STEREO); case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION: err = v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4, V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16, 1, V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4); if (err == 0 && params->audio_mode != V4L2_MPEG_AUDIO_MODE_JOINT_STEREO) qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; return err; case V4L2_CID_MPEG_AUDIO_EMPHASIS: return v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_AUDIO_EMPHASIS_NONE, V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17, 1, V4L2_MPEG_AUDIO_EMPHASIS_NONE); case V4L2_CID_MPEG_AUDIO_CRC: return v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_AUDIO_CRC_NONE, V4L2_MPEG_AUDIO_CRC_CRC16, 1, V4L2_MPEG_AUDIO_CRC_NONE); case V4L2_CID_MPEG_AUDIO_MUTE: return v4l2_ctrl_query_fill(qctrl, 0, 1, 1, 0); case V4L2_CID_MPEG_AUDIO_AC3_BITRATE: err = v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_AUDIO_AC3_BITRATE_48K, V4L2_MPEG_AUDIO_AC3_BITRATE_448K, 1, default_params.audio_ac3_bitrate); if (err) return err; if (params->capabilities & CX2341X_CAP_HAS_AC3) { if (params->audio_encoding != V4L2_MPEG_AUDIO_ENCODING_AC3) qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; } else qctrl->flags |= V4L2_CTRL_FLAG_DISABLED; return 0; case V4L2_CID_MPEG_VIDEO_ENCODING: /* this setting is read-only for the cx2341x since the V4L2_CID_MPEG_STREAM_TYPE really determines the MPEG-1/2 setting */ err = v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_VIDEO_ENCODING_MPEG_1, V4L2_MPEG_VIDEO_ENCODING_MPEG_2, 1, V4L2_MPEG_VIDEO_ENCODING_MPEG_2); if (err == 0) qctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY; return err; case V4L2_CID_MPEG_VIDEO_ASPECT: return v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_VIDEO_ASPECT_1x1, V4L2_MPEG_VIDEO_ASPECT_221x100, 1, V4L2_MPEG_VIDEO_ASPECT_4x3); case V4L2_CID_MPEG_VIDEO_B_FRAMES: return v4l2_ctrl_query_fill(qctrl, 0, 33, 1, 2); case V4L2_CID_MPEG_VIDEO_GOP_SIZE: return v4l2_ctrl_query_fill(qctrl, 1, 34, 1, params->is_50hz ? 
12 : 15); case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: return v4l2_ctrl_query_fill(qctrl, 0, 1, 1, 1); case V4L2_CID_MPEG_VIDEO_BITRATE_MODE: err = v4l2_ctrl_query_fill(qctrl, V4L2_MPEG_VIDEO_BITRATE_MODE_VBR, V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, 1, V4L2_MPEG_VIDEO_BITRATE_MODE_VBR); if (err == 0 && params->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1) qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; return err; case V4L2_CID_MPEG_VIDEO_BITRATE: return v4l2_ctrl_query_fill(qctrl, 0, 27000000, 1, 6000000); case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK: err = v4l2_ctrl_query_fill(qctrl, 0, 27000000, 1, 8000000); if (err == 0 && params->video_bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR) qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; return err; case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: return v4l2_ctrl_query_fill(qctrl, 0, 255, 1, 0); case V4L2_CID_MPEG_VIDEO_MUTE: return v4l2_ctrl_query_fill(qctrl, 0, 1, 1, 0); case V4L2_CID_MPEG_VIDEO_MUTE_YUV: /* Init YUV (really YCbCr) to black */ return v4l2_ctrl_query_fill(qctrl, 0, 0xffffff, 1, 0x008080); /* CX23415/6 specific */ case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: return cx2341x_ctrl_query_fill(qctrl, V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL, V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO, 1, default_params.video_spatial_filter_mode); case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER: cx2341x_ctrl_query_fill(qctrl, 0, 15, 1, default_params.video_spatial_filter); qctrl->flags |= V4L2_CTRL_FLAG_SLIDER; if (params->video_spatial_filter_mode == V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO) qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; return 0; case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE: cx2341x_ctrl_query_fill(qctrl, V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF, V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE, 1, default_params.video_luma_spatial_filter_type); if (params->video_spatial_filter_mode == V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO) qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; return 0; case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE: cx2341x_ctrl_query_fill(qctrl, V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF, V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR, 1, default_params.video_chroma_spatial_filter_type); if (params->video_spatial_filter_mode == V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO) qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; return 0; case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE: return cx2341x_ctrl_query_fill(qctrl, V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL, V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO, 1, default_params.video_temporal_filter_mode); case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER: cx2341x_ctrl_query_fill(qctrl, 0, 31, 1, default_params.video_temporal_filter); qctrl->flags |= V4L2_CTRL_FLAG_SLIDER; if (params->video_temporal_filter_mode == V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO) qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; return 0; case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE: return cx2341x_ctrl_query_fill(qctrl, V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF, V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG, 1, default_params.video_median_filter_type); case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP: cx2341x_ctrl_query_fill(qctrl, 0, 255, 1, default_params.video_luma_median_filter_top); qctrl->flags |= V4L2_CTRL_FLAG_SLIDER; if (params->video_median_filter_type == V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF) qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; return 0; case 
V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM: cx2341x_ctrl_query_fill(qctrl, 0, 255, 1, default_params.video_luma_median_filter_bottom); qctrl->flags |= V4L2_CTRL_FLAG_SLIDER; if (params->video_median_filter_type == V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF) qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; return 0; case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP: cx2341x_ctrl_query_fill(qctrl, 0, 255, 1, default_params.video_chroma_median_filter_top); qctrl->flags |= V4L2_CTRL_FLAG_SLIDER; if (params->video_median_filter_type == V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF) qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; return 0; case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM: cx2341x_ctrl_query_fill(qctrl, 0, 255, 1, default_params.video_chroma_median_filter_bottom); qctrl->flags |= V4L2_CTRL_FLAG_SLIDER; if (params->video_median_filter_type == V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF) qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; return 0; case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS: return cx2341x_ctrl_query_fill(qctrl, 0, 1, 1, default_params.stream_insert_nav_packets); default: return -EINVAL; } } EXPORT_SYMBOL(cx2341x_ctrl_query); const char * const *cx2341x_ctrl_get_menu(const struct cx2341x_mpeg_params *p, u32 id) { static const char * const mpeg_stream_type_without_ts[] = { "MPEG-2 Program Stream", "", "MPEG-1 System Stream", "MPEG-2 DVD-compatible Stream", "MPEG-1 VCD-compatible Stream", "MPEG-2 SVCD-compatible Stream", NULL }; static const char *mpeg_stream_type_with_ts[] = { "MPEG-2 Program Stream", "MPEG-2 Transport Stream", "MPEG-1 System Stream", "MPEG-2 DVD-compatible Stream", "MPEG-1 VCD-compatible Stream", "MPEG-2 SVCD-compatible Stream", NULL }; static const char *mpeg_audio_encoding_l2_ac3[] = { "", "MPEG-1/2 Layer II", "", "", "AC-3", NULL }; switch (id) { case V4L2_CID_MPEG_STREAM_TYPE: return (p->capabilities & CX2341X_CAP_HAS_TS) ? mpeg_stream_type_with_ts : mpeg_stream_type_without_ts; case V4L2_CID_MPEG_AUDIO_ENCODING: return (p->capabilities & CX2341X_CAP_HAS_AC3) ? mpeg_audio_encoding_l2_ac3 : v4l2_ctrl_get_menu(id); case V4L2_CID_MPEG_AUDIO_L1_BITRATE: case V4L2_CID_MPEG_AUDIO_L3_BITRATE: return NULL; case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE: case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE: case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE: case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE: return cx2341x_get_menu(id); default: return v4l2_ctrl_get_menu(id); } } EXPORT_SYMBOL(cx2341x_ctrl_get_menu); static void cx2341x_calc_audio_properties(struct cx2341x_mpeg_params *params) { params->audio_properties = (params->audio_sampling_freq << 0) | (params->audio_mode << 8) | (params->audio_mode_extension << 10) | (((params->audio_emphasis == V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17) ? 3 : params->audio_emphasis) << 12) | (params->audio_crc << 14); if ((params->capabilities & CX2341X_CAP_HAS_AC3) && params->audio_encoding == V4L2_MPEG_AUDIO_ENCODING_AC3) { params->audio_properties |= /* Not sure if this MPEG Layer II setting is required */ ((3 - V4L2_MPEG_AUDIO_ENCODING_LAYER_2) << 2) | (params->audio_ac3_bitrate << 4) | (CX2341X_AUDIO_ENCODING_METHOD_AC3 << 28); } else { /* Assuming MPEG Layer II */ params->audio_properties |= ((3 - params->audio_encoding) << 2) | ((1 + params->audio_l2_bitrate) << 4); } } /* Check for correctness of the ctrl's value based on the data from struct v4l2_queryctrl and the available menu items. 
Note that menu_items may be NULL, in that case it is ignored. */ static int v4l2_ctrl_check(struct v4l2_ext_control *ctrl, struct v4l2_queryctrl *qctrl, const char * const *menu_items) { if (qctrl->flags & V4L2_CTRL_FLAG_DISABLED) return -EINVAL; if (qctrl->flags & V4L2_CTRL_FLAG_GRABBED) return -EBUSY; if (qctrl->type == V4L2_CTRL_TYPE_STRING) return 0; if (qctrl->type == V4L2_CTRL_TYPE_BUTTON || qctrl->type == V4L2_CTRL_TYPE_INTEGER64 || qctrl->type == V4L2_CTRL_TYPE_CTRL_CLASS) return 0; if (ctrl->value < qctrl->minimum || ctrl->value > qctrl->maximum) return -ERANGE; if (qctrl->type == V4L2_CTRL_TYPE_MENU && menu_items != NULL) { if (menu_items[ctrl->value] == NULL || menu_items[ctrl->value][0] == '\0') return -EINVAL; } if (qctrl->type == V4L2_CTRL_TYPE_BITMASK && (ctrl->value & ~qctrl->maximum)) return -ERANGE; return 0; } int cx2341x_ext_ctrls(struct cx2341x_mpeg_params *params, int busy, struct v4l2_ext_controls *ctrls, unsigned int cmd) { int err = 0; int i; if (cmd == VIDIOC_G_EXT_CTRLS) { for (i = 0; i < ctrls->count; i++) { struct v4l2_ext_control *ctrl = ctrls->controls + i; err = cx2341x_get_ctrl(params, ctrl); if (err) { ctrls->error_idx = i; break; } } return err; } for (i = 0; i < ctrls->count; i++) { struct v4l2_ext_control *ctrl = ctrls->controls + i; struct v4l2_queryctrl qctrl; const char * const *menu_items = NULL; qctrl.id = ctrl->id; err = cx2341x_ctrl_query(params, &qctrl); if (err) break; if (qctrl.type == V4L2_CTRL_TYPE_MENU) menu_items = cx2341x_ctrl_get_menu(params, qctrl.id); err = v4l2_ctrl_check(ctrl, &qctrl, menu_items); if (err) break; err = cx2341x_set_ctrl(params, busy, ctrl); if (err) break; } if (err == 0 && params->video_bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR && params->video_bitrate_peak < params->video_bitrate) { err = -ERANGE; ctrls->error_idx = ctrls->count; } if (err) ctrls->error_idx = i; else cx2341x_calc_audio_properties(params); return err; } EXPORT_SYMBOL(cx2341x_ext_ctrls); void cx2341x_fill_defaults(struct cx2341x_mpeg_params *p) { *p = default_params; cx2341x_calc_audio_properties(p); } EXPORT_SYMBOL(cx2341x_fill_defaults); static int cx2341x_api(void *priv, cx2341x_mbox_func func, u32 cmd, int args, ...) 
{ u32 data[CX2341X_MBOX_MAX_DATA]; va_list vargs; int i; va_start(vargs, args); for (i = 0; i < args; i++) data[i] = va_arg(vargs, int); va_end(vargs); return func(priv, cmd, args, 0, data); } #define CMP_FIELD(__old, __new, __field) (__old->__field != __new->__field) int cx2341x_update(void *priv, cx2341x_mbox_func func, const struct cx2341x_mpeg_params *old, const struct cx2341x_mpeg_params *new) { static int mpeg_stream_type[] = { 0, /* MPEG-2 PS */ 1, /* MPEG-2 TS */ 2, /* MPEG-1 SS */ 14, /* DVD */ 11, /* VCD */ 12, /* SVCD */ }; int err; cx2341x_api(priv, func, CX2341X_ENC_SET_OUTPUT_PORT, 2, new->port, 0); if (!old || CMP_FIELD(old, new, is_50hz)) { err = cx2341x_api(priv, func, CX2341X_ENC_SET_FRAME_RATE, 1, new->is_50hz); if (err) return err; } if (!old || CMP_FIELD(old, new, width) || CMP_FIELD(old, new, height) || CMP_FIELD(old, new, video_encoding)) { u16 w = new->width; u16 h = new->height; if (new->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1) { w /= 2; h /= 2; } err = cx2341x_api(priv, func, CX2341X_ENC_SET_FRAME_SIZE, 2, h, w); if (err) return err; } if (!old || CMP_FIELD(old, new, stream_type)) { err = cx2341x_api(priv, func, CX2341X_ENC_SET_STREAM_TYPE, 1, mpeg_stream_type[new->stream_type]); if (err) return err; } if (!old || CMP_FIELD(old, new, video_aspect)) { err = cx2341x_api(priv, func, CX2341X_ENC_SET_ASPECT_RATIO, 1, 1 + new->video_aspect); if (err) return err; } if (!old || CMP_FIELD(old, new, video_b_frames) || CMP_FIELD(old, new, video_gop_size)) { err = cx2341x_api(priv, func, CX2341X_ENC_SET_GOP_PROPERTIES, 2, new->video_gop_size, new->video_b_frames + 1); if (err) return err; } if (!old || CMP_FIELD(old, new, video_gop_closure)) { err = cx2341x_api(priv, func, CX2341X_ENC_SET_GOP_CLOSURE, 1, new->video_gop_closure); if (err) return err; } if (!old || CMP_FIELD(old, new, audio_properties)) { err = cx2341x_api(priv, func, CX2341X_ENC_SET_AUDIO_PROPERTIES, 1, new->audio_properties); if (err) return err; } if (!old || CMP_FIELD(old, new, audio_mute)) { err = cx2341x_api(priv, func, CX2341X_ENC_MUTE_AUDIO, 1, new->audio_mute); if (err) return err; } if (!old || CMP_FIELD(old, new, video_bitrate_mode) || CMP_FIELD(old, new, video_bitrate) || CMP_FIELD(old, new, video_bitrate_peak)) { err = cx2341x_api(priv, func, CX2341X_ENC_SET_BIT_RATE, 5, new->video_bitrate_mode, new->video_bitrate, new->video_bitrate_peak / 400, 0, 0); if (err) return err; } if (!old || CMP_FIELD(old, new, video_spatial_filter_mode) || CMP_FIELD(old, new, video_temporal_filter_mode) || CMP_FIELD(old, new, video_median_filter_type)) { err = cx2341x_api(priv, func, CX2341X_ENC_SET_DNR_FILTER_MODE, 2, new->video_spatial_filter_mode | (new->video_temporal_filter_mode << 1), new->video_median_filter_type); if (err) return err; } if (!old || CMP_FIELD(old, new, video_luma_median_filter_bottom) || CMP_FIELD(old, new, video_luma_median_filter_top) || CMP_FIELD(old, new, video_chroma_median_filter_bottom) || CMP_FIELD(old, new, video_chroma_median_filter_top)) { err = cx2341x_api(priv, func, CX2341X_ENC_SET_CORING_LEVELS, 4, new->video_luma_median_filter_bottom, new->video_luma_median_filter_top, new->video_chroma_median_filter_bottom, new->video_chroma_median_filter_top); if (err) return err; } if (!old || CMP_FIELD(old, new, video_luma_spatial_filter_type) || CMP_FIELD(old, new, video_chroma_spatial_filter_type)) { err = cx2341x_api(priv, func, CX2341X_ENC_SET_SPATIAL_FILTER_TYPE, 2, new->video_luma_spatial_filter_type, new->video_chroma_spatial_filter_type); if (err) return err; } if (!old || 
CMP_FIELD(old, new, video_spatial_filter) || CMP_FIELD(old, new, video_temporal_filter)) { err = cx2341x_api(priv, func, CX2341X_ENC_SET_DNR_FILTER_PROPS, 2, new->video_spatial_filter, new->video_temporal_filter); if (err) return err; } if (!old || CMP_FIELD(old, new, video_temporal_decimation)) { err = cx2341x_api(priv, func, CX2341X_ENC_SET_FRAME_DROP_RATE, 1, new->video_temporal_decimation); if (err) return err; } if (!old || CMP_FIELD(old, new, video_mute) || (new->video_mute && CMP_FIELD(old, new, video_mute_yuv))) { err = cx2341x_api(priv, func, CX2341X_ENC_MUTE_VIDEO, 1, new->video_mute | (new->video_mute_yuv << 8)); if (err) return err; } if (!old || CMP_FIELD(old, new, stream_insert_nav_packets)) { err = cx2341x_api(priv, func, CX2341X_ENC_MISC, 2, 7, new->stream_insert_nav_packets); if (err) return err; } return 0; } EXPORT_SYMBOL(cx2341x_update); static const char *cx2341x_menu_item(const struct cx2341x_mpeg_params *p, u32 id) { const char * const *menu = cx2341x_ctrl_get_menu(p, id); struct v4l2_ext_control ctrl; if (menu == NULL) goto invalid; ctrl.id = id; if (cx2341x_get_ctrl(p, &ctrl)) goto invalid; while (ctrl.value-- && *menu) menu++; if (*menu == NULL) goto invalid; return *menu; invalid: return "<invalid>"; } void cx2341x_log_status(const struct cx2341x_mpeg_params *p, const char *prefix) { int is_mpeg1 = p->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1; /* Stream */ printk(KERN_INFO "%s: Stream: %s", prefix, cx2341x_menu_item(p, V4L2_CID_MPEG_STREAM_TYPE)); if (p->stream_insert_nav_packets) printk(KERN_CONT " (with navigation packets)"); printk(KERN_CONT "\n"); printk(KERN_INFO "%s: VBI Format: %s\n", prefix, cx2341x_menu_item(p, V4L2_CID_MPEG_STREAM_VBI_FMT)); /* Video */ printk(KERN_INFO "%s: Video: %dx%d, %d fps%s\n", prefix, p->width / (is_mpeg1 ? 2 : 1), p->height / (is_mpeg1 ? 2 : 1), p->is_50hz ? 25 : 30, (p->video_mute) ? " (muted)" : ""); printk(KERN_INFO "%s: Video: %s, %s, %s, %d", prefix, cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_ENCODING), cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_ASPECT), cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_BITRATE_MODE), p->video_bitrate); if (p->video_bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) printk(KERN_CONT ", Peak %d", p->video_bitrate_peak); printk(KERN_CONT "\n"); printk(KERN_INFO "%s: Video: GOP Size %d, %d B-Frames, %sGOP Closure\n", prefix, p->video_gop_size, p->video_b_frames, p->video_gop_closure ? "" : "No "); if (p->video_temporal_decimation) printk(KERN_INFO "%s: Video: Temporal Decimation %d\n", prefix, p->video_temporal_decimation); /* Audio */ printk(KERN_INFO "%s: Audio: %s, %s, %s, %s%s", prefix, cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ), cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_ENCODING), cx2341x_menu_item(p, p->audio_encoding == V4L2_MPEG_AUDIO_ENCODING_AC3 ? V4L2_CID_MPEG_AUDIO_AC3_BITRATE : V4L2_CID_MPEG_AUDIO_L2_BITRATE), cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_MODE), p->audio_mute ? 
" (muted)" : ""); if (p->audio_mode == V4L2_MPEG_AUDIO_MODE_JOINT_STEREO) printk(KERN_CONT ", %s", cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_MODE_EXTENSION)); printk(KERN_CONT ", %s, %s\n", cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_EMPHASIS), cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_CRC)); /* Encoding filters */ printk(KERN_INFO "%s: Spatial Filter: %s, Luma %s, Chroma %s, %d\n", prefix, cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE), cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE), cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE), p->video_spatial_filter); printk(KERN_INFO "%s: Temporal Filter: %s, %d\n", prefix, cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE), p->video_temporal_filter); printk(KERN_INFO "%s: Median Filter: %s, Luma [%d, %d], Chroma [%d, %d]\n", prefix, cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE), p->video_luma_median_filter_bottom, p->video_luma_median_filter_top, p->video_chroma_median_filter_bottom, p->video_chroma_median_filter_top); } EXPORT_SYMBOL(cx2341x_log_status); /********************** NEW CODE *********************/ static inline struct cx2341x_handler *to_cxhdl(struct v4l2_ctrl *ctrl) { return container_of(ctrl->handler, struct cx2341x_handler, hdl); } static int cx2341x_hdl_api(struct cx2341x_handler *hdl, u32 cmd, int args, ...) { u32 data[CX2341X_MBOX_MAX_DATA]; va_list vargs; int i; va_start(vargs, args); for (i = 0; i < args; i++) data[i] = va_arg(vargs, int); va_end(vargs); return hdl->func(hdl->priv, cmd, args, 0, data); } /* ctrl->handler->lock is held, so it is safe to access cur.val */ static inline int cx2341x_neq(struct v4l2_ctrl *ctrl) { return ctrl && ctrl->val != ctrl->cur.val; } static int cx2341x_try_ctrl(struct v4l2_ctrl *ctrl) { struct cx2341x_handler *hdl = to_cxhdl(ctrl); s32 val = ctrl->val; switch (ctrl->id) { case V4L2_CID_MPEG_VIDEO_B_FRAMES: { /* video gop cluster */ int b = val + 1; int gop = hdl->video_gop_size->val; gop = b * ((gop + b - 1) / b); /* Max GOP size = 34 */ while (gop > 34) gop -= b; hdl->video_gop_size->val = gop; break; } case V4L2_CID_MPEG_STREAM_TYPE: /* stream type cluster */ hdl->video_encoding->val = (hdl->stream_type->val == V4L2_MPEG_STREAM_TYPE_MPEG1_SS || hdl->stream_type->val == V4L2_MPEG_STREAM_TYPE_MPEG1_VCD) ? 
V4L2_MPEG_VIDEO_ENCODING_MPEG_1 : V4L2_MPEG_VIDEO_ENCODING_MPEG_2; if (hdl->video_encoding->val == V4L2_MPEG_VIDEO_ENCODING_MPEG_1) /* MPEG-1 implies CBR */ hdl->video_bitrate_mode->val = V4L2_MPEG_VIDEO_BITRATE_MODE_CBR; /* peak bitrate shall be >= normal bitrate */ if (hdl->video_bitrate_mode->val == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR && hdl->video_bitrate_peak->val < hdl->video_bitrate->val) hdl->video_bitrate_peak->val = hdl->video_bitrate->val; break; } return 0; } static int cx2341x_s_ctrl(struct v4l2_ctrl *ctrl) { static const int mpeg_stream_type[] = { 0, /* MPEG-2 PS */ 1, /* MPEG-2 TS */ 2, /* MPEG-1 SS */ 14, /* DVD */ 11, /* VCD */ 12, /* SVCD */ }; struct cx2341x_handler *hdl = to_cxhdl(ctrl); s32 val = ctrl->val; u32 props; int err; switch (ctrl->id) { case V4L2_CID_MPEG_STREAM_VBI_FMT: if (hdl->ops && hdl->ops->s_stream_vbi_fmt) return hdl->ops->s_stream_vbi_fmt(hdl, val); return 0; case V4L2_CID_MPEG_VIDEO_ASPECT: return cx2341x_hdl_api(hdl, CX2341X_ENC_SET_ASPECT_RATIO, 1, val + 1); case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: return cx2341x_hdl_api(hdl, CX2341X_ENC_SET_GOP_CLOSURE, 1, val); case V4L2_CID_MPEG_AUDIO_MUTE: return cx2341x_hdl_api(hdl, CX2341X_ENC_MUTE_AUDIO, 1, val); case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION: return cx2341x_hdl_api(hdl, CX2341X_ENC_SET_FRAME_DROP_RATE, 1, val); case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS: return cx2341x_hdl_api(hdl, CX2341X_ENC_MISC, 2, 7, val); case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ: /* audio properties cluster */ props = (hdl->audio_sampling_freq->val << 0) | (hdl->audio_mode->val << 8) | (hdl->audio_mode_extension->val << 10) | (hdl->audio_crc->val << 14); if (hdl->audio_emphasis->val == V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17) props |= 3 << 12; else props |= hdl->audio_emphasis->val << 12; if (hdl->audio_encoding->val == V4L2_MPEG_AUDIO_ENCODING_AC3) { props |= #if 1 /* Not sure if this MPEG Layer II setting is required */ ((3 - V4L2_MPEG_AUDIO_ENCODING_LAYER_2) << 2) | #endif (hdl->audio_ac3_bitrate->val << 4) | (CX2341X_AUDIO_ENCODING_METHOD_AC3 << 28); } else { /* Assuming MPEG Layer II */ props |= ((3 - hdl->audio_encoding->val) << 2) | ((1 + hdl->audio_l2_bitrate->val) << 4); } err = cx2341x_hdl_api(hdl, CX2341X_ENC_SET_AUDIO_PROPERTIES, 1, props); if (err) return err; hdl->audio_properties = props; if (hdl->audio_ac3_bitrate) { int is_ac3 = hdl->audio_encoding->val == V4L2_MPEG_AUDIO_ENCODING_AC3; v4l2_ctrl_activate(hdl->audio_ac3_bitrate, is_ac3); v4l2_ctrl_activate(hdl->audio_l2_bitrate, !is_ac3); } v4l2_ctrl_activate(hdl->audio_mode_extension, hdl->audio_mode->val == V4L2_MPEG_AUDIO_MODE_JOINT_STEREO); if (cx2341x_neq(hdl->audio_sampling_freq) && hdl->ops && hdl->ops->s_audio_sampling_freq) return hdl->ops->s_audio_sampling_freq(hdl, hdl->audio_sampling_freq->val); if (cx2341x_neq(hdl->audio_mode) && hdl->ops && hdl->ops->s_audio_mode) return hdl->ops->s_audio_mode(hdl, hdl->audio_mode->val); return 0; case V4L2_CID_MPEG_VIDEO_B_FRAMES: /* video gop cluster */ return cx2341x_hdl_api(hdl, CX2341X_ENC_SET_GOP_PROPERTIES, 2, hdl->video_gop_size->val, hdl->video_b_frames->val + 1); case V4L2_CID_MPEG_STREAM_TYPE: /* stream type cluster */ err = cx2341x_hdl_api(hdl, CX2341X_ENC_SET_STREAM_TYPE, 1, mpeg_stream_type[val]); if (err) return err; err = cx2341x_hdl_api(hdl, CX2341X_ENC_SET_BIT_RATE, 5, hdl->video_bitrate_mode->val, hdl->video_bitrate->val, hdl->video_bitrate_peak->val / 400, 0, 0); if (err) return err; v4l2_ctrl_activate(hdl->video_bitrate_mode, hdl->video_encoding->val != V4L2_MPEG_VIDEO_ENCODING_MPEG_1); 
v4l2_ctrl_activate(hdl->video_bitrate_peak, hdl->video_bitrate_mode->val != V4L2_MPEG_VIDEO_BITRATE_MODE_CBR); if (cx2341x_neq(hdl->video_encoding) && hdl->ops && hdl->ops->s_video_encoding) return hdl->ops->s_video_encoding(hdl, hdl->video_encoding->val); return 0; case V4L2_CID_MPEG_VIDEO_MUTE: /* video mute cluster */ return cx2341x_hdl_api(hdl, CX2341X_ENC_MUTE_VIDEO, 1, hdl->video_mute->val | (hdl->video_mute_yuv->val << 8)); case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: { int active_filter; /* video filter mode */ err = cx2341x_hdl_api(hdl, CX2341X_ENC_SET_DNR_FILTER_MODE, 2, hdl->video_spatial_filter_mode->val | (hdl->video_temporal_filter_mode->val << 1), hdl->video_median_filter_type->val); if (err) return err; active_filter = hdl->video_spatial_filter_mode->val != V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO; v4l2_ctrl_activate(hdl->video_spatial_filter, active_filter); v4l2_ctrl_activate(hdl->video_luma_spatial_filter_type, active_filter); v4l2_ctrl_activate(hdl->video_chroma_spatial_filter_type, active_filter); active_filter = hdl->video_temporal_filter_mode->val != V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO; v4l2_ctrl_activate(hdl->video_temporal_filter, active_filter); active_filter = hdl->video_median_filter_type->val != V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF; v4l2_ctrl_activate(hdl->video_luma_median_filter_bottom, active_filter); v4l2_ctrl_activate(hdl->video_luma_median_filter_top, active_filter); v4l2_ctrl_activate(hdl->video_chroma_median_filter_bottom, active_filter); v4l2_ctrl_activate(hdl->video_chroma_median_filter_top, active_filter); return 0; } case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE: /* video filter type cluster */ return cx2341x_hdl_api(hdl, CX2341X_ENC_SET_SPATIAL_FILTER_TYPE, 2, hdl->video_luma_spatial_filter_type->val, hdl->video_chroma_spatial_filter_type->val); case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER: /* video filter cluster */ return cx2341x_hdl_api(hdl, CX2341X_ENC_SET_DNR_FILTER_PROPS, 2, hdl->video_spatial_filter->val, hdl->video_temporal_filter->val); case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP: /* video median cluster */ return cx2341x_hdl_api(hdl, CX2341X_ENC_SET_CORING_LEVELS, 4, hdl->video_luma_median_filter_bottom->val, hdl->video_luma_median_filter_top->val, hdl->video_chroma_median_filter_bottom->val, hdl->video_chroma_median_filter_top->val); } return -EINVAL; } static const struct v4l2_ctrl_ops cx2341x_ops = { .try_ctrl = cx2341x_try_ctrl, .s_ctrl = cx2341x_s_ctrl, }; static struct v4l2_ctrl *cx2341x_ctrl_new_custom(struct v4l2_ctrl_handler *hdl, u32 id, s32 min, s32 max, s32 step, s32 def) { struct v4l2_ctrl_config cfg; memset(&cfg, 0, sizeof(cfg)); cx2341x_ctrl_fill(id, &cfg.name, &cfg.type, &min, &max, &step, &def, &cfg.flags); cfg.ops = &cx2341x_ops; cfg.id = id; cfg.min = min; cfg.max = max; cfg.def = def; if (cfg.type == V4L2_CTRL_TYPE_MENU) { cfg.step = 0; cfg.menu_skip_mask = step; cfg.qmenu = cx2341x_get_menu(id); } else { cfg.step = step; cfg.menu_skip_mask = 0; } return v4l2_ctrl_new_custom(hdl, &cfg, NULL); } static struct v4l2_ctrl *cx2341x_ctrl_new_std(struct v4l2_ctrl_handler *hdl, u32 id, s32 min, s32 max, s32 step, s32 def) { return v4l2_ctrl_new_std(hdl, &cx2341x_ops, id, min, max, step, def); } static struct v4l2_ctrl *cx2341x_ctrl_new_menu(struct v4l2_ctrl_handler *hdl, u32 id, s32 max, s32 mask, s32 def) { return v4l2_ctrl_new_std_menu(hdl, &cx2341x_ops, id, max, mask, def); } int cx2341x_handler_init(struct cx2341x_handler *cxhdl, unsigned 
nr_of_controls_hint) { struct v4l2_ctrl_handler *hdl = &cxhdl->hdl; u32 caps = cxhdl->capabilities; int has_sliced_vbi = caps & CX2341X_CAP_HAS_SLICED_VBI; int has_ac3 = caps & CX2341X_CAP_HAS_AC3; int has_ts = caps & CX2341X_CAP_HAS_TS; cxhdl->width = 720; cxhdl->height = 480; v4l2_ctrl_handler_init(hdl, nr_of_controls_hint); /* Add controls in ascending control ID order for fastest insertion time. */ cxhdl->stream_type = cx2341x_ctrl_new_menu(hdl, V4L2_CID_MPEG_STREAM_TYPE, V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD, has_ts ? 0 : 2, V4L2_MPEG_STREAM_TYPE_MPEG2_PS); cxhdl->stream_vbi_fmt = cx2341x_ctrl_new_menu(hdl, V4L2_CID_MPEG_STREAM_VBI_FMT, V4L2_MPEG_STREAM_VBI_FMT_IVTV, has_sliced_vbi ? 0 : 2, V4L2_MPEG_STREAM_VBI_FMT_NONE); cxhdl->audio_sampling_freq = cx2341x_ctrl_new_menu(hdl, V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ, V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000, 0, V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000); cxhdl->audio_encoding = cx2341x_ctrl_new_menu(hdl, V4L2_CID_MPEG_AUDIO_ENCODING, V4L2_MPEG_AUDIO_ENCODING_AC3, has_ac3 ? ~0x12 : ~0x2, V4L2_MPEG_AUDIO_ENCODING_LAYER_2); cxhdl->audio_l2_bitrate = cx2341x_ctrl_new_menu(hdl, V4L2_CID_MPEG_AUDIO_L2_BITRATE, V4L2_MPEG_AUDIO_L2_BITRATE_384K, 0x1ff, V4L2_MPEG_AUDIO_L2_BITRATE_224K); cxhdl->audio_mode = cx2341x_ctrl_new_menu(hdl, V4L2_CID_MPEG_AUDIO_MODE, V4L2_MPEG_AUDIO_MODE_MONO, 0, V4L2_MPEG_AUDIO_MODE_STEREO); cxhdl->audio_mode_extension = cx2341x_ctrl_new_menu(hdl, V4L2_CID_MPEG_AUDIO_MODE_EXTENSION, V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16, 0, V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4); cxhdl->audio_emphasis = cx2341x_ctrl_new_menu(hdl, V4L2_CID_MPEG_AUDIO_EMPHASIS, V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17, 0, V4L2_MPEG_AUDIO_EMPHASIS_NONE); cxhdl->audio_crc = cx2341x_ctrl_new_menu(hdl, V4L2_CID_MPEG_AUDIO_CRC, V4L2_MPEG_AUDIO_CRC_CRC16, 0, V4L2_MPEG_AUDIO_CRC_NONE); cx2341x_ctrl_new_std(hdl, V4L2_CID_MPEG_AUDIO_MUTE, 0, 1, 1, 0); if (has_ac3) cxhdl->audio_ac3_bitrate = cx2341x_ctrl_new_menu(hdl, V4L2_CID_MPEG_AUDIO_AC3_BITRATE, V4L2_MPEG_AUDIO_AC3_BITRATE_448K, 0x03, V4L2_MPEG_AUDIO_AC3_BITRATE_224K); cxhdl->video_encoding = cx2341x_ctrl_new_menu(hdl, V4L2_CID_MPEG_VIDEO_ENCODING, V4L2_MPEG_VIDEO_ENCODING_MPEG_2, 0, V4L2_MPEG_VIDEO_ENCODING_MPEG_2); cx2341x_ctrl_new_menu(hdl, V4L2_CID_MPEG_VIDEO_ASPECT, V4L2_MPEG_VIDEO_ASPECT_221x100, 0, V4L2_MPEG_VIDEO_ASPECT_4x3); cxhdl->video_b_frames = cx2341x_ctrl_new_std(hdl, V4L2_CID_MPEG_VIDEO_B_FRAMES, 0, 33, 1, 2); cxhdl->video_gop_size = cx2341x_ctrl_new_std(hdl, V4L2_CID_MPEG_VIDEO_GOP_SIZE, 1, 34, 1, cxhdl->is_50hz ? 
12 : 15); cx2341x_ctrl_new_std(hdl, V4L2_CID_MPEG_VIDEO_GOP_CLOSURE, 0, 1, 1, 1); cxhdl->video_bitrate_mode = cx2341x_ctrl_new_menu(hdl, V4L2_CID_MPEG_VIDEO_BITRATE_MODE, V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, 0, V4L2_MPEG_VIDEO_BITRATE_MODE_VBR); cxhdl->video_bitrate = cx2341x_ctrl_new_std(hdl, V4L2_CID_MPEG_VIDEO_BITRATE, 0, 27000000, 1, 6000000); cxhdl->video_bitrate_peak = cx2341x_ctrl_new_std(hdl, V4L2_CID_MPEG_VIDEO_BITRATE_PEAK, 0, 27000000, 1, 8000000); cx2341x_ctrl_new_std(hdl, V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION, 0, 255, 1, 0); cxhdl->video_mute = cx2341x_ctrl_new_std(hdl, V4L2_CID_MPEG_VIDEO_MUTE, 0, 1, 1, 0); cxhdl->video_mute_yuv = cx2341x_ctrl_new_std(hdl, V4L2_CID_MPEG_VIDEO_MUTE_YUV, 0, 0xffffff, 1, 0x008080); /* CX23415/6 specific */ cxhdl->video_spatial_filter_mode = cx2341x_ctrl_new_custom(hdl, V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE, V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL, V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO, 0, V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL); cxhdl->video_spatial_filter = cx2341x_ctrl_new_custom(hdl, V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER, 0, 15, 1, 0); cxhdl->video_luma_spatial_filter_type = cx2341x_ctrl_new_custom(hdl, V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE, V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF, V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE, 0, V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR); cxhdl->video_chroma_spatial_filter_type = cx2341x_ctrl_new_custom(hdl, V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE, V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF, V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR, 0, V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR); cxhdl->video_temporal_filter_mode = cx2341x_ctrl_new_custom(hdl, V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE, V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL, V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO, 0, V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL); cxhdl->video_temporal_filter = cx2341x_ctrl_new_custom(hdl, V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER, 0, 31, 1, 8); cxhdl->video_median_filter_type = cx2341x_ctrl_new_custom(hdl, V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE, V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF, V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG, 0, V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF); cxhdl->video_luma_median_filter_bottom = cx2341x_ctrl_new_custom(hdl, V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM, 0, 255, 1, 0); cxhdl->video_luma_median_filter_top = cx2341x_ctrl_new_custom(hdl, V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP, 0, 255, 1, 255); cxhdl->video_chroma_median_filter_bottom = cx2341x_ctrl_new_custom(hdl, V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM, 0, 255, 1, 0); cxhdl->video_chroma_median_filter_top = cx2341x_ctrl_new_custom(hdl, V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP, 0, 255, 1, 255); cx2341x_ctrl_new_custom(hdl, V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS, 0, 1, 1, 0); if (hdl->error) { int err = hdl->error; v4l2_ctrl_handler_free(hdl); return err; } v4l2_ctrl_cluster(8, &cxhdl->audio_sampling_freq); v4l2_ctrl_cluster(2, &cxhdl->video_b_frames); v4l2_ctrl_cluster(5, &cxhdl->stream_type); v4l2_ctrl_cluster(2, &cxhdl->video_mute); v4l2_ctrl_cluster(3, &cxhdl->video_spatial_filter_mode); v4l2_ctrl_cluster(2, &cxhdl->video_luma_spatial_filter_type); v4l2_ctrl_cluster(2, &cxhdl->video_spatial_filter); v4l2_ctrl_cluster(4, 
&cxhdl->video_luma_median_filter_top); return 0; } EXPORT_SYMBOL(cx2341x_handler_init); void cx2341x_handler_set_50hz(struct cx2341x_handler *cxhdl, int is_50hz) { cxhdl->is_50hz = is_50hz; cxhdl->video_gop_size->default_value = cxhdl->is_50hz ? 12 : 15; } EXPORT_SYMBOL(cx2341x_handler_set_50hz); int cx2341x_handler_setup(struct cx2341x_handler *cxhdl) { int h = cxhdl->height; int w = cxhdl->width; int err; err = cx2341x_hdl_api(cxhdl, CX2341X_ENC_SET_OUTPUT_PORT, 2, cxhdl->port, 0); if (err) return err; err = cx2341x_hdl_api(cxhdl, CX2341X_ENC_SET_FRAME_RATE, 1, cxhdl->is_50hz); if (err) return err; if (v4l2_ctrl_g_ctrl(cxhdl->video_encoding) == V4L2_MPEG_VIDEO_ENCODING_MPEG_1) { w /= 2; h /= 2; } err = cx2341x_hdl_api(cxhdl, CX2341X_ENC_SET_FRAME_SIZE, 2, h, w); if (err) return err; return v4l2_ctrl_handler_setup(&cxhdl->hdl); } EXPORT_SYMBOL(cx2341x_handler_setup); void cx2341x_handler_set_busy(struct cx2341x_handler *cxhdl, int busy) { v4l2_ctrl_grab(cxhdl->audio_sampling_freq, busy); v4l2_ctrl_grab(cxhdl->audio_encoding, busy); v4l2_ctrl_grab(cxhdl->audio_l2_bitrate, busy); v4l2_ctrl_grab(cxhdl->audio_ac3_bitrate, busy); v4l2_ctrl_grab(cxhdl->stream_vbi_fmt, busy); v4l2_ctrl_grab(cxhdl->stream_type, busy); v4l2_ctrl_grab(cxhdl->video_bitrate_mode, busy); v4l2_ctrl_grab(cxhdl->video_bitrate, busy); v4l2_ctrl_grab(cxhdl->video_bitrate_peak, busy); } EXPORT_SYMBOL(cx2341x_handler_set_busy);
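/*
 * Illustrative sketch only (not part of the driver above): roughly how a
 * bridge driver is expected to embed and initialise the cx2341x_handler
 * whose helpers are defined above.  "my_encoder", "my_mbox_send" and the
 * chosen capability flag are hypothetical; the callback prototype follows
 * the hdl->func(hdl->priv, cmd, in, out, data) use in cx2341x_hdl_api().
 */
struct my_encoder {				/* hypothetical bridge driver state */
	struct cx2341x_handler cxhdl;
	v4l2_std_id std;
};

static int my_mbox_send(void *priv, u32 cmd, int in, int out,
			u32 data[CX2341X_MBOX_MAX_DATA])
{
	/* hypothetical: forward the encoder API command to the firmware mailbox */
	return 0;
}

static int my_encoder_init(struct my_encoder *enc)
{
	int err;

	enc->cxhdl.capabilities = CX2341X_CAP_HAS_SLICED_VBI;	/* assumption */
	enc->cxhdl.func = my_mbox_send;
	enc->cxhdl.priv = enc;

	/* registers the MPEG/CX2341X control set on enc->cxhdl.hdl */
	err = cx2341x_handler_init(&enc->cxhdl, 50);
	if (err)
		return err;

	/* 50 Hz standards get a default GOP size of 12 instead of 15 */
	cx2341x_handler_set_50hz(&enc->cxhdl, (enc->std & V4L2_STD_625_50) != 0);

	/* push the current control values out to the running firmware */
	return cx2341x_handler_setup(&enc->cxhdl);
}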
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cryptographic scatter and gather helpers.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 Adam J. Richter <adam@yggdrasil.com>
 * Copyright (c) 2004 Jean-Luc Cooke <jlcooke@certainkey.com>
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 */

#ifndef _CRYPTO_SCATTERWALK_H
#define _CRYPTO_SCATTERWALK_H

#include <crypto/algapi.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

static inline void scatterwalk_crypto_chain(struct scatterlist *head,
					    struct scatterlist *sg, int num)
{
	if (sg)
		sg_chain(head, num, sg);
	else
		sg_mark_end(head);
}

static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk)
{
	unsigned int len = walk->sg->offset + walk->sg->length - walk->offset;
	unsigned int len_this_page = offset_in_page(~walk->offset) + 1;
	return len_this_page > len ? len : len_this_page;
}

static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk,
					     unsigned int nbytes)
{
	unsigned int len_this_page = scatterwalk_pagelen(walk);
	return nbytes > len_this_page ? len_this_page : nbytes;
}

static inline void scatterwalk_advance(struct scatter_walk *walk,
				       unsigned int nbytes)
{
	walk->offset += nbytes;
}

static inline struct page *scatterwalk_page(struct scatter_walk *walk)
{
	return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
}

static inline void scatterwalk_unmap(void *vaddr)
{
	kunmap_local(vaddr);
}

static inline void scatterwalk_start(struct scatter_walk *walk,
				     struct scatterlist *sg)
{
	walk->sg = sg;
	walk->offset = sg->offset;
}

static inline void *scatterwalk_map(struct scatter_walk *walk)
{
	return kmap_local_page(scatterwalk_page(walk)) +
	       offset_in_page(walk->offset);
}

static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out,
					unsigned int more)
{
	if (out) {
		struct page *page;

		page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT);
		flush_dcache_page(page);
	}

	if (more && walk->offset >= walk->sg->offset + walk->sg->length)
		scatterwalk_start(walk, sg_next(walk->sg));
}

static inline void scatterwalk_done(struct scatter_walk *walk, int out,
				    int more)
{
	if (!more || walk->offset >= walk->sg->offset + walk->sg->length ||
	    !(walk->offset & (PAGE_SIZE - 1)))
		scatterwalk_pagedone(walk, out, more);
}

void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
			    size_t nbytes, int out);
void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
			      unsigned int start, unsigned int nbytes, int out);

struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
				     struct scatterlist *src,
				     unsigned int len);

#endif  /* _CRYPTO_SCATTERWALK_H */
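/*
 * Illustrative sketch only: walking a scatterlist with the helpers above to
 * copy its first nbytes into a linear buffer.  "my_sg_copy_to_buf" is
 * hypothetical; the loop mirrors what scatterwalk_copychunks() does
 * internally, and the single call shown in the comment is the one-shot
 * equivalent (out == 0 reads from the scatterlist into buf).
 */
static void my_sg_copy_to_buf(struct scatterlist *sg, u8 *buf,
			      unsigned int nbytes)
{
	struct scatter_walk walk;

	/* Equivalent one-shot helper:
	 * scatterwalk_map_and_copy(buf, sg, 0, nbytes, 0);
	 */
	scatterwalk_start(&walk, sg);
	while (nbytes) {
		/* limit the chunk to the current page and sg entry */
		unsigned int len = scatterwalk_clamp(&walk, nbytes);
		void *vaddr = scatterwalk_map(&walk);

		memcpy(buf, vaddr, len);
		scatterwalk_unmap(vaddr);
		scatterwalk_advance(&walk, len);
		/* flush/advance to the next sg entry if more data remains */
		scatterwalk_done(&walk, 0, nbytes - len);

		buf += len;
		nbytes -= len;
	}
}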
// SPDX-License-Identifier: GPL-2.0-or-later /* Daemon interface * * Copyright (C) 2007, 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com) */ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/completion.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/namei.h> #include <linux/poll.h> #include <linux/mount.h> #include <linux/statfs.h> #include <linux/ctype.h> #include <linux/string.h> #include <linux/fs_struct.h> #include "internal.h" static int cachefiles_daemon_open(struct inode *, struct file *); static int cachefiles_daemon_release(struct inode *, struct file *); static ssize_t cachefiles_daemon_read(struct file *, char __user *, size_t, loff_t *); static ssize_t cachefiles_daemon_write(struct file *, const char __user *, size_t, loff_t *); static __poll_t cachefiles_daemon_poll(struct file *, struct poll_table_struct *); static int cachefiles_daemon_frun(struct cachefiles_cache *, char *); static int cachefiles_daemon_fcull(struct cachefiles_cache *, char *); static int cachefiles_daemon_fstop(struct cachefiles_cache *, char *); static int cachefiles_daemon_brun(struct cachefiles_cache *, char *); static int cachefiles_daemon_bcull(struct cachefiles_cache *, char *); static int cachefiles_daemon_bstop(struct cachefiles_cache *, char *); static int cachefiles_daemon_cull(struct cachefiles_cache *, char *); static int cachefiles_daemon_debug(struct cachefiles_cache *, char *); static int cachefiles_daemon_dir(struct cachefiles_cache *, char *); static int cachefiles_daemon_inuse(struct cachefiles_cache *, char *); static int cachefiles_daemon_secctx(struct cachefiles_cache *, char *); static int cachefiles_daemon_tag(struct cachefiles_cache *, char *); static int cachefiles_daemon_bind(struct cachefiles_cache *, char *); static void cachefiles_daemon_unbind(struct cachefiles_cache *); static unsigned long cachefiles_open; const struct file_operations cachefiles_daemon_fops = { .owner = THIS_MODULE, .open = cachefiles_daemon_open, .release = cachefiles_daemon_release, .read = cachefiles_daemon_read, .write = cachefiles_daemon_write, .poll = cachefiles_daemon_poll, .llseek = noop_llseek, }; struct cachefiles_daemon_cmd { char name[8]; int (*handler)(struct cachefiles_cache *cache, char *args); }; static const struct cachefiles_daemon_cmd cachefiles_daemon_cmds[] = { { "bind", cachefiles_daemon_bind }, { "brun", cachefiles_daemon_brun }, { "bcull", cachefiles_daemon_bcull }, { "bstop", cachefiles_daemon_bstop }, { "cull", cachefiles_daemon_cull }, { "debug", cachefiles_daemon_debug }, { "dir", cachefiles_daemon_dir }, { "frun", cachefiles_daemon_frun }, { "fcull", cachefiles_daemon_fcull }, { "fstop", cachefiles_daemon_fstop }, { "inuse", cachefiles_daemon_inuse }, { "secctx", cachefiles_daemon_secctx }, { "tag", cachefiles_daemon_tag }, #ifdef CONFIG_CACHEFILES_ONDEMAND { "copen", cachefiles_ondemand_copen }, { "restore", cachefiles_ondemand_restore }, #endif { "", NULL } }; /* * Prepare a cache for caching. 
*/ static int cachefiles_daemon_open(struct inode *inode, struct file *file) { struct cachefiles_cache *cache; _enter(""); /* only the superuser may do this */ if (!capable(CAP_SYS_ADMIN)) return -EPERM; /* the cachefiles device may only be open once at a time */ if (xchg(&cachefiles_open, 1) == 1) return -EBUSY; /* allocate a cache record */ cache = kzalloc(sizeof(struct cachefiles_cache), GFP_KERNEL); if (!cache) { cachefiles_open = 0; return -ENOMEM; } mutex_init(&cache->daemon_mutex); init_waitqueue_head(&cache->daemon_pollwq); INIT_LIST_HEAD(&cache->volumes); INIT_LIST_HEAD(&cache->object_list); spin_lock_init(&cache->object_list_lock); refcount_set(&cache->unbind_pincount, 1); xa_init_flags(&cache->reqs, XA_FLAGS_ALLOC); xa_init_flags(&cache->ondemand_ids, XA_FLAGS_ALLOC1); /* set default caching limits * - limit at 1% free space and/or free files * - cull below 5% free space and/or free files * - cease culling above 7% free space and/or free files */ cache->frun_percent = 7; cache->fcull_percent = 5; cache->fstop_percent = 1; cache->brun_percent = 7; cache->bcull_percent = 5; cache->bstop_percent = 1; file->private_data = cache; cache->cachefilesd = file; return 0; } static void cachefiles_flush_reqs(struct cachefiles_cache *cache) { struct xarray *xa = &cache->reqs; struct cachefiles_req *req; unsigned long index; /* * Make sure the following two operations won't be reordered. * 1) set CACHEFILES_DEAD bit * 2) flush requests in the xarray * Otherwise the request may be enqueued after xarray has been * flushed, leaving the orphan request never being completed. * * CPU 1 CPU 2 * ===== ===== * flush requests in the xarray * test CACHEFILES_DEAD bit * enqueue the request * set CACHEFILES_DEAD bit */ smp_mb(); xa_lock(xa); xa_for_each(xa, index, req) { req->error = -EIO; complete(&req->done); } xa_unlock(xa); xa_destroy(&cache->reqs); xa_destroy(&cache->ondemand_ids); } void cachefiles_put_unbind_pincount(struct cachefiles_cache *cache) { if (refcount_dec_and_test(&cache->unbind_pincount)) { cachefiles_daemon_unbind(cache); cachefiles_open = 0; kfree(cache); } } void cachefiles_get_unbind_pincount(struct cachefiles_cache *cache) { refcount_inc(&cache->unbind_pincount); } /* * Release a cache. */ static int cachefiles_daemon_release(struct inode *inode, struct file *file) { struct cachefiles_cache *cache = file->private_data; _enter(""); ASSERT(cache); set_bit(CACHEFILES_DEAD, &cache->flags); if (cachefiles_in_ondemand_mode(cache)) cachefiles_flush_reqs(cache); /* clean up the control file interface */ cache->cachefilesd = NULL; file->private_data = NULL; cachefiles_put_unbind_pincount(cache); _leave(""); return 0; } static ssize_t cachefiles_do_daemon_read(struct cachefiles_cache *cache, char __user *_buffer, size_t buflen) { unsigned long long b_released; unsigned f_released; char buffer[256]; int n; /* check how much space the cache has */ cachefiles_has_space(cache, 0, 0, cachefiles_has_space_check); /* summarise */ f_released = atomic_xchg(&cache->f_released, 0); b_released = atomic_long_xchg(&cache->b_released, 0); clear_bit(CACHEFILES_STATE_CHANGED, &cache->flags); n = snprintf(buffer, sizeof(buffer), "cull=%c" " frun=%llx" " fcull=%llx" " fstop=%llx" " brun=%llx" " bcull=%llx" " bstop=%llx" " freleased=%x" " breleased=%llx", test_bit(CACHEFILES_CULLING, &cache->flags) ? 
'1' : '0', (unsigned long long) cache->frun, (unsigned long long) cache->fcull, (unsigned long long) cache->fstop, (unsigned long long) cache->brun, (unsigned long long) cache->bcull, (unsigned long long) cache->bstop, f_released, b_released); if (n > buflen) return -EMSGSIZE; if (copy_to_user(_buffer, buffer, n) != 0) return -EFAULT; return n; } /* * Read the cache state. */ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer, size_t buflen, loff_t *pos) { struct cachefiles_cache *cache = file->private_data; //_enter(",,%zu,", buflen); if (!test_bit(CACHEFILES_READY, &cache->flags)) return 0; if (cachefiles_in_ondemand_mode(cache)) return cachefiles_ondemand_daemon_read(cache, _buffer, buflen); else return cachefiles_do_daemon_read(cache, _buffer, buflen); } /* * Take a command from cachefilesd, parse it and act on it. */ static ssize_t cachefiles_daemon_write(struct file *file, const char __user *_data, size_t datalen, loff_t *pos) { const struct cachefiles_daemon_cmd *cmd; struct cachefiles_cache *cache = file->private_data; ssize_t ret; char *data, *args, *cp; //_enter(",,%zu,", datalen); ASSERT(cache); if (test_bit(CACHEFILES_DEAD, &cache->flags)) return -EIO; if (datalen > PAGE_SIZE - 1) return -EOPNOTSUPP; /* drag the command string into the kernel so we can parse it */ data = memdup_user_nul(_data, datalen); if (IS_ERR(data)) return PTR_ERR(data); ret = -EINVAL; if (memchr(data, '\0', datalen)) goto error; /* strip any newline */ cp = memchr(data, '\n', datalen); if (cp) { if (cp == data) goto error; *cp = '\0'; } /* parse the command */ ret = -EOPNOTSUPP; for (args = data; *args; args++) if (isspace(*args)) break; if (*args) { if (args == data) goto error; *args = '\0'; args = skip_spaces(++args); } /* run the appropriate command handler */ for (cmd = cachefiles_daemon_cmds; cmd->name[0]; cmd++) if (strcmp(cmd->name, data) == 0) goto found_command; error: kfree(data); //_leave(" = %zd", ret); return ret; found_command: mutex_lock(&cache->daemon_mutex); ret = -EIO; if (!test_bit(CACHEFILES_DEAD, &cache->flags)) ret = cmd->handler(cache, args); mutex_unlock(&cache->daemon_mutex); if (ret == 0) ret = datalen; goto error; } /* * Poll for culling state * - use EPOLLOUT to indicate culling state */ static __poll_t cachefiles_daemon_poll(struct file *file, struct poll_table_struct *poll) { struct cachefiles_cache *cache = file->private_data; XA_STATE(xas, &cache->reqs, 0); struct cachefiles_req *req; __poll_t mask; poll_wait(file, &cache->daemon_pollwq, poll); mask = 0; if (cachefiles_in_ondemand_mode(cache)) { if (!xa_empty(&cache->reqs)) { rcu_read_lock(); xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) { if (!cachefiles_ondemand_is_reopening_read(req)) { mask |= EPOLLIN; break; } } rcu_read_unlock(); } } else { if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags)) mask |= EPOLLIN; } if (test_bit(CACHEFILES_CULLING, &cache->flags)) mask |= EPOLLOUT; return mask; } /* * Give a range error for cache space constraints * - can be tail-called */ static int cachefiles_daemon_range_error(struct cachefiles_cache *cache, char *args) { pr_err("Free space limits must be in range 0%%<=stop<cull<run<100%%\n"); return -EINVAL; } /* * Set the percentage of files at which to stop culling * - command: "frun <N>%" */ static int cachefiles_daemon_frun(struct cachefiles_cache *cache, char *args) { unsigned long frun; _enter(",%s", args); if (!*args) return -EINVAL; frun = simple_strtoul(args, &args, 10); if (args[0] != '%' || args[1] != '\0') return -EINVAL; if 
(frun <= cache->fcull_percent || frun >= 100) return cachefiles_daemon_range_error(cache, args); cache->frun_percent = frun; return 0; } /* * Set the percentage of files at which to start culling * - command: "fcull <N>%" */ static int cachefiles_daemon_fcull(struct cachefiles_cache *cache, char *args) { unsigned long fcull; _enter(",%s", args); if (!*args) return -EINVAL; fcull = simple_strtoul(args, &args, 10); if (args[0] != '%' || args[1] != '\0') return -EINVAL; if (fcull <= cache->fstop_percent || fcull >= cache->frun_percent) return cachefiles_daemon_range_error(cache, args); cache->fcull_percent = fcull; return 0; } /* * Set the percentage of files at which to stop allocating * - command: "fstop <N>%" */ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args) { unsigned long fstop; _enter(",%s", args); if (!*args) return -EINVAL; fstop = simple_strtoul(args, &args, 10); if (args[0] != '%' || args[1] != '\0') return -EINVAL; if (fstop >= cache->fcull_percent) return cachefiles_daemon_range_error(cache, args); cache->fstop_percent = fstop; return 0; } /* * Set the percentage of blocks at which to stop culling * - command: "brun <N>%" */ static int cachefiles_daemon_brun(struct cachefiles_cache *cache, char *args) { unsigned long brun; _enter(",%s", args); if (!*args) return -EINVAL; brun = simple_strtoul(args, &args, 10); if (args[0] != '%' || args[1] != '\0') return -EINVAL; if (brun <= cache->bcull_percent || brun >= 100) return cachefiles_daemon_range_error(cache, args); cache->brun_percent = brun; return 0; } /* * Set the percentage of blocks at which to start culling * - command: "bcull <N>%" */ static int cachefiles_daemon_bcull(struct cachefiles_cache *cache, char *args) { unsigned long bcull; _enter(",%s", args); if (!*args) return -EINVAL; bcull = simple_strtoul(args, &args, 10); if (args[0] != '%' || args[1] != '\0') return -EINVAL; if (bcull <= cache->bstop_percent || bcull >= cache->brun_percent) return cachefiles_daemon_range_error(cache, args); cache->bcull_percent = bcull; return 0; } /* * Set the percentage of blocks at which to stop allocating * - command: "bstop <N>%" */ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args) { unsigned long bstop; _enter(",%s", args); if (!*args) return -EINVAL; bstop = simple_strtoul(args, &args, 10); if (args[0] != '%' || args[1] != '\0') return -EINVAL; if (bstop >= cache->bcull_percent) return cachefiles_daemon_range_error(cache, args); cache->bstop_percent = bstop; return 0; } /* * Set the cache directory * - command: "dir <name>" */ static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args) { char *dir; _enter(",%s", args); if (!*args) { pr_err("Empty directory specified\n"); return -EINVAL; } if (cache->rootdirname) { pr_err("Second cache directory specified\n"); return -EEXIST; } dir = kstrdup(args, GFP_KERNEL); if (!dir) return -ENOMEM; cache->rootdirname = dir; return 0; } /* * Set the cache security context * - command: "secctx <ctx>" */ static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args) { char *secctx; _enter(",%s", args); if (!*args) { pr_err("Empty security context specified\n"); return -EINVAL; } if (cache->secctx) { pr_err("Second security context specified\n"); return -EINVAL; } secctx = kstrdup(args, GFP_KERNEL); if (!secctx) return -ENOMEM; cache->secctx = secctx; return 0; } /* * Set the cache tag * - command: "tag <name>" */ static int cachefiles_daemon_tag(struct cachefiles_cache *cache, char *args) { char *tag; 
_enter(",%s", args); if (!*args) { pr_err("Empty tag specified\n"); return -EINVAL; } if (cache->tag) return -EEXIST; tag = kstrdup(args, GFP_KERNEL); if (!tag) return -ENOMEM; cache->tag = tag; return 0; } /* * Request a node in the cache be culled from the current working directory * - command: "cull <name>" */ static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args) { struct path path; const struct cred *saved_cred; int ret; _enter(",%s", args); if (strchr(args, '/')) goto inval; if (!test_bit(CACHEFILES_READY, &cache->flags)) { pr_err("cull applied to unready cache\n"); return -EIO; } if (test_bit(CACHEFILES_DEAD, &cache->flags)) { pr_err("cull applied to dead cache\n"); return -EIO; } get_fs_pwd(current->fs, &path); if (!d_can_lookup(path.dentry)) goto notdir; cachefiles_begin_secure(cache, &saved_cred); ret = cachefiles_cull(cache, path.dentry, args); cachefiles_end_secure(cache, saved_cred); path_put(&path); _leave(" = %d", ret); return ret; notdir: path_put(&path); pr_err("cull command requires dirfd to be a directory\n"); return -ENOTDIR; inval: pr_err("cull command requires dirfd and filename\n"); return -EINVAL; } /* * Set debugging mode * - command: "debug <mask>" */ static int cachefiles_daemon_debug(struct cachefiles_cache *cache, char *args) { unsigned long mask; _enter(",%s", args); mask = simple_strtoul(args, &args, 0); if (args[0] != '\0') goto inval; cachefiles_debug = mask; _leave(" = 0"); return 0; inval: pr_err("debug command requires mask\n"); return -EINVAL; } /* * Find out whether an object in the current working directory is in use or not * - command: "inuse <name>" */ static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args) { struct path path; const struct cred *saved_cred; int ret; //_enter(",%s", args); if (strchr(args, '/')) goto inval; if (!test_bit(CACHEFILES_READY, &cache->flags)) { pr_err("inuse applied to unready cache\n"); return -EIO; } if (test_bit(CACHEFILES_DEAD, &cache->flags)) { pr_err("inuse applied to dead cache\n"); return -EIO; } get_fs_pwd(current->fs, &path); if (!d_can_lookup(path.dentry)) goto notdir; cachefiles_begin_secure(cache, &saved_cred); ret = cachefiles_check_in_use(cache, path.dentry, args); cachefiles_end_secure(cache, saved_cred); path_put(&path); //_leave(" = %d", ret); return ret; notdir: path_put(&path); pr_err("inuse command requires dirfd to be a directory\n"); return -ENOTDIR; inval: pr_err("inuse command requires dirfd and filename\n"); return -EINVAL; } /* * Bind a directory as a cache */ static int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args) { _enter("{%u,%u,%u,%u,%u,%u},%s", cache->frun_percent, cache->fcull_percent, cache->fstop_percent, cache->brun_percent, cache->bcull_percent, cache->bstop_percent, args); if (cache->fstop_percent >= cache->fcull_percent || cache->fcull_percent >= cache->frun_percent || cache->frun_percent >= 100) return -ERANGE; if (cache->bstop_percent >= cache->bcull_percent || cache->bcull_percent >= cache->brun_percent || cache->brun_percent >= 100) return -ERANGE; if (!cache->rootdirname) { pr_err("No cache directory specified\n"); return -EINVAL; } /* Don't permit already bound caches to be re-bound */ if (test_bit(CACHEFILES_READY, &cache->flags)) { pr_err("Cache already bound\n"); return -EBUSY; } if (IS_ENABLED(CONFIG_CACHEFILES_ONDEMAND)) { if (!strcmp(args, "ondemand")) { set_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags); } else if (*args) { pr_err("Invalid argument to the 'bind' command\n"); return -EINVAL; } } else if 
(*args) { pr_err("'bind' command doesn't take an argument\n"); return -EINVAL; } /* Make sure we have copies of the tag string */ if (!cache->tag) { /* * The tag string is released by the fops->release() * function, so we don't release it on error here */ cache->tag = kstrdup("CacheFiles", GFP_KERNEL); if (!cache->tag) return -ENOMEM; } return cachefiles_add_cache(cache); } /* * Unbind a cache. */ static void cachefiles_daemon_unbind(struct cachefiles_cache *cache) { _enter(""); if (test_bit(CACHEFILES_READY, &cache->flags)) cachefiles_withdraw_cache(cache); cachefiles_put_directory(cache->graveyard); cachefiles_put_directory(cache->store); mntput(cache->mnt); put_cred(cache->cache_cred); kfree(cache->rootdirname); kfree(cache->secctx); kfree(cache->tag); _leave(""); }
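/*
 * Illustrative sketch only: the command protocol parsed by
 * cachefiles_daemon_write() above, as a minimal user-space configuration
 * sequence.  This is not cachefilesd; the device path and cache directory
 * are assumptions, and the percentage limits merely satisfy the
 * 0 <= stop < cull < run < 100 checks enforced above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int cachefiles_cmd(int fd, const char *cmd)
{
	/* one command per write(); a single trailing '\n' is also accepted */
	if (write(fd, cmd, strlen(cmd)) < 0) {
		perror(cmd);
		return -1;
	}
	return 0;
}

int main(void)
{
	/* assumption: the control device is exposed as /dev/cachefiles */
	int fd = open("/dev/cachefiles", O_RDWR);

	if (fd < 0) {
		perror("/dev/cachefiles");
		return 1;
	}

	cachefiles_cmd(fd, "dir /var/cache/fscache");	/* hypothetical path */
	cachefiles_cmd(fd, "tag mycache");
	cachefiles_cmd(fd, "frun 10%");
	cachefiles_cmd(fd, "fcull 7%");
	cachefiles_cmd(fd, "fstop 3%");
	/* "bind ondemand" instead, if CONFIG_CACHEFILES_ONDEMAND is in use */
	cachefiles_cmd(fd, "bind");

	/* closing this fd releases and unbinds the cache, so keep it open */
	pause();
	return 0;
}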
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) International Business Machines Corp., 2000-2004
 */

#include <linux/fs.h>
#include <linux/quotaops.h>
#include "jfs_incore.h"
#include "jfs_inode.h"
#include "jfs_filsys.h"
#include "jfs_imap.h"
#include "jfs_dinode.h"
#include "jfs_debug.h"

void jfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = JFS_IP(inode)->mode2;
	unsigned int new_fl = 0;

	if (flags & JFS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & JFS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & JFS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & JFS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	if (flags & JFS_SYNC_FL)
		new_fl |= S_SYNC;
	inode_set_flags(inode, new_fl, S_IMMUTABLE | S_APPEND | S_NOATIME |
			S_DIRSYNC | S_SYNC);
}

/*
 * NAME:	ialloc()
 *
 * FUNCTION:	Allocate a new inode
 *
 */
struct inode *ialloc(struct inode *parent, umode_t mode)
{
	struct super_block *sb = parent->i_sb;
	struct inode *inode;
	struct jfs_inode_info *jfs_inode;
	int rc;

	inode = new_inode(sb);
	if (!inode) {
		jfs_warn("ialloc: new_inode returned NULL!");
		return ERR_PTR(-ENOMEM);
	}

	jfs_inode = JFS_IP(inode);

	rc = diAlloc(parent, S_ISDIR(mode), inode);
	if (rc) {
		jfs_warn("ialloc: diAlloc returned %d!", rc);
		goto fail_put;
	}

	if (insert_inode_locked(inode) < 0) {
		rc = -EINVAL;
		goto fail_put;
	}

	inode_init_owner(&nop_mnt_idmap, inode, parent, mode);
	/*
	 * New inodes need to save sane values on disk when
	 * uid & gid mount options are used
	 */
	jfs_inode->saved_uid = inode->i_uid;
	jfs_inode->saved_gid = inode->i_gid;

	/*
	 * Allocate inode to quota.
	 */
	rc = dquot_initialize(inode);
	if (rc)
		goto fail_drop;
	rc = dquot_alloc_inode(inode);
	if (rc)
		goto fail_drop;

	/* inherit flags from parent */
	jfs_inode->mode2 = JFS_IP(parent)->mode2 & JFS_FL_INHERIT;

	if (S_ISDIR(mode)) {
		jfs_inode->mode2 |= IDIRECTORY;
		jfs_inode->mode2 &= ~JFS_DIRSYNC_FL;
	} else {
		jfs_inode->mode2 |= INLINEEA | ISPARSE;
		if (S_ISLNK(mode))
			jfs_inode->mode2 &= ~(JFS_IMMUTABLE_FL|JFS_APPEND_FL);
	}
	jfs_inode->mode2 |= inode->i_mode;

	inode->i_blocks = 0;
	simple_inode_init_ts(inode);
	jfs_inode->otime = inode_get_ctime_sec(inode);
	inode->i_generation = JFS_SBI(sb)->gengen++;

	jfs_inode->cflag = 0;

	/* Zero remaining fields */
	memset(&jfs_inode->acl, 0, sizeof(dxd_t));
	memset(&jfs_inode->ea, 0, sizeof(dxd_t));
	jfs_inode->next_index = 0;
	jfs_inode->acltype = 0;
	jfs_inode->btorder = 0;
	jfs_inode->btindex = 0;
	jfs_inode->bxflag = 0;
	jfs_inode->blid = 0;
	jfs_inode->atlhead = 0;
	jfs_inode->atltail = 0;
	jfs_inode->xtlid = 0;
	jfs_set_inode_flags(inode);

	jfs_info("ialloc returns inode = 0x%p", inode);

	return inode;

fail_drop:
	dquot_drop(inode);
	inode->i_flags |= S_NOQUOTA;
	clear_nlink(inode);
	discard_new_inode(inode);
	return ERR_PTR(rc);

fail_put:
	iput(inode);
	return ERR_PTR(rc);
}
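/*
 * Illustrative sketch only (the real callers live in jfs namei.c/super.c):
 * the calling convention of ialloc() above.  "jfs_create_sketch" is
 * hypothetical and heavily trimmed -- a real create path also starts a
 * transaction, inserts the directory entry, sets i_op/i_fop and commits
 * before instantiating the new, locked inode.
 */
static int jfs_create_sketch(struct inode *dir, struct dentry *dentry,
			     umode_t mode)
{
	/* mode carries the S_IFMT bits; ialloc() checks S_ISDIR()/S_ISLNK() */
	struct inode *inode = ialloc(dir, mode);

	if (IS_ERR(inode))
		return PTR_ERR(inode);	/* e.g. -ENOMEM, -ENOSPC, -EDQUOT */

	/* ... dtInsert(), i_op/i_fop setup, txCommit() ... */

	d_instantiate_new(dentry, inode);
	return 0;
}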
21 17 18 4 23 21 4 11 22 12 1 24 18 23 23 13 37 1 36 14 23 18 38 39 2 8 13 18 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2017-2018 HUAWEI, Inc. * https://www.huawei.com/ * Copyright (C) 2022, Alibaba Cloud */ #include "xattr.h" #include <trace/events/erofs.h> struct erofs_qstr { const unsigned char *name; const unsigned char *end; }; /* based on the end of qn is accurate and it must have the trailing '\0' */ static inline int erofs_dirnamecmp(const struct erofs_qstr *qn, const struct erofs_qstr *qd, unsigned int *matched) { unsigned int i = *matched; /* * on-disk error, let's only BUG_ON in the debugging mode. * otherwise, it will return 1 to just skip the invalid name * and go on (in consideration of the lookup performance). */ DBG_BUGON(qd->name > qd->end); /* qd could not have trailing '\0' */ /* However it is absolutely safe if < qd->end */ while (qd->name + i < qd->end && qd->name[i] != '\0') { if (qn->name[i] != qd->name[i]) { *matched = i; return qn->name[i] > qd->name[i] ? 1 : -1; } ++i; } *matched = i; /* See comments in __d_alloc on the terminating NUL character */ return qn->name[i] == '\0' ? 0 : 1; } #define nameoff_from_disk(off, sz) (le16_to_cpu(off) & ((sz) - 1)) static struct erofs_dirent *find_target_dirent(struct erofs_qstr *name, u8 *data, unsigned int dirblksize, const int ndirents) { int head, back; unsigned int startprfx, endprfx; struct erofs_dirent *const de = (struct erofs_dirent *)data; /* since the 1st dirent has been evaluated previously */ head = 1; back = ndirents - 1; startprfx = endprfx = 0; while (head <= back) { const int mid = head + (back - head) / 2; const int nameoff = nameoff_from_disk(de[mid].nameoff, dirblksize); unsigned int matched = min(startprfx, endprfx); struct erofs_qstr dname = { .name = data + nameoff, .end = mid >= ndirents - 1 ? 
data + dirblksize : data + nameoff_from_disk(de[mid + 1].nameoff, dirblksize) }; /* string comparison without already matched prefix */ int ret = erofs_dirnamecmp(name, &dname, &matched); if (!ret) { return de + mid; } else if (ret > 0) { head = mid + 1; startprfx = matched; } else { back = mid - 1; endprfx = matched; } } return ERR_PTR(-ENOENT); } static void *erofs_find_target_block(struct erofs_buf *target, struct inode *dir, struct erofs_qstr *name, int *_ndirents) { unsigned int bsz = i_blocksize(dir); int head = 0, back = erofs_iblks(dir) - 1; unsigned int startprfx = 0, endprfx = 0; void *candidate = ERR_PTR(-ENOENT); while (head <= back) { const int mid = head + (back - head) / 2; struct erofs_buf buf = __EROFS_BUF_INITIALIZER; struct erofs_dirent *de; buf.inode = dir; de = erofs_bread(&buf, mid, EROFS_KMAP); if (!IS_ERR(de)) { const int nameoff = nameoff_from_disk(de->nameoff, bsz); const int ndirents = nameoff / sizeof(*de); int diff; unsigned int matched; struct erofs_qstr dname; if (!ndirents) { erofs_put_metabuf(&buf); erofs_err(dir->i_sb, "corrupted dir block %d @ nid %llu", mid, EROFS_I(dir)->nid); DBG_BUGON(1); de = ERR_PTR(-EFSCORRUPTED); goto out; } matched = min(startprfx, endprfx); dname.name = (u8 *)de + nameoff; if (ndirents == 1) dname.end = (u8 *)de + bsz; else dname.end = (u8 *)de + nameoff_from_disk(de[1].nameoff, bsz); /* string comparison without already matched prefix */ diff = erofs_dirnamecmp(name, &dname, &matched); if (diff < 0) { erofs_put_metabuf(&buf); back = mid - 1; endprfx = matched; continue; } if (!IS_ERR(candidate)) erofs_put_metabuf(target); *target = buf; if (!diff) { *_ndirents = 0; return de; } head = mid + 1; startprfx = matched; candidate = de; *_ndirents = ndirents; continue; } out: /* free if the candidate is valid */ if (!IS_ERR(candidate)) erofs_put_metabuf(target); return de; } return candidate; } int erofs_namei(struct inode *dir, const struct qstr *name, erofs_nid_t *nid, unsigned int *d_type) { int ndirents; struct erofs_buf buf = __EROFS_BUF_INITIALIZER; struct erofs_dirent *de; struct erofs_qstr qn; if (!dir->i_size) return -ENOENT; qn.name = name->name; qn.end = name->name + name->len; buf.inode = dir; ndirents = 0; de = erofs_find_target_block(&buf, dir, &qn, &ndirents); if (IS_ERR(de)) return PTR_ERR(de); if (ndirents) de = find_target_dirent(&qn, (u8 *)de, i_blocksize(dir), ndirents); if (!IS_ERR(de)) { *nid = le64_to_cpu(de->nid); *d_type = de->file_type; } erofs_put_metabuf(&buf); return PTR_ERR_OR_ZERO(de); } static struct dentry *erofs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { int err; erofs_nid_t nid; unsigned int d_type; struct inode *inode; trace_erofs_lookup(dir, dentry, flags); if (dentry->d_name.len > EROFS_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); err = erofs_namei(dir, &dentry->d_name, &nid, &d_type); if (err == -ENOENT) /* negative dentry */ inode = NULL; else if (err) inode = ERR_PTR(err); else inode = erofs_iget(dir->i_sb, nid); return d_splice_alias(inode, dentry); } const struct inode_operations erofs_dir_iops = { .lookup = erofs_lookup, .getattr = erofs_getattr, .listxattr = erofs_listxattr, .get_inode_acl = erofs_get_acl, .fiemap = erofs_fiemap, };
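/*
 * Illustrative sketch only: the prefix-accelerated binary search used by
 * find_target_dirent() and erofs_find_target_block() above, restated over a
 * plain sorted string array.  Characters already known to match both search
 * bounds never need to be compared again; that is what startprfx/endprfx
 * track in the on-disk lookup.  "prefix_bsearch" is hypothetical.
 */
static int prefix_cmp(const char *key, const char *cand, unsigned int *matched)
{
	unsigned int i = *matched;

	while (cand[i] != '\0') {
		if (key[i] != cand[i]) {
			*matched = i;
			return key[i] > cand[i] ? 1 : -1;
		}
		++i;
	}
	*matched = i;
	return key[i] == '\0' ? 0 : 1;
}

static int prefix_bsearch(const char *key, const char * const *names, int n)
{
	int head = 0, back = n - 1;
	unsigned int startprfx = 0, endprfx = 0;

	while (head <= back) {
		int mid = head + (back - head) / 2;
		/* both bounds already share at least this many characters */
		unsigned int matched = startprfx < endprfx ? startprfx : endprfx;
		int diff = prefix_cmp(key, names[mid], &matched);

		if (!diff)
			return mid;		/* found */
		if (diff > 0) {
			head = mid + 1;
			startprfx = matched;
		} else {
			back = mid - 1;
			endprfx = matched;
		}
	}
	return -1;				/* not found */
}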
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 /* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ /* * Copyright (c) 2005 Henk Vergonet <Henk.Vergonet@gmail.com> */ #ifndef MAP_TO_7SEGMENT_H #define MAP_TO_7SEGMENT_H /* This file provides translation primitives and tables for the conversion * of (ASCII) characters to a 7-segments notation. * * The 7 segment's wikipedia notation below is used as standard. * See: https://en.wikipedia.org/wiki/Seven_segment_display * * Notation: +-a-+ * f b * +-g-+ * e c * +-d-+ * * Usage: * * Register a map variable, and fill it with a character set: * static SEG7_DEFAULT_MAP(map_seg7); * * * Then use for conversion: * seg7 = map_to_seg7(&map_seg7, some_char); * ... * * In device drivers it is recommended, if required, to make the char map * accessible via the sysfs interface using the following scheme: * * static ssize_t map_seg7_show(struct device *dev, * struct device_attribute *attr, char *buf) * { * memcpy(buf, &map_seg7, sizeof(map_seg7)); * return sizeof(map_seg7); * } * static ssize_t map_seg7_store(struct device *dev, * struct device_attribute *attr, const char *buf, * size_t cnt) * { * if(cnt != sizeof(map_seg7)) * return -EINVAL; * memcpy(&map_seg7, buf, cnt); * return cnt; * } * static DEVICE_ATTR_RW(map_seg7); * * History: * 2005-05-31 RFC linux-kernel@vger.kernel.org */ #include <linux/errno.h> #define BIT_SEG7_A 0 #define BIT_SEG7_B 1 #define BIT_SEG7_C 2 #define BIT_SEG7_D 3 #define BIT_SEG7_E 4 #define BIT_SEG7_F 5 #define BIT_SEG7_G 6 #define BIT_SEG7_RESERVED 7 struct seg7_conversion_map { unsigned char table[128]; }; static __inline__ int map_to_seg7(struct seg7_conversion_map *map, int c) { return c >= 0 && c < sizeof(map->table) ? map->table[c] : -EINVAL; } #define SEG7_CONVERSION_MAP(_name, _map) \ struct seg7_conversion_map _name = { .table = { _map } } /* * It is recommended to use a facility that allows user space to redefine * custom character sets for LCD devices. Please use a sysfs interface * as described above. 
*/ #define MAP_TO_SEG7_SYSFS_FILE "map_seg7" /******************************************************************************* * ASCII conversion table ******************************************************************************/ #define _SEG7(l,a,b,c,d,e,f,g) \ ( a<<BIT_SEG7_A | b<<BIT_SEG7_B | c<<BIT_SEG7_C | d<<BIT_SEG7_D | \ e<<BIT_SEG7_E | f<<BIT_SEG7_F | g<<BIT_SEG7_G ) #define _MAP_0_32_ASCII_SEG7_NON_PRINTABLE \ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, #define _MAP_33_47_ASCII_SEG7_SYMBOL \ _SEG7('!',0,0,0,0,1,1,0), _SEG7('"',0,1,0,0,0,1,0), _SEG7('#',0,1,1,0,1,1,0),\ _SEG7('$',1,0,1,1,0,1,1), _SEG7('%',0,0,1,0,0,1,0), _SEG7('&',1,0,1,1,1,1,1),\ _SEG7('\'',0,0,0,0,0,1,0),_SEG7('(',1,0,0,1,1,1,0), _SEG7(')',1,1,1,1,0,0,0),\ _SEG7('*',0,1,1,0,1,1,1), _SEG7('+',0,1,1,0,0,0,1), _SEG7(',',0,0,0,0,1,0,0),\ _SEG7('-',0,0,0,0,0,0,1), _SEG7('.',0,0,0,0,1,0,0), _SEG7('/',0,1,0,0,1,0,1), #define _MAP_48_57_ASCII_SEG7_NUMERIC \ _SEG7('0',1,1,1,1,1,1,0), _SEG7('1',0,1,1,0,0,0,0), _SEG7('2',1,1,0,1,1,0,1),\ _SEG7('3',1,1,1,1,0,0,1), _SEG7('4',0,1,1,0,0,1,1), _SEG7('5',1,0,1,1,0,1,1),\ _SEG7('6',1,0,1,1,1,1,1), _SEG7('7',1,1,1,0,0,0,0), _SEG7('8',1,1,1,1,1,1,1),\ _SEG7('9',1,1,1,1,0,1,1), #define _MAP_58_64_ASCII_SEG7_SYMBOL \ _SEG7(':',0,0,0,1,0,0,1), _SEG7(';',0,0,0,1,0,0,1), _SEG7('<',1,0,0,0,0,1,1),\ _SEG7('=',0,0,0,1,0,0,1), _SEG7('>',1,1,0,0,0,0,1), _SEG7('?',1,1,1,0,0,1,0),\ _SEG7('@',1,1,0,1,1,1,1), #define _MAP_65_90_ASCII_SEG7_ALPHA_UPPR \ _SEG7('A',1,1,1,0,1,1,1), _SEG7('B',1,1,1,1,1,1,1), _SEG7('C',1,0,0,1,1,1,0),\ _SEG7('D',1,1,1,1,1,1,0), _SEG7('E',1,0,0,1,1,1,1), _SEG7('F',1,0,0,0,1,1,1),\ _SEG7('G',1,1,1,1,0,1,1), _SEG7('H',0,1,1,0,1,1,1), _SEG7('I',0,1,1,0,0,0,0),\ _SEG7('J',0,1,1,1,0,0,0), _SEG7('K',0,1,1,0,1,1,1), _SEG7('L',0,0,0,1,1,1,0),\ _SEG7('M',1,1,1,0,1,1,0), _SEG7('N',1,1,1,0,1,1,0), _SEG7('O',1,1,1,1,1,1,0),\ _SEG7('P',1,1,0,0,1,1,1), _SEG7('Q',1,1,1,1,1,1,0), _SEG7('R',1,1,1,0,1,1,1),\ _SEG7('S',1,0,1,1,0,1,1), _SEG7('T',0,0,0,1,1,1,1), _SEG7('U',0,1,1,1,1,1,0),\ _SEG7('V',0,1,1,1,1,1,0), _SEG7('W',0,1,1,1,1,1,1), _SEG7('X',0,1,1,0,1,1,1),\ _SEG7('Y',0,1,1,0,0,1,1), _SEG7('Z',1,1,0,1,1,0,1), #define _MAP_91_96_ASCII_SEG7_SYMBOL \ _SEG7('[',1,0,0,1,1,1,0), _SEG7('\\',0,0,1,0,0,1,1),_SEG7(']',1,1,1,1,0,0,0),\ _SEG7('^',1,1,0,0,0,1,0), _SEG7('_',0,0,0,1,0,0,0), _SEG7('`',0,1,0,0,0,0,0), #define _MAP_97_122_ASCII_SEG7_ALPHA_LOWER \ _SEG7('A',1,1,1,0,1,1,1), _SEG7('b',0,0,1,1,1,1,1), _SEG7('c',0,0,0,1,1,0,1),\ _SEG7('d',0,1,1,1,1,0,1), _SEG7('E',1,0,0,1,1,1,1), _SEG7('F',1,0,0,0,1,1,1),\ _SEG7('G',1,1,1,1,0,1,1), _SEG7('h',0,0,1,0,1,1,1), _SEG7('i',0,0,1,0,0,0,0),\ _SEG7('j',0,0,1,1,0,0,0), _SEG7('k',0,0,1,0,1,1,1), _SEG7('L',0,0,0,1,1,1,0),\ _SEG7('M',1,1,1,0,1,1,0), _SEG7('n',0,0,1,0,1,0,1), _SEG7('o',0,0,1,1,1,0,1),\ _SEG7('P',1,1,0,0,1,1,1), _SEG7('q',1,1,1,0,0,1,1), _SEG7('r',0,0,0,0,1,0,1),\ _SEG7('S',1,0,1,1,0,1,1), _SEG7('T',0,0,0,1,1,1,1), _SEG7('u',0,0,1,1,1,0,0),\ _SEG7('v',0,0,1,1,1,0,0), _SEG7('W',0,1,1,1,1,1,1), _SEG7('X',0,1,1,0,1,1,1),\ _SEG7('y',0,1,1,1,0,1,1), _SEG7('Z',1,1,0,1,1,0,1), #define _MAP_123_126_ASCII_SEG7_SYMBOL \ _SEG7('{',1,0,0,1,1,1,0), _SEG7('|',0,0,0,0,1,1,0), _SEG7('}',1,1,1,1,0,0,0),\ _SEG7('~',1,0,0,0,0,0,0), /* Maps */ /* This set tries to map as close as possible to the visible characteristics * of the ASCII symbol, lowercase and uppercase letters may differ in * presentation on the display. 
*/ #define MAP_ASCII7SEG_ALPHANUM \ _MAP_0_32_ASCII_SEG7_NON_PRINTABLE \ _MAP_33_47_ASCII_SEG7_SYMBOL \ _MAP_48_57_ASCII_SEG7_NUMERIC \ _MAP_58_64_ASCII_SEG7_SYMBOL \ _MAP_65_90_ASCII_SEG7_ALPHA_UPPR \ _MAP_91_96_ASCII_SEG7_SYMBOL \ _MAP_97_122_ASCII_SEG7_ALPHA_LOWER \ _MAP_123_126_ASCII_SEG7_SYMBOL /* This set tries to map as close as possible to the symbolic characteristics * of the ASCII character for maximum discrimination. * For now this means all alpha chars are in lower case representations. * (This for example facilitates the use of hex numbers with uppercase input.) */ #define MAP_ASCII7SEG_ALPHANUM_LC \ _MAP_0_32_ASCII_SEG7_NON_PRINTABLE \ _MAP_33_47_ASCII_SEG7_SYMBOL \ _MAP_48_57_ASCII_SEG7_NUMERIC \ _MAP_58_64_ASCII_SEG7_SYMBOL \ _MAP_97_122_ASCII_SEG7_ALPHA_LOWER \ _MAP_91_96_ASCII_SEG7_SYMBOL \ _MAP_97_122_ASCII_SEG7_ALPHA_LOWER \ _MAP_123_126_ASCII_SEG7_SYMBOL #define SEG7_DEFAULT_MAP(_name) \ SEG7_CONVERSION_MAP(_name,MAP_ASCII7SEG_ALPHANUM) #endif /* MAP_TO_7SEGMENT_H */
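/*
 * Illustrative sketch only: driving a multi-digit display through the
 * default map above.  "my_write_cell" is hypothetical -- a real driver
 * would poke the segment pattern into its display controller.  As a worked
 * example, '7' maps to _SEG7('7',1,1,1,0,0,0,0), i.e. segments a, b and c,
 * which are bits 0..2, giving the value 0x07.
 */
void my_write_cell(int pos, int seg7);	/* hypothetical display output */

static SEG7_DEFAULT_MAP(my_map_seg7);

static void my_show_string(const char *s)
{
	int pos;

	for (pos = 0; *s; s++, pos++) {
		int seg7 = map_to_seg7(&my_map_seg7, *s);

		if (seg7 < 0)		/* out-of-range char: -EINVAL */
			seg7 = 0;	/* blank this cell */
		my_write_cell(pos, seg7);
	}
}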
2 9 1 1 1 2 2 2 2 2 4 3 3 1 1 1 1 1 1 1 1 1 1 1 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 // SPDX-License-Identifier: GPL-2.0-only /* * irqchip.c: Common API for in kernel interrupt controllers * Copyright (c) 2007, Intel Corporation. * Copyright 2010 Red Hat, Inc. and/or its affiliates. * Copyright (c) 2013, Alexander Graf <agraf@suse.de> * * This file is derived from virt/kvm/irq_comm.c. * * Authors: * Yaozu (Eddie) Dong <Eddie.dong@intel.com> * Alexander Graf <agraf@suse.de> */ #include <linux/kvm_host.h> #include <linux/slab.h> #include <linux/srcu.h> #include <linux/export.h> #include <trace/events/kvm.h> int kvm_irq_map_gsi(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *entries, int gsi) { struct kvm_irq_routing_table *irq_rt; struct kvm_kernel_irq_routing_entry *e; int n = 0; irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu, lockdep_is_held(&kvm->irq_lock)); if (irq_rt && gsi < irq_rt->nr_rt_entries) { hlist_for_each_entry(e, &irq_rt->map[gsi], link) { entries[n] = *e; ++n; } } return n; } int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin) { struct kvm_irq_routing_table *irq_rt; irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu); return irq_rt->chip[irqchip][pin]; } int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi) { struct kvm_kernel_irq_routing_entry route; if (!kvm_arch_irqchip_in_kernel(kvm) || (msi->flags & ~KVM_MSI_VALID_DEVID)) return -EINVAL; route.msi.address_lo = msi->address_lo; route.msi.address_hi = msi->address_hi; route.msi.data = msi->data; route.msi.flags = msi->flags; route.msi.devid = msi->devid; return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, false); } /* * Return value: * < 0 Interrupt was ignored (masked or not delivered for other reasons) * = 0 Interrupt was coalesced (previous irq is still pending) * > 0 Number of CPUs interrupt was delivered to */ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, bool line_status) { struct kvm_kernel_irq_routing_entry irq_set[KVM_NR_IRQCHIPS]; int ret = -1, i, idx; trace_kvm_set_irq(irq, level, irq_source_id); /* Not possible to detect if the guest uses the PIC or the * IOAPIC. So set the bit in both. The guest will ignore * writes to the unused one. */ idx = srcu_read_lock(&kvm->irq_srcu); i = kvm_irq_map_gsi(kvm, irq_set, irq); srcu_read_unlock(&kvm->irq_srcu, idx); while (i--) { int r; r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level, line_status); if (r < 0) continue; ret = r + ((ret < 0) ? 
0 : ret); } return ret; } static void free_irq_routing_table(struct kvm_irq_routing_table *rt) { int i; if (!rt) return; for (i = 0; i < rt->nr_rt_entries; ++i) { struct kvm_kernel_irq_routing_entry *e; struct hlist_node *n; hlist_for_each_entry_safe(e, n, &rt->map[i], link) { hlist_del(&e->link); kfree(e); } } kfree(rt); } void kvm_free_irq_routing(struct kvm *kvm) { /* Called only during vm destruction. Nobody can use the pointer at this stage */ struct kvm_irq_routing_table *rt = rcu_access_pointer(kvm->irq_routing); free_irq_routing_table(rt); } static int setup_routing_entry(struct kvm *kvm, struct kvm_irq_routing_table *rt, struct kvm_kernel_irq_routing_entry *e, const struct kvm_irq_routing_entry *ue) { struct kvm_kernel_irq_routing_entry *ei; int r; u32 gsi = array_index_nospec(ue->gsi, KVM_MAX_IRQ_ROUTES); /* * Do not allow GSI to be mapped to the same irqchip more than once. * Allow only one to one mapping between GSI and non-irqchip routing. */ hlist_for_each_entry(ei, &rt->map[gsi], link) if (ei->type != KVM_IRQ_ROUTING_IRQCHIP || ue->type != KVM_IRQ_ROUTING_IRQCHIP || ue->u.irqchip.irqchip == ei->irqchip.irqchip) return -EINVAL; e->gsi = gsi; e->type = ue->type; r = kvm_set_routing_entry(kvm, e, ue); if (r) return r; if (e->type == KVM_IRQ_ROUTING_IRQCHIP) rt->chip[e->irqchip.irqchip][e->irqchip.pin] = e->gsi; hlist_add_head(&e->link, &rt->map[e->gsi]); return 0; } void __attribute__((weak)) kvm_arch_irq_routing_update(struct kvm *kvm) { } bool __weak kvm_arch_can_set_irq_routing(struct kvm *kvm) { return true; } int kvm_set_irq_routing(struct kvm *kvm, const struct kvm_irq_routing_entry *ue, unsigned nr, unsigned flags) { struct kvm_irq_routing_table *new, *old; struct kvm_kernel_irq_routing_entry *e; u32 i, j, nr_rt_entries = 0; int r; for (i = 0; i < nr; ++i) { if (ue[i].gsi >= KVM_MAX_IRQ_ROUTES) return -EINVAL; nr_rt_entries = max(nr_rt_entries, ue[i].gsi); } nr_rt_entries += 1; new = kzalloc(struct_size(new, map, nr_rt_entries), GFP_KERNEL_ACCOUNT); if (!new) return -ENOMEM; new->nr_rt_entries = nr_rt_entries; for (i = 0; i < KVM_NR_IRQCHIPS; i++) for (j = 0; j < KVM_IRQCHIP_NUM_PINS; j++) new->chip[i][j] = -1; for (i = 0; i < nr; ++i) { r = -ENOMEM; e = kzalloc(sizeof(*e), GFP_KERNEL_ACCOUNT); if (!e) goto out; r = -EINVAL; switch (ue->type) { case KVM_IRQ_ROUTING_MSI: if (ue->flags & ~KVM_MSI_VALID_DEVID) goto free_entry; break; default: if (ue->flags) goto free_entry; break; } r = setup_routing_entry(kvm, new, e, ue); if (r) goto free_entry; ++ue; } mutex_lock(&kvm->irq_lock); old = rcu_dereference_protected(kvm->irq_routing, 1); rcu_assign_pointer(kvm->irq_routing, new); kvm_irq_routing_update(kvm); kvm_arch_irq_routing_update(kvm); mutex_unlock(&kvm->irq_lock); kvm_arch_post_irq_routing_update(kvm); synchronize_srcu_expedited(&kvm->irq_srcu); new = old; r = 0; goto out; free_entry: kfree(e); out: free_irq_routing_table(new); return r; }
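/*
 * Illustrative sketch only: what the routing-table rebuild above typically
 * serves.  A user-space VMM fills an array of kvm_irq_routing_entry
 * structures and passes it to the KVM_SET_GSI_ROUTING ioctl on the VM fd;
 * kvm_set_irq_routing() then rebuilds the gsi -> handler map consulted by
 * kvm_set_irq().  The structure and ioctl names are taken from
 * <linux/kvm.h>; "vm_fd" and the MSI values are assumptions.
 */
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int route_gsi_to_msi(int vm_fd, unsigned int gsi,
			    __u32 addr_lo, __u32 addr_hi, __u32 data)
{
	struct kvm_irq_routing *table;
	int ret;

	/* one header plus a single routing entry, zero-initialised */
	table = calloc(1, sizeof(*table) + sizeof(table->entries[0]));
	if (!table)
		return -1;

	table->nr = 1;
	table->entries[0].gsi = gsi;
	table->entries[0].type = KVM_IRQ_ROUTING_MSI;
	table->entries[0].u.msi.address_lo = addr_lo;
	table->entries[0].u.msi.address_hi = addr_hi;
	table->entries[0].u.msi.data = data;

	/* replaces the VM's entire GSI routing table in one call */
	ret = ioctl(vm_fd, KVM_SET_GSI_ROUTING, table);
	free(table);
	return ret;
}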
// SPDX-License-Identifier: GPL-2.0-only /* * This file contains vfs address (mmap) ops for 9P2000. * * Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com> * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov> */ #include <linux/module.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/pagemap.h> #include <linux/sched.h> #include <linux/swap.h> #include <linux/uio.h> #include <linux/netfs.h> #include <net/9p/9p.h> #include <net/9p/client.h> #include <trace/events/netfs.h> #include "v9fs.h" #include "v9fs_vfs.h" #include "cache.h" #include "fid.h" static void v9fs_upload_to_server(struct netfs_io_subrequest *subreq) { struct p9_fid *fid = subreq->rreq->netfs_priv; int err, len; trace_netfs_sreq(subreq, netfs_sreq_trace_submit); len = p9_client_write(fid, subreq->start, &subreq->io_iter, &err); netfs_write_subrequest_terminated(subreq, len ?: err, false); } static void v9fs_upload_to_server_worker(struct work_struct *work) { struct netfs_io_subrequest *subreq = container_of(work, struct netfs_io_subrequest, work); v9fs_upload_to_server(subreq); } /* * Set up write requests for a writeback slice. We need to add a write request * for each write we want to make. */ static void v9fs_create_write_requests(struct netfs_io_request *wreq, loff_t start, size_t len) { struct netfs_io_subrequest *subreq; subreq = netfs_create_write_request(wreq, NETFS_UPLOAD_TO_SERVER, start, len, v9fs_upload_to_server_worker); if (subreq) netfs_queue_write_request(subreq); } /** * v9fs_issue_read - Issue a read from 9P * @subreq: The read to make */ static void v9fs_issue_read(struct netfs_io_subrequest *subreq) { struct netfs_io_request *rreq = subreq->rreq; struct p9_fid *fid = rreq->netfs_priv; int total, err; total = p9_client_read(fid, subreq->start + subreq->transferred, &subreq->io_iter, &err); /* if we just extended the file size, any portion not in * cache won't be on server and is zeroes */ __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); netfs_subreq_terminated(subreq, err ?: total, false); } /** * v9fs_init_request - Initialise a request * @rreq: The read request * @file: The file being read from */ static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file) { struct p9_fid *fid; bool writing = (rreq->origin == NETFS_READ_FOR_WRITE || rreq->origin == NETFS_WRITEBACK || rreq->origin == NETFS_WRITETHROUGH || rreq->origin == NETFS_LAUNDER_WRITE || rreq->origin == NETFS_UNBUFFERED_WRITE || rreq->origin == NETFS_DIO_WRITE); if (file) { fid = file->private_data; if (!fid) goto no_fid; p9_fid_get(fid); } else { fid = v9fs_fid_find_inode(rreq->inode, writing, INVALID_UID, true); if (!fid) goto no_fid; } /* we might need to read from a fid that was opened write-only * for read-modify-write of page cache, use the writeback fid * for that */ WARN_ON(rreq->origin == NETFS_READ_FOR_WRITE && !(fid->mode & P9_ORDWR)); rreq->netfs_priv = fid; return 0; no_fid: WARN_ONCE(1, "folio expected an open fid inode->i_ino=%lx\n",
rreq->inode->i_ino); return -EINVAL; } /** * v9fs_free_request - Cleanup request initialized by v9fs_init_rreq * @rreq: The I/O request to clean up */ static void v9fs_free_request(struct netfs_io_request *rreq) { struct p9_fid *fid = rreq->netfs_priv; p9_fid_put(fid); } const struct netfs_request_ops v9fs_req_ops = { .init_request = v9fs_init_request, .free_request = v9fs_free_request, .issue_read = v9fs_issue_read, .create_write_requests = v9fs_create_write_requests, }; const struct address_space_operations v9fs_addr_operations = { .read_folio = netfs_read_folio, .readahead = netfs_readahead, .dirty_folio = netfs_dirty_folio, .release_folio = netfs_release_folio, .invalidate_folio = netfs_invalidate_folio, .launder_folio = netfs_launder_folio, .direct_IO = noop_direct_IO, .writepages = netfs_writepages, };
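/*
 * Illustrative sketch only: the helper is hypothetical and not part of
 * vfs_addr.c.  It shows where the two tables above plug in, in the style of
 * the 9p inode-setup code: the inode's address_space points at
 * v9fs_addr_operations, and netfs then drives I/O through v9fs_req_ops
 * (v9fs_init_request() picks the fid, v9fs_issue_read() and
 * v9fs_create_write_requests() perform the 9P transfers).
 */
static void example_v9fs_wire_aops(struct inode *inode)
{
	/* Buffered reads and writes on this inode now flow through netfs + 9P. */
	inode->i_mapping->a_ops = &v9fs_addr_operations;
}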
2 2 1 1 1 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 // SPDX-License-Identifier: GPL-2.0-only /* * * Copyright (C) 2005 Mike Isely <isely@pobox.com> * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/videodev2.h> #include "pvrusb2-hdw.h" #include "pvrusb2-devattr.h" #include "pvrusb2-context.h" #include "pvrusb2-debug.h" #include "pvrusb2-v4l2.h" #include "pvrusb2-sysfs.h" #define DRIVER_AUTHOR "Mike Isely <isely@pobox.com>" #define DRIVER_DESC "Hauppauge WinTV-PVR-USB2 MPEG2 Encoder/Tuner" #define DRIVER_VERSION "V4L in-tree version" #define DEFAULT_DEBUG_MASK (PVR2_TRACE_ERROR_LEGS| \ PVR2_TRACE_INFO| \ PVR2_TRACE_STD| \ PVR2_TRACE_TOLERANCE| \ PVR2_TRACE_TRAP| \ 0) int pvrusb2_debug = DEFAULT_DEBUG_MASK; module_param_named(debug,pvrusb2_debug,int,S_IRUGO|S_IWUSR); MODULE_PARM_DESC(debug, "Debug trace mask"); static void pvr_setup_attach(struct pvr2_context *pvr) { /* Create association with v4l layer */ pvr2_v4l2_create(pvr); #ifdef CONFIG_VIDEO_PVRUSB2_DVB /* Create association with dvb layer */ pvr2_dvb_create(pvr); #endif pvr2_sysfs_create(pvr); } static int pvr_probe(struct usb_interface *intf, const struct usb_device_id *devid) { struct pvr2_context *pvr; /* Create underlying hardware interface */ pvr = pvr2_context_create(intf,devid,pvr_setup_attach); if (!pvr) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "Failed to create hdw handler"); return -ENOMEM; } pvr2_trace(PVR2_TRACE_INIT,"pvr_probe(pvr=%p)",pvr); usb_set_intfdata(intf, pvr); return 0; } /* * pvr_disconnect() * */ static void pvr_disconnect(struct usb_interface *intf) { struct pvr2_context *pvr = usb_get_intfdata(intf); pvr2_trace(PVR2_TRACE_INIT,"pvr_disconnect(pvr=%p) BEGIN",pvr); usb_set_intfdata (intf, NULL); pvr2_context_disconnect(pvr); pvr2_trace(PVR2_TRACE_INIT,"pvr_disconnect(pvr=%p) DONE",pvr); } static struct usb_driver pvr_driver = { .name = "pvrusb2", .id_table = pvr2_device_table, .probe = pvr_probe, .disconnect = pvr_disconnect }; /* * pvr_init() / pvr_exit() * * This code is run to initialize/exit the driver. * */ static int __init pvr_init(void) { int ret; pvr2_trace(PVR2_TRACE_INIT,"pvr_init"); ret = pvr2_context_global_init(); if (ret != 0) { pvr2_trace(PVR2_TRACE_INIT,"pvr_init failure code=%d",ret); return ret; } pvr2_sysfs_class_create(); ret = usb_register(&pvr_driver); if (ret == 0) pr_info("pvrusb2: " DRIVER_VERSION ":" DRIVER_DESC "\n"); if (pvrusb2_debug) pr_info("pvrusb2: Debug mask is %d (0x%x)\n", pvrusb2_debug,pvrusb2_debug); pvr2_trace(PVR2_TRACE_INIT,"pvr_init complete"); return ret; } static void __exit pvr_exit(void) { pvr2_trace(PVR2_TRACE_INIT,"pvr_exit"); usb_deregister(&pvr_driver); pvr2_context_global_done(); pvr2_sysfs_class_destroy(); pvr2_trace(PVR2_TRACE_INIT,"pvr_exit complete"); } module_init(pvr_init); module_exit(pvr_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); MODULE_VERSION("0.9.1");
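/*
 * Illustrative note, not part of pvrusb2-main.c: the trace mask is a plain
 * bitwise OR of PVR2_TRACE_* classes, so extra classes can be added to the
 * default set.  The helper below is hypothetical; the same value would
 * typically be passed via the "debug" module parameter at load time.
 */
static int example_pvr2_debug_mask(void)
{
	/* Default trace classes plus init/teardown tracing. */
	return DEFAULT_DEBUG_MASK | PVR2_TRACE_INIT;
}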
// SPDX-License-Identifier: GPL-2.0-or-later /* * net/sched/act_skbmod.c skb data modifier * * Copyright (c) 2016 Jamal Hadi Salim <jhs@mojatatu.com> */ #include <linux/module.h> #include <linux/if_arp.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <net/inet_ecn.h> #include <net/netlink.h> #include <net/pkt_sched.h> #include <net/pkt_cls.h> #include <net/tc_wrapper.h> #include <linux/tc_act/tc_skbmod.h> #include <net/tc_act/tc_skbmod.h> static struct tc_action_ops act_skbmod_ops; TC_INDIRECT_SCOPE int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { struct tcf_skbmod *d = to_skbmod(a); int action, max_edit_len, err; struct tcf_skbmod_params *p; u64 flags; tcf_lastuse_update(&d->tcf_tm); bstats_update(this_cpu_ptr(d->common.cpu_bstats), skb); action = READ_ONCE(d->tcf_action); if (unlikely(action == TC_ACT_SHOT)) goto drop; max_edit_len = skb_mac_header_len(skb); p = rcu_dereference_bh(d->skbmod_p); flags = p->flags; /* tcf_skbmod_init() guarantees "flags" to be one of the following: * 1. a combination of SKBMOD_F_{DMAC,SMAC,ETYPE} * 2. SKBMOD_F_SWAPMAC * 3. SKBMOD_F_ECN * SKBMOD_F_ECN only works with IP packets; all other flags only work with Ethernet * packets.
*/ if (flags == SKBMOD_F_ECN) { switch (skb_protocol(skb, true)) { case cpu_to_be16(ETH_P_IP): case cpu_to_be16(ETH_P_IPV6): max_edit_len += skb_network_header_len(skb); break; default: goto out; } } else if (!skb->dev || skb->dev->type != ARPHRD_ETHER) { goto out; } err = skb_ensure_writable(skb, max_edit_len); if (unlikely(err)) /* best policy is to drop on the floor */ goto drop; if (flags & SKBMOD_F_DMAC) ether_addr_copy(eth_hdr(skb)->h_dest, p->eth_dst); if (flags & SKBMOD_F_SMAC) ether_addr_copy(eth_hdr(skb)->h_source, p->eth_src); if (flags & SKBMOD_F_ETYPE) eth_hdr(skb)->h_proto = p->eth_type; if (flags & SKBMOD_F_SWAPMAC) { u16 tmpaddr[ETH_ALEN / 2]; /* ether_addr_copy() requirement */ /*XXX: I am sure we can come up with more efficient swapping*/ ether_addr_copy((u8 *)tmpaddr, eth_hdr(skb)->h_dest); ether_addr_copy(eth_hdr(skb)->h_dest, eth_hdr(skb)->h_source); ether_addr_copy(eth_hdr(skb)->h_source, (u8 *)tmpaddr); } if (flags & SKBMOD_F_ECN) INET_ECN_set_ce(skb); out: return action; drop: qstats_overlimit_inc(this_cpu_ptr(d->common.cpu_qstats)); return TC_ACT_SHOT; } static const struct nla_policy skbmod_policy[TCA_SKBMOD_MAX + 1] = { [TCA_SKBMOD_PARMS] = { .len = sizeof(struct tc_skbmod) }, [TCA_SKBMOD_DMAC] = { .len = ETH_ALEN }, [TCA_SKBMOD_SMAC] = { .len = ETH_ALEN }, [TCA_SKBMOD_ETYPE] = { .type = NLA_U16 }, }; static int tcf_skbmod_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, act_skbmod_ops.net_id); bool ovr = flags & TCA_ACT_FLAGS_REPLACE; bool bind = flags & TCA_ACT_FLAGS_BIND; struct nlattr *tb[TCA_SKBMOD_MAX + 1]; struct tcf_skbmod_params *p, *p_old; struct tcf_chain *goto_ch = NULL; struct tc_skbmod *parm; u32 lflags = 0, index; struct tcf_skbmod *d; bool exists = false; u8 *daddr = NULL; u8 *saddr = NULL; u16 eth_type = 0; int ret = 0, err; if (!nla) return -EINVAL; err = nla_parse_nested_deprecated(tb, TCA_SKBMOD_MAX, nla, skbmod_policy, NULL); if (err < 0) return err; if (!tb[TCA_SKBMOD_PARMS]) return -EINVAL; if (tb[TCA_SKBMOD_DMAC]) { daddr = nla_data(tb[TCA_SKBMOD_DMAC]); lflags |= SKBMOD_F_DMAC; } if (tb[TCA_SKBMOD_SMAC]) { saddr = nla_data(tb[TCA_SKBMOD_SMAC]); lflags |= SKBMOD_F_SMAC; } if (tb[TCA_SKBMOD_ETYPE]) { eth_type = nla_get_u16(tb[TCA_SKBMOD_ETYPE]); lflags |= SKBMOD_F_ETYPE; } parm = nla_data(tb[TCA_SKBMOD_PARMS]); index = parm->index; if (parm->flags & SKBMOD_F_SWAPMAC) lflags = SKBMOD_F_SWAPMAC; if (parm->flags & SKBMOD_F_ECN) lflags = SKBMOD_F_ECN; err = tcf_idr_check_alloc(tn, &index, a, bind); if (err < 0) return err; exists = err; if (exists && bind) return ACT_P_BOUND; if (!lflags) { if (exists) tcf_idr_release(*a, bind); else tcf_idr_cleanup(tn, index); return -EINVAL; } if (!exists) { ret = tcf_idr_create(tn, index, est, a, &act_skbmod_ops, bind, true, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; } ret = ACT_P_CREATED; } else if (!ovr) { tcf_idr_release(*a, bind); return -EEXIST; } err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); if (err < 0) goto release_idr; d = to_skbmod(*a); p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL); if (unlikely(!p)) { err = -ENOMEM; goto put_chain; } p->flags = lflags; if (ovr) spin_lock_bh(&d->tcf_lock); /* Protected by tcf_lock if overwriting existing action. 
*/ goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); p_old = rcu_dereference_protected(d->skbmod_p, 1); if (lflags & SKBMOD_F_DMAC) ether_addr_copy(p->eth_dst, daddr); if (lflags & SKBMOD_F_SMAC) ether_addr_copy(p->eth_src, saddr); if (lflags & SKBMOD_F_ETYPE) p->eth_type = htons(eth_type); rcu_assign_pointer(d->skbmod_p, p); if (ovr) spin_unlock_bh(&d->tcf_lock); if (p_old) kfree_rcu(p_old, rcu); if (goto_ch) tcf_chain_put_by_act(goto_ch); return ret; put_chain: if (goto_ch) tcf_chain_put_by_act(goto_ch); release_idr: tcf_idr_release(*a, bind); return err; } static void tcf_skbmod_cleanup(struct tc_action *a) { struct tcf_skbmod *d = to_skbmod(a); struct tcf_skbmod_params *p; p = rcu_dereference_protected(d->skbmod_p, 1); if (p) kfree_rcu(p, rcu); } static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { struct tcf_skbmod *d = to_skbmod(a); unsigned char *b = skb_tail_pointer(skb); struct tcf_skbmod_params *p; struct tc_skbmod opt; struct tcf_t t; memset(&opt, 0, sizeof(opt)); opt.index = d->tcf_index; opt.refcnt = refcount_read(&d->tcf_refcnt) - ref, opt.bindcnt = atomic_read(&d->tcf_bindcnt) - bind; spin_lock_bh(&d->tcf_lock); opt.action = d->tcf_action; p = rcu_dereference_protected(d->skbmod_p, lockdep_is_held(&d->tcf_lock)); opt.flags = p->flags; if (nla_put(skb, TCA_SKBMOD_PARMS, sizeof(opt), &opt)) goto nla_put_failure; if ((p->flags & SKBMOD_F_DMAC) && nla_put(skb, TCA_SKBMOD_DMAC, ETH_ALEN, p->eth_dst)) goto nla_put_failure; if ((p->flags & SKBMOD_F_SMAC) && nla_put(skb, TCA_SKBMOD_SMAC, ETH_ALEN, p->eth_src)) goto nla_put_failure; if ((p->flags & SKBMOD_F_ETYPE) && nla_put_u16(skb, TCA_SKBMOD_ETYPE, ntohs(p->eth_type))) goto nla_put_failure; tcf_tm_dump(&t, &d->tcf_tm); if (nla_put_64bit(skb, TCA_SKBMOD_TM, sizeof(t), &t, TCA_SKBMOD_PAD)) goto nla_put_failure; spin_unlock_bh(&d->tcf_lock); return skb->len; nla_put_failure: spin_unlock_bh(&d->tcf_lock); nlmsg_trim(skb, b); return -1; } static struct tc_action_ops act_skbmod_ops = { .kind = "skbmod", .id = TCA_ACT_SKBMOD, .owner = THIS_MODULE, .act = tcf_skbmod_act, .dump = tcf_skbmod_dump, .init = tcf_skbmod_init, .cleanup = tcf_skbmod_cleanup, .size = sizeof(struct tcf_skbmod), }; MODULE_ALIAS_NET_ACT("skbmod"); static __net_init int skbmod_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, act_skbmod_ops.net_id); return tc_action_net_init(net, tn, &act_skbmod_ops); } static void __net_exit skbmod_exit_net(struct list_head *net_list) { tc_action_net_exit(net_list, act_skbmod_ops.net_id); } static struct pernet_operations skbmod_net_ops = { .init = skbmod_init_net, .exit_batch = skbmod_exit_net, .id = &act_skbmod_ops.net_id, .size = sizeof(struct tc_action_net), }; MODULE_AUTHOR("Jamal Hadi Salim, <jhs@mojatatu.com>"); MODULE_DESCRIPTION("SKB data mod-ing"); MODULE_LICENSE("GPL"); static int __init skbmod_init_module(void) { return tcf_register_action(&act_skbmod_ops, &skbmod_net_ops); } static void __exit skbmod_cleanup_module(void) { tcf_unregister_action(&act_skbmod_ops, &skbmod_net_ops); } module_init(skbmod_init_module); module_exit(skbmod_cleanup_module);
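/*
 * Illustrative sketch only: the helper is hypothetical and not part of
 * act_skbmod.c.  It restates the flag precedence that tcf_skbmod_init()
 * enforces and that tcf_skbmod_act() relies on: SWAPMAC and ECN each replace
 * any DMAC/SMAC/ETYPE edits rather than combining with them.
 */
static u32 example_skbmod_effective_flags(u32 parm_flags, u32 attr_flags)
{
	u32 lflags = attr_flags;	/* SKBMOD_F_{DMAC,SMAC,ETYPE} from netlink */

	if (parm_flags & SKBMOD_F_SWAPMAC)
		lflags = SKBMOD_F_SWAPMAC;
	if (parm_flags & SKBMOD_F_ECN)
		lflags = SKBMOD_F_ECN;
	return lflags;
}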
151 151 153 152 133 131 47 47 133 81 9 9 80 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 // SPDX-License-Identifier: GPL-2.0 /* * linux/drivers/base/map.c * * (C) Copyright Al Viro 2002,2003 * * NOTE: data structure needs to be changed. It works, but for large dev_t * it will be too slow. It is isolated, though, so these changes will be * local to that file. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/kdev_t.h> #include <linux/kobject.h> #include <linux/kobj_map.h> struct kobj_map { struct probe { struct probe *next; dev_t dev; unsigned long range; struct module *owner; kobj_probe_t *get; int (*lock)(dev_t, void *); void *data; } *probes[255]; struct mutex *lock; }; int kobj_map(struct kobj_map *domain, dev_t dev, unsigned long range, struct module *module, kobj_probe_t *probe, int (*lock)(dev_t, void *), void *data) { unsigned int n = MAJOR(dev + range - 1) - MAJOR(dev) + 1; unsigned int index = MAJOR(dev); unsigned int i; struct probe *p; if (n > 255) n = 255; p = kmalloc_array(n, sizeof(struct probe), GFP_KERNEL); if (p == NULL) return -ENOMEM; for (i = 0; i < n; i++, p++) { p->owner = module; p->get = probe; p->lock = lock; p->dev = dev; p->range = range; p->data = data; } mutex_lock(domain->lock); for (i = 0, p -= n; i < n; i++, p++, index++) { struct probe **s = &domain->probes[index % 255]; while (*s && (*s)->range < range) s = &(*s)->next; p->next = *s; *s = p; } mutex_unlock(domain->lock); return 0; } void kobj_unmap(struct kobj_map *domain, dev_t dev, unsigned long range) { unsigned int n = MAJOR(dev + range - 1) - MAJOR(dev) + 1; unsigned int index = MAJOR(dev); unsigned int i; struct probe *found = NULL; if (n > 255) n = 255; mutex_lock(domain->lock); for (i = 0; i < n; i++, index++) { struct probe **s; for (s = &domain->probes[index % 255]; *s; s = &(*s)->next) { struct probe *p = *s; if (p->dev == dev && p->range == range) { *s = p->next; if (!found) found = p; break; } } } mutex_unlock(domain->lock); kfree(found); } struct kobject *kobj_lookup(struct kobj_map *domain, dev_t dev, int *index) { struct kobject *kobj; struct probe *p; unsigned long best = ~0UL; retry: mutex_lock(domain->lock); for (p = domain->probes[MAJOR(dev) % 255]; p; p = p->next) { struct kobject *(*probe)(dev_t, int *, void *); struct module *owner; void *data; if (p->dev > dev || p->dev + p->range - 1 < dev) continue; if (p->range - 1 >= best) break; if (!try_module_get(p->owner)) continue; owner = p->owner; data = p->data; probe = p->get; best = p->range - 1; *index = dev - p->dev; if (p->lock && p->lock(dev, data) < 0) { module_put(owner); continue; } mutex_unlock(domain->lock); kobj = probe(dev, index, data); /* Currently ->owner protects _only_ ->probe() itself. 
*/ module_put(owner); if (kobj) return kobj; goto retry; } mutex_unlock(domain->lock); return NULL; } struct kobj_map *kobj_map_init(kobj_probe_t *base_probe, struct mutex *lock) { struct kobj_map *p = kmalloc(sizeof(struct kobj_map), GFP_KERNEL); struct probe *base = kzalloc(sizeof(*base), GFP_KERNEL); int i; if ((p == NULL) || (base == NULL)) { kfree(p); kfree(base); return NULL; } base->dev = 1; base->range = ~0; base->get = base_probe; for (i = 0; i < 255; i++) p->probes[i] = base; p->lock = lock; return p; }
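/*
 * Illustrative sketch only: both helpers are hypothetical and not part of
 * map.c.  They show typical use of the API above: register a probe covering a
 * dev_t range, then let kobj_lookup() hash on MAJOR(dev) % 255 and return the
 * kobject of the best-matching (narrowest) range.
 */
static struct kobject *example_exact_probe(dev_t dev, int *part, void *data)
{
	/* 'data' is whatever was passed to kobj_map(); here, the kobject itself. */
	return data;
}

static int example_register_range(struct kobj_map *domain, dev_t first,
				  unsigned long count, struct kobject *kobj)
{
	return kobj_map(domain, first, count, THIS_MODULE,
			example_exact_probe, NULL, kobj);
}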
45 45 33 1 1 2 1 1 1306 1255 48 18 1 2 12 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 // SPDX-License-Identifier: GPL-2.0-or-later /* * G8BPQ compatible "AX.25 via ethernet" driver release 004 * * This code REQUIRES 2.0.0 or higher/ NET3.029 * * This is a "pseudo" network driver to allow AX.25 over Ethernet * using G8BPQ encapsulation. It has been extracted from the protocol * implementation because * * - things got unreadable within the protocol stack * - to cure the protocol stack from "feature-ism" * - a protocol implementation shouldn't need to know on * which hardware it is running * - user-level programs like the AX.25 utilities shouldn't * need to know about the hardware. * - IP over ethernet encapsulated AX.25 was impossible * - rxecho.c did not work * - to have room for extensions * - it just deserves to "live" as an own driver * * This driver can use any ethernet destination address, and can be * limited to accept frames from one dedicated ethernet card only. * * Note that the driver sets up the BPQ devices automagically on * startup or (if started before the "insmod" of an ethernet device) * on "ifconfig up". 
It hopefully will remove the BPQ on "rmmod"ing * the ethernet device (in fact: as soon as another ethernet or bpq * device gets "ifconfig"ured). * * I have heard that several people are thinking of experiments * with highspeed packet radio using existing ethernet cards. * Well, this driver is prepared for this purpose, just add * your tx key control and a txdelay / tailtime algorithm, * probably some buffering, and /voila/... * * History * BPQ 001 Joerg(DL1BKE) Extracted BPQ code from AX.25 * protocol stack and added my own * yet existing patches * BPQ 002 Joerg(DL1BKE) Scan network device list on * startup. * BPQ 003 Joerg(DL1BKE) Ethernet destination address * and accepted source address * can be configured by an ioctl() * call. * Fixed to match Linux networking * changes - 2.1.15. * BPQ 004 Joerg(DL1BKE) Fixed to not lock up on ifconfig. */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/net.h> #include <linux/slab.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <net/sock.h> #include <linux/uaccess.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/stat.h> #include <linux/module.h> #include <linux/init.h> #include <linux/rtnetlink.h> #include <net/ip.h> #include <net/arp.h> #include <net/net_namespace.h> #include <linux/bpqether.h> static const char banner[] __initconst = KERN_INFO \ "AX.25: bpqether driver version 004\n"; static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); static int bpq_device_event(struct notifier_block *, unsigned long, void *); static struct packet_type bpq_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_BPQ), .func = bpq_rcv, }; static struct notifier_block bpq_dev_notifier = { .notifier_call = bpq_device_event, }; struct bpqdev { struct list_head bpq_list; /* list of bpq devices chain */ struct net_device *ethdev; /* link to ethernet device */ struct net_device *axdev; /* bpq device (bpq#) */ char dest_addr[6]; /* ether destination address */ char acpt_addr[6]; /* accept ether frames from this address only */ }; static LIST_HEAD(bpq_devices); /* * bpqether network devices are paired with ethernet devices below them, so * form a special "super class" of normal ethernet devices; split their locks * off into a separate class since they always nest. */ static struct lock_class_key bpq_netdev_xmit_lock_key; static struct lock_class_key bpq_netdev_addr_lock_key; static void bpq_set_lockdep_class_one(struct net_device *dev, struct netdev_queue *txq, void *_unused) { lockdep_set_class(&txq->_xmit_lock, &bpq_netdev_xmit_lock_key); } static void bpq_set_lockdep_class(struct net_device *dev) { lockdep_set_class(&dev->addr_list_lock, &bpq_netdev_addr_lock_key); netdev_for_each_tx_queue(dev, bpq_set_lockdep_class_one, NULL); } /* ------------------------------------------------------------------------ */ /* * Get the ethernet device for a BPQ device */ static inline struct net_device *bpq_get_ether_dev(struct net_device *dev) { struct bpqdev *bpq = netdev_priv(dev); return bpq ? 
bpq->ethdev : NULL; } /* * Get the BPQ device for the ethernet device */ static inline struct net_device *bpq_get_ax25_dev(struct net_device *dev) { struct bpqdev *bpq; list_for_each_entry_rcu(bpq, &bpq_devices, bpq_list, lockdep_rtnl_is_held()) { if (bpq->ethdev == dev) return bpq->axdev; } return NULL; } static inline int dev_is_ethdev(struct net_device *dev) { return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5); } /* ------------------------------------------------------------------------ */ /* * Receive an AX.25 frame via an ethernet interface. */ static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev) { int len; char * ptr; struct ethhdr *eth; struct bpqdev *bpq; if (!net_eq(dev_net(dev), &init_net)) goto drop; if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) return NET_RX_DROP; if (!pskb_may_pull(skb, sizeof(struct ethhdr))) goto drop; rcu_read_lock(); dev = bpq_get_ax25_dev(dev); if (dev == NULL || !netif_running(dev)) goto drop_unlock; /* * if we want to accept frames from just one ethernet device * we check the source address of the sender. */ bpq = netdev_priv(dev); eth = eth_hdr(skb); if (!(bpq->acpt_addr[0] & 0x01) && !ether_addr_equal(eth->h_source, bpq->acpt_addr)) goto drop_unlock; if (skb_cow(skb, sizeof(struct ethhdr))) goto drop_unlock; len = skb->data[0] + skb->data[1] * 256 - 5; skb_pull(skb, 2); /* Remove the length bytes */ skb_trim(skb, len); /* Set the length of the data */ dev->stats.rx_packets++; dev->stats.rx_bytes += len; ptr = skb_push(skb, 1); *ptr = 0; skb->protocol = ax25_type_trans(skb, dev); netif_rx(skb); unlock: rcu_read_unlock(); return 0; drop_unlock: kfree_skb(skb); goto unlock; drop: kfree_skb(skb); return 0; } /* * Send an AX.25 frame via an ethernet interface */ static netdev_tx_t bpq_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned char *ptr; struct bpqdev *bpq; struct net_device *orig_dev; int size; if (skb->protocol == htons(ETH_P_IP)) return ax25_ip_xmit(skb); /* * Just to be *really* sure not to send anything if the interface * is down, the ethernet device may have gone. */ if (!netif_running(dev)) { kfree_skb(skb); return NETDEV_TX_OK; } skb_pull(skb, 1); /* Drop KISS byte */ size = skb->len; /* * We're about to mess with the skb which may still shared with the * generic networking code so unshare and ensure it's got enough * space for the BPQ headers. 
*/ if (skb_cow(skb, AX25_BPQ_HEADER_LEN)) { if (net_ratelimit()) pr_err("bpqether: out of memory\n"); kfree_skb(skb); return NETDEV_TX_OK; } ptr = skb_push(skb, 2); /* Make space for length */ *ptr++ = (size + 5) % 256; *ptr++ = (size + 5) / 256; bpq = netdev_priv(dev); orig_dev = dev; if ((dev = bpq_get_ether_dev(dev)) == NULL) { orig_dev->stats.tx_dropped++; kfree_skb(skb); return NETDEV_TX_OK; } skb->protocol = ax25_type_trans(skb, dev); skb_reset_network_header(skb); dev_hard_header(skb, dev, ETH_P_BPQ, bpq->dest_addr, NULL, 0); dev->stats.tx_packets++; dev->stats.tx_bytes+=skb->len; dev_queue_xmit(skb); netif_wake_queue(dev); return NETDEV_TX_OK; } /* * Set AX.25 callsign */ static int bpq_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *sa = (struct sockaddr *)addr; dev_addr_set(dev, sa->sa_data); return 0; } /* Ioctl commands * * SIOCSBPQETHOPT reserved for enhancements * SIOCSBPQETHADDR set the destination and accepted * source ethernet address (broadcast * or multicast: accept all) */ static int bpq_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd) { struct bpq_ethaddr __user *ethaddr = data; struct bpqdev *bpq = netdev_priv(dev); struct bpq_req req; if (!capable(CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case SIOCSBPQETHOPT: if (copy_from_user(&req, data, sizeof(struct bpq_req))) return -EFAULT; switch (req.cmd) { case SIOCGBPQETHPARAM: case SIOCSBPQETHPARAM: default: return -EINVAL; } break; case SIOCSBPQETHADDR: if (copy_from_user(bpq->dest_addr, ethaddr->destination, ETH_ALEN)) return -EFAULT; if (copy_from_user(bpq->acpt_addr, ethaddr->accept, ETH_ALEN)) return -EFAULT; break; default: return -EINVAL; } return 0; } /* * open/close a device */ static int bpq_open(struct net_device *dev) { netif_start_queue(dev); return 0; } static int bpq_close(struct net_device *dev) { netif_stop_queue(dev); return 0; } /* ------------------------------------------------------------------------ */ #ifdef CONFIG_PROC_FS /* * Proc filesystem */ static void *bpq_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { int i = 1; struct bpqdev *bpqdev; rcu_read_lock(); if (*pos == 0) return SEQ_START_TOKEN; list_for_each_entry_rcu(bpqdev, &bpq_devices, bpq_list) { if (i == *pos) return bpqdev; } return NULL; } static void *bpq_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct list_head *p; struct bpqdev *bpqdev = v; ++*pos; if (v == SEQ_START_TOKEN) p = rcu_dereference(list_next_rcu(&bpq_devices)); else p = rcu_dereference(list_next_rcu(&bpqdev->bpq_list)); return (p == &bpq_devices) ? 
NULL : list_entry(p, struct bpqdev, bpq_list); } static void bpq_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { rcu_read_unlock(); } static int bpq_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_puts(seq, "dev ether destination accept from\n"); else { const struct bpqdev *bpqdev = v; seq_printf(seq, "%-5s %-10s %pM ", bpqdev->axdev->name, bpqdev->ethdev->name, bpqdev->dest_addr); if (is_multicast_ether_addr(bpqdev->acpt_addr)) seq_printf(seq, "*\n"); else seq_printf(seq, "%pM\n", bpqdev->acpt_addr); } return 0; } static const struct seq_operations bpq_seqops = { .start = bpq_seq_start, .next = bpq_seq_next, .stop = bpq_seq_stop, .show = bpq_seq_show, }; #endif /* ------------------------------------------------------------------------ */ static const struct net_device_ops bpq_netdev_ops = { .ndo_open = bpq_open, .ndo_stop = bpq_close, .ndo_start_xmit = bpq_xmit, .ndo_set_mac_address = bpq_set_mac_address, .ndo_siocdevprivate = bpq_siocdevprivate, }; static void bpq_setup(struct net_device *dev) { dev->netdev_ops = &bpq_netdev_ops; dev->needs_free_netdev = true; dev->flags = 0; dev->features = NETIF_F_LLTX; /* Allow recursion */ #if IS_ENABLED(CONFIG_AX25) dev->header_ops = &ax25_header_ops; #endif dev->type = ARPHRD_AX25; dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN; dev->mtu = AX25_DEF_PACLEN; dev->addr_len = AX25_ADDR_LEN; memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN); dev_addr_set(dev, (u8 *)&ax25_defaddr); } /* * Setup a new device. */ static int bpq_new_device(struct net_device *edev) { int err; struct net_device *ndev; struct bpqdev *bpq; ndev = alloc_netdev(sizeof(struct bpqdev), "bpq%d", NET_NAME_UNKNOWN, bpq_setup); if (!ndev) return -ENOMEM; bpq = netdev_priv(ndev); dev_hold(edev); bpq->ethdev = edev; bpq->axdev = ndev; eth_broadcast_addr(bpq->dest_addr); eth_broadcast_addr(bpq->acpt_addr); err = register_netdevice(ndev); if (err) goto error; bpq_set_lockdep_class(ndev); /* List protected by RTNL */ list_add_rcu(&bpq->bpq_list, &bpq_devices); return 0; error: dev_put(edev); free_netdev(ndev); return err; } static void bpq_free_device(struct net_device *ndev) { struct bpqdev *bpq = netdev_priv(ndev); dev_put(bpq->ethdev); list_del_rcu(&bpq->bpq_list); unregister_netdevice(ndev); } /* * Handle device status changes. */ static int bpq_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; if (!dev_is_ethdev(dev) && !bpq_get_ax25_dev(dev)) return NOTIFY_DONE; switch (event) { case NETDEV_UP: /* new ethernet device -> new BPQ interface */ if (bpq_get_ax25_dev(dev) == NULL) bpq_new_device(dev); break; case NETDEV_DOWN: /* ethernet device closed -> close BPQ interface */ if ((dev = bpq_get_ax25_dev(dev)) != NULL) dev_close(dev); break; case NETDEV_UNREGISTER: /* ethernet device removed -> free BPQ interface */ if ((dev = bpq_get_ax25_dev(dev)) != NULL) bpq_free_device(dev); break; default: break; } return NOTIFY_DONE; } /* ------------------------------------------------------------------------ */ /* * Initialize driver. 
To be called from af_ax25 if not compiled as a * module */ static int __init bpq_init_driver(void) { #ifdef CONFIG_PROC_FS if (!proc_create_seq("bpqether", 0444, init_net.proc_net, &bpq_seqops)) { printk(KERN_ERR "bpq: cannot create /proc/net/bpqether entry.\n"); return -ENOENT; } #endif /* CONFIG_PROC_FS */ dev_add_pack(&bpq_packet_type); register_netdevice_notifier(&bpq_dev_notifier); printk(banner); return 0; } static void __exit bpq_cleanup_driver(void) { struct bpqdev *bpq; dev_remove_pack(&bpq_packet_type); unregister_netdevice_notifier(&bpq_dev_notifier); remove_proc_entry("bpqether", init_net.proc_net); rtnl_lock(); while (!list_empty(&bpq_devices)) { bpq = list_entry(bpq_devices.next, struct bpqdev, bpq_list); bpq_free_device(bpq->axdev); } rtnl_unlock(); } MODULE_AUTHOR("Joerg Reuter DL1BKE <jreuter@yaina.de>"); MODULE_DESCRIPTION("Transmit and receive AX.25 packets over Ethernet"); MODULE_LICENSE("GPL"); module_init(bpq_init_driver); module_exit(bpq_cleanup_driver);
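/*
 * Illustrative sketch only: the helpers are hypothetical and not part of
 * bpqether.c.  They spell out the BPQ length prefix used by bpq_xmit() and
 * bpq_rcv() above: the KISS byte is dropped, then a little-endian 16-bit
 * value of "AX.25 length + 5" is prepended; reception reverses the sum.
 */
static void example_bpq_put_len(unsigned char *hdr, int ax25_len)
{
	hdr[0] = (ax25_len + 5) % 256;	/* low byte first */
	hdr[1] = (ax25_len + 5) / 256;	/* high byte second */
}

static int example_bpq_get_len(const unsigned char *hdr)
{
	/* Matches bpq_rcv(): payload length = encoded value - 5. */
	return hdr[0] + hdr[1] * 256 - 5;
}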
3 3 2 3 2 2 2 3 3 3 3 3 2 2 2 1 1 1 3 1 2 1 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 /* * Copyright (c) 2016 Intel Corporation * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that copyright * notice and this permission notice appear in supporting documentation, and * that the name of the copyright holders not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. The copyright holders make no representations * about the suitability of this software for any purpose. It is provided "as * is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. */ #include <linux/export.h> #include <drm/drm_bridge.h> #include <drm/drm_device.h> #include <drm/drm_drv.h> #include <drm/drm_encoder.h> #include <drm/drm_managed.h> #include <drm/drm_print.h> #include "drm_crtc_internal.h" #include "drm_internal.h" /** * DOC: overview * * Encoders represent the connecting element between the CRTC (as the overall * pixel pipeline, represented by &struct drm_crtc) and the connectors (as the * generic sink entity, represented by &struct drm_connector). An encoder takes * pixel data from a CRTC and converts it to a format suitable for any attached * connector. Encoders are objects exposed to userspace, originally to allow * userspace to infer cloning and connector/CRTC restrictions. Unfortunately * almost all drivers get this wrong, making the uabi pretty much useless. 
On * top of that the exposed restrictions are too simple for today's hardware, and * the recommended way to infer restrictions is by using the * DRM_MODE_ATOMIC_TEST_ONLY flag for the atomic IOCTL. * * Otherwise encoders aren't used in the uapi at all (any modeset request from * userspace directly connects a connector with a CRTC), drivers are therefore * free to use them however they wish. Modeset helper libraries make strong use * of encoders to facilitate code sharing. But for more complex settings it is * usually better to move shared code into a separate &drm_bridge. Compared to * encoders, bridges also have the benefit of being purely an internal * abstraction since they are not exposed to userspace at all. * * Encoders are initialized with drm_encoder_init() and cleaned up using * drm_encoder_cleanup(). */ static const struct drm_prop_enum_list drm_encoder_enum_list[] = { { DRM_MODE_ENCODER_NONE, "None" }, { DRM_MODE_ENCODER_DAC, "DAC" }, { DRM_MODE_ENCODER_TMDS, "TMDS" }, { DRM_MODE_ENCODER_LVDS, "LVDS" }, { DRM_MODE_ENCODER_TVDAC, "TV" }, { DRM_MODE_ENCODER_VIRTUAL, "Virtual" }, { DRM_MODE_ENCODER_DSI, "DSI" }, { DRM_MODE_ENCODER_DPMST, "DP MST" }, { DRM_MODE_ENCODER_DPI, "DPI" }, }; int drm_encoder_register_all(struct drm_device *dev) { struct drm_encoder *encoder; int ret = 0; drm_for_each_encoder(encoder, dev) { drm_debugfs_encoder_add(encoder); if (encoder->funcs && encoder->funcs->late_register) ret = encoder->funcs->late_register(encoder); if (ret) return ret; } return 0; } void drm_encoder_unregister_all(struct drm_device *dev) { struct drm_encoder *encoder; drm_for_each_encoder(encoder, dev) { if (encoder->funcs && encoder->funcs->early_unregister) encoder->funcs->early_unregister(encoder); drm_debugfs_encoder_remove(encoder); } } __printf(5, 0) static int __drm_encoder_init(struct drm_device *dev, struct drm_encoder *encoder, const struct drm_encoder_funcs *funcs, int encoder_type, const char *name, va_list ap) { int ret; /* encoder index is used with 32bit bitmasks */ if (WARN_ON(dev->mode_config.num_encoder >= 32)) return -EINVAL; ret = drm_mode_object_add(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER); if (ret) return ret; encoder->dev = dev; encoder->encoder_type = encoder_type; encoder->funcs = funcs; if (name) { encoder->name = kvasprintf(GFP_KERNEL, name, ap); } else { encoder->name = kasprintf(GFP_KERNEL, "%s-%d", drm_encoder_enum_list[encoder_type].name, encoder->base.id); } if (!encoder->name) { ret = -ENOMEM; goto out_put; } INIT_LIST_HEAD(&encoder->bridge_chain); list_add_tail(&encoder->head, &dev->mode_config.encoder_list); encoder->index = dev->mode_config.num_encoder++; out_put: if (ret) drm_mode_object_unregister(dev, &encoder->base); return ret; } /** * drm_encoder_init - Init a preallocated encoder * @dev: drm device * @encoder: the encoder to init * @funcs: callbacks for this encoder * @encoder_type: user visible type of the encoder * @name: printf style format string for the encoder name, or NULL for default name * * Initializes a preallocated encoder. Encoder should be subclassed as part of * driver encoder objects. At driver unload time the driver's * &drm_encoder_funcs.destroy hook should call drm_encoder_cleanup() and kfree() * the encoder structure. The encoder structure should not be allocated with * devm_kzalloc(). * * Note: consider using drmm_encoder_alloc() or drmm_encoder_init() * instead of drm_encoder_init() to let the DRM managed resource * infrastructure take care of cleanup and deallocation. 
* * Returns: * Zero on success, error code on failure. */ int drm_encoder_init(struct drm_device *dev, struct drm_encoder *encoder, const struct drm_encoder_funcs *funcs, int encoder_type, const char *name, ...) { va_list ap; int ret; WARN_ON(!funcs->destroy); va_start(ap, name); ret = __drm_encoder_init(dev, encoder, funcs, encoder_type, name, ap); va_end(ap); return ret; } EXPORT_SYMBOL(drm_encoder_init); /** * drm_encoder_cleanup - cleans up an initialised encoder * @encoder: encoder to cleanup * * Cleans up the encoder but doesn't free the object. */ void drm_encoder_cleanup(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_bridge *bridge, *next; /* Note that the encoder_list is considered to be static; should we * remove the drm_encoder at runtime we would have to decrement all * the indices on the drm_encoder after us in the encoder_list. */ list_for_each_entry_safe(bridge, next, &encoder->bridge_chain, chain_node) drm_bridge_detach(bridge); drm_mode_object_unregister(dev, &encoder->base); kfree(encoder->name); list_del(&encoder->head); dev->mode_config.num_encoder--; memset(encoder, 0, sizeof(*encoder)); } EXPORT_SYMBOL(drm_encoder_cleanup); static void drmm_encoder_alloc_release(struct drm_device *dev, void *ptr) { struct drm_encoder *encoder = ptr; if (WARN_ON(!encoder->dev)) return; drm_encoder_cleanup(encoder); } __printf(5, 0) static int __drmm_encoder_init(struct drm_device *dev, struct drm_encoder *encoder, const struct drm_encoder_funcs *funcs, int encoder_type, const char *name, va_list args) { int ret; if (drm_WARN_ON(dev, funcs && funcs->destroy)) return -EINVAL; ret = __drm_encoder_init(dev, encoder, funcs, encoder_type, name, args); if (ret) return ret; ret = drmm_add_action_or_reset(dev, drmm_encoder_alloc_release, encoder); if (ret) return ret; return 0; } void *__drmm_encoder_alloc(struct drm_device *dev, size_t size, size_t offset, const struct drm_encoder_funcs *funcs, int encoder_type, const char *name, ...) { void *container; struct drm_encoder *encoder; va_list ap; int ret; container = drmm_kzalloc(dev, size, GFP_KERNEL); if (!container) return ERR_PTR(-ENOMEM); encoder = container + offset; va_start(ap, name); ret = __drmm_encoder_init(dev, encoder, funcs, encoder_type, name, ap); va_end(ap); if (ret) return ERR_PTR(ret); return container; } EXPORT_SYMBOL(__drmm_encoder_alloc); /** * drmm_encoder_init - Initialize a preallocated encoder * @dev: drm device * @encoder: the encoder to init * @funcs: callbacks for this encoder (optional) * @encoder_type: user visible type of the encoder * @name: printf style format string for the encoder name, or NULL for default name * * Initializes a preallocated encoder. Encoder should be subclassed as * part of driver encoder objects. Cleanup is automatically handled * through registering drm_encoder_cleanup() with drmm_add_action(). The * encoder structure should be allocated with drmm_kzalloc(). * * The @drm_encoder_funcs.destroy hook must be NULL. * * Returns: * Zero on success, error code on failure. */ int drmm_encoder_init(struct drm_device *dev, struct drm_encoder *encoder, const struct drm_encoder_funcs *funcs, int encoder_type, const char *name, ...) 
{ va_list ap; int ret; va_start(ap, name); ret = __drmm_encoder_init(dev, encoder, funcs, encoder_type, name, ap); va_end(ap); if (ret) return ret; return 0; } EXPORT_SYMBOL(drmm_encoder_init); static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder) { struct drm_connector *connector; struct drm_device *dev = encoder->dev; bool uses_atomic = false; struct drm_connector_list_iter conn_iter; /* For atomic drivers only state objects are synchronously updated and * protected by modeset locks, so check those first. */ drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { if (!connector->state) continue; uses_atomic = true; if (connector->state->best_encoder != encoder) continue; drm_connector_list_iter_end(&conn_iter); return connector->state->crtc; } drm_connector_list_iter_end(&conn_iter); /* Don't return stale data (e.g. pending async disable). */ if (uses_atomic) return NULL; return encoder->crtc; } int drm_mode_getencoder(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_get_encoder *enc_resp = data; struct drm_encoder *encoder; struct drm_crtc *crtc; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; encoder = drm_encoder_find(dev, file_priv, enc_resp->encoder_id); if (!encoder) return -ENOENT; drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); crtc = drm_encoder_get_crtc(encoder); if (crtc && drm_lease_held(file_priv, crtc->base.id)) enc_resp->crtc_id = crtc->base.id; else enc_resp->crtc_id = 0; drm_modeset_unlock(&dev->mode_config.connection_mutex); enc_resp->encoder_type = encoder->encoder_type; enc_resp->encoder_id = encoder->base.id; enc_resp->possible_crtcs = drm_lease_filter_crtcs(file_priv, encoder->possible_crtcs); enc_resp->possible_clones = encoder->possible_clones; return 0; }
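/*
 * Illustrative sketch only: "example_foo_output" is a hypothetical driver
 * structure, not part of drm_encoder.c.  It shows the managed-init pattern
 * recommended in the kerneldoc above: embed the encoder, call
 * drmm_encoder_init() with a NULL (or destroy-less) funcs table, and let the
 * DRM managed-resource infrastructure handle cleanup.
 */
struct example_foo_output {
	struct drm_encoder encoder;
};

static int example_foo_output_init(struct drm_device *dev,
				   struct example_foo_output *out)
{
	return drmm_encoder_init(dev, &out->encoder, NULL,
				 DRM_MODE_ENCODER_TMDS, "foo-tmds");
}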
4 4 3 2 2 5 5 800 801 8 12 10 5 4 4 4 4 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 // SPDX-License-Identifier: GPL-2.0-only #include <linux/ethtool_netlink.h> #include <linux/net_tstamp.h> #include <linux/phy.h> #include <linux/rtnetlink.h> #include <linux/ptp_clock_kernel.h> #include "common.h" const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = { [NETIF_F_SG_BIT] = "tx-scatter-gather", [NETIF_F_IP_CSUM_BIT] = "tx-checksum-ipv4", [NETIF_F_HW_CSUM_BIT] = "tx-checksum-ip-generic", [NETIF_F_IPV6_CSUM_BIT] = "tx-checksum-ipv6", [NETIF_F_HIGHDMA_BIT] = "highdma", [NETIF_F_FRAGLIST_BIT] = "tx-scatter-gather-fraglist", [NETIF_F_HW_VLAN_CTAG_TX_BIT] = "tx-vlan-hw-insert", [NETIF_F_HW_VLAN_CTAG_RX_BIT] = "rx-vlan-hw-parse", [NETIF_F_HW_VLAN_CTAG_FILTER_BIT] = "rx-vlan-filter", [NETIF_F_HW_VLAN_STAG_TX_BIT] = 
"tx-vlan-stag-hw-insert", [NETIF_F_HW_VLAN_STAG_RX_BIT] = "rx-vlan-stag-hw-parse", [NETIF_F_HW_VLAN_STAG_FILTER_BIT] = "rx-vlan-stag-filter", [NETIF_F_VLAN_CHALLENGED_BIT] = "vlan-challenged", [NETIF_F_GSO_BIT] = "tx-generic-segmentation", [NETIF_F_LLTX_BIT] = "tx-lockless", [NETIF_F_NETNS_LOCAL_BIT] = "netns-local", [NETIF_F_GRO_BIT] = "rx-gro", [NETIF_F_GRO_HW_BIT] = "rx-gro-hw", [NETIF_F_LRO_BIT] = "rx-lro", [NETIF_F_TSO_BIT] = "tx-tcp-segmentation", [NETIF_F_GSO_ROBUST_BIT] = "tx-gso-robust", [NETIF_F_TSO_ECN_BIT] = "tx-tcp-ecn-segmentation", [NETIF_F_TSO_MANGLEID_BIT] = "tx-tcp-mangleid-segmentation", [NETIF_F_TSO6_BIT] = "tx-tcp6-segmentation", [NETIF_F_FSO_BIT] = "tx-fcoe-segmentation", [NETIF_F_GSO_GRE_BIT] = "tx-gre-segmentation", [NETIF_F_GSO_GRE_CSUM_BIT] = "tx-gre-csum-segmentation", [NETIF_F_GSO_IPXIP4_BIT] = "tx-ipxip4-segmentation", [NETIF_F_GSO_IPXIP6_BIT] = "tx-ipxip6-segmentation", [NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation", [NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT] = "tx-udp_tnl-csum-segmentation", [NETIF_F_GSO_PARTIAL_BIT] = "tx-gso-partial", [NETIF_F_GSO_TUNNEL_REMCSUM_BIT] = "tx-tunnel-remcsum-segmentation", [NETIF_F_GSO_SCTP_BIT] = "tx-sctp-segmentation", [NETIF_F_GSO_ESP_BIT] = "tx-esp-segmentation", [NETIF_F_GSO_UDP_L4_BIT] = "tx-udp-segmentation", [NETIF_F_GSO_FRAGLIST_BIT] = "tx-gso-list", [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc", [NETIF_F_SCTP_CRC_BIT] = "tx-checksum-sctp", [NETIF_F_FCOE_MTU_BIT] = "fcoe-mtu", [NETIF_F_NTUPLE_BIT] = "rx-ntuple-filter", [NETIF_F_RXHASH_BIT] = "rx-hashing", [NETIF_F_RXCSUM_BIT] = "rx-checksum", [NETIF_F_NOCACHE_COPY_BIT] = "tx-nocache-copy", [NETIF_F_LOOPBACK_BIT] = "loopback", [NETIF_F_RXFCS_BIT] = "rx-fcs", [NETIF_F_RXALL_BIT] = "rx-all", [NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload", [NETIF_F_HW_TC_BIT] = "hw-tc-offload", [NETIF_F_HW_ESP_BIT] = "esp-hw-offload", [NETIF_F_HW_ESP_TX_CSUM_BIT] = "esp-tx-csum-hw-offload", [NETIF_F_RX_UDP_TUNNEL_PORT_BIT] = "rx-udp_tunnel-port-offload", [NETIF_F_HW_TLS_RECORD_BIT] = "tls-hw-record", [NETIF_F_HW_TLS_TX_BIT] = "tls-hw-tx-offload", [NETIF_F_HW_TLS_RX_BIT] = "tls-hw-rx-offload", [NETIF_F_GRO_FRAGLIST_BIT] = "rx-gro-list", [NETIF_F_HW_MACSEC_BIT] = "macsec-hw-offload", [NETIF_F_GRO_UDP_FWD_BIT] = "rx-udp-gro-forwarding", [NETIF_F_HW_HSR_TAG_INS_BIT] = "hsr-tag-ins-offload", [NETIF_F_HW_HSR_TAG_RM_BIT] = "hsr-tag-rm-offload", [NETIF_F_HW_HSR_FWD_BIT] = "hsr-fwd-offload", [NETIF_F_HW_HSR_DUP_BIT] = "hsr-dup-offload", }; const char rss_hash_func_strings[ETH_RSS_HASH_FUNCS_COUNT][ETH_GSTRING_LEN] = { [ETH_RSS_HASH_TOP_BIT] = "toeplitz", [ETH_RSS_HASH_XOR_BIT] = "xor", [ETH_RSS_HASH_CRC32_BIT] = "crc32", }; const char tunable_strings[__ETHTOOL_TUNABLE_COUNT][ETH_GSTRING_LEN] = { [ETHTOOL_ID_UNSPEC] = "Unspec", [ETHTOOL_RX_COPYBREAK] = "rx-copybreak", [ETHTOOL_TX_COPYBREAK] = "tx-copybreak", [ETHTOOL_PFC_PREVENTION_TOUT] = "pfc-prevention-tout", [ETHTOOL_TX_COPYBREAK_BUF_SIZE] = "tx-copybreak-buf-size", }; const char phy_tunable_strings[__ETHTOOL_PHY_TUNABLE_COUNT][ETH_GSTRING_LEN] = { [ETHTOOL_ID_UNSPEC] = "Unspec", [ETHTOOL_PHY_DOWNSHIFT] = "phy-downshift", [ETHTOOL_PHY_FAST_LINK_DOWN] = "phy-fast-link-down", [ETHTOOL_PHY_EDPD] = "phy-energy-detect-power-down", }; #define __LINK_MODE_NAME(speed, type, duplex) \ #speed "base" #type "/" #duplex #define __DEFINE_LINK_MODE_NAME(speed, type, duplex) \ [ETHTOOL_LINK_MODE(speed, type, duplex)] = \ __LINK_MODE_NAME(speed, type, duplex) #define __DEFINE_SPECIAL_MODE_NAME(_mode, _name) \ [ETHTOOL_LINK_MODE_ ## _mode ## _BIT] = _name 
const char link_mode_names[][ETH_GSTRING_LEN] = { __DEFINE_LINK_MODE_NAME(10, T, Half), __DEFINE_LINK_MODE_NAME(10, T, Full), __DEFINE_LINK_MODE_NAME(100, T, Half), __DEFINE_LINK_MODE_NAME(100, T, Full), __DEFINE_LINK_MODE_NAME(1000, T, Half), __DEFINE_LINK_MODE_NAME(1000, T, Full), __DEFINE_SPECIAL_MODE_NAME(Autoneg, "Autoneg"), __DEFINE_SPECIAL_MODE_NAME(TP, "TP"), __DEFINE_SPECIAL_MODE_NAME(AUI, "AUI"), __DEFINE_SPECIAL_MODE_NAME(MII, "MII"), __DEFINE_SPECIAL_MODE_NAME(FIBRE, "FIBRE"), __DEFINE_SPECIAL_MODE_NAME(BNC, "BNC"), __DEFINE_LINK_MODE_NAME(10000, T, Full), __DEFINE_SPECIAL_MODE_NAME(Pause, "Pause"), __DEFINE_SPECIAL_MODE_NAME(Asym_Pause, "Asym_Pause"), __DEFINE_LINK_MODE_NAME(2500, X, Full), __DEFINE_SPECIAL_MODE_NAME(Backplane, "Backplane"), __DEFINE_LINK_MODE_NAME(1000, KX, Full), __DEFINE_LINK_MODE_NAME(10000, KX4, Full), __DEFINE_LINK_MODE_NAME(10000, KR, Full), __DEFINE_SPECIAL_MODE_NAME(10000baseR_FEC, "10000baseR_FEC"), __DEFINE_LINK_MODE_NAME(20000, MLD2, Full), __DEFINE_LINK_MODE_NAME(20000, KR2, Full), __DEFINE_LINK_MODE_NAME(40000, KR4, Full), __DEFINE_LINK_MODE_NAME(40000, CR4, Full), __DEFINE_LINK_MODE_NAME(40000, SR4, Full), __DEFINE_LINK_MODE_NAME(40000, LR4, Full), __DEFINE_LINK_MODE_NAME(56000, KR4, Full), __DEFINE_LINK_MODE_NAME(56000, CR4, Full), __DEFINE_LINK_MODE_NAME(56000, SR4, Full), __DEFINE_LINK_MODE_NAME(56000, LR4, Full), __DEFINE_LINK_MODE_NAME(25000, CR, Full), __DEFINE_LINK_MODE_NAME(25000, KR, Full), __DEFINE_LINK_MODE_NAME(25000, SR, Full), __DEFINE_LINK_MODE_NAME(50000, CR2, Full), __DEFINE_LINK_MODE_NAME(50000, KR2, Full), __DEFINE_LINK_MODE_NAME(100000, KR4, Full), __DEFINE_LINK_MODE_NAME(100000, SR4, Full), __DEFINE_LINK_MODE_NAME(100000, CR4, Full), __DEFINE_LINK_MODE_NAME(100000, LR4_ER4, Full), __DEFINE_LINK_MODE_NAME(50000, SR2, Full), __DEFINE_LINK_MODE_NAME(1000, X, Full), __DEFINE_LINK_MODE_NAME(10000, CR, Full), __DEFINE_LINK_MODE_NAME(10000, SR, Full), __DEFINE_LINK_MODE_NAME(10000, LR, Full), __DEFINE_LINK_MODE_NAME(10000, LRM, Full), __DEFINE_LINK_MODE_NAME(10000, ER, Full), __DEFINE_LINK_MODE_NAME(2500, T, Full), __DEFINE_LINK_MODE_NAME(5000, T, Full), __DEFINE_SPECIAL_MODE_NAME(FEC_NONE, "None"), __DEFINE_SPECIAL_MODE_NAME(FEC_RS, "RS"), __DEFINE_SPECIAL_MODE_NAME(FEC_BASER, "BASER"), __DEFINE_LINK_MODE_NAME(50000, KR, Full), __DEFINE_LINK_MODE_NAME(50000, SR, Full), __DEFINE_LINK_MODE_NAME(50000, CR, Full), __DEFINE_LINK_MODE_NAME(50000, LR_ER_FR, Full), __DEFINE_LINK_MODE_NAME(50000, DR, Full), __DEFINE_LINK_MODE_NAME(100000, KR2, Full), __DEFINE_LINK_MODE_NAME(100000, SR2, Full), __DEFINE_LINK_MODE_NAME(100000, CR2, Full), __DEFINE_LINK_MODE_NAME(100000, LR2_ER2_FR2, Full), __DEFINE_LINK_MODE_NAME(100000, DR2, Full), __DEFINE_LINK_MODE_NAME(200000, KR4, Full), __DEFINE_LINK_MODE_NAME(200000, SR4, Full), __DEFINE_LINK_MODE_NAME(200000, LR4_ER4_FR4, Full), __DEFINE_LINK_MODE_NAME(200000, DR4, Full), __DEFINE_LINK_MODE_NAME(200000, CR4, Full), __DEFINE_LINK_MODE_NAME(100, T1, Full), __DEFINE_LINK_MODE_NAME(1000, T1, Full), __DEFINE_LINK_MODE_NAME(400000, KR8, Full), __DEFINE_LINK_MODE_NAME(400000, SR8, Full), __DEFINE_LINK_MODE_NAME(400000, LR8_ER8_FR8, Full), __DEFINE_LINK_MODE_NAME(400000, DR8, Full), __DEFINE_LINK_MODE_NAME(400000, CR8, Full), __DEFINE_SPECIAL_MODE_NAME(FEC_LLRS, "LLRS"), __DEFINE_LINK_MODE_NAME(100000, KR, Full), __DEFINE_LINK_MODE_NAME(100000, SR, Full), __DEFINE_LINK_MODE_NAME(100000, LR_ER_FR, Full), __DEFINE_LINK_MODE_NAME(100000, DR, Full), __DEFINE_LINK_MODE_NAME(100000, CR, Full), 
__DEFINE_LINK_MODE_NAME(200000, KR2, Full), __DEFINE_LINK_MODE_NAME(200000, SR2, Full), __DEFINE_LINK_MODE_NAME(200000, LR2_ER2_FR2, Full), __DEFINE_LINK_MODE_NAME(200000, DR2, Full), __DEFINE_LINK_MODE_NAME(200000, CR2, Full), __DEFINE_LINK_MODE_NAME(400000, KR4, Full), __DEFINE_LINK_MODE_NAME(400000, SR4, Full), __DEFINE_LINK_MODE_NAME(400000, LR4_ER4_FR4, Full), __DEFINE_LINK_MODE_NAME(400000, DR4, Full), __DEFINE_LINK_MODE_NAME(400000, CR4, Full), __DEFINE_LINK_MODE_NAME(100, FX, Half), __DEFINE_LINK_MODE_NAME(100, FX, Full), __DEFINE_LINK_MODE_NAME(10, T1L, Full), __DEFINE_LINK_MODE_NAME(800000, CR8, Full), __DEFINE_LINK_MODE_NAME(800000, KR8, Full), __DEFINE_LINK_MODE_NAME(800000, DR8, Full), __DEFINE_LINK_MODE_NAME(800000, DR8_2, Full), __DEFINE_LINK_MODE_NAME(800000, SR8, Full), __DEFINE_LINK_MODE_NAME(800000, VR8, Full), __DEFINE_LINK_MODE_NAME(10, T1S, Full), __DEFINE_LINK_MODE_NAME(10, T1S, Half), __DEFINE_LINK_MODE_NAME(10, T1S_P2MP, Half), }; static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS); #define __LINK_MODE_LANES_CR 1 #define __LINK_MODE_LANES_CR2 2 #define __LINK_MODE_LANES_CR4 4 #define __LINK_MODE_LANES_CR8 8 #define __LINK_MODE_LANES_DR 1 #define __LINK_MODE_LANES_DR2 2 #define __LINK_MODE_LANES_DR4 4 #define __LINK_MODE_LANES_DR8 8 #define __LINK_MODE_LANES_KR 1 #define __LINK_MODE_LANES_KR2 2 #define __LINK_MODE_LANES_KR4 4 #define __LINK_MODE_LANES_KR8 8 #define __LINK_MODE_LANES_SR 1 #define __LINK_MODE_LANES_SR2 2 #define __LINK_MODE_LANES_SR4 4 #define __LINK_MODE_LANES_SR8 8 #define __LINK_MODE_LANES_ER 1 #define __LINK_MODE_LANES_KX 1 #define __LINK_MODE_LANES_KX4 4 #define __LINK_MODE_LANES_LR 1 #define __LINK_MODE_LANES_LR4 4 #define __LINK_MODE_LANES_LR4_ER4 4 #define __LINK_MODE_LANES_LR_ER_FR 1 #define __LINK_MODE_LANES_LR2_ER2_FR2 2 #define __LINK_MODE_LANES_LR4_ER4_FR4 4 #define __LINK_MODE_LANES_LR8_ER8_FR8 8 #define __LINK_MODE_LANES_LRM 1 #define __LINK_MODE_LANES_MLD2 2 #define __LINK_MODE_LANES_T 1 #define __LINK_MODE_LANES_T1 1 #define __LINK_MODE_LANES_X 1 #define __LINK_MODE_LANES_FX 1 #define __LINK_MODE_LANES_T1L 1 #define __LINK_MODE_LANES_T1S 1 #define __LINK_MODE_LANES_T1S_P2MP 1 #define __LINK_MODE_LANES_VR8 8 #define __LINK_MODE_LANES_DR8_2 8 #define __DEFINE_LINK_MODE_PARAMS(_speed, _type, _duplex) \ [ETHTOOL_LINK_MODE(_speed, _type, _duplex)] = { \ .speed = SPEED_ ## _speed, \ .lanes = __LINK_MODE_LANES_ ## _type, \ .duplex = __DUPLEX_ ## _duplex \ } #define __DUPLEX_Half DUPLEX_HALF #define __DUPLEX_Full DUPLEX_FULL #define __DEFINE_SPECIAL_MODE_PARAMS(_mode) \ [ETHTOOL_LINK_MODE_ ## _mode ## _BIT] = { \ .speed = SPEED_UNKNOWN, \ .lanes = 0, \ .duplex = DUPLEX_UNKNOWN, \ } const struct link_mode_info link_mode_params[] = { __DEFINE_LINK_MODE_PARAMS(10, T, Half), __DEFINE_LINK_MODE_PARAMS(10, T, Full), __DEFINE_LINK_MODE_PARAMS(100, T, Half), __DEFINE_LINK_MODE_PARAMS(100, T, Full), __DEFINE_LINK_MODE_PARAMS(1000, T, Half), __DEFINE_LINK_MODE_PARAMS(1000, T, Full), __DEFINE_SPECIAL_MODE_PARAMS(Autoneg), __DEFINE_SPECIAL_MODE_PARAMS(TP), __DEFINE_SPECIAL_MODE_PARAMS(AUI), __DEFINE_SPECIAL_MODE_PARAMS(MII), __DEFINE_SPECIAL_MODE_PARAMS(FIBRE), __DEFINE_SPECIAL_MODE_PARAMS(BNC), __DEFINE_LINK_MODE_PARAMS(10000, T, Full), __DEFINE_SPECIAL_MODE_PARAMS(Pause), __DEFINE_SPECIAL_MODE_PARAMS(Asym_Pause), __DEFINE_LINK_MODE_PARAMS(2500, X, Full), __DEFINE_SPECIAL_MODE_PARAMS(Backplane), __DEFINE_LINK_MODE_PARAMS(1000, KX, Full), __DEFINE_LINK_MODE_PARAMS(10000, KX4, Full), __DEFINE_LINK_MODE_PARAMS(10000, KR, Full), 
[ETHTOOL_LINK_MODE_10000baseR_FEC_BIT] = { .speed = SPEED_10000, .lanes = 1, .duplex = DUPLEX_FULL, }, __DEFINE_LINK_MODE_PARAMS(20000, MLD2, Full), __DEFINE_LINK_MODE_PARAMS(20000, KR2, Full), __DEFINE_LINK_MODE_PARAMS(40000, KR4, Full), __DEFINE_LINK_MODE_PARAMS(40000, CR4, Full), __DEFINE_LINK_MODE_PARAMS(40000, SR4, Full), __DEFINE_LINK_MODE_PARAMS(40000, LR4, Full), __DEFINE_LINK_MODE_PARAMS(56000, KR4, Full), __DEFINE_LINK_MODE_PARAMS(56000, CR4, Full), __DEFINE_LINK_MODE_PARAMS(56000, SR4, Full), __DEFINE_LINK_MODE_PARAMS(56000, LR4, Full), __DEFINE_LINK_MODE_PARAMS(25000, CR, Full), __DEFINE_LINK_MODE_PARAMS(25000, KR, Full), __DEFINE_LINK_MODE_PARAMS(25000, SR, Full), __DEFINE_LINK_MODE_PARAMS(50000, CR2, Full), __DEFINE_LINK_MODE_PARAMS(50000, KR2, Full), __DEFINE_LINK_MODE_PARAMS(100000, KR4, Full), __DEFINE_LINK_MODE_PARAMS(100000, SR4, Full), __DEFINE_LINK_MODE_PARAMS(100000, CR4, Full), __DEFINE_LINK_MODE_PARAMS(100000, LR4_ER4, Full), __DEFINE_LINK_MODE_PARAMS(50000, SR2, Full), __DEFINE_LINK_MODE_PARAMS(1000, X, Full), __DEFINE_LINK_MODE_PARAMS(10000, CR, Full), __DEFINE_LINK_MODE_PARAMS(10000, SR, Full), __DEFINE_LINK_MODE_PARAMS(10000, LR, Full), __DEFINE_LINK_MODE_PARAMS(10000, LRM, Full), __DEFINE_LINK_MODE_PARAMS(10000, ER, Full), __DEFINE_LINK_MODE_PARAMS(2500, T, Full), __DEFINE_LINK_MODE_PARAMS(5000, T, Full), __DEFINE_SPECIAL_MODE_PARAMS(FEC_NONE), __DEFINE_SPECIAL_MODE_PARAMS(FEC_RS), __DEFINE_SPECIAL_MODE_PARAMS(FEC_BASER), __DEFINE_LINK_MODE_PARAMS(50000, KR, Full), __DEFINE_LINK_MODE_PARAMS(50000, SR, Full), __DEFINE_LINK_MODE_PARAMS(50000, CR, Full), __DEFINE_LINK_MODE_PARAMS(50000, LR_ER_FR, Full), __DEFINE_LINK_MODE_PARAMS(50000, DR, Full), __DEFINE_LINK_MODE_PARAMS(100000, KR2, Full), __DEFINE_LINK_MODE_PARAMS(100000, SR2, Full), __DEFINE_LINK_MODE_PARAMS(100000, CR2, Full), __DEFINE_LINK_MODE_PARAMS(100000, LR2_ER2_FR2, Full), __DEFINE_LINK_MODE_PARAMS(100000, DR2, Full), __DEFINE_LINK_MODE_PARAMS(200000, KR4, Full), __DEFINE_LINK_MODE_PARAMS(200000, SR4, Full), __DEFINE_LINK_MODE_PARAMS(200000, LR4_ER4_FR4, Full), __DEFINE_LINK_MODE_PARAMS(200000, DR4, Full), __DEFINE_LINK_MODE_PARAMS(200000, CR4, Full), __DEFINE_LINK_MODE_PARAMS(100, T1, Full), __DEFINE_LINK_MODE_PARAMS(1000, T1, Full), __DEFINE_LINK_MODE_PARAMS(400000, KR8, Full), __DEFINE_LINK_MODE_PARAMS(400000, SR8, Full), __DEFINE_LINK_MODE_PARAMS(400000, LR8_ER8_FR8, Full), __DEFINE_LINK_MODE_PARAMS(400000, DR8, Full), __DEFINE_LINK_MODE_PARAMS(400000, CR8, Full), __DEFINE_SPECIAL_MODE_PARAMS(FEC_LLRS), __DEFINE_LINK_MODE_PARAMS(100000, KR, Full), __DEFINE_LINK_MODE_PARAMS(100000, SR, Full), __DEFINE_LINK_MODE_PARAMS(100000, LR_ER_FR, Full), __DEFINE_LINK_MODE_PARAMS(100000, DR, Full), __DEFINE_LINK_MODE_PARAMS(100000, CR, Full), __DEFINE_LINK_MODE_PARAMS(200000, KR2, Full), __DEFINE_LINK_MODE_PARAMS(200000, SR2, Full), __DEFINE_LINK_MODE_PARAMS(200000, LR2_ER2_FR2, Full), __DEFINE_LINK_MODE_PARAMS(200000, DR2, Full), __DEFINE_LINK_MODE_PARAMS(200000, CR2, Full), __DEFINE_LINK_MODE_PARAMS(400000, KR4, Full), __DEFINE_LINK_MODE_PARAMS(400000, SR4, Full), __DEFINE_LINK_MODE_PARAMS(400000, LR4_ER4_FR4, Full), __DEFINE_LINK_MODE_PARAMS(400000, DR4, Full), __DEFINE_LINK_MODE_PARAMS(400000, CR4, Full), __DEFINE_LINK_MODE_PARAMS(100, FX, Half), __DEFINE_LINK_MODE_PARAMS(100, FX, Full), __DEFINE_LINK_MODE_PARAMS(10, T1L, Full), __DEFINE_LINK_MODE_PARAMS(800000, CR8, Full), __DEFINE_LINK_MODE_PARAMS(800000, KR8, Full), __DEFINE_LINK_MODE_PARAMS(800000, DR8, Full), __DEFINE_LINK_MODE_PARAMS(800000, DR8_2, 
Full), __DEFINE_LINK_MODE_PARAMS(800000, SR8, Full), __DEFINE_LINK_MODE_PARAMS(800000, VR8, Full), __DEFINE_LINK_MODE_PARAMS(10, T1S, Full), __DEFINE_LINK_MODE_PARAMS(10, T1S, Half), __DEFINE_LINK_MODE_PARAMS(10, T1S_P2MP, Half), }; static_assert(ARRAY_SIZE(link_mode_params) == __ETHTOOL_LINK_MODE_MASK_NBITS); const char netif_msg_class_names[][ETH_GSTRING_LEN] = { [NETIF_MSG_DRV_BIT] = "drv", [NETIF_MSG_PROBE_BIT] = "probe", [NETIF_MSG_LINK_BIT] = "link", [NETIF_MSG_TIMER_BIT] = "timer", [NETIF_MSG_IFDOWN_BIT] = "ifdown", [NETIF_MSG_IFUP_BIT] = "ifup", [NETIF_MSG_RX_ERR_BIT] = "rx_err", [NETIF_MSG_TX_ERR_BIT] = "tx_err", [NETIF_MSG_TX_QUEUED_BIT] = "tx_queued", [NETIF_MSG_INTR_BIT] = "intr", [NETIF_MSG_TX_DONE_BIT] = "tx_done", [NETIF_MSG_RX_STATUS_BIT] = "rx_status", [NETIF_MSG_PKTDATA_BIT] = "pktdata", [NETIF_MSG_HW_BIT] = "hw", [NETIF_MSG_WOL_BIT] = "wol", }; static_assert(ARRAY_SIZE(netif_msg_class_names) == NETIF_MSG_CLASS_COUNT); const char wol_mode_names[][ETH_GSTRING_LEN] = { [const_ilog2(WAKE_PHY)] = "phy", [const_ilog2(WAKE_UCAST)] = "ucast", [const_ilog2(WAKE_MCAST)] = "mcast", [const_ilog2(WAKE_BCAST)] = "bcast", [const_ilog2(WAKE_ARP)] = "arp", [const_ilog2(WAKE_MAGIC)] = "magic", [const_ilog2(WAKE_MAGICSECURE)] = "magicsecure", [const_ilog2(WAKE_FILTER)] = "filter", }; static_assert(ARRAY_SIZE(wol_mode_names) == WOL_MODE_COUNT); const char sof_timestamping_names[][ETH_GSTRING_LEN] = { [const_ilog2(SOF_TIMESTAMPING_TX_HARDWARE)] = "hardware-transmit", [const_ilog2(SOF_TIMESTAMPING_TX_SOFTWARE)] = "software-transmit", [const_ilog2(SOF_TIMESTAMPING_RX_HARDWARE)] = "hardware-receive", [const_ilog2(SOF_TIMESTAMPING_RX_SOFTWARE)] = "software-receive", [const_ilog2(SOF_TIMESTAMPING_SOFTWARE)] = "software-system-clock", [const_ilog2(SOF_TIMESTAMPING_SYS_HARDWARE)] = "hardware-legacy-clock", [const_ilog2(SOF_TIMESTAMPING_RAW_HARDWARE)] = "hardware-raw-clock", [const_ilog2(SOF_TIMESTAMPING_OPT_ID)] = "option-id", [const_ilog2(SOF_TIMESTAMPING_TX_SCHED)] = "sched-transmit", [const_ilog2(SOF_TIMESTAMPING_TX_ACK)] = "ack-transmit", [const_ilog2(SOF_TIMESTAMPING_OPT_CMSG)] = "option-cmsg", [const_ilog2(SOF_TIMESTAMPING_OPT_TSONLY)] = "option-tsonly", [const_ilog2(SOF_TIMESTAMPING_OPT_STATS)] = "option-stats", [const_ilog2(SOF_TIMESTAMPING_OPT_PKTINFO)] = "option-pktinfo", [const_ilog2(SOF_TIMESTAMPING_OPT_TX_SWHW)] = "option-tx-swhw", [const_ilog2(SOF_TIMESTAMPING_BIND_PHC)] = "bind-phc", [const_ilog2(SOF_TIMESTAMPING_OPT_ID_TCP)] = "option-id-tcp", }; static_assert(ARRAY_SIZE(sof_timestamping_names) == __SOF_TIMESTAMPING_CNT); const char ts_tx_type_names[][ETH_GSTRING_LEN] = { [HWTSTAMP_TX_OFF] = "off", [HWTSTAMP_TX_ON] = "on", [HWTSTAMP_TX_ONESTEP_SYNC] = "onestep-sync", [HWTSTAMP_TX_ONESTEP_P2P] = "onestep-p2p", }; static_assert(ARRAY_SIZE(ts_tx_type_names) == __HWTSTAMP_TX_CNT); const char ts_rx_filter_names[][ETH_GSTRING_LEN] = { [HWTSTAMP_FILTER_NONE] = "none", [HWTSTAMP_FILTER_ALL] = "all", [HWTSTAMP_FILTER_SOME] = "some", [HWTSTAMP_FILTER_PTP_V1_L4_EVENT] = "ptpv1-l4-event", [HWTSTAMP_FILTER_PTP_V1_L4_SYNC] = "ptpv1-l4-sync", [HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ] = "ptpv1-l4-delay-req", [HWTSTAMP_FILTER_PTP_V2_L4_EVENT] = "ptpv2-l4-event", [HWTSTAMP_FILTER_PTP_V2_L4_SYNC] = "ptpv2-l4-sync", [HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ] = "ptpv2-l4-delay-req", [HWTSTAMP_FILTER_PTP_V2_L2_EVENT] = "ptpv2-l2-event", [HWTSTAMP_FILTER_PTP_V2_L2_SYNC] = "ptpv2-l2-sync", [HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ] = "ptpv2-l2-delay-req", [HWTSTAMP_FILTER_PTP_V2_EVENT] = "ptpv2-event", 
[HWTSTAMP_FILTER_PTP_V2_SYNC] = "ptpv2-sync", [HWTSTAMP_FILTER_PTP_V2_DELAY_REQ] = "ptpv2-delay-req", [HWTSTAMP_FILTER_NTP_ALL] = "ntp-all", }; static_assert(ARRAY_SIZE(ts_rx_filter_names) == __HWTSTAMP_FILTER_CNT); const char udp_tunnel_type_names[][ETH_GSTRING_LEN] = { [ETHTOOL_UDP_TUNNEL_TYPE_VXLAN] = "vxlan", [ETHTOOL_UDP_TUNNEL_TYPE_GENEVE] = "geneve", [ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE] = "vxlan-gpe", }; static_assert(ARRAY_SIZE(udp_tunnel_type_names) == __ETHTOOL_UDP_TUNNEL_TYPE_CNT); /* return false if legacy contained non-0 deprecated fields * maxtxpkt/maxrxpkt. rest of ksettings always updated */ bool convert_legacy_settings_to_link_ksettings( struct ethtool_link_ksettings *link_ksettings, const struct ethtool_cmd *legacy_settings) { bool retval = true; memset(link_ksettings, 0, sizeof(*link_ksettings)); /* This is used to tell users that driver is still using these * deprecated legacy fields, and they should not use * %ETHTOOL_GLINKSETTINGS/%ETHTOOL_SLINKSETTINGS */ if (legacy_settings->maxtxpkt || legacy_settings->maxrxpkt) retval = false; ethtool_convert_legacy_u32_to_link_mode( link_ksettings->link_modes.supported, legacy_settings->supported); ethtool_convert_legacy_u32_to_link_mode( link_ksettings->link_modes.advertising, legacy_settings->advertising); ethtool_convert_legacy_u32_to_link_mode( link_ksettings->link_modes.lp_advertising, legacy_settings->lp_advertising); link_ksettings->base.speed = ethtool_cmd_speed(legacy_settings); link_ksettings->base.duplex = legacy_settings->duplex; link_ksettings->base.port = legacy_settings->port; link_ksettings->base.phy_address = legacy_settings->phy_address; link_ksettings->base.autoneg = legacy_settings->autoneg; link_ksettings->base.mdio_support = legacy_settings->mdio_support; link_ksettings->base.eth_tp_mdix = legacy_settings->eth_tp_mdix; link_ksettings->base.eth_tp_mdix_ctrl = legacy_settings->eth_tp_mdix_ctrl; return retval; } int __ethtool_get_link(struct net_device *dev) { if (!dev->ethtool_ops->get_link) return -EOPNOTSUPP; return netif_running(dev) && dev->ethtool_ops->get_link(dev); } static int ethtool_get_rxnfc_rule_count(struct net_device *dev) { const struct ethtool_ops *ops = dev->ethtool_ops; struct ethtool_rxnfc info = { .cmd = ETHTOOL_GRXCLSRLCNT, }; int err; err = ops->get_rxnfc(dev, &info, NULL); if (err) return err; return info.rule_cnt; } int ethtool_get_max_rxnfc_channel(struct net_device *dev, u64 *max) { const struct ethtool_ops *ops = dev->ethtool_ops; struct ethtool_rxnfc *info; int err, i, rule_cnt; u64 max_ring = 0; if (!ops->get_rxnfc) return -EOPNOTSUPP; rule_cnt = ethtool_get_rxnfc_rule_count(dev); if (rule_cnt <= 0) return -EINVAL; info = kvzalloc(struct_size(info, rule_locs, rule_cnt), GFP_KERNEL); if (!info) return -ENOMEM; info->cmd = ETHTOOL_GRXCLSRLALL; info->rule_cnt = rule_cnt; err = ops->get_rxnfc(dev, info, info->rule_locs); if (err) goto err_free_info; for (i = 0; i < rule_cnt; i++) { struct ethtool_rxnfc rule_info = { .cmd = ETHTOOL_GRXCLSRULE, .fs.location = info->rule_locs[i], }; err = ops->get_rxnfc(dev, &rule_info, NULL); if (err) goto err_free_info; if (rule_info.fs.ring_cookie != RX_CLS_FLOW_DISC && rule_info.fs.ring_cookie != RX_CLS_FLOW_WAKE && !(rule_info.flow_type & FLOW_RSS) && !ethtool_get_flow_spec_ring_vf(rule_info.fs.ring_cookie)) max_ring = max_t(u64, max_ring, rule_info.fs.ring_cookie); } kvfree(info); *max = max_ring; return 0; err_free_info: kvfree(info); return err; } int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max) { struct ethtool_rxfh_param rxfh = 
{}; u32 dev_size, current_max = 0; int ret; if (!dev->ethtool_ops->get_rxfh_indir_size || !dev->ethtool_ops->get_rxfh) return -EOPNOTSUPP; dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev); if (dev_size == 0) return -EOPNOTSUPP; rxfh.indir = kcalloc(dev_size, sizeof(rxfh.indir[0]), GFP_USER); if (!rxfh.indir) return -ENOMEM; ret = dev->ethtool_ops->get_rxfh(dev, &rxfh); if (ret) goto out; while (dev_size--) current_max = max(current_max, rxfh.indir[dev_size]); *max = current_max; out: kfree(rxfh.indir); return ret; } int ethtool_check_ops(const struct ethtool_ops *ops) { if (WARN_ON(ops->set_coalesce && !ops->supported_coalesce_params)) return -EINVAL; /* NOTE: sufficiently insane drivers may swap ethtool_ops at runtime, * the fact that ops are checked at registration time does not * mean the ops attached to a netdev later on are sane. */ return 0; } int __ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) { const struct ethtool_ops *ops = dev->ethtool_ops; struct phy_device *phydev = dev->phydev; memset(info, 0, sizeof(*info)); info->cmd = ETHTOOL_GET_TS_INFO; if (phy_has_tsinfo(phydev)) return phy_ts_info(phydev, info); if (ops->get_ts_info) return ops->get_ts_info(dev, info); info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE; info->phc_index = -1; return 0; } int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index) { struct ethtool_ts_info info = { }; int num = 0; if (!__ethtool_get_ts_info(dev, &info)) num = ptp_get_vclocks_index(info.phc_index, vclock_index); return num; } EXPORT_SYMBOL(ethtool_get_phc_vclocks); int ethtool_get_ts_info_by_layer(struct net_device *dev, struct ethtool_ts_info *info) { return __ethtool_get_ts_info(dev, info); } EXPORT_SYMBOL(ethtool_get_ts_info_by_layer); const struct ethtool_phy_ops *ethtool_phy_ops; void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops) { ASSERT_RTNL(); ethtool_phy_ops = ops; } EXPORT_SYMBOL_GPL(ethtool_set_ethtool_phy_ops); void ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings, enum ethtool_link_mode_bit_indices link_mode) { const struct link_mode_info *link_info; if (WARN_ON_ONCE(link_mode >= __ETHTOOL_LINK_MODE_MASK_NBITS)) return; link_info = &link_mode_params[link_mode]; link_ksettings->base.speed = link_info->speed; link_ksettings->lanes = link_info->lanes; link_ksettings->base.duplex = link_info->duplex; } EXPORT_SYMBOL_GPL(ethtool_params_from_link_mode); /** * ethtool_forced_speed_maps_init * @maps: Pointer to an array of Ethtool forced speed map * @size: Array size * * Initialize an array of Ethtool forced speed map to Ethtool link modes. This * should be called during driver module init. */ void ethtool_forced_speed_maps_init(struct ethtool_forced_speed_map *maps, u32 size) { for (u32 i = 0; i < size; i++) { struct ethtool_forced_speed_map *map = &maps[i]; linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps); map->cap_arr = NULL; map->arr_size = 0; } } EXPORT_SYMBOL_GPL(ethtool_forced_speed_maps_init);
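/*
 * Usage sketch (added commentary, not part of the original file): a driver is
 * expected to describe each forced speed as a flat array of link-mode bit
 * numbers and let ethtool_forced_speed_maps_init() convert every array into a
 * linkmode bitmap once, at module init.  Hypothetical example with made-up
 * identifiers:
 *
 *	static const u32 example_caps_100000[] = {
 *		ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
 *		ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
 *	};
 *
 *	static struct ethtool_forced_speed_map example_maps[] = {
 *		{
 *			.speed    = SPEED_100000,
 *			.cap_arr  = example_caps_100000,
 *			.arr_size = ARRAY_SIZE(example_caps_100000),
 *		},
 *	};
 *
 *	ethtool_forced_speed_maps_init(example_maps, ARRAY_SIZE(example_maps));
 *
 * After the call, map->caps holds the equivalent linkmode mask and the
 * cap_arr/arr_size fields are cleared, as the helper above does.
 */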
// SPDX-License-Identifier: GPL-2.0 #include <linux/slab.h> #include <trace/events/btrfs.h> #include "messages.h" #include "ctree.h" #include "extent-io-tree.h" #include "btrfs_inode.h" static struct kmem_cache *extent_state_cache; static inline bool extent_state_in_tree(const struct extent_state *state) { return !RB_EMPTY_NODE(&state->rb_node); } #ifdef CONFIG_BTRFS_DEBUG static LIST_HEAD(states); static DEFINE_SPINLOCK(leak_lock); static inline void btrfs_leak_debug_add_state(struct extent_state *state) { unsigned long flags; spin_lock_irqsave(&leak_lock, flags); list_add(&state->leak_list, &states); spin_unlock_irqrestore(&leak_lock, flags); } static inline void btrfs_leak_debug_del_state(struct extent_state *state) { unsigned long flags; spin_lock_irqsave(&leak_lock, flags); list_del(&state->leak_list); spin_unlock_irqrestore(&leak_lock, flags); } static inline void btrfs_extent_state_leak_debug_check(void) { struct extent_state *state; while (!list_empty(&states)) { state = list_entry(states.next, struct extent_state, leak_list); pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n", state->start, state->end, state->state, extent_state_in_tree(state), refcount_read(&state->refs)); list_del(&state->leak_list); WARN_ON_ONCE(1); kmem_cache_free(extent_state_cache, state); } } #define btrfs_debug_check_extent_io_range(tree, start, end) \ __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end)) static inline void __btrfs_debug_check_extent_io_range(const char *caller, struct extent_io_tree *tree, u64 start, u64 end) { const struct btrfs_inode *inode; u64 isize; if (tree->owner != IO_TREE_INODE_IO) return; inode = extent_io_tree_to_inode_const(tree); isize = i_size_read(&inode->vfs_inode); if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) { btrfs_debug_rl(inode->root->fs_info, "%s: ino %llu isize %llu odd range [%llu,%llu]", caller, btrfs_ino(inode), isize, start, end); } }
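/*
 * Added summary comment (not from the original source): under
 * CONFIG_BTRFS_DEBUG the helpers above keep every live extent_state on the
 * global "states" list so that btrfs_extent_state_leak_debug_check() can
 * report records never freed by module exit time, and
 * __btrfs_debug_check_extent_io_range() flags ranges on inode IO trees whose
 * inclusive end offset looks unaligned; block-aligned ranges normally end at
 * an odd byte offset (a multiple of the block size minus one) or at
 * i_size - 1, so an even end elsewhere is suspicious.
 */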
#else #define btrfs_leak_debug_add_state(state) do {} while (0) #define btrfs_leak_debug_del_state(state) do {} while (0) #define btrfs_extent_state_leak_debug_check() do {} while (0) #define btrfs_debug_check_extent_io_range(c, s, e) do {} while (0) #endif /* * The only tree allowed to set the inode is IO_TREE_INODE_IO. */ static bool is_inode_io_tree(const struct extent_io_tree *tree) { return tree->owner == IO_TREE_INODE_IO; } /* Return the inode if it's valid for the given tree, otherwise NULL. */ struct btrfs_inode *extent_io_tree_to_inode(struct extent_io_tree *tree) { if (tree->owner == IO_TREE_INODE_IO) return tree->inode; return NULL; } /* Read-only access to the inode. */ const struct btrfs_inode *extent_io_tree_to_inode_const(const struct extent_io_tree *tree) { if (tree->owner == IO_TREE_INODE_IO) return tree->inode; return NULL; } /* For read-only access to fs_info. */ const struct btrfs_fs_info *extent_io_tree_to_fs_info(const struct extent_io_tree *tree) { if (tree->owner == IO_TREE_INODE_IO) return tree->inode->root->fs_info; return tree->fs_info; } void extent_io_tree_init(struct btrfs_fs_info *fs_info, struct extent_io_tree *tree, unsigned int owner) { tree->state = RB_ROOT; spin_lock_init(&tree->lock); tree->fs_info = fs_info; tree->owner = owner; } /* * Empty an io tree, removing and freeing every extent state record from the * tree. This should be called once we are sure no other task can access the * tree anymore, so no tree updates happen after we empty the tree and there * aren't any waiters on any extent state record (EXTENT_LOCKED bit is never * set on any extent state when calling this function). */ void extent_io_tree_release(struct extent_io_tree *tree) { struct rb_root root; struct extent_state *state; struct extent_state *tmp; spin_lock(&tree->lock); root = tree->state; tree->state = RB_ROOT; rbtree_postorder_for_each_entry_safe(state, tmp, &root, rb_node) { /* Clear node to keep free_extent_state() happy. */ RB_CLEAR_NODE(&state->rb_node); ASSERT(!(state->state & EXTENT_LOCKED)); /* * No need for a memory barrier here, as we are holding the tree * lock and we only change the waitqueue while holding that lock * (see wait_extent_bit()). */ ASSERT(!waitqueue_active(&state->wq)); free_extent_state(state); cond_resched_lock(&tree->lock); } /* * Should still be empty even after a reschedule, no other task should * be accessing the tree anymore. 
*/ ASSERT(RB_EMPTY_ROOT(&tree->state)); spin_unlock(&tree->lock); } static struct extent_state *alloc_extent_state(gfp_t mask) { struct extent_state *state; /* * The given mask might be not appropriate for the slab allocator, * drop the unsupported bits */ mask &= ~(__GFP_DMA32|__GFP_HIGHMEM); state = kmem_cache_alloc(extent_state_cache, mask); if (!state) return state; state->state = 0; RB_CLEAR_NODE(&state->rb_node); btrfs_leak_debug_add_state(state); refcount_set(&state->refs, 1); init_waitqueue_head(&state->wq); trace_alloc_extent_state(state, mask, _RET_IP_); return state; } static struct extent_state *alloc_extent_state_atomic(struct extent_state *prealloc) { if (!prealloc) prealloc = alloc_extent_state(GFP_ATOMIC); return prealloc; } void free_extent_state(struct extent_state *state) { if (!state) return; if (refcount_dec_and_test(&state->refs)) { WARN_ON(extent_state_in_tree(state)); btrfs_leak_debug_del_state(state); trace_free_extent_state(state, _RET_IP_); kmem_cache_free(extent_state_cache, state); } } static int add_extent_changeset(struct extent_state *state, u32 bits, struct extent_changeset *changeset, int set) { int ret; if (!changeset) return 0; if (set && (state->state & bits) == bits) return 0; if (!set && (state->state & bits) == 0) return 0; changeset->bytes_changed += state->end - state->start + 1; ret = ulist_add(&changeset->range_changed, state->start, state->end, GFP_ATOMIC); return ret; } static inline struct extent_state *next_state(struct extent_state *state) { struct rb_node *next = rb_next(&state->rb_node); if (next) return rb_entry(next, struct extent_state, rb_node); else return NULL; } static inline struct extent_state *prev_state(struct extent_state *state) { struct rb_node *next = rb_prev(&state->rb_node); if (next) return rb_entry(next, struct extent_state, rb_node); else return NULL; } /* * Search @tree for an entry that contains @offset. Such entry would have * entry->start <= offset && entry->end >= offset. * * @tree: the tree to search * @offset: offset that should fall within an entry in @tree * @node_ret: pointer where new node should be anchored (used when inserting an * entry in the tree) * @parent_ret: points to entry which would have been the parent of the entry, * containing @offset * * Return a pointer to the entry that contains @offset byte address and don't change * @node_ret and @parent_ret. * * If no such entry exists, return pointer to entry that ends before @offset * and fill parameters @node_ret and @parent_ret, ie. does not return NULL. */ static inline struct extent_state *tree_search_for_insert(struct extent_io_tree *tree, u64 offset, struct rb_node ***node_ret, struct rb_node **parent_ret) { struct rb_root *root = &tree->state; struct rb_node **node = &root->rb_node; struct rb_node *prev = NULL; struct extent_state *entry = NULL; while (*node) { prev = *node; entry = rb_entry(prev, struct extent_state, rb_node); if (offset < entry->start) node = &(*node)->rb_left; else if (offset > entry->end) node = &(*node)->rb_right; else return entry; } if (node_ret) *node_ret = node; if (parent_ret) *parent_ret = prev; /* Search neighbors until we find the first one past the end */ while (entry && offset > entry->end) entry = next_state(entry); return entry; } /* * Search offset in the tree or fill neighbor rbtree node pointers. 
* * @tree: the tree to search * @offset: offset that should fall within an entry in @tree * @next_ret: pointer to the first entry whose range ends after @offset * @prev_ret: pointer to the first entry whose range begins before @offset * * Return a pointer to the entry that contains @offset byte address. If no * such entry exists, then return NULL and fill @prev_ret and @next_ret. * Otherwise return the found entry and other pointers are left untouched. */ static struct extent_state *tree_search_prev_next(struct extent_io_tree *tree, u64 offset, struct extent_state **prev_ret, struct extent_state **next_ret) { struct rb_root *root = &tree->state; struct rb_node **node = &root->rb_node; struct extent_state *orig_prev; struct extent_state *entry = NULL; ASSERT(prev_ret); ASSERT(next_ret); while (*node) { entry = rb_entry(*node, struct extent_state, rb_node); if (offset < entry->start) node = &(*node)->rb_left; else if (offset > entry->end) node = &(*node)->rb_right; else return entry; } orig_prev = entry; while (entry && offset > entry->end) entry = next_state(entry); *next_ret = entry; entry = orig_prev; while (entry && offset < entry->start) entry = prev_state(entry); *prev_ret = entry; return NULL; } /* * Inexact rb-tree search, return the next entry if @offset is not found */ static inline struct extent_state *tree_search(struct extent_io_tree *tree, u64 offset) { return tree_search_for_insert(tree, offset, NULL, NULL); } static void extent_io_tree_panic(const struct extent_io_tree *tree, const struct extent_state *state, const char *opname, int err) { btrfs_panic(extent_io_tree_to_fs_info(tree), err, "extent io tree error on %s state start %llu end %llu", opname, state->start, state->end); } static void merge_prev_state(struct extent_io_tree *tree, struct extent_state *state) { struct extent_state *prev; prev = prev_state(state); if (prev && prev->end == state->start - 1 && prev->state == state->state) { if (is_inode_io_tree(tree)) btrfs_merge_delalloc_extent(extent_io_tree_to_inode(tree), state, prev); state->start = prev->start; rb_erase(&prev->rb_node, &tree->state); RB_CLEAR_NODE(&prev->rb_node); free_extent_state(prev); } } static void merge_next_state(struct extent_io_tree *tree, struct extent_state *state) { struct extent_state *next; next = next_state(state); if (next && next->start == state->end + 1 && next->state == state->state) { if (is_inode_io_tree(tree)) btrfs_merge_delalloc_extent(extent_io_tree_to_inode(tree), state, next); state->end = next->end; rb_erase(&next->rb_node, &tree->state); RB_CLEAR_NODE(&next->rb_node); free_extent_state(next); } } /* * Utility function to look for merge candidates inside a given range. Any * extents with matching state are merged together into a single extent in the * tree. Extents with EXTENT_IO in their state field are not merged because * the end_io handlers need to be able to do operations on them without * sleeping (or doing allocations/splits). * * This should be called with the tree lock held. 
*/ static void merge_state(struct extent_io_tree *tree, struct extent_state *state) { if (state->state & (EXTENT_LOCKED | EXTENT_BOUNDARY)) return; merge_prev_state(tree, state); merge_next_state(tree, state); } static void set_state_bits(struct extent_io_tree *tree, struct extent_state *state, u32 bits, struct extent_changeset *changeset) { u32 bits_to_set = bits & ~EXTENT_CTLBITS; int ret; if (is_inode_io_tree(tree)) btrfs_set_delalloc_extent(extent_io_tree_to_inode(tree), state, bits); ret = add_extent_changeset(state, bits_to_set, changeset, 1); BUG_ON(ret < 0); state->state |= bits_to_set; } /* * Insert an extent_state struct into the tree. 'bits' are set on the * struct before it is inserted. * * Returns a pointer to the struct extent_state record containing the range * requested for insertion, which may be the same as the given struct or it * may be an existing record in the tree that was expanded to accommodate the * requested range. In case of an extent_state different from the one that was * given, the later can be freed or reused by the caller. * * On error it returns an error pointer. * * The tree lock is not taken internally. This is a utility function and * probably isn't what you want to call (see set/clear_extent_bit). */ static struct extent_state *insert_state(struct extent_io_tree *tree, struct extent_state *state, u32 bits, struct extent_changeset *changeset) { struct rb_node **node; struct rb_node *parent = NULL; const u64 start = state->start - 1; const u64 end = state->end + 1; const bool try_merge = !(bits & (EXTENT_LOCKED | EXTENT_BOUNDARY)); set_state_bits(tree, state, bits, changeset); node = &tree->state.rb_node; while (*node) { struct extent_state *entry; parent = *node; entry = rb_entry(parent, struct extent_state, rb_node); if (state->end < entry->start) { if (try_merge && end == entry->start && state->state == entry->state) { if (is_inode_io_tree(tree)) btrfs_merge_delalloc_extent( extent_io_tree_to_inode(tree), state, entry); entry->start = state->start; merge_prev_state(tree, entry); state->state = 0; return entry; } node = &(*node)->rb_left; } else if (state->end > entry->end) { if (try_merge && entry->end == start && state->state == entry->state) { if (is_inode_io_tree(tree)) btrfs_merge_delalloc_extent( extent_io_tree_to_inode(tree), state, entry); entry->end = state->end; merge_next_state(tree, entry); state->state = 0; return entry; } node = &(*node)->rb_right; } else { return ERR_PTR(-EEXIST); } } rb_link_node(&state->rb_node, parent, node); rb_insert_color(&state->rb_node, &tree->state); return state; } /* * Insert state to @tree to the location given by @node and @parent. */ static void insert_state_fast(struct extent_io_tree *tree, struct extent_state *state, struct rb_node **node, struct rb_node *parent, unsigned bits, struct extent_changeset *changeset) { set_state_bits(tree, state, bits, changeset); rb_link_node(&state->rb_node, parent, node); rb_insert_color(&state->rb_node, &tree->state); merge_state(tree, state); } /* * Split a given extent state struct in two, inserting the preallocated * struct 'prealloc' as the newly created second half. 'split' indicates an * offset inside 'orig' where it should be split. * * Before calling, * the tree has 'orig' at [orig->start, orig->end]. After calling, there * are two extent state structs in the tree: * prealloc: [orig->start, split - 1] * orig: [ split, orig->end ] * * The tree locks are not taken by this function. They need to be held * by the caller. 
*/ static int split_state(struct extent_io_tree *tree, struct extent_state *orig, struct extent_state *prealloc, u64 split) { struct rb_node *parent = NULL; struct rb_node **node; if (is_inode_io_tree(tree)) btrfs_split_delalloc_extent(extent_io_tree_to_inode(tree), orig, split); prealloc->start = orig->start; prealloc->end = split - 1; prealloc->state = orig->state; orig->start = split; parent = &orig->rb_node; node = &parent; while (*node) { struct extent_state *entry; parent = *node; entry = rb_entry(parent, struct extent_state, rb_node); if (prealloc->end < entry->start) { node = &(*node)->rb_left; } else if (prealloc->end > entry->end) { node = &(*node)->rb_right; } else { free_extent_state(prealloc); return -EEXIST; } } rb_link_node(&prealloc->rb_node, parent, node); rb_insert_color(&prealloc->rb_node, &tree->state); return 0; } /* * Utility function to clear some bits in an extent state struct. It will * optionally wake up anyone waiting on this state (wake == 1). * * If no bits are set on the state struct after clearing things, the * struct is freed and removed from the tree */ static struct extent_state *clear_state_bit(struct extent_io_tree *tree, struct extent_state *state, u32 bits, int wake, struct extent_changeset *changeset) { struct extent_state *next; u32 bits_to_clear = bits & ~EXTENT_CTLBITS; int ret; if (is_inode_io_tree(tree)) btrfs_clear_delalloc_extent(extent_io_tree_to_inode(tree), state, bits); ret = add_extent_changeset(state, bits_to_clear, changeset, 0); BUG_ON(ret < 0); state->state &= ~bits_to_clear; if (wake) wake_up(&state->wq); if (state->state == 0) { next = next_state(state); if (extent_state_in_tree(state)) { rb_erase(&state->rb_node, &tree->state); RB_CLEAR_NODE(&state->rb_node); free_extent_state(state); } else { WARN_ON(1); } } else { merge_state(tree, state); next = next_state(state); } return next; } /* * Detect if extent bits request NOWAIT semantics and set the gfp mask accordingly, * unset the EXTENT_NOWAIT bit. */ static void set_gfp_mask_from_bits(u32 *bits, gfp_t *mask) { *mask = (*bits & EXTENT_NOWAIT ? GFP_NOWAIT : GFP_NOFS); *bits &= EXTENT_NOWAIT - 1; } /* * Clear some bits on a range in the tree. This may require splitting or * inserting elements in the tree, so the gfp mask is used to indicate which * allocations or sleeping are allowed. * * Pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove the given * range from the tree regardless of state (ie for truncate). * * The range [start, end] is inclusive. * * This takes the tree lock, and returns 0 on success and < 0 on error. */ int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits, struct extent_state **cached_state, struct extent_changeset *changeset) { struct extent_state *state; struct extent_state *cached; struct extent_state *prealloc = NULL; u64 last_end; int err; int clear = 0; int wake; int delete = (bits & EXTENT_CLEAR_ALL_BITS); gfp_t mask; set_gfp_mask_from_bits(&bits, &mask); btrfs_debug_check_extent_io_range(tree, start, end); trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits); if (delete) bits |= ~EXTENT_CTLBITS; if (bits & EXTENT_DELALLOC) bits |= EXTENT_NORESERVE; wake = (bits & EXTENT_LOCKED) ? 
1 : 0; if (bits & (EXTENT_LOCKED | EXTENT_BOUNDARY)) clear = 1; again: if (!prealloc) { /* * Don't care for allocation failure here because we might end * up not needing the pre-allocated extent state at all, which * is the case if we only have in the tree extent states that * cover our input range and don't cover too any other range. * If we end up needing a new extent state we allocate it later. */ prealloc = alloc_extent_state(mask); } spin_lock(&tree->lock); if (cached_state) { cached = *cached_state; if (clear) { *cached_state = NULL; cached_state = NULL; } if (cached && extent_state_in_tree(cached) && cached->start <= start && cached->end > start) { if (clear) refcount_dec(&cached->refs); state = cached; goto hit_next; } if (clear) free_extent_state(cached); } /* This search will find the extents that end after our range starts. */ state = tree_search(tree, start); if (!state) goto out; hit_next: if (state->start > end) goto out; WARN_ON(state->end < start); last_end = state->end; /* The state doesn't have the wanted bits, go ahead. */ if (!(state->state & bits)) { state = next_state(state); goto next; } /* * | ---- desired range ---- | * | state | or * | ------------- state -------------- | * * We need to split the extent we found, and may flip bits on second * half. * * If the extent we found extends past our range, we just split and * search again. It'll get split again the next time though. * * If the extent we found is inside our range, we clear the desired bit * on it. */ if (state->start < start) { prealloc = alloc_extent_state_atomic(prealloc); if (!prealloc) goto search_again; err = split_state(tree, state, prealloc, start); if (err) extent_io_tree_panic(tree, state, "split", err); prealloc = NULL; if (err) goto out; if (state->end <= end) { state = clear_state_bit(tree, state, bits, wake, changeset); goto next; } goto search_again; } /* * | ---- desired range ---- | * | state | * We need to split the extent, and clear the bit on the first half. */ if (state->start <= end && state->end > end) { prealloc = alloc_extent_state_atomic(prealloc); if (!prealloc) goto search_again; err = split_state(tree, state, prealloc, end + 1); if (err) extent_io_tree_panic(tree, state, "split", err); if (wake) wake_up(&state->wq); clear_state_bit(tree, prealloc, bits, wake, changeset); prealloc = NULL; goto out; } state = clear_state_bit(tree, state, bits, wake, changeset); next: if (last_end == (u64)-1) goto out; start = last_end + 1; if (start <= end && state && !need_resched()) goto hit_next; search_again: if (start > end) goto out; spin_unlock(&tree->lock); if (gfpflags_allow_blocking(mask)) cond_resched(); goto again; out: spin_unlock(&tree->lock); if (prealloc) free_extent_state(prealloc); return 0; } /* * Wait for one or more bits to clear on a range in the state tree. * The range [start, end] is inclusive. * The tree lock is taken by this function */ static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits, struct extent_state **cached_state) { struct extent_state *state; btrfs_debug_check_extent_io_range(tree, start, end); spin_lock(&tree->lock); again: /* * Maintain cached_state, as we may not remove it from the tree if there * are more bits than the bits we're waiting on set on this state. */ if (cached_state && *cached_state) { state = *cached_state; if (extent_state_in_tree(state) && state->start <= start && start < state->end) goto process_node; } while (1) { /* * This search will find all the extents that end after our * range starts. 
*/ state = tree_search(tree, start); process_node: if (!state) break; if (state->start > end) goto out; if (state->state & bits) { DEFINE_WAIT(wait); start = state->start; refcount_inc(&state->refs); prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE); spin_unlock(&tree->lock); schedule(); spin_lock(&tree->lock); finish_wait(&state->wq, &wait); free_extent_state(state); goto again; } start = state->end + 1; if (start > end) break; if (!cond_resched_lock(&tree->lock)) { state = next_state(state); goto process_node; } } out: /* This state is no longer useful, clear it and free it up. */ if (cached_state && *cached_state) { state = *cached_state; *cached_state = NULL; free_extent_state(state); } spin_unlock(&tree->lock); } static void cache_state_if_flags(struct extent_state *state, struct extent_state **cached_ptr, unsigned flags) { if (cached_ptr && !(*cached_ptr)) { if (!flags || (state->state & flags)) { *cached_ptr = state; refcount_inc(&state->refs); } } } static void cache_state(struct extent_state *state, struct extent_state **cached_ptr) { return cache_state_if_flags(state, cached_ptr, EXTENT_LOCKED | EXTENT_BOUNDARY); } /* * Find the first state struct with 'bits' set after 'start', and return it. * tree->lock must be held. NULL will returned if nothing was found after * 'start'. */ static struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree, u64 start, u32 bits) { struct extent_state *state; /* * This search will find all the extents that end after our range * starts. */ state = tree_search(tree, start); while (state) { if (state->end >= start && (state->state & bits)) return state; state = next_state(state); } return NULL; } /* * Find the first offset in the io tree with one or more @bits set. * * Note: If there are multiple bits set in @bits, any of them will match. * * Return true if we find something, and update @start_ret and @end_ret. * Return false if we found nothing. */ bool find_first_extent_bit(struct extent_io_tree *tree, u64 start, u64 *start_ret, u64 *end_ret, u32 bits, struct extent_state **cached_state) { struct extent_state *state; bool ret = false; spin_lock(&tree->lock); if (cached_state && *cached_state) { state = *cached_state; if (state->end == start - 1 && extent_state_in_tree(state)) { while ((state = next_state(state)) != NULL) { if (state->state & bits) break; } /* * If we found the next extent state, clear cached_state * so that we can cache the next extent state below and * avoid future calls going over the same extent state * again. If we haven't found any, clear as well since * it's now useless. */ free_extent_state(*cached_state); *cached_state = NULL; if (state) goto got_it; goto out; } free_extent_state(*cached_state); *cached_state = NULL; } state = find_first_extent_bit_state(tree, start, bits); got_it: if (state) { cache_state_if_flags(state, cached_state, 0); *start_ret = state->start; *end_ret = state->end; ret = true; } out: spin_unlock(&tree->lock); return ret; } /* * Find a contiguous area of bits * * @tree: io tree to check * @start: offset to start the search from * @start_ret: the first offset we found with the bits set * @end_ret: the final contiguous range of the bits that were set * @bits: bits to look for * * set_extent_bit and clear_extent_bit can temporarily split contiguous ranges * to set bits appropriately, and then merge them again. During this time it * will drop the tree->lock, so use this helper if you want to find the actual * contiguous area for given bits. 
We will search to the first bit we find, and * then walk down the tree until we find a non-contiguous area. The area * returned will be the full contiguous area with the bits set. */ int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start, u64 *start_ret, u64 *end_ret, u32 bits) { struct extent_state *state; int ret = 1; ASSERT(!btrfs_fs_incompat(extent_io_tree_to_fs_info(tree), NO_HOLES)); spin_lock(&tree->lock); state = find_first_extent_bit_state(tree, start, bits); if (state) { *start_ret = state->start; *end_ret = state->end; while ((state = next_state(state)) != NULL) { if (state->start > (*end_ret + 1)) break; *end_ret = state->end; } ret = 0; } spin_unlock(&tree->lock); return ret; } /* * Find a contiguous range of bytes in the file marked as delalloc, not more * than 'max_bytes'. start and end are used to return the range, * * True is returned if we find something, false if nothing was in the tree. */ bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start, u64 *end, u64 max_bytes, struct extent_state **cached_state) { struct extent_state *state; u64 cur_start = *start; bool found = false; u64 total_bytes = 0; spin_lock(&tree->lock); /* * This search will find all the extents that end after our range * starts. */ state = tree_search(tree, cur_start); if (!state) { *end = (u64)-1; goto out; } while (state) { if (found && (state->start != cur_start || (state->state & EXTENT_BOUNDARY))) { goto out; } if (!(state->state & EXTENT_DELALLOC)) { if (!found) *end = state->end; goto out; } if (!found) { *start = state->start; *cached_state = state; refcount_inc(&state->refs); } found = true; *end = state->end; cur_start = state->end + 1; total_bytes += state->end - state->start + 1; if (total_bytes >= max_bytes) break; state = next_state(state); } out: spin_unlock(&tree->lock); return found; } /* * Set some bits on a range in the tree. This may require allocations or * sleeping. By default all allocations use GFP_NOFS, use EXTENT_NOWAIT for * GFP_NOWAIT. * * If any of the exclusive bits are set, this will fail with -EEXIST if some * part of the range already has the desired bits set. The extent_state of the * existing range is returned in failed_state in this case, and the start of the * existing range is returned in failed_start. failed_state is used as an * optimization for wait_extent_bit, failed_start must be used as the source of * truth as failed_state may have changed since we returned. * * [start, end] is inclusive This takes the tree lock. */ static int __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits, u64 *failed_start, struct extent_state **failed_state, struct extent_state **cached_state, struct extent_changeset *changeset) { struct extent_state *state; struct extent_state *prealloc = NULL; struct rb_node **p = NULL; struct rb_node *parent = NULL; int err = 0; u64 last_start; u64 last_end; u32 exclusive_bits = (bits & EXTENT_LOCKED); gfp_t mask; set_gfp_mask_from_bits(&bits, &mask); btrfs_debug_check_extent_io_range(tree, start, end); trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits); if (exclusive_bits) ASSERT(failed_start); else ASSERT(failed_start == NULL && failed_state == NULL); again: if (!prealloc) { /* * Don't care for allocation failure here because we might end * up not needing the pre-allocated extent state at all, which * is the case if we only have in the tree extent states that * cover our input range and don't cover too any other range. 
* If we end up needing a new extent state we allocate it later. */ prealloc = alloc_extent_state(mask); } spin_lock(&tree->lock); if (cached_state && *cached_state) { state = *cached_state; if (state->start <= start && state->end > start && extent_state_in_tree(state)) goto hit_next; } /* * This search will find all the extents that end after our range * starts. */ state = tree_search_for_insert(tree, start, &p, &parent); if (!state) { prealloc = alloc_extent_state_atomic(prealloc); if (!prealloc) goto search_again; prealloc->start = start; prealloc->end = end; insert_state_fast(tree, prealloc, p, parent, bits, changeset); cache_state(prealloc, cached_state); prealloc = NULL; goto out; } hit_next: last_start = state->start; last_end = state->end; /* * | ---- desired range ---- | * | state | * * Just lock what we found and keep going */ if (state->start == start && state->end <= end) { if (state->state & exclusive_bits) { *failed_start = state->start; cache_state(state, failed_state); err = -EEXIST; goto out; } set_state_bits(tree, state, bits, changeset); cache_state(state, cached_state); merge_state(tree, state); if (last_end == (u64)-1) goto out; start = last_end + 1; state = next_state(state); if (start < end && state && state->start == start && !need_resched()) goto hit_next; goto search_again; } /* * | ---- desired range ---- | * | state | * or * | ------------- state -------------- | * * We need to split the extent we found, and may flip bits on second * half. * * If the extent we found extends past our range, we just split and * search again. It'll get split again the next time though. * * If the extent we found is inside our range, we set the desired bit * on it. */ if (state->start < start) { if (state->state & exclusive_bits) { *failed_start = start; cache_state(state, failed_state); err = -EEXIST; goto out; } /* * If this extent already has all the bits we want set, then * skip it, not necessary to split it or do anything with it. */ if ((state->state & bits) == bits) { start = state->end + 1; cache_state(state, cached_state); goto search_again; } prealloc = alloc_extent_state_atomic(prealloc); if (!prealloc) goto search_again; err = split_state(tree, state, prealloc, start); if (err) extent_io_tree_panic(tree, state, "split", err); prealloc = NULL; if (err) goto out; if (state->end <= end) { set_state_bits(tree, state, bits, changeset); cache_state(state, cached_state); merge_state(tree, state); if (last_end == (u64)-1) goto out; start = last_end + 1; state = next_state(state); if (start < end && state && state->start == start && !need_resched()) goto hit_next; } goto search_again; } /* * | ---- desired range ---- | * | state | or | state | * * There's a hole, we need to insert something in it and ignore the * extent we found. */ if (state->start > start) { u64 this_end; struct extent_state *inserted_state; if (end < last_start) this_end = end; else this_end = last_start - 1; prealloc = alloc_extent_state_atomic(prealloc); if (!prealloc) goto search_again; /* * Avoid to free 'prealloc' if it can be merged with the later * extent. 
*/ prealloc->start = start; prealloc->end = this_end; inserted_state = insert_state(tree, prealloc, bits, changeset); if (IS_ERR(inserted_state)) { err = PTR_ERR(inserted_state); extent_io_tree_panic(tree, prealloc, "insert", err); } cache_state(inserted_state, cached_state); if (inserted_state == prealloc) prealloc = NULL; start = this_end + 1; goto search_again; } /* * | ---- desired range ---- | * | state | * * We need to split the extent, and set the bit on the first half */ if (state->start <= end && state->end > end) { if (state->state & exclusive_bits) { *failed_start = start; cache_state(state, failed_state); err = -EEXIST; goto out; } prealloc = alloc_extent_state_atomic(prealloc); if (!prealloc) goto search_again; err = split_state(tree, state, prealloc, end + 1); if (err) extent_io_tree_panic(tree, state, "split", err); set_state_bits(tree, prealloc, bits, changeset); cache_state(prealloc, cached_state); merge_state(tree, prealloc); prealloc = NULL; goto out; } search_again: if (start > end) goto out; spin_unlock(&tree->lock); if (gfpflags_allow_blocking(mask)) cond_resched(); goto again; out: spin_unlock(&tree->lock); if (prealloc) free_extent_state(prealloc); return err; } int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits, struct extent_state **cached_state) { return __set_extent_bit(tree, start, end, bits, NULL, NULL, cached_state, NULL); } /* * Convert all bits in a given range from one bit to another * * @tree: the io tree to search * @start: the start offset in bytes * @end: the end offset in bytes (inclusive) * @bits: the bits to set in this range * @clear_bits: the bits to clear in this range * @cached_state: state that we're going to cache * * This will go through and set bits for the given range. If any states exist * already in this range they are set with the given bit and cleared of the * clear_bits. This is only meant to be used by things that are mergeable, ie. * converting from say DELALLOC to DIRTY. This is not meant to be used with * boundary bits like LOCK. * * All allocations are done with GFP_NOFS. */ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits, u32 clear_bits, struct extent_state **cached_state) { struct extent_state *state; struct extent_state *prealloc = NULL; struct rb_node **p = NULL; struct rb_node *parent = NULL; int err = 0; u64 last_start; u64 last_end; bool first_iteration = true; btrfs_debug_check_extent_io_range(tree, start, end); trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits, clear_bits); again: if (!prealloc) { /* * Best effort, don't worry if extent state allocation fails * here for the first iteration. We might have a cached state * that matches exactly the target range, in which case no * extent state allocations are needed. We'll only know this * after locking the tree. */ prealloc = alloc_extent_state(GFP_NOFS); if (!prealloc && !first_iteration) return -ENOMEM; } spin_lock(&tree->lock); if (cached_state && *cached_state) { state = *cached_state; if (state->start <= start && state->end > start && extent_state_in_tree(state)) goto hit_next; } /* * This search will find all the extents that end after our range * starts. 
*/ state = tree_search_for_insert(tree, start, &p, &parent); if (!state) { prealloc = alloc_extent_state_atomic(prealloc); if (!prealloc) { err = -ENOMEM; goto out; } prealloc->start = start; prealloc->end = end; insert_state_fast(tree, prealloc, p, parent, bits, NULL); cache_state(prealloc, cached_state); prealloc = NULL; goto out; } hit_next: last_start = state->start; last_end = state->end; /* * | ---- desired range ---- | * | state | * * Just lock what we found and keep going. */ if (state->start == start && state->end <= end) { set_state_bits(tree, state, bits, NULL); cache_state(state, cached_state); state = clear_state_bit(tree, state, clear_bits, 0, NULL); if (last_end == (u64)-1) goto out; start = last_end + 1; if (start < end && state && state->start == start && !need_resched()) goto hit_next; goto search_again; } /* * | ---- desired range ---- | * | state | * or * | ------------- state -------------- | * * We need to split the extent we found, and may flip bits on second * half. * * If the extent we found extends past our range, we just split and * search again. It'll get split again the next time though. * * If the extent we found is inside our range, we set the desired bit * on it. */ if (state->start < start) { prealloc = alloc_extent_state_atomic(prealloc); if (!prealloc) { err = -ENOMEM; goto out; } err = split_state(tree, state, prealloc, start); if (err) extent_io_tree_panic(tree, state, "split", err); prealloc = NULL; if (err) goto out; if (state->end <= end) { set_state_bits(tree, state, bits, NULL); cache_state(state, cached_state); state = clear_state_bit(tree, state, clear_bits, 0, NULL); if (last_end == (u64)-1) goto out; start = last_end + 1; if (start < end && state && state->start == start && !need_resched()) goto hit_next; } goto search_again; } /* * | ---- desired range ---- | * | state | or | state | * * There's a hole, we need to insert something in it and ignore the * extent we found. */ if (state->start > start) { u64 this_end; struct extent_state *inserted_state; if (end < last_start) this_end = end; else this_end = last_start - 1; prealloc = alloc_extent_state_atomic(prealloc); if (!prealloc) { err = -ENOMEM; goto out; } /* * Avoid to free 'prealloc' if it can be merged with the later * extent. */ prealloc->start = start; prealloc->end = this_end; inserted_state = insert_state(tree, prealloc, bits, NULL); if (IS_ERR(inserted_state)) { err = PTR_ERR(inserted_state); extent_io_tree_panic(tree, prealloc, "insert", err); } cache_state(inserted_state, cached_state); if (inserted_state == prealloc) prealloc = NULL; start = this_end + 1; goto search_again; } /* * | ---- desired range ---- | * | state | * * We need to split the extent, and set the bit on the first half. */ if (state->start <= end && state->end > end) { prealloc = alloc_extent_state_atomic(prealloc); if (!prealloc) { err = -ENOMEM; goto out; } err = split_state(tree, state, prealloc, end + 1); if (err) extent_io_tree_panic(tree, state, "split", err); set_state_bits(tree, prealloc, bits, NULL); cache_state(prealloc, cached_state); clear_state_bit(tree, prealloc, clear_bits, 0, NULL); prealloc = NULL; goto out; } search_again: if (start > end) goto out; spin_unlock(&tree->lock); cond_resched(); first_iteration = false; goto again; out: spin_unlock(&tree->lock); if (prealloc) free_extent_state(prealloc); return err; } /* * Find the first range that has @bits not set. This range could start before * @start. 
* * @tree: the tree to search * @start: offset at/after which the found extent should start * @start_ret: records the beginning of the range * @end_ret: records the end of the range (inclusive) * @bits: the set of bits which must be unset * * Since an unallocated range is also considered one which doesn't have the bits * set, it's possible that @end_ret contains -1; this happens in case the range * spans (last_range_end, end of device]. In this case it's up to the caller to * trim @end_ret to the appropriate size. */ void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 *start_ret, u64 *end_ret, u32 bits) { struct extent_state *state; struct extent_state *prev = NULL, *next = NULL; spin_lock(&tree->lock); /* Find first extent with bits cleared */ while (1) { state = tree_search_prev_next(tree, start, &prev, &next); if (!state && !next && !prev) { /* * Tree is completely empty, send full range and let * caller deal with it */ *start_ret = 0; *end_ret = -1; goto out; } else if (!state && !next) { /* * We are past the last allocated chunk, set start at * the end of the last extent. */ *start_ret = prev->end + 1; *end_ret = -1; goto out; } else if (!state) { state = next; } /* * At this point 'state' either contains 'start' or start is * before 'state' */ if (in_range(start, state->start, state->end - state->start + 1)) { if (state->state & bits) { /* * |--range with bits set--| * | * start */ start = state->end + 1; } else { /* * 'start' falls within a range that doesn't * have the bits set, so take its start as the * beginning of the desired range * * |--range with bits cleared----| * | * start */ *start_ret = state->start; break; } } else { /* * |---prev range---|---hole/unset---|---node range---| * | * start * * or * * |---hole/unset--||--first node--| * 0 | * start */ if (prev) *start_ret = prev->end + 1; else *start_ret = 0; break; } } /* * Find the longest stretch from start until an entry which has the * bits set */ while (state) { if (state->end >= start && !(state->state & bits)) { *end_ret = state->end; } else { *end_ret = state->start - 1; break; } state = next_state(state); } out: spin_unlock(&tree->lock); } /* * Count the number of bytes in the tree that have a given bit(s) set for a * given range. * * @tree: The io tree to search. * @start: The start offset of the range. This value is updated to the * offset of the first byte found with the given bit(s), so it * can end up being bigger than the initial value. * @search_end: The end offset (inclusive value) of the search range. * @max_bytes: The maximum byte count we are interested in. The search stops * once it reaches this count. * @bits: The bits the range must have in order to be accounted for. * If multiple bits are set, then only subranges that have all * the bits set are accounted for. * @contig: Indicate if we should ignore holes in the range or not. If * this is true, then stop once we find a hole. * @cached_state: A cached state to be used across multiple calls to this * function in order to speed up searches. Use NULL if this is * called only once or if each call does not start where the * previous one ended. * * Returns the total number of bytes found within the given range that have * all given bits set. If the returned number of bytes is greater than zero * then @start is updated with the offset of the first byte with the bits set.
*/ u64 count_range_bits(struct extent_io_tree *tree, u64 *start, u64 search_end, u64 max_bytes, u32 bits, int contig, struct extent_state **cached_state) { struct extent_state *state = NULL; struct extent_state *cached; u64 cur_start = *start; u64 total_bytes = 0; u64 last = 0; int found = 0; if (WARN_ON(search_end < cur_start)) return 0; spin_lock(&tree->lock); if (!cached_state || !*cached_state) goto search; cached = *cached_state; if (!extent_state_in_tree(cached)) goto search; if (cached->start <= cur_start && cur_start <= cached->end) { state = cached; } else if (cached->start > cur_start) { struct extent_state *prev; /* * The cached state starts after our search range's start. Check * if the previous state record starts at or before the range we * are looking for, and if so, use it - this is a common case * when there are holes between records in the tree. If there is * no previous state record, we can start from our cached state. */ prev = prev_state(cached); if (!prev) state = cached; else if (prev->start <= cur_start && cur_start <= prev->end) state = prev; } /* * This search will find all the extents that end after our range * starts. */ search: if (!state) state = tree_search(tree, cur_start); while (state) { if (state->start > search_end) break; if (contig && found && state->start > last + 1) break; if (state->end >= cur_start && (state->state & bits) == bits) { total_bytes += min(search_end, state->end) + 1 - max(cur_start, state->start); if (total_bytes >= max_bytes) break; if (!found) { *start = max(cur_start, state->start); found = 1; } last = state->end; } else if (contig && found) { break; } state = next_state(state); } if (cached_state) { free_extent_state(*cached_state); *cached_state = state; if (state) refcount_inc(&state->refs); } spin_unlock(&tree->lock); return total_bytes; } /* * Check if the single @bit exists in the given range. */ bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit) { struct extent_state *state = NULL; bool bitset = false; ASSERT(is_power_of_2(bit)); spin_lock(&tree->lock); state = tree_search(tree, start); while (state && start <= end) { if (state->start > end) break; if (state->state & bit) { bitset = true; break; } /* If state->end is (u64)-1, start will overflow to 0 */ start = state->end + 1; if (start > end || start == 0) break; state = next_state(state); } spin_unlock(&tree->lock); return bitset; } /* * Check if the whole range [@start,@end) contains the single @bit set. */ bool test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit, struct extent_state *cached) { struct extent_state *state = NULL; bool bitset = true; ASSERT(is_power_of_2(bit)); spin_lock(&tree->lock); if (cached && extent_state_in_tree(cached) && cached->start <= start && cached->end > start) state = cached; else state = tree_search(tree, start); while (state && start <= end) { if (state->start > start) { bitset = false; break; } if (state->start > end) break; if ((state->state & bit) == 0) { bitset = false; break; } if (state->end == (u64)-1) break; /* * Last entry (if state->end is (u64)-1 and overflow happens), * or next entry starts after the range. */ start = state->end + 1; if (start > end || start == 0) break; state = next_state(state); } /* We ran out of states and were still inside of our range. 
*/ if (!state) bitset = false; spin_unlock(&tree->lock); return bitset; } /* Wrappers around set/clear extent bit */ int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 bits, struct extent_changeset *changeset) { /* * We don't support EXTENT_LOCKED yet, as current changeset will * record any bits changed, so for EXTENT_LOCKED case, it will * either fail with -EEXIST or changeset will record the whole * range. */ ASSERT(!(bits & EXTENT_LOCKED)); return __set_extent_bit(tree, start, end, bits, NULL, NULL, NULL, changeset); } int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 bits, struct extent_changeset *changeset) { /* * Don't support EXTENT_LOCKED case, same reason as * set_record_extent_bits(). */ ASSERT(!(bits & EXTENT_LOCKED)); return __clear_extent_bit(tree, start, end, bits, NULL, changeset); } int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state **cached) { int err; u64 failed_start; err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, &failed_start, NULL, cached, NULL); if (err == -EEXIST) { if (failed_start > start) clear_extent_bit(tree, start, failed_start - 1, EXTENT_LOCKED, cached); return 0; } return 1; } /* * Either insert or lock the state struct between start and end. Use mask to * tell us if waiting is desired. */ int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state **cached_state) { struct extent_state *failed_state = NULL; int err; u64 failed_start; err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, &failed_start, &failed_state, cached_state, NULL); while (err == -EEXIST) { if (failed_start != start) clear_extent_bit(tree, start, failed_start - 1, EXTENT_LOCKED, cached_state); wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED, &failed_state); err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, &failed_start, &failed_state, cached_state, NULL); } return err; } void __cold extent_state_free_cachep(void) { btrfs_extent_state_leak_debug_check(); kmem_cache_destroy(extent_state_cache); } int __init extent_state_init_cachep(void) { extent_state_cache = kmem_cache_create("btrfs_extent_state", sizeof(struct extent_state), 0, 0, NULL); if (!extent_state_cache) return -ENOMEM; return 0; }
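/*
 * Illustrative sketch only, not part of the file above: a plausible caller
 * pattern for the extent io tree API defined here. The helper name
 * demo_mark_delalloc_range() and the error handling are invented for the
 * example; lock_extent(), set_extent_bit(), count_range_bits(),
 * clear_extent_bit() and free_extent_state() are used with the signatures
 * visible above. Reference counting of the cached state is simplified.
 */
static int demo_mark_delalloc_range(struct extent_io_tree *tree, u64 start, u64 end)
{
	struct extent_state *cached = NULL;
	u64 found_start = start;
	u64 bytes;
	int ret;

	/* Serialize against other users of the byte range. */
	ret = lock_extent(tree, start, end, &cached);
	if (ret < 0)
		return ret;

	/* Mark the whole range as delalloc; allocations may sleep (GFP_NOFS). */
	ret = set_extent_bit(tree, start, end, EXTENT_DELALLOC, &cached);

	/* Count how many bytes in the range now carry the bit (contig == 1). */
	bytes = count_range_bits(tree, &found_start, end, end - start + 1,
				 EXTENT_DELALLOC, 1, &cached);

	/* Drop the lock bit again; the cached state speeds up the lookup. */
	clear_extent_bit(tree, start, end, EXTENT_LOCKED, &cached);
	if (cached)
		free_extent_state(cached);

	return ret;
}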
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2001 Jean-Fredric Clere, Nikolas Zimmermann, Georg Acher * Mark Cave-Ayland, Carlo E Prelz, Dick Streefland * Copyright (c) 2002, 2003 Tuukka Toivonen * Copyright (c) 2008 Erik Andrén * * P/N 861037: Sensor HDCS1000 ASIC STV0600 * P/N 861050-0010: Sensor HDCS1000 ASIC STV0600 * P/N 861050-0020: Sensor Photobit PB100 ASIC STV0600-1 - QuickCam Express * P/N 861055: Sensor ST VV6410 ASIC STV0610 - LEGO cam * P/N 861075-0040: Sensor HDCS1000 ASIC * P/N 961179-0700: Sensor ST VV6410 ASIC STV0602 - Dexxa WebCam USB * P/N 861040-0000: Sensor ST VV6410 ASIC STV0610 - QuickCam Web */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/input.h> #include "stv06xx_sensor.h" MODULE_AUTHOR("Erik Andrén"); MODULE_DESCRIPTION("STV06XX USB Camera Driver"); MODULE_LICENSE("GPL"); static bool dump_bridge; static bool dump_sensor; int stv06xx_write_bridge(struct sd *sd, u16 address, u16 i2c_data) { int err; struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; struct
usb_device *udev = sd->gspca_dev.dev; __u8 *buf = sd->gspca_dev.usb_buf; u8 len = (i2c_data > 0xff) ? 2 : 1; buf[0] = i2c_data & 0xff; buf[1] = (i2c_data >> 8) & 0xff; err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x04, 0x40, address, 0, buf, len, STV06XX_URB_MSG_TIMEOUT); gspca_dbg(gspca_dev, D_CONF, "Written 0x%x to address 0x%x, status: %d\n", i2c_data, address, err); return (err < 0) ? err : 0; } int stv06xx_read_bridge(struct sd *sd, u16 address, u8 *i2c_data) { int err; struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; struct usb_device *udev = sd->gspca_dev.dev; __u8 *buf = sd->gspca_dev.usb_buf; err = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x04, 0xc0, address, 0, buf, 1, STV06XX_URB_MSG_TIMEOUT); *i2c_data = buf[0]; gspca_dbg(gspca_dev, D_CONF, "Reading 0x%x from address 0x%x, status %d\n", *i2c_data, address, err); return (err < 0) ? err : 0; } /* Wraps the normal write sensor bytes / words functions for writing a single value */ int stv06xx_write_sensor(struct sd *sd, u8 address, u16 value) { if (sd->sensor->i2c_len == 2) { u16 data[2] = { address, value }; return stv06xx_write_sensor_words(sd, data, 1); } else { u8 data[2] = { address, value }; return stv06xx_write_sensor_bytes(sd, data, 1); } } static int stv06xx_write_sensor_finish(struct sd *sd) { int err = 0; if (sd->bridge == BRIDGE_STV610) { struct usb_device *udev = sd->gspca_dev.dev; __u8 *buf = sd->gspca_dev.usb_buf; buf[0] = 0; err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x04, 0x40, 0x1704, 0, buf, 1, STV06XX_URB_MSG_TIMEOUT); } return (err < 0) ? err : 0; } int stv06xx_write_sensor_bytes(struct sd *sd, const u8 *data, u8 len) { int err, i, j; struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; struct usb_device *udev = sd->gspca_dev.dev; __u8 *buf = sd->gspca_dev.usb_buf; gspca_dbg(gspca_dev, D_CONF, "I2C: Command buffer contains %d entries\n", len); for (i = 0; i < len;) { /* Build the command buffer */ memset(buf, 0, I2C_BUFFER_LENGTH); for (j = 0; j < I2C_MAX_BYTES && i < len; j++, i++) { buf[j] = data[2*i]; buf[0x10 + j] = data[2*i+1]; gspca_dbg(gspca_dev, D_CONF, "I2C: Writing 0x%02x to reg 0x%02x\n", data[2*i+1], data[2*i]); } buf[0x20] = sd->sensor->i2c_addr; buf[0x21] = j - 1; /* Number of commands to send - 1 */ buf[0x22] = I2C_WRITE_CMD; err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x04, 0x40, 0x0400, 0, buf, I2C_BUFFER_LENGTH, STV06XX_URB_MSG_TIMEOUT); if (err < 0) return err; } return stv06xx_write_sensor_finish(sd); } int stv06xx_write_sensor_words(struct sd *sd, const u16 *data, u8 len) { int err, i, j; struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; struct usb_device *udev = sd->gspca_dev.dev; __u8 *buf = sd->gspca_dev.usb_buf; gspca_dbg(gspca_dev, D_CONF, "I2C: Command buffer contains %d entries\n", len); for (i = 0; i < len;) { /* Build the command buffer */ memset(buf, 0, I2C_BUFFER_LENGTH); for (j = 0; j < I2C_MAX_WORDS && i < len; j++, i++) { buf[j] = data[2*i]; buf[0x10 + j * 2] = data[2*i+1]; buf[0x10 + j * 2 + 1] = data[2*i+1] >> 8; gspca_dbg(gspca_dev, D_CONF, "I2C: Writing 0x%04x to reg 0x%02x\n", data[2*i+1], data[2*i]); } buf[0x20] = sd->sensor->i2c_addr; buf[0x21] = j - 1; /* Number of commands to send - 1 */ buf[0x22] = I2C_WRITE_CMD; err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x04, 0x40, 0x0400, 0, buf, I2C_BUFFER_LENGTH, STV06XX_URB_MSG_TIMEOUT); if (err < 0) return err; } return stv06xx_write_sensor_finish(sd); } int stv06xx_read_sensor(struct sd *sd, const u8 address, u16 *value) { int err; struct gspca_dev *gspca_dev = (struct 
gspca_dev *)sd; struct usb_device *udev = sd->gspca_dev.dev; __u8 *buf = sd->gspca_dev.usb_buf; err = stv06xx_write_bridge(sd, STV_I2C_FLUSH, sd->sensor->i2c_flush); if (err < 0) return err; /* Clear mem */ memset(buf, 0, I2C_BUFFER_LENGTH); buf[0] = address; buf[0x20] = sd->sensor->i2c_addr; buf[0x21] = 0; /* Read I2C register */ buf[0x22] = I2C_READ_CMD; err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x04, 0x40, 0x1400, 0, buf, I2C_BUFFER_LENGTH, STV06XX_URB_MSG_TIMEOUT); if (err < 0) { pr_err("I2C: Read error writing address: %d\n", err); return err; } err = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x04, 0xc0, 0x1410, 0, buf, sd->sensor->i2c_len, STV06XX_URB_MSG_TIMEOUT); if (sd->sensor->i2c_len == 2) *value = buf[0] | (buf[1] << 8); else *value = buf[0]; gspca_dbg(gspca_dev, D_CONF, "I2C: Read 0x%x from address 0x%x, status: %d\n", *value, address, err); return (err < 0) ? err : 0; } /* Dumps all bridge registers */ static void stv06xx_dump_bridge(struct sd *sd) { int i; u8 data, buf; pr_info("Dumping all stv06xx bridge registers\n"); for (i = 0x1400; i < 0x160f; i++) { stv06xx_read_bridge(sd, i, &data); pr_info("Read 0x%x from address 0x%x\n", data, i); } pr_info("Testing stv06xx bridge registers for writability\n"); for (i = 0x1400; i < 0x160f; i++) { stv06xx_read_bridge(sd, i, &data); buf = data; stv06xx_write_bridge(sd, i, 0xff); stv06xx_read_bridge(sd, i, &data); if (data == 0xff) pr_info("Register 0x%x is read/write\n", i); else if (data != buf) pr_info("Register 0x%x is read/write, but only partially\n", i); else pr_info("Register 0x%x is read-only\n", i); stv06xx_write_bridge(sd, i, buf); } } /* this function is called at probe and resume time */ static int stv06xx_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int err; gspca_dbg(gspca_dev, D_PROBE, "Initializing camera\n"); /* Let the usb init settle for a bit before performing the initialization */ msleep(250); err = sd->sensor->init(sd); if (dump_sensor && sd->sensor->dump) sd->sensor->dump(sd); return (err < 0) ? err : 0; } /* this function is called at probe time */ static int stv06xx_init_controls(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; gspca_dbg(gspca_dev, D_PROBE, "Initializing controls\n"); gspca_dev->vdev.ctrl_handler = &gspca_dev->ctrl_handler; return sd->sensor->init_controls(sd); } /* Start the camera */ static int stv06xx_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; struct usb_host_interface *alt; struct usb_interface *intf; int err, packet_size; intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface); alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt); if (!alt) { gspca_err(gspca_dev, "Couldn't get altsetting\n"); return -EIO; } if (alt->desc.bNumEndpoints < 1) return -ENODEV; packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); err = stv06xx_write_bridge(sd, STV_ISO_SIZE_L, packet_size); if (err < 0) return err; /* Prepare the sensor for start */ err = sd->sensor->start(sd); if (err < 0) goto out; /* Start isochronous streaming */ err = stv06xx_write_bridge(sd, STV_ISO_ENABLE, 1); out: if (err < 0) gspca_dbg(gspca_dev, D_STREAM, "Starting stream failed\n"); else gspca_dbg(gspca_dev, D_STREAM, "Started streaming\n"); return (err < 0) ? 
err : 0; } static int stv06xx_isoc_init(struct gspca_dev *gspca_dev) { struct usb_interface_cache *intfc; struct usb_host_interface *alt; struct sd *sd = (struct sd *) gspca_dev; intfc = gspca_dev->dev->actconfig->intf_cache[0]; if (intfc->num_altsetting < 2) return -ENODEV; alt = &intfc->altsetting[1]; if (alt->desc.bNumEndpoints < 1) return -ENODEV; /* Start isoc bandwidth "negotiation" at max isoc bandwidth */ alt->endpoint[0].desc.wMaxPacketSize = cpu_to_le16(sd->sensor->max_packet_size[gspca_dev->curr_mode]); return 0; } static int stv06xx_isoc_nego(struct gspca_dev *gspca_dev) { int ret, packet_size, min_packet_size; struct usb_host_interface *alt; struct sd *sd = (struct sd *) gspca_dev; /* * Existence of altsetting and endpoint was verified in * stv06xx_isoc_init() */ alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1]; packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); min_packet_size = sd->sensor->min_packet_size[gspca_dev->curr_mode]; if (packet_size <= min_packet_size) return -EIO; packet_size -= 100; if (packet_size < min_packet_size) packet_size = min_packet_size; alt->endpoint[0].desc.wMaxPacketSize = cpu_to_le16(packet_size); ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, 1); if (ret < 0) gspca_err(gspca_dev, "set alt 1 err %d\n", ret); return ret; } static void stv06xx_stopN(struct gspca_dev *gspca_dev) { int err; struct sd *sd = (struct sd *) gspca_dev; /* stop ISO-streaming */ err = stv06xx_write_bridge(sd, STV_ISO_ENABLE, 0); if (err < 0) goto out; err = sd->sensor->stop(sd); out: if (err < 0) gspca_dbg(gspca_dev, D_STREAM, "Failed to stop stream\n"); else gspca_dbg(gspca_dev, D_STREAM, "Stopped streaming\n"); } /* * Analyse an USB packet of the data stream and store it appropriately. * Each packet contains an integral number of chunks. Each chunk has * 2-bytes identification, followed by 2-bytes that describe the chunk * length. Known/guessed chunk identifications are: * 8001/8005/C001/C005 - Begin new frame * 8002/8006/C002/C006 - End frame * 0200/4200 - Contains actual image data, bayer or compressed * 0005 - 11 bytes of unknown data * 0100 - 2 bytes of unknown data * The 0005 and 0100 chunks seem to appear only in compressed stream. */ static void stv06xx_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { struct sd *sd = (struct sd *) gspca_dev; gspca_dbg(gspca_dev, D_PACK, "Packet of length %d arrived\n", len); /* A packet may contain several frames loop until the whole packet is reached */ while (len) { int id, chunk_len; if (len < 4) { gspca_dbg(gspca_dev, D_PACK, "Packet is smaller than 4 bytes\n"); return; } /* Capture the id */ id = (data[0] << 8) | data[1]; /* Capture the chunk length */ chunk_len = (data[2] << 8) | data[3]; gspca_dbg(gspca_dev, D_PACK, "Chunk id: %x, length: %d\n", id, chunk_len); data += 4; len -= 4; if (len < chunk_len) { gspca_err(gspca_dev, "URB packet length is smaller than the specified chunk length\n"); gspca_dev->last_packet_type = DISCARD_PACKET; return; } /* First byte seem to be 02=data 2nd byte is unknown??? */ if (sd->bridge == BRIDGE_ST6422 && (id & 0xff00) == 0x0200) goto frame_data; switch (id) { case 0x0200: case 0x4200: frame_data: gspca_dbg(gspca_dev, D_PACK, "Frame data packet detected\n"); if (sd->to_skip) { int skip = (sd->to_skip < chunk_len) ? 
sd->to_skip : chunk_len; data += skip; len -= skip; chunk_len -= skip; sd->to_skip -= skip; } gspca_frame_add(gspca_dev, INTER_PACKET, data, chunk_len); break; case 0x8001: case 0x8005: case 0xc001: case 0xc005: gspca_dbg(gspca_dev, D_PACK, "Starting new frame\n"); /* Create a new frame, chunk length should be zero */ gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0); if (sd->bridge == BRIDGE_ST6422) sd->to_skip = gspca_dev->pixfmt.width * 4; if (chunk_len) gspca_err(gspca_dev, "Chunk length is non-zero on a SOF\n"); break; case 0x8002: case 0x8006: case 0xc002: gspca_dbg(gspca_dev, D_PACK, "End of frame detected\n"); /* Complete the last frame (if any) */ gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); if (chunk_len) gspca_err(gspca_dev, "Chunk length is non-zero on a EOF\n"); break; case 0x0005: gspca_dbg(gspca_dev, D_PACK, "Chunk 0x005 detected\n"); /* Unknown chunk with 11 bytes of data, occurs just before end of each frame in compressed mode */ break; case 0x0100: gspca_dbg(gspca_dev, D_PACK, "Chunk 0x0100 detected\n"); /* Unknown chunk with 2 bytes of data, occurs 2-3 times per USB interrupt */ break; case 0x42ff: gspca_dbg(gspca_dev, D_PACK, "Chunk 0x42ff detected\n"); /* Special chunk seen sometimes on the ST6422 */ break; default: gspca_dbg(gspca_dev, D_PACK, "Unknown chunk 0x%04x detected\n", id); /* Unknown chunk */ } data += chunk_len; len -= chunk_len; } } #if IS_ENABLED(CONFIG_INPUT) static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* interrupt packet data */ int len) /* interrupt packet length */ { int ret = -EINVAL; if (len == 1 && (data[0] == 0x80 || data[0] == 0x10)) { input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1); input_sync(gspca_dev->input_dev); ret = 0; } if (len == 1 && (data[0] == 0x88 || data[0] == 0x11)) { input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0); input_sync(gspca_dev->input_dev); ret = 0; } return ret; } #endif static int stv06xx_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id); static void stv06xx_probe_error(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *)gspca_dev; kfree(sd->sensor_priv); sd->sensor_priv = NULL; } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = stv06xx_config, .init = stv06xx_init, .init_controls = stv06xx_init_controls, .probe_error = stv06xx_probe_error, .start = stv06xx_start, .stopN = stv06xx_stopN, .pkt_scan = stv06xx_pkt_scan, .isoc_init = stv06xx_isoc_init, .isoc_nego = stv06xx_isoc_nego, #if IS_ENABLED(CONFIG_INPUT) .int_pkt_scan = sd_int_pkt_scan, #endif }; /* This function is called at probe time */ static int stv06xx_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; gspca_dbg(gspca_dev, D_PROBE, "Configuring camera\n"); sd->bridge = id->driver_info; gspca_dev->sd_desc = &sd_desc; if (dump_bridge) stv06xx_dump_bridge(sd); sd->sensor = &stv06xx_sensor_st6422; if (!sd->sensor->probe(sd)) return 0; sd->sensor = &stv06xx_sensor_vv6410; if (!sd->sensor->probe(sd)) return 0; sd->sensor = &stv06xx_sensor_hdcs1x00; if (!sd->sensor->probe(sd)) return 0; sd->sensor = &stv06xx_sensor_hdcs1020; if (!sd->sensor->probe(sd)) return 0; sd->sensor = &stv06xx_sensor_pb0100; if (!sd->sensor->probe(sd)) return 0; sd->sensor = NULL; return -ENODEV; } /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x046d, 0x0840), .driver_info = BRIDGE_STV600 }, /* QuickCam Express */ {USB_DEVICE(0x046d, 0x0850), .driver_info = 
BRIDGE_STV610 }, /* LEGO cam / QuickCam Web */ {USB_DEVICE(0x046d, 0x0870), .driver_info = BRIDGE_STV602 }, /* Dexxa WebCam USB */ {USB_DEVICE(0x046D, 0x08F0), .driver_info = BRIDGE_ST6422 }, /* QuickCam Messenger */ {USB_DEVICE(0x046D, 0x08F5), .driver_info = BRIDGE_ST6422 }, /* QuickCam Communicate */ {USB_DEVICE(0x046D, 0x08F6), .driver_info = BRIDGE_ST6422 }, /* QuickCam Messenger (new) */ {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static void sd_disconnect(struct usb_interface *intf) { struct gspca_dev *gspca_dev = usb_get_intfdata(intf); struct sd *sd = (struct sd *) gspca_dev; void *priv = sd->sensor_priv; gspca_dbg(gspca_dev, D_PROBE, "Disconnecting the stv06xx device\n"); sd->sensor = NULL; gspca_disconnect(intf); kfree(priv); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = sd_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver); module_param(dump_bridge, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(dump_bridge, "Dumps all usb bridge registers at startup"); module_param(dump_sensor, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(dump_sensor, "Dumps all sensor registers at startup");
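/*
 * Illustrative, standalone user-space sketch, not part of the driver above:
 * the chunk framing that stv06xx_pkt_scan() documents is "2 bytes of
 * big-endian id, 2 bytes of big-endian length, then 'length' bytes of
 * payload". The buffer contents below are invented for the example; only the
 * framing rule and the 0x8001/0x0200 chunk ids come from the driver comment.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static void demo_walk_chunks(const uint8_t *data, size_t len)
{
	while (len >= 4) {
		unsigned int id = (data[0] << 8) | data[1];
		unsigned int chunk_len = (data[2] << 8) | data[3];

		data += 4;
		len -= 4;
		if (chunk_len > len) {
			printf("truncated chunk 0x%04x\n", id);
			return;
		}
		printf("chunk id 0x%04x, %u byte(s) of payload\n", id, chunk_len);
		data += chunk_len;
		len -= chunk_len;
	}
}

int main(void)
{
	/* Start-of-frame marker (0x8001, no payload) followed by 4 data bytes (0x0200). */
	static const uint8_t pkt[] = {
		0x80, 0x01, 0x00, 0x00,
		0x02, 0x00, 0x00, 0x04, 0xaa, 0xbb, 0xcc, 0xdd,
	};

	demo_walk_chunks(pkt, sizeof(pkt));
	return 0;
}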
/* SPDX-License-Identifier: GPL-2.0 */ /* * BlueZ - Bluetooth protocol stack for Linux * * Copyright (C) 2021 Intel Corporation */ #include <asm/unaligned.h> void eir_create(struct hci_dev *hdev, u8 *data); u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr); u8 eir_create_scan_rsp(struct hci_dev *hdev, u8 instance, u8 *ptr); u8 eir_create_per_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr); u8 eir_append_local_name(struct hci_dev *hdev, u8 *eir, u8 ad_len); u8 eir_append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len); u8 eir_append_service_data(u8 *eir, u16 eir_len, u16 uuid, u8 *data, u8 data_len); static inline u16 eir_precalc_len(u8 data_len) { return sizeof(u8) * 2 + data_len; } static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data, u8 data_len) { eir[eir_len++] = sizeof(type) + data_len; eir[eir_len++] = type; memcpy(&eir[eir_len], data, data_len); eir_len += data_len; return eir_len; } static inline u16 eir_append_le16(u8 *eir, u16 eir_len, u8 type, u16 data) { eir[eir_len++] = sizeof(type) + sizeof(data); eir[eir_len++] = type; put_unaligned_le16(data, &eir[eir_len]); eir_len += sizeof(data); return eir_len; } static inline u16 eir_skb_put_data(struct sk_buff *skb, u8 type, u8 *data, u8 data_len) { u8 *eir; u16 eir_len; eir_len = eir_precalc_len(data_len); eir = skb_put(skb, eir_len); WARN_ON(sizeof(type) + data_len > U8_MAX); eir[0] = sizeof(type) + data_len; eir[1] = type; memcpy(&eir[2], data, data_len); return eir_len; } static inline void *eir_get_data(u8 *eir, size_t eir_len, u8 type, size_t *data_len) { size_t parsed = 0; if (eir_len < 2) return NULL; while (parsed < eir_len - 1) { u8 field_len = eir[0]; if (field_len == 0) break; parsed += field_len + 1; if (parsed > eir_len) break; if (eir[1] != type) { eir += field_len + 1; continue; } /* Zero length data */ if (field_len == 1) return NULL; if (data_len) *data_len = field_len - 1; return &eir[2]; } return NULL; } void *eir_get_service_data(u8 *eir, size_t eir_len, u16 uuid, size_t *len);
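/*
 * Illustrative sketch only, not part of the header above: building a small
 * advertising-data buffer with the helpers defined in this header. The
 * function name demo_build_eir(), the numeric AD type values (0x09 is the
 * Bluetooth "Complete Local Name" type, 0x19 is "Appearance") and the
 * assumption that the caller sized 'buf' with eir_precalc_len() are all
 * example choices; eir_append_data() and eir_append_le16() are used exactly
 * as declared above, each producing a "length, type, data" entry.
 */
static inline u16 demo_build_eir(u8 *buf, u8 *name, u8 name_len, u16 appearance)
{
	u16 len = 0;

	/* Complete local name: length byte, type byte, then the raw name. */
	len = eir_append_data(buf, len, 0x09, name, name_len);

	/* 16-bit appearance value, stored little endian after the type byte. */
	len = eir_append_le16(buf, len, 0x19, appearance);

	return len;
}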
// SPDX-License-Identifier: GPL-2.0-only /* * linux/fs/fat/misc.c * * Written 1992,1993 by Werner Almesberger * 22/11/2000 - Fixed fat_date_unix2dos for dates earlier than 01/01/1980 * and date_dos2unix for date==0 by Igor Zhbanov(bsg@uniyar.ac.ru) */ #include "fat.h" #include <linux/iversion.h> /* * fat_fs_error reports a file system problem that might indicate a data * corruption/inconsistency. Depending on the 'errors' mount option, either * panic() is called, or an error message is printed and nothing else is done, * or the filesystem is remounted read-only (default behavior). * In case the file system is remounted read-only, it can be made writable * again by remounting it. */ void __fat_fs_error(struct super_block *sb, int report, const char *fmt, ...) { struct fat_mount_options *opts = &MSDOS_SB(sb)->options; va_list args; struct va_format vaf; if (report) { va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; fat_msg(sb, KERN_ERR, "error, %pV", &vaf); va_end(args); } if (opts->errors == FAT_ERRORS_PANIC) panic("FAT-fs (%s): fs panic from previous error\n", sb->s_id); else if (opts->errors == FAT_ERRORS_RO && !sb_rdonly(sb)) { sb->s_flags |= SB_RDONLY; fat_msg(sb, KERN_ERR, "Filesystem has been set read-only"); } } EXPORT_SYMBOL_GPL(__fat_fs_error); /** * _fat_msg() - Print a preformatted FAT message based on a superblock. * @sb: A pointer to a &struct super_block * @level: A Kernel printk level constant * @fmt: The printf-style format string to print. * * Everything that is not fat_fs_error() should be fat_msg(). * * fat_msg() wraps _fat_msg() for printk indexing. */ void _fat_msg(struct super_block *sb, const char *level, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; _printk(FAT_PRINTK_PREFIX "%pV\n", level, sb->s_id, &vaf); va_end(args); } /* Flushes the number of free clusters on FAT32 */ /* XXX: Need to write one per FSINFO block.
Currently only writes 1 */ int fat_clusters_flush(struct super_block *sb) { struct msdos_sb_info *sbi = MSDOS_SB(sb); struct buffer_head *bh; struct fat_boot_fsinfo *fsinfo; if (!is_fat32(sbi)) return 0; bh = sb_bread(sb, sbi->fsinfo_sector); if (bh == NULL) { fat_msg(sb, KERN_ERR, "bread failed in fat_clusters_flush"); return -EIO; } fsinfo = (struct fat_boot_fsinfo *)bh->b_data; /* Sanity check */ if (!IS_FSINFO(fsinfo)) { fat_msg(sb, KERN_ERR, "Invalid FSINFO signature: " "0x%08x, 0x%08x (sector = %lu)", le32_to_cpu(fsinfo->signature1), le32_to_cpu(fsinfo->signature2), sbi->fsinfo_sector); } else { if (sbi->free_clusters != -1) fsinfo->free_clusters = cpu_to_le32(sbi->free_clusters); if (sbi->prev_free != -1) fsinfo->next_cluster = cpu_to_le32(sbi->prev_free); mark_buffer_dirty(bh); } brelse(bh); return 0; } /* * fat_chain_add() adds a new cluster to the chain of clusters represented * by inode. */ int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster) { struct super_block *sb = inode->i_sb; struct msdos_sb_info *sbi = MSDOS_SB(sb); int ret, new_fclus, last; /* * We must locate the last cluster of the file to add this new * one (new_dclus) to the end of the link list (the FAT). */ last = new_fclus = 0; if (MSDOS_I(inode)->i_start) { int fclus, dclus; ret = fat_get_cluster(inode, FAT_ENT_EOF, &fclus, &dclus); if (ret < 0) return ret; new_fclus = fclus + 1; last = dclus; } /* add new one to the last of the cluster chain */ if (last) { struct fat_entry fatent; fatent_init(&fatent); ret = fat_ent_read(inode, &fatent, last); if (ret >= 0) { int wait = inode_needs_sync(inode); ret = fat_ent_write(inode, &fatent, new_dclus, wait); fatent_brelse(&fatent); } if (ret < 0) return ret; /* * FIXME:Although we can add this cache, fat_cache_add() is * assuming to be called after linear search with fat_cache_id. */ // fat_cache_add(inode, new_fclus, new_dclus); } else { MSDOS_I(inode)->i_start = new_dclus; MSDOS_I(inode)->i_logstart = new_dclus; /* * Since generic_write_sync() synchronizes regular files later, * we sync here only directories. */ if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode)) { ret = fat_sync_inode(inode); if (ret) return ret; } else mark_inode_dirty(inode); } if (new_fclus != (inode->i_blocks >> (sbi->cluster_bits - 9))) { fat_fs_error(sb, "clusters badly computed (%d != %llu)", new_fclus, (llu)(inode->i_blocks >> (sbi->cluster_bits - 9))); fat_cache_inval_inode(inode); } inode->i_blocks += nr_cluster << (sbi->cluster_bits - 9); return 0; } /* * The epoch of FAT timestamp is 1980. * : bits : value * date: 0 - 4: day (1 - 31) * date: 5 - 8: month (1 - 12) * date: 9 - 15: year (0 - 127) from 1980 * time: 0 - 4: sec (0 - 29) 2sec counts * time: 5 - 10: min (0 - 59) * time: 11 - 15: hour (0 - 23) */ #define SECS_PER_MIN 60 #define SECS_PER_HOUR (60 * 60) #define SECS_PER_DAY (SECS_PER_HOUR * 24) /* days between 1.1.70 and 1.1.80 (2 leap days) */ #define DAYS_DELTA (365 * 10 + 2) /* 120 (2100 - 1980) isn't leap year */ #define YEAR_2100 120 #define IS_LEAP_YEAR(y) (!((y) & 3) && (y) != YEAR_2100) /* Linear day numbers of the respective 1sts in non-leap years. */ static long days_in_year[] = { /* Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec */ 0, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 0, 0, 0, }; static inline int fat_tz_offset(const struct msdos_sb_info *sbi) { return (sbi->options.tz_set ? -sbi->options.time_offset : sys_tz.tz_minuteswest) * SECS_PER_MIN; } /* Convert a FAT time/date pair to a UNIX date (seconds since 1 1 70). 
*/ void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec64 *ts, __le16 __time, __le16 __date, u8 time_cs) { u16 time = le16_to_cpu(__time), date = le16_to_cpu(__date); time64_t second; long day, leap_day, month, year; year = date >> 9; month = max(1, (date >> 5) & 0xf); day = max(1, date & 0x1f) - 1; leap_day = (year + 3) / 4; if (year > YEAR_2100) /* 2100 isn't leap year */ leap_day--; if (IS_LEAP_YEAR(year) && month > 2) leap_day++; second = (time & 0x1f) << 1; second += ((time >> 5) & 0x3f) * SECS_PER_MIN; second += (time >> 11) * SECS_PER_HOUR; second += (time64_t)(year * 365 + leap_day + days_in_year[month] + day + DAYS_DELTA) * SECS_PER_DAY; second += fat_tz_offset(sbi); if (time_cs) { ts->tv_sec = second + (time_cs / 100); ts->tv_nsec = (time_cs % 100) * 10000000; } else { ts->tv_sec = second; ts->tv_nsec = 0; } } /* Export fat_time_fat2unix() for the fat_test KUnit tests. */ EXPORT_SYMBOL_GPL(fat_time_fat2unix); /* Convert linear UNIX date to a FAT time/date pair. */ void fat_time_unix2fat(struct msdos_sb_info *sbi, struct timespec64 *ts, __le16 *time, __le16 *date, u8 *time_cs) { struct tm tm; time64_to_tm(ts->tv_sec, -fat_tz_offset(sbi), &tm); /* FAT can only support year between 1980 to 2107 */ if (tm.tm_year < 1980 - 1900) { *time = 0; *date = cpu_to_le16((0 << 9) | (1 << 5) | 1); if (time_cs) *time_cs = 0; return; } if (tm.tm_year > 2107 - 1900) { *time = cpu_to_le16((23 << 11) | (59 << 5) | 29); *date = cpu_to_le16((127 << 9) | (12 << 5) | 31); if (time_cs) *time_cs = 199; return; } /* from 1900 -> from 1980 */ tm.tm_year -= 80; /* 0~11 -> 1~12 */ tm.tm_mon++; /* 0~59 -> 0~29(2sec counts) */ tm.tm_sec >>= 1; *time = cpu_to_le16(tm.tm_hour << 11 | tm.tm_min << 5 | tm.tm_sec); *date = cpu_to_le16(tm.tm_year << 9 | tm.tm_mon << 5 | tm.tm_mday); if (time_cs) *time_cs = (ts->tv_sec & 1) * 100 + ts->tv_nsec / 10000000; } EXPORT_SYMBOL_GPL(fat_time_unix2fat); static inline struct timespec64 fat_timespec64_trunc_2secs(struct timespec64 ts) { return (struct timespec64){ ts.tv_sec & ~1ULL, 0 }; } /* * truncate atime to 24 hour granularity (00:00:00 in local timezone) */ struct timespec64 fat_truncate_atime(const struct msdos_sb_info *sbi, const struct timespec64 *ts) { /* to localtime */ time64_t seconds = ts->tv_sec - fat_tz_offset(sbi); s32 remainder; div_s64_rem(seconds, SECS_PER_DAY, &remainder); /* to day boundary, and back to unix time */ seconds = seconds + fat_tz_offset(sbi) - remainder; return (struct timespec64){ seconds, 0 }; } /* * truncate mtime to 2 second granularity */ struct timespec64 fat_truncate_mtime(const struct msdos_sb_info *sbi, const struct timespec64 *ts) { return fat_timespec64_trunc_2secs(*ts); } /* * truncate the various times with appropriate granularity: * all times in root node are always 0 */ int fat_truncate_time(struct inode *inode, struct timespec64 *now, int flags) { struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb); struct timespec64 ts; if (inode->i_ino == MSDOS_ROOT_INO) return 0; if (now == NULL) { now = &ts; ts = current_time(inode); } if (flags & S_ATIME) inode_set_atime_to_ts(inode, fat_truncate_atime(sbi, now)); /* * ctime and mtime share the same on-disk field, and should be * identical in memory. all mtime updates will be applied to ctime, * but ctime updates are ignored. 
*/ if (flags & S_MTIME) inode_set_mtime_to_ts(inode, inode_set_ctime_to_ts(inode, fat_truncate_mtime(sbi, now))); return 0; } EXPORT_SYMBOL_GPL(fat_truncate_time); int fat_update_time(struct inode *inode, int flags) { int dirty_flags = 0; if (inode->i_ino == MSDOS_ROOT_INO) return 0; if (flags & (S_ATIME | S_CTIME | S_MTIME)) { fat_truncate_time(inode, NULL, flags); if (inode->i_sb->s_flags & SB_LAZYTIME) dirty_flags |= I_DIRTY_TIME; else dirty_flags |= I_DIRTY_SYNC; } __mark_inode_dirty(inode, dirty_flags); return 0; } EXPORT_SYMBOL_GPL(fat_update_time); int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs) { int i, err = 0; for (i = 0; i < nr_bhs; i++) write_dirty_buffer(bhs[i], 0); for (i = 0; i < nr_bhs; i++) { wait_on_buffer(bhs[i]); if (!err && !buffer_uptodate(bhs[i])) err = -EIO; } return err; }
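/*
 * Illustrative, standalone user-space sketch, not part of fs/fat above:
 * packing and unpacking the on-disk FAT date word using the bit layout
 * documented next to the timestamp helpers (day in bits 0-4, month in bits
 * 5-8, year since 1980 in bits 9-15). The sample date is invented for the
 * example.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t demo_pack_fat_date(unsigned int year, unsigned int month, unsigned int day)
{
	/* 'year' is the full calendar year; FAT stores it as an offset from 1980. */
	return (uint16_t)(((year - 1980) << 9) | (month << 5) | day);
}

int main(void)
{
	uint16_t d = demo_pack_fat_date(2024, 2, 29);

	printf("packed: 0x%04x\n", d);
	printf("unpacked: %u-%02u-%02u\n",
	       1980 + (d >> 9), (d >> 5) & 0xf, d & 0x1f);
	return 0;
}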
// SPDX-License-Identifier: GPL-2.0-or-later /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * "Ping" sockets * * Based on ipv4/ping.c code. * * Authors: Lorenzo Colitti (IPv6 support) * Vasiliy Kulikov / Openwall (IPv4 implementation, for Linux 2.6), * Pavel Kankovsky (IPv4 implementation, for Linux 2.4.32) */ #include <net/addrconf.h> #include <net/ipv6.h> #include <net/ip6_route.h> #include <net/protocol.h> #include <net/udp.h> #include <net/transp_v6.h> #include <linux/proc_fs.h> #include <linux/bpf-cgroup.h> #include <net/ping.h> /* Compatibility glue so we can support IPv6 when it's compiled as a module */ static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) { return -EAFNOSUPPORT; } static void dummy_ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) { } static int dummy_icmpv6_err_convert(u8 type, u8 code, int *err) { return -EAFNOSUPPORT; } static void dummy_ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, u32 info, u8 *payload) {} static int dummy_ipv6_chk_addr(struct net *net, const struct in6_addr *addr, const struct net_device *dev, int strict) { return 0; } static int ping_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { /* This check is replicated from __ip6_datagram_connect() and * intended to prevent BPF program called below from accessing * bytes that are out of the bound specified by user in addr_len.
*/ if (addr_len < SIN6_LEN_RFC2133) return -EINVAL; return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, &addr_len); } static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) { struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct icmp6hdr user_icmph; int addr_type; struct in6_addr *daddr; int oif = 0; struct flowi6 fl6; int err; struct dst_entry *dst; struct rt6_info *rt; struct pingfakehdr pfh; struct ipcm6_cookie ipc6; err = ping_common_sendmsg(AF_INET6, msg, len, &user_icmph, sizeof(user_icmph)); if (err) return err; memset(&fl6, 0, sizeof(fl6)); if (msg->msg_name) { DECLARE_SOCKADDR(struct sockaddr_in6 *, u, msg->msg_name); if (msg->msg_namelen < sizeof(*u)) return -EINVAL; if (u->sin6_family != AF_INET6) { return -EAFNOSUPPORT; } daddr = &(u->sin6_addr); if (inet6_test_bit(SNDFLOW, sk)) fl6.flowlabel = u->sin6_flowinfo & IPV6_FLOWINFO_MASK; if (__ipv6_addr_needs_scope_id(ipv6_addr_type(daddr))) oif = u->sin6_scope_id; } else { if (sk->sk_state != TCP_ESTABLISHED) return -EDESTADDRREQ; daddr = &sk->sk_v6_daddr; fl6.flowlabel = np->flow_label; } if (!oif) oif = sk->sk_bound_dev_if; if (!oif) oif = np->sticky_pktinfo.ipi6_ifindex; if (!oif && ipv6_addr_is_multicast(daddr)) oif = READ_ONCE(np->mcast_oif); else if (!oif) oif = READ_ONCE(np->ucast_oif); addr_type = ipv6_addr_type(daddr); if ((__ipv6_addr_needs_scope_id(addr_type) && !oif) || (addr_type & IPV6_ADDR_MAPPED) || (oif && sk->sk_bound_dev_if && oif != sk->sk_bound_dev_if && l3mdev_master_ifindex_by_index(sock_net(sk), oif) != sk->sk_bound_dev_if)) return -EINVAL; ipcm6_init_sk(&ipc6, sk); ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags); ipc6.sockc.mark = READ_ONCE(sk->sk_mark); fl6.flowi6_oif = oif; if (msg->msg_controllen) { struct ipv6_txoptions opt = {}; opt.tot_len = sizeof(opt); ipc6.opt = &opt; err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6); if (err < 0) return err; /* Changes to txoptions and flow info are not implemented, yet. * Drop the options. 
*/ ipc6.opt = NULL; } fl6.flowi6_proto = IPPROTO_ICMPV6; fl6.saddr = np->saddr; fl6.daddr = *daddr; fl6.flowi6_mark = ipc6.sockc.mark; fl6.flowi6_uid = sk->sk_uid; fl6.fl6_icmp_type = user_icmph.icmp6_type; fl6.fl6_icmp_code = user_icmph.icmp6_code; security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6)); fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); dst = ip6_sk_dst_lookup_flow(sk, &fl6, daddr, false); if (IS_ERR(dst)) return PTR_ERR(dst); rt = (struct rt6_info *) dst; if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) fl6.flowi6_oif = READ_ONCE(np->mcast_oif); else if (!fl6.flowi6_oif) fl6.flowi6_oif = READ_ONCE(np->ucast_oif); pfh.icmph.type = user_icmph.icmp6_type; pfh.icmph.code = user_icmph.icmp6_code; pfh.icmph.checksum = 0; pfh.icmph.un.echo.id = inet->inet_sport; pfh.icmph.un.echo.sequence = user_icmph.icmp6_sequence; pfh.msg = msg; pfh.wcheck = 0; pfh.family = AF_INET6; if (ipc6.hlimit < 0) ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); lock_sock(sk); err = ip6_append_data(sk, ping_getfrag, &pfh, len, sizeof(struct icmp6hdr), &ipc6, &fl6, rt, MSG_DONTWAIT); if (err) { ICMP6_INC_STATS(sock_net(sk), rt->rt6i_idev, ICMP6_MIB_OUTERRORS); ip6_flush_pending_frames(sk); } else { icmpv6_push_pending_frames(sk, &fl6, (struct icmp6hdr *)&pfh.icmph, len); } release_sock(sk); dst_release(dst); if (err) return err; return len; } struct proto pingv6_prot = { .name = "PINGv6", .owner = THIS_MODULE, .init = ping_init_sock, .close = ping_close, .pre_connect = ping_v6_pre_connect, .connect = ip6_datagram_connect_v6_only, .disconnect = __udp_disconnect, .setsockopt = ipv6_setsockopt, .getsockopt = ipv6_getsockopt, .sendmsg = ping_v6_sendmsg, .recvmsg = ping_recvmsg, .bind = ping_bind, .backlog_rcv = ping_queue_rcv_skb, .hash = ping_hash, .unhash = ping_unhash, .get_port = ping_get_port, .put_port = ping_unhash, .obj_size = sizeof(struct raw6_sock), .ipv6_pinfo_offset = offsetof(struct raw6_sock, inet6), }; EXPORT_SYMBOL_GPL(pingv6_prot); static struct inet_protosw pingv6_protosw = { .type = SOCK_DGRAM, .protocol = IPPROTO_ICMPV6, .prot = &pingv6_prot, .ops = &inet6_sockraw_ops, .flags = INET_PROTOSW_REUSE, }; #ifdef CONFIG_PROC_FS static void *ping_v6_seq_start(struct seq_file *seq, loff_t *pos) { return ping_seq_start(seq, pos, AF_INET6); } static int ping_v6_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) { seq_puts(seq, IPV6_SEQ_DGRAM_HEADER); } else { int bucket = ((struct ping_iter_state *) seq->private)->bucket; struct inet_sock *inet = inet_sk((struct sock *)v); __u16 srcp = ntohs(inet->inet_sport); __u16 destp = ntohs(inet->inet_dport); ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket); } return 0; } static const struct seq_operations ping_v6_seq_ops = { .start = ping_v6_seq_start, .show = ping_v6_seq_show, .next = ping_seq_next, .stop = ping_seq_stop, }; static int __net_init ping_v6_proc_init_net(struct net *net) { if (!proc_create_net("icmp6", 0444, net->proc_net, &ping_v6_seq_ops, sizeof(struct ping_iter_state))) return -ENOMEM; return 0; } static void __net_exit ping_v6_proc_exit_net(struct net *net) { remove_proc_entry("icmp6", net->proc_net); } static struct pernet_operations ping_v6_net_ops = { .init = ping_v6_proc_init_net, .exit = ping_v6_proc_exit_net, }; #endif int __init pingv6_init(void) { #ifdef CONFIG_PROC_FS int ret = register_pernet_subsys(&ping_v6_net_ops); if (ret) return ret; #endif pingv6_ops.ipv6_recv_error = ipv6_recv_error; pingv6_ops.ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl; 
pingv6_ops.ip6_datagram_recv_specific_ctl = ip6_datagram_recv_specific_ctl; pingv6_ops.icmpv6_err_convert = icmpv6_err_convert; pingv6_ops.ipv6_icmp_error = ipv6_icmp_error; pingv6_ops.ipv6_chk_addr = ipv6_chk_addr; return inet6_register_protosw(&pingv6_protosw); } /* This never gets called because it's not possible to unload the ipv6 module, * but just in case. */ void pingv6_exit(void) { pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error; pingv6_ops.ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl; pingv6_ops.ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl; pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert; pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error; pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr; #ifdef CONFIG_PROC_FS unregister_pernet_subsys(&ping_v6_net_ops); #endif inet6_unregister_protosw(&pingv6_protosw); }
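/*
 * Illustrative, standalone user-space sketch, not part of the kernel file
 * above: sending one echo request through the unprivileged ICMPv6 "ping
 * socket" that ping_v6_sendmsg() services. The destination (::1) is an
 * example only; the kernel supplies the echo identifier (from the socket's
 * local port) and the checksum, and socket creation is expected to fail
 * unless the caller's group falls inside the net.ipv4.ping_group_range
 * sysctl.
 */
#include <arpa/inet.h>
#include <netinet/icmp6.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in6 dst = { .sin6_family = AF_INET6,
				    .sin6_addr = IN6ADDR_LOOPBACK_INIT };
	struct icmp6_hdr req;
	int fd;

	fd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_ICMPV6);
	if (fd < 0) {
		perror("socket");	/* likely not allowed by ping_group_range */
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.icmp6_type = ICMP6_ECHO_REQUEST;	/* id and checksum are filled in by the kernel */
	req.icmp6_seq = htons(1);

	if (sendto(fd, &req, sizeof(req), 0, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto");

	close(fd);
	return 0;
}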
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Authors: Lotsa people, from code originally in tcp */ #ifndef _INET6_HASHTABLES_H #define _INET6_HASHTABLES_H #if IS_ENABLED(CONFIG_IPV6) #include <linux/in6.h> #include <linux/ipv6.h> #include <linux/types.h> #include <linux/jhash.h> #include <net/inet_sock.h> #include <net/ipv6.h> #include <net/netns/hash.h> struct inet_hashinfo; static inline unsigned int __inet6_ehashfn(const u32 lhash, const u16 lport, const u32 fhash, const __be16 fport, const u32 initval) { const u32 ports = (((u32)lport) << 16) | (__force u32)fport; return jhash_3words(lhash, fhash, ports, initval); } /* * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM * * The sockhash lock must be held as a reader here. */ struct sock *__inet6_lookup_established(struct net *net, struct inet_hashinfo *hashinfo, const struct in6_addr *saddr, const __be16 sport, const struct in6_addr *daddr, const u16 hnum, const int dif, const int sdif); typedef u32 (inet6_ehashfn_t)(const struct net *net, const struct in6_addr *laddr, const u16 lport, const struct in6_addr *faddr, const __be16 fport); inet6_ehashfn_t inet6_ehashfn; INDIRECT_CALLABLE_DECLARE(inet6_ehashfn_t udp6_ehashfn); struct sock *inet6_lookup_reuseport(struct net *net, struct sock *sk, struct sk_buff *skb, int doff, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, unsigned short hnum, inet6_ehashfn_t *ehashfn); struct sock *inet6_lookup_listener(struct net *net, struct inet_hashinfo *hashinfo, struct sk_buff *skb, int doff, const struct in6_addr *saddr, const __be16 sport, const struct in6_addr *daddr, const unsigned short hnum, const int dif, const int sdif); struct sock *inet6_lookup_run_sk_lookup(struct net *net, int protocol, struct sk_buff *skb, int doff, const struct in6_addr *saddr, const __be16 sport, const struct in6_addr *daddr, const u16 hnum, const int dif, inet6_ehashfn_t *ehashfn); static inline struct sock *__inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo, struct sk_buff *skb, int doff, const struct in6_addr *saddr, const __be16 sport, const struct in6_addr *daddr, const u16 hnum, const int dif, const int sdif, bool *refcounted) { struct sock *sk = __inet6_lookup_established(net, hashinfo, saddr, sport, daddr, hnum, dif, sdif); *refcounted = true; if (sk) return sk; *refcounted = false; return inet6_lookup_listener(net, hashinfo, skb, doff, saddr, sport, daddr, hnum, dif, sdif); } static inline struct sock *inet6_steal_sock(struct net *net, struct sk_buff *skb, int doff, const struct in6_addr *saddr, const __be16
sport, const struct in6_addr *daddr, const __be16 dport, bool *refcounted, inet6_ehashfn_t *ehashfn) { struct sock *sk, *reuse_sk; bool prefetched; sk = skb_steal_sock(skb, refcounted, &prefetched); if (!sk) return NULL; if (!prefetched || !sk_fullsock(sk)) return sk; if (sk->sk_protocol == IPPROTO_TCP) { if (sk->sk_state != TCP_LISTEN) return sk; } else if (sk->sk_protocol == IPPROTO_UDP) { if (sk->sk_state != TCP_CLOSE) return sk; } else { return sk; } reuse_sk = inet6_lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, ntohs(dport), ehashfn); if (!reuse_sk) return sk; /* We've chosen a new reuseport sock which is never refcounted. This * implies that sk also isn't refcounted. */ WARN_ON_ONCE(*refcounted); return reuse_sk; } static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo, struct sk_buff *skb, int doff, const __be16 sport, const __be16 dport, int iif, int sdif, bool *refcounted) { struct net *net = dev_net(skb_dst(skb)->dev); const struct ipv6hdr *ip6h = ipv6_hdr(skb); struct sock *sk; sk = inet6_steal_sock(net, skb, doff, &ip6h->saddr, sport, &ip6h->daddr, dport, refcounted, inet6_ehashfn); if (IS_ERR(sk)) return NULL; if (sk) return sk; return __inet6_lookup(net, hashinfo, skb, doff, &ip6h->saddr, sport, &ip6h->daddr, ntohs(dport), iif, sdif, refcounted); } struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo, struct sk_buff *skb, int doff, const struct in6_addr *saddr, const __be16 sport, const struct in6_addr *daddr, const __be16 dport, const int dif); int inet6_hash(struct sock *sk); static inline bool inet6_match(struct net *net, const struct sock *sk, const struct in6_addr *saddr, const struct in6_addr *daddr, const __portpair ports, const int dif, const int sdif) { if (!net_eq(sock_net(sk), net) || sk->sk_family != AF_INET6 || sk->sk_portpair != ports || !ipv6_addr_equal(&sk->sk_v6_daddr, saddr) || !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr)) return false; /* READ_ONCE() paired with WRITE_ONCE() in sock_bindtoindex_locked() */ return inet_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, sdif); } #endif /* IS_ENABLED(CONFIG_IPV6) */ #endif /* _INET6_HASHTABLES_H */
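/*
 * Illustrative sketch (not kernel code): how __inet6_ehashfn() above folds
 * the connection 4-tuple into a hash bucket value. The local and foreign
 * ports are packed into a single 32-bit word and then mixed with two 32-bit
 * hashes derived from the IPv6 addresses, seeded by a per-namespace initval.
 * mix32() below is a hypothetical stand-in for the kernel's jhash_3words().
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* placeholder mixer; the kernel uses jhash_3words(a, b, c, seed) */
static uint32_t mix32(uint32_t a, uint32_t b, uint32_t c, uint32_t seed)
{
	uint32_t h = seed;

	h = (h ^ a) * 0x9e3779b1u;
	h = (h ^ b) * 0x85ebca77u;
	h = (h ^ c) * 0xc2b2ae3du;
	return h ^ (h >> 16);
}

static uint32_t ehashfn_sketch(uint32_t laddr_hash, uint16_t lport,
			       uint32_t faddr_hash, uint16_t fport_net,
			       uint32_t initval)
{
	/* same packing as __inet6_ehashfn(): local port in the high half,
	 * foreign port (network byte order) in the low half */
	const uint32_t ports = ((uint32_t)lport << 16) | fport_net;

	return mix32(laddr_hash, faddr_hash, ports, initval);
}

int main(void)
{
	/* example inputs; real address hashes are derived from the IPv6 addresses */
	printf("bucket hash: %08" PRIx32 "\n",
	       ehashfn_sketch(0x12345678u, 45678, 0x9abcdef0u, 443, 0xdeadbeefu));
	return 0;
}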
/* * Performance events: * * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de> * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra * * Data type definitions, declarations, prototypes. * * Started by: Thomas Gleixner and Ingo Molnar * * For licencing details see kernel-base/COPYING */ #ifndef _LINUX_PERF_EVENT_H #define _LINUX_PERF_EVENT_H #include <uapi/linux/perf_event.h> #include <uapi/linux/bpf_perf_event.h> /* * Kernel-internal data types and definitions: */ #ifdef CONFIG_PERF_EVENTS # include <asm/perf_event.h> # include <asm/local64.h> #endif #define PERF_GUEST_ACTIVE 0x01 #define PERF_GUEST_USER 0x02 struct perf_guest_info_callbacks { unsigned int (*state)(void); unsigned long (*get_ip)(void); unsigned int (*handle_intel_pt_intr)(void); }; #ifdef CONFIG_HAVE_HW_BREAKPOINT #include <linux/rhashtable-types.h> #include <asm/hw_breakpoint.h> #endif #include <linux/list.h> #include <linux/mutex.h> #include <linux/rculist.h> #include <linux/rcupdate.h> #include <linux/spinlock.h> #include <linux/hrtimer.h> #include <linux/fs.h> #include <linux/pid_namespace.h> #include <linux/workqueue.h> #include <linux/ftrace.h> #include <linux/cpu.h> #include <linux/irq_work.h> #include <linux/static_key.h> #include <linux/jump_label_ratelimit.h> #include <linux/atomic.h> #include <linux/sysfs.h> #include <linux/perf_regs.h> #include <linux/cgroup.h> #include <linux/refcount.h> #include <linux/security.h> #include <linux/static_call.h> #include <linux/lockdep.h> #include <asm/local.h> struct perf_callchain_entry { __u64 nr; __u64 ip[]; /* /proc/sys/kernel/perf_event_max_stack */ }; struct perf_callchain_entry_ctx { struct perf_callchain_entry *entry; u32 max_stack; u32 nr; short contexts; bool contexts_maxed; }; typedef unsigned long (*perf_copy_f)(void *dst, const void *src, unsigned long off, unsigned long len); struct perf_raw_frag { union { struct perf_raw_frag *next; unsigned long pad; }; perf_copy_f copy; void *data; u32 size; } __packed; struct perf_raw_record { struct perf_raw_frag frag; u32
size; }; static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag) { return frag->pad < sizeof(u64); } /* * branch stack layout: * nr: number of taken branches stored in entries[] * hw_idx: The low level index of raw branch records * for the most recent branch. * -1ULL means invalid/unknown. * * Note that nr can vary from sample to sample * branches (to, from) are stored from most recent * to least recent, i.e., entries[0] contains the most * recent branch. * The entries[] is an abstraction of raw branch records, * which may not be stored in age order in HW, e.g. Intel LBR. * The hw_idx is to expose the low level index of raw * branch record for the most recent branch aka entries[0]. * The hw_idx index is between -1 (unknown) and max depth, * which can be retrieved in /sys/devices/cpu/caps/branches. * For the architectures whose raw branch records are * already stored in age order, the hw_idx should be 0. */ struct perf_branch_stack { __u64 nr; __u64 hw_idx; struct perf_branch_entry entries[]; }; struct task_struct; /* * extra PMU register associated with an event */ struct hw_perf_event_extra { u64 config; /* register value */ unsigned int reg; /* register address or index */ int alloc; /* extra register already allocated */ int idx; /* index in shared_regs->regs[] */ }; /** * hw_perf_event::flag values * * PERF_EVENT_FLAG_ARCH bits are reserved for architecture-specific * usage. */ #define PERF_EVENT_FLAG_ARCH 0x000fffff #define PERF_EVENT_FLAG_USER_READ_CNT 0x80000000 static_assert((PERF_EVENT_FLAG_USER_READ_CNT & PERF_EVENT_FLAG_ARCH) == 0); /** * struct hw_perf_event - performance event hardware details: */ struct hw_perf_event { #ifdef CONFIG_PERF_EVENTS union { struct { /* hardware */ u64 config; u64 last_tag; unsigned long config_base; unsigned long event_base; int event_base_rdpmc; int idx; int last_cpu; int flags; struct hw_perf_event_extra extra_reg; struct hw_perf_event_extra branch_reg; }; struct { /* software */ struct hrtimer hrtimer; }; struct { /* tracepoint */ /* for tp_event->class */ struct list_head tp_list; }; struct { /* amd_power */ u64 pwr_acc; u64 ptsc; }; #ifdef CONFIG_HAVE_HW_BREAKPOINT struct { /* breakpoint */ /* * Crufty hack to avoid the chicken and egg * problem hw_breakpoint has with context * creation and event initalization. */ struct arch_hw_breakpoint info; struct rhlist_head bp_list; }; #endif struct { /* amd_iommu */ u8 iommu_bank; u8 iommu_cntr; u16 padding; u64 conf; u64 conf1; }; }; /* * If the event is a per task event, this will point to the task in * question. See the comment in perf_event_alloc(). */ struct task_struct *target; /* * PMU would store hardware filter configuration * here. */ void *addr_filters; /* Last sync'ed generation of filters */ unsigned long addr_filters_gen; /* * hw_perf_event::state flags; used to track the PERF_EF_* state. */ #define PERF_HES_STOPPED 0x01 /* the counter is stopped */ #define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */ #define PERF_HES_ARCH 0x04 int state; /* * The last observed hardware counter value, updated with a * local64_cmpxchg() such that pmu::read() can be called nested. */ local64_t prev_count; /* * The period to start the next sample with. */ u64 sample_period; union { struct { /* Sampling */ /* * The period we started this sample with. */ u64 last_period; /* * However much is left of the current period; * note that this is a full 64bit value and * allows for generation of periods longer * than hardware might allow. 
*/ local64_t period_left; }; struct { /* Topdown events counting for context switch */ u64 saved_metric; u64 saved_slots; }; }; /* * State for throttling the event, see __perf_event_overflow() and * perf_adjust_freq_unthr_context(). */ u64 interrupts_seq; u64 interrupts; /* * State for freq target events, see __perf_event_overflow() and * perf_adjust_freq_unthr_context(). */ u64 freq_time_stamp; u64 freq_count_stamp; #endif }; struct perf_event; struct perf_event_pmu_context; /* * Common implementation detail of pmu::{start,commit,cancel}_txn */ #define PERF_PMU_TXN_ADD 0x1 /* txn to add/schedule event on PMU */ #define PERF_PMU_TXN_READ 0x2 /* txn to read event group from PMU */ /** * pmu::capabilities flags */ #define PERF_PMU_CAP_NO_INTERRUPT 0x0001 #define PERF_PMU_CAP_NO_NMI 0x0002 #define PERF_PMU_CAP_AUX_NO_SG 0x0004 #define PERF_PMU_CAP_EXTENDED_REGS 0x0008 #define PERF_PMU_CAP_EXCLUSIVE 0x0010 #define PERF_PMU_CAP_ITRACE 0x0020 #define PERF_PMU_CAP_NO_EXCLUDE 0x0040 #define PERF_PMU_CAP_AUX_OUTPUT 0x0080 #define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100 struct perf_output_handle; #define PMU_NULL_DEV ((void *)(~0UL)) /** * struct pmu - generic performance monitoring unit */ struct pmu { struct list_head entry; struct module *module; struct device *dev; struct device *parent; const struct attribute_group **attr_groups; const struct attribute_group **attr_update; const char *name; int type; /* * various common per-pmu feature flags */ int capabilities; int __percpu *pmu_disable_count; struct perf_cpu_pmu_context __percpu *cpu_pmu_context; atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */ int task_ctx_nr; int hrtimer_interval_ms; /* number of address filters this PMU can do */ unsigned int nr_addr_filters; /* * Fully disable/enable this PMU, can be used to protect from the PMI * as well as for lazy/batch writing of the MSRs. */ void (*pmu_enable) (struct pmu *pmu); /* optional */ void (*pmu_disable) (struct pmu *pmu); /* optional */ /* * Try and initialize the event for this PMU. * * Returns: * -ENOENT -- @event is not for this PMU * * -ENODEV -- @event is for this PMU but PMU not present * -EBUSY -- @event is for this PMU but PMU temporarily unavailable * -EINVAL -- @event is for this PMU but @event is not valid * -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported * -EACCES -- @event is for this PMU, @event is valid, but no privileges * * 0 -- @event is for this PMU and valid * * Other error return values are allowed. */ int (*event_init) (struct perf_event *event); /* * Notification that the event was mapped or unmapped. Called * in the context of the mapping task. */ void (*event_mapped) (struct perf_event *event, struct mm_struct *mm); /* optional */ void (*event_unmapped) (struct perf_event *event, struct mm_struct *mm); /* optional */ /* * Flags for ->add()/->del()/ ->start()/->stop(). There are * matching hw_perf_event::state flags. */ #define PERF_EF_START 0x01 /* start the counter when adding */ #define PERF_EF_RELOAD 0x02 /* reload the counter when starting */ #define PERF_EF_UPDATE 0x04 /* update the counter when stopping */ /* * Adds/Removes a counter to/from the PMU, can be done inside a * transaction, see the ->*_txn() methods. * * The add/del callbacks will reserve all hardware resources required * to service the event, this includes any counter constraint * scheduling etc. * * Called with IRQs disabled and the PMU disabled on the CPU the event * is on. 
* * ->add() called without PERF_EF_START should result in the same state * as ->add() followed by ->stop(). * * ->del() must always PERF_EF_UPDATE stop an event. If it calls * ->stop() that must deal with already being stopped without * PERF_EF_UPDATE. */ int (*add) (struct perf_event *event, int flags); void (*del) (struct perf_event *event, int flags); /* * Starts/Stops a counter present on the PMU. * * The PMI handler should stop the counter when perf_event_overflow() * returns !0. ->start() will be used to continue. * * Also used to change the sample period. * * Called with IRQs disabled and the PMU disabled on the CPU the event * is on -- will be called from NMI context with the PMU generates * NMIs. * * ->stop() with PERF_EF_UPDATE will read the counter and update * period/count values like ->read() would. * * ->start() with PERF_EF_RELOAD will reprogram the counter * value, must be preceded by a ->stop() with PERF_EF_UPDATE. */ void (*start) (struct perf_event *event, int flags); void (*stop) (struct perf_event *event, int flags); /* * Updates the counter value of the event. * * For sampling capable PMUs this will also update the software period * hw_perf_event::period_left field. */ void (*read) (struct perf_event *event); /* * Group events scheduling is treated as a transaction, add * group events as a whole and perform one schedulability test. * If the test fails, roll back the whole group * * Start the transaction, after this ->add() doesn't need to * do schedulability tests. * * Optional. */ void (*start_txn) (struct pmu *pmu, unsigned int txn_flags); /* * If ->start_txn() disabled the ->add() schedulability test * then ->commit_txn() is required to perform one. On success * the transaction is closed. On error the transaction is kept * open until ->cancel_txn() is called. * * Optional. */ int (*commit_txn) (struct pmu *pmu); /* * Will cancel the transaction, assumes ->del() is called * for each successful ->add() during the transaction. * * Optional. */ void (*cancel_txn) (struct pmu *pmu); /* * Will return the value for perf_event_mmap_page::index for this event, * if no implementation is provided it will default to 0 (see * perf_event_idx_default). */ int (*event_idx) (struct perf_event *event); /*optional */ /* * context-switches callback */ void (*sched_task) (struct perf_event_pmu_context *pmu_ctx, bool sched_in); /* * Kmem cache of PMU specific data */ struct kmem_cache *task_ctx_cache; /* * PMU specific parts of task perf event context (i.e. ctx->task_ctx_data) * can be synchronized using this function. See Intel LBR callstack support * implementation and Perf core context switch handling callbacks for usage * examples. */ void (*swap_task_ctx) (struct perf_event_pmu_context *prev_epc, struct perf_event_pmu_context *next_epc); /* optional */ /* * Set up pmu-private data structures for an AUX area */ void *(*setup_aux) (struct perf_event *event, void **pages, int nr_pages, bool overwrite); /* optional */ /* * Free pmu-private AUX data structures */ void (*free_aux) (void *aux); /* optional */ /* * Take a snapshot of the AUX buffer without touching the event * state, so that preempting ->start()/->stop() callbacks does * not interfere with their logic. Called in PMI context. * * Returns the size of AUX data copied to the output handle. * * Optional. 
*/ long (*snapshot_aux) (struct perf_event *event, struct perf_output_handle *handle, unsigned long size); /* * Validate address range filters: make sure the HW supports the * requested configuration and number of filters; return 0 if the * supplied filters are valid, -errno otherwise. * * Runs in the context of the ioctl()ing process and is not serialized * with the rest of the PMU callbacks. */ int (*addr_filters_validate) (struct list_head *filters); /* optional */ /* * Synchronize address range filter configuration: * translate hw-agnostic filters into hardware configuration in * event::hw::addr_filters. * * Runs as a part of filter sync sequence that is done in ->start() * callback by calling perf_event_addr_filters_sync(). * * May (and should) traverse event::addr_filters::list, for which its * caller provides necessary serialization. */ void (*addr_filters_sync) (struct perf_event *event); /* optional */ /* * Check if event can be used for aux_output purposes for * events of this PMU. * * Runs from perf_event_open(). Should return 0 for "no match" * or non-zero for "match". */ int (*aux_output_match) (struct perf_event *event); /* optional */ /* * Skip programming this PMU on the given CPU. Typically needed for * big.LITTLE things. */ bool (*filter) (struct pmu *pmu, int cpu); /* optional */ /* * Check period value for PERF_EVENT_IOC_PERIOD ioctl. */ int (*check_period) (struct perf_event *event, u64 value); /* optional */ }; enum perf_addr_filter_action_t { PERF_ADDR_FILTER_ACTION_STOP = 0, PERF_ADDR_FILTER_ACTION_START, PERF_ADDR_FILTER_ACTION_FILTER, }; /** * struct perf_addr_filter - address range filter definition * @entry: event's filter list linkage * @path: object file's path for file-based filters * @offset: filter range offset * @size: filter range size (size==0 means single address trigger) * @action: filter/start/stop * * This is a hardware-agnostic filter configuration as specified by the user. */ struct perf_addr_filter { struct list_head entry; struct path path; unsigned long offset; unsigned long size; enum perf_addr_filter_action_t action; }; /** * struct perf_addr_filters_head - container for address range filters * @list: list of filters for this event * @lock: spinlock that serializes accesses to the @list and event's * (and its children's) filter generations. * @nr_file_filters: number of file-based filters * * A child event will use parent's @list (and therefore @lock), so they are * bundled together; see perf_event_addr_filters(). */ struct perf_addr_filters_head { struct list_head list; raw_spinlock_t lock; unsigned int nr_file_filters; }; struct perf_addr_filter_range { unsigned long start; unsigned long size; }; /** * enum perf_event_state - the states of an event: */ enum perf_event_state { PERF_EVENT_STATE_DEAD = -4, PERF_EVENT_STATE_EXIT = -3, PERF_EVENT_STATE_ERROR = -2, PERF_EVENT_STATE_OFF = -1, PERF_EVENT_STATE_INACTIVE = 0, PERF_EVENT_STATE_ACTIVE = 1, }; struct file; struct perf_sample_data; typedef void (*perf_overflow_handler_t)(struct perf_event *, struct perf_sample_data *, struct pt_regs *regs); /* * Event capabilities. For event_caps and groups caps. * * PERF_EV_CAP_SOFTWARE: Is a software event. * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read * from any CPU in the package where it is active. * PERF_EV_CAP_SIBLING: An event with this flag must be a group sibling and * cannot be a group leader. If an event with this flag is detached from the * group it is scheduled out and moved into an unrecoverable ERROR state. 
*/ #define PERF_EV_CAP_SOFTWARE BIT(0) #define PERF_EV_CAP_READ_ACTIVE_PKG BIT(1) #define PERF_EV_CAP_SIBLING BIT(2) #define SWEVENT_HLIST_BITS 8 #define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS) struct swevent_hlist { struct hlist_head heads[SWEVENT_HLIST_SIZE]; struct rcu_head rcu_head; }; #define PERF_ATTACH_CONTEXT 0x01 #define PERF_ATTACH_GROUP 0x02 #define PERF_ATTACH_TASK 0x04 #define PERF_ATTACH_TASK_DATA 0x08 #define PERF_ATTACH_ITRACE 0x10 #define PERF_ATTACH_SCHED_CB 0x20 #define PERF_ATTACH_CHILD 0x40 struct bpf_prog; struct perf_cgroup; struct perf_buffer; struct pmu_event_list { raw_spinlock_t lock; struct list_head list; }; /* * event->sibling_list is modified whole holding both ctx->lock and ctx->mutex * as such iteration must hold either lock. However, since ctx->lock is an IRQ * safe lock, and is only held by the CPU doing the modification, having IRQs * disabled is sufficient since it will hold-off the IPIs. */ #ifdef CONFIG_PROVE_LOCKING #define lockdep_assert_event_ctx(event) \ WARN_ON_ONCE(__lockdep_enabled && \ (this_cpu_read(hardirqs_enabled) && \ lockdep_is_held(&(event)->ctx->mutex) != LOCK_STATE_HELD)) #else #define lockdep_assert_event_ctx(event) #endif #define for_each_sibling_event(sibling, event) \ lockdep_assert_event_ctx(event); \ if ((event)->group_leader == (event)) \ list_for_each_entry((sibling), &(event)->sibling_list, sibling_list) /** * struct perf_event - performance event kernel representation: */ struct perf_event { #ifdef CONFIG_PERF_EVENTS /* * entry onto perf_event_context::event_list; * modifications require ctx->lock * RCU safe iterations. */ struct list_head event_entry; /* * Locked for modification by both ctx->mutex and ctx->lock; holding * either sufficies for read. */ struct list_head sibling_list; struct list_head active_list; /* * Node on the pinned or flexible tree located at the event context; */ struct rb_node group_node; u64 group_index; /* * We need storage to track the entries in perf_pmu_migrate_context; we * cannot use the event_entry because of RCU and we want to keep the * group in tact which avoids us using the other two entries. */ struct list_head migrate_entry; struct hlist_node hlist_entry; struct list_head active_entry; int nr_siblings; /* Not serialized. Only written during event initialization. */ int event_caps; /* The cumulative AND of all event_caps for events in this group. */ int group_caps; unsigned int group_generation; struct perf_event *group_leader; /* * event->pmu will always point to pmu in which this event belongs. * Whereas event->pmu_ctx->pmu may point to other pmu when group of * different pmu events is created. */ struct pmu *pmu; void *pmu_private; enum perf_event_state state; unsigned int attach_state; local64_t count; atomic64_t child_count; /* * These are the total time in nanoseconds that the event * has been enabled (i.e. eligible to run, and the task has * been scheduled in, if this is a per-task event) * and running (scheduled onto the CPU), respectively. */ u64 total_time_enabled; u64 total_time_running; u64 tstamp; struct perf_event_attr attr; u16 header_size; u16 id_header_size; u16 read_size; struct hw_perf_event hw; struct perf_event_context *ctx; /* * event->pmu_ctx points to perf_event_pmu_context in which the event * is added. This pmu_ctx can be of other pmu for sw event when that * sw event is part of a group which also contains non-sw events. 
*/ struct perf_event_pmu_context *pmu_ctx; atomic_long_t refcount; /* * These accumulate total time (in nanoseconds) that children * events have been enabled and running, respectively. */ atomic64_t child_total_time_enabled; atomic64_t child_total_time_running; /* * Protect attach/detach and child_list: */ struct mutex child_mutex; struct list_head child_list; struct perf_event *parent; int oncpu; int cpu; struct list_head owner_entry; struct task_struct *owner; /* mmap bits */ struct mutex mmap_mutex; atomic_t mmap_count; struct perf_buffer *rb; struct list_head rb_entry; unsigned long rcu_batches; int rcu_pending; /* poll related */ wait_queue_head_t waitq; struct fasync_struct *fasync; /* delayed work for NMIs and such */ unsigned int pending_wakeup; unsigned int pending_kill; unsigned int pending_disable; unsigned int pending_sigtrap; unsigned long pending_addr; /* SIGTRAP */ struct irq_work pending_irq; struct callback_head pending_task; unsigned int pending_work; atomic_t event_limit; /* address range filters */ struct perf_addr_filters_head addr_filters; /* vma address array for file-based filders */ struct perf_addr_filter_range *addr_filter_ranges; unsigned long addr_filters_gen; /* for aux_output events */ struct perf_event *aux_event; void (*destroy)(struct perf_event *); struct rcu_head rcu_head; struct pid_namespace *ns; u64 id; atomic64_t lost_samples; u64 (*clock)(void); perf_overflow_handler_t overflow_handler; void *overflow_handler_context; #ifdef CONFIG_BPF_SYSCALL perf_overflow_handler_t orig_overflow_handler; struct bpf_prog *prog; u64 bpf_cookie; #endif #ifdef CONFIG_EVENT_TRACING struct trace_event_call *tp_event; struct event_filter *filter; #ifdef CONFIG_FUNCTION_TRACER struct ftrace_ops ftrace_ops; #endif #endif #ifdef CONFIG_CGROUP_PERF struct perf_cgroup *cgrp; /* cgroup event is attach to */ #endif #ifdef CONFIG_SECURITY void *security; #endif struct list_head sb_list; /* * Certain events gets forwarded to another pmu internally by over- * writing kernel copy of event->attr.type without user being aware * of it. event->orig_type contains original 'type' requested by * user. */ __u32 orig_type; #endif /* CONFIG_PERF_EVENTS */ }; /* * ,-----------------------[1:n]------------------------. * V V * perf_event_context <-[1:n]-> perf_event_pmu_context <-[1:n]- perf_event * | | * `--[n:1]-> pmu <-[1:n]--' * * * struct perf_event_pmu_context lifetime is refcount based and RCU freed * (similar to perf_event_context). Locking is as if it were a member of * perf_event_context; specifically: * * modification, both: ctx->mutex && ctx->lock * reading, either: ctx->mutex || ctx->lock * * There is one exception to this; namely put_pmu_ctx() isn't always called * with ctx->mutex held; this means that as long as we can guarantee the epc * has events the above rules hold. * * Specificially, sys_perf_event_open()'s group_leader case depends on * ctx->mutex pinning the configuration. Since we hold a reference on * group_leader (through the filedesc) it can't go away, therefore it's * associated pmu_ctx must exist and cannot change due to ctx->mutex. 
* * perf_event holds a refcount on perf_event_context * perf_event holds a refcount on perf_event_pmu_context */ struct perf_event_pmu_context { struct pmu *pmu; struct perf_event_context *ctx; struct list_head pmu_ctx_entry; struct list_head pinned_active; struct list_head flexible_active; /* Used to avoid freeing per-cpu perf_event_pmu_context */ unsigned int embedded : 1; unsigned int nr_events; unsigned int nr_cgroups; atomic_t refcount; /* event <-> epc */ struct rcu_head rcu_head; void *task_ctx_data; /* pmu specific data */ /* * Set when one or more (plausibly active) event can't be scheduled * due to pmu overcommit or pmu constraints, except tolerant to * events not necessary to be active due to scheduling constraints, * such as cgroups. */ int rotate_necessary; }; struct perf_event_groups { struct rb_root tree; u64 index; }; /** * struct perf_event_context - event context structure * * Used as a container for task events and CPU events as well: */ struct perf_event_context { /* * Protect the states of the events in the list, * nr_active, and the list: */ raw_spinlock_t lock; /* * Protect the list of events. Locking either mutex or lock * is sufficient to ensure the list doesn't change; to change * the list you need to lock both the mutex and the spinlock. */ struct mutex mutex; struct list_head pmu_ctx_list; struct perf_event_groups pinned_groups; struct perf_event_groups flexible_groups; struct list_head event_list; int nr_events; int nr_user; int is_active; int nr_task_data; int nr_stat; int nr_freq; int rotate_disable; refcount_t refcount; /* event <-> ctx */ struct task_struct *task; /* * Context clock, runs when context enabled. */ u64 time; u64 timestamp; u64 timeoffset; /* * These fields let us detect when two contexts have both * been cloned (inherited) from a common ancestor. */ struct perf_event_context *parent_ctx; u64 parent_gen; u64 generation; int pin_count; #ifdef CONFIG_CGROUP_PERF int nr_cgroups; /* cgroup evts */ #endif struct rcu_head rcu_head; /* * Sum (event->pending_sigtrap + event->pending_work) * * The SIGTRAP is targeted at ctx->task, as such it won't do changing * that until the signal is delivered. */ local_t nr_pending; }; /* * Number of contexts where an event can trigger: * task, softirq, hardirq, nmi. */ #define PERF_NR_CONTEXTS 4 struct perf_cpu_pmu_context { struct perf_event_pmu_context epc; struct perf_event_pmu_context *task_epc; struct list_head sched_cb_entry; int sched_cb_usage; int active_oncpu; int exclusive; raw_spinlock_t hrtimer_lock; struct hrtimer hrtimer; ktime_t hrtimer_interval; unsigned int hrtimer_active; }; /** * struct perf_event_cpu_context - per cpu event context structure */ struct perf_cpu_context { struct perf_event_context ctx; struct perf_event_context *task_ctx; int online; #ifdef CONFIG_CGROUP_PERF struct perf_cgroup *cgrp; #endif /* * Per-CPU storage for iterators used in visit_groups_merge. The default * storage is of size 2 to hold the CPU and any CPU event iterators. */ int heap_size; struct perf_event **heap; struct perf_event *heap_default[2]; }; struct perf_output_handle { struct perf_event *event; struct perf_buffer *rb; unsigned long wakeup; unsigned long size; u64 aux_flags; union { void *addr; unsigned long head; }; int page; }; struct bpf_perf_event_data_kern { bpf_user_pt_regs_t *regs; struct perf_sample_data *data; struct perf_event *event; }; #ifdef CONFIG_CGROUP_PERF /* * perf_cgroup_info keeps track of time_enabled for a cgroup. * This is a per-cpu dynamically allocated data structure. 
*/ struct perf_cgroup_info { u64 time; u64 timestamp; u64 timeoffset; int active; }; struct perf_cgroup { struct cgroup_subsys_state css; struct perf_cgroup_info __percpu *info; }; /* * Must ensure cgroup is pinned (css_get) before calling * this function. In other words, we cannot call this function * if there is no cgroup event for the current CPU context. */ static inline struct perf_cgroup * perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx) { return container_of(task_css_check(task, perf_event_cgrp_id, ctx ? lockdep_is_held(&ctx->lock) : true), struct perf_cgroup, css); } #endif /* CONFIG_CGROUP_PERF */ #ifdef CONFIG_PERF_EVENTS extern struct perf_event_context *perf_cpu_task_ctx(void); extern void *perf_aux_output_begin(struct perf_output_handle *handle, struct perf_event *event); extern void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size); extern int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size); extern void *perf_get_aux(struct perf_output_handle *handle); extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags); extern void perf_event_itrace_started(struct perf_event *event); extern int perf_pmu_register(struct pmu *pmu, const char *name, int type); extern void perf_pmu_unregister(struct pmu *pmu); extern void __perf_event_task_sched_in(struct task_struct *prev, struct task_struct *task); extern void __perf_event_task_sched_out(struct task_struct *prev, struct task_struct *next); extern int perf_event_init_task(struct task_struct *child, u64 clone_flags); extern void perf_event_exit_task(struct task_struct *child); extern void perf_event_free_task(struct task_struct *task); extern void perf_event_delayed_put(struct task_struct *task); extern struct file *perf_event_get(unsigned int fd); extern const struct perf_event *perf_get_event(struct file *file); extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event); extern void perf_event_print_debug(void); extern void perf_pmu_disable(struct pmu *pmu); extern void perf_pmu_enable(struct pmu *pmu); extern void perf_sched_cb_dec(struct pmu *pmu); extern void perf_sched_cb_inc(struct pmu *pmu); extern int perf_event_task_disable(void); extern int perf_event_task_enable(void); extern void perf_pmu_resched(struct pmu *pmu); extern int perf_event_refresh(struct perf_event *event, int refresh); extern void perf_event_update_userpage(struct perf_event *event); extern int perf_event_release_kernel(struct perf_event *event); extern struct perf_event * perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, struct task_struct *task, perf_overflow_handler_t callback, void *context); extern void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu); int perf_event_read_local(struct perf_event *event, u64 *value, u64 *enabled, u64 *running); extern u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running); extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs); static inline bool branch_sample_no_flags(const struct perf_event *event) { return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_FLAGS; } static inline bool branch_sample_no_cycles(const struct perf_event *event) { return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_CYCLES; } static inline bool branch_sample_type(const struct perf_event *event) { return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_TYPE_SAVE; } static inline bool 
branch_sample_hw_index(const struct perf_event *event) { return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX; } static inline bool branch_sample_priv(const struct perf_event *event) { return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_PRIV_SAVE; } static inline bool branch_sample_counters(const struct perf_event *event) { return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS; } static inline bool branch_sample_call_stack(const struct perf_event *event) { return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK; } struct perf_sample_data { /* * Fields set by perf_sample_data_init() unconditionally, * group so as to minimize the cachelines touched. */ u64 sample_flags; u64 period; u64 dyn_size; /* * Fields commonly set by __perf_event_header__init_id(), * group so as to minimize the cachelines touched. */ u64 type; struct { u32 pid; u32 tid; } tid_entry; u64 time; u64 id; struct { u32 cpu; u32 reserved; } cpu_entry; /* * The other fields, optionally {set,used} by * perf_{prepare,output}_sample(). */ u64 ip; struct perf_callchain_entry *callchain; struct perf_raw_record *raw; struct perf_branch_stack *br_stack; u64 *br_stack_cntr; union perf_sample_weight weight; union perf_mem_data_src data_src; u64 txn; struct perf_regs regs_user; struct perf_regs regs_intr; u64 stack_user_size; u64 stream_id; u64 cgroup; u64 addr; u64 phys_addr; u64 data_page_size; u64 code_page_size; u64 aux_size; } ____cacheline_aligned; /* default value for data source */ #define PERF_MEM_NA (PERF_MEM_S(OP, NA) |\ PERF_MEM_S(LVL, NA) |\ PERF_MEM_S(SNOOP, NA) |\ PERF_MEM_S(LOCK, NA) |\ PERF_MEM_S(TLB, NA) |\ PERF_MEM_S(LVLNUM, NA)) static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr, u64 period) { /* remaining struct members initialized in perf_prepare_sample() */ data->sample_flags = PERF_SAMPLE_PERIOD; data->period = period; data->dyn_size = 0; if (addr) { data->addr = addr; data->sample_flags |= PERF_SAMPLE_ADDR; } } static inline void perf_sample_save_callchain(struct perf_sample_data *data, struct perf_event *event, struct pt_regs *regs) { int size = 1; data->callchain = perf_callchain(event, regs); size += data->callchain->nr; data->dyn_size += size * sizeof(u64); data->sample_flags |= PERF_SAMPLE_CALLCHAIN; } static inline void perf_sample_save_raw_data(struct perf_sample_data *data, struct perf_raw_record *raw) { struct perf_raw_frag *frag = &raw->frag; u32 sum = 0; int size; do { sum += frag->size; if (perf_raw_frag_last(frag)) break; frag = frag->next; } while (1); size = round_up(sum + sizeof(u32), sizeof(u64)); raw->size = size - sizeof(u32); frag->pad = raw->size - sum; data->raw = raw; data->dyn_size += size; data->sample_flags |= PERF_SAMPLE_RAW; } static inline void perf_sample_save_brstack(struct perf_sample_data *data, struct perf_event *event, struct perf_branch_stack *brs, u64 *brs_cntr) { int size = sizeof(u64); /* nr */ if (branch_sample_hw_index(event)) size += sizeof(u64); size += brs->nr * sizeof(struct perf_branch_entry); /* * The extension space for counters is appended after the * struct perf_branch_stack. It is used to store the occurrences * of events of each branch. 
*/ if (brs_cntr) size += brs->nr * sizeof(u64); data->br_stack = brs; data->br_stack_cntr = brs_cntr; data->dyn_size += size; data->sample_flags |= PERF_SAMPLE_BRANCH_STACK; } static inline u32 perf_sample_data_size(struct perf_sample_data *data, struct perf_event *event) { u32 size = sizeof(struct perf_event_header); size += event->header_size + event->id_header_size; size += data->dyn_size; return size; } /* * Clear all bitfields in the perf_branch_entry. * The to and from fields are not cleared because they are * systematically modified by caller. */ static inline void perf_clear_branch_entry_bitfields(struct perf_branch_entry *br) { br->mispred = 0; br->predicted = 0; br->in_tx = 0; br->abort = 0; br->cycles = 0; br->type = 0; br->spec = PERF_BR_SPEC_NA; br->reserved = 0; } extern void perf_output_sample(struct perf_output_handle *handle, struct perf_event_header *header, struct perf_sample_data *data, struct perf_event *event); extern void perf_prepare_sample(struct perf_sample_data *data, struct perf_event *event, struct pt_regs *regs); extern void perf_prepare_header(struct perf_event_header *header, struct perf_sample_data *data, struct perf_event *event, struct pt_regs *regs); extern int perf_event_overflow(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs); extern void perf_event_output_forward(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs); extern void perf_event_output_backward(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs); extern int perf_event_output(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs); static inline bool __is_default_overflow_handler(perf_overflow_handler_t overflow_handler) { if (likely(overflow_handler == perf_event_output_forward)) return true; if (unlikely(overflow_handler == perf_event_output_backward)) return true; return false; } #define is_default_overflow_handler(event) \ __is_default_overflow_handler((event)->overflow_handler) #ifdef CONFIG_BPF_SYSCALL static inline bool uses_default_overflow_handler(struct perf_event *event) { if (likely(is_default_overflow_handler(event))) return true; return __is_default_overflow_handler(event->orig_overflow_handler); } #else #define uses_default_overflow_handler(event) \ is_default_overflow_handler(event) #endif extern void perf_event_header__init_id(struct perf_event_header *header, struct perf_sample_data *data, struct perf_event *event); extern void perf_event__output_id_sample(struct perf_event *event, struct perf_output_handle *handle, struct perf_sample_data *sample); extern void perf_log_lost_samples(struct perf_event *event, u64 lost); static inline bool event_has_any_exclude_flag(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; return attr->exclude_idle || attr->exclude_user || attr->exclude_kernel || attr->exclude_hv || attr->exclude_guest || attr->exclude_host; } static inline bool is_sampling_event(struct perf_event *event) { return event->attr.sample_period != 0; } /* * Return 1 for a software event, 0 for a hardware event */ static inline int is_software_event(struct perf_event *event) { return event->event_caps & PERF_EV_CAP_SOFTWARE; } /* * Return 1 for event in sw context, 0 for event in hw context */ static inline int in_software_context(struct perf_event *event) { return event->pmu_ctx->pmu->task_ctx_nr == perf_sw_context; } static inline int is_exclusive_pmu(struct pmu *pmu) { return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE; } extern struct 
static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64); extern void __perf_sw_event(u32, u64, struct pt_regs *, u64); #ifndef perf_arch_fetch_caller_regs static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { } #endif /* * When generating a perf sample in-line, instead of from an interrupt / * exception, we lack a pt_regs. This is typically used from software events * like: SW_CONTEXT_SWITCHES, SW_MIGRATIONS and the tie-in with tracepoints. * * We typically don't need a full set, but (for x86) do require: * - ip for PERF_SAMPLE_IP * - cs for user_mode() tests * - sp for PERF_SAMPLE_CALLCHAIN * - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs()) * * NOTE: assumes @regs is otherwise already 0 filled; this is important for * things like PERF_SAMPLE_REGS_INTR. */ static inline void perf_fetch_caller_regs(struct pt_regs *regs) { perf_arch_fetch_caller_regs(regs, CALLER_ADDR0); } static __always_inline void perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { if (static_key_false(&perf_swevent_enabled[event_id])) __perf_sw_event(event_id, nr, regs, addr); } DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]); /* * 'Special' version for the scheduler, it hard assumes no recursion, * which is guaranteed by us not actually scheduling inside other swevents * because those disable preemption. */ static __always_inline void __perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]); perf_fetch_caller_regs(regs); ___perf_sw_event(event_id, nr, regs, addr); } extern struct static_key_false perf_sched_events; static __always_inline bool __perf_sw_enabled(int swevt) { return static_key_false(&perf_swevent_enabled[swevt]); } static inline void perf_event_task_migrate(struct task_struct *task) { if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS)) task->sched_migrated = 1; } static inline void perf_event_task_sched_in(struct task_struct *prev, struct task_struct *task) { if (static_branch_unlikely(&perf_sched_events)) __perf_event_task_sched_in(prev, task); if (__perf_sw_enabled(PERF_COUNT_SW_CPU_MIGRATIONS) && task->sched_migrated) { __perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0); task->sched_migrated = 0; } } static inline void perf_event_task_sched_out(struct task_struct *prev, struct task_struct *next) { if (__perf_sw_enabled(PERF_COUNT_SW_CONTEXT_SWITCHES)) __perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0); #ifdef CONFIG_CGROUP_PERF if (__perf_sw_enabled(PERF_COUNT_SW_CGROUP_SWITCHES) && perf_cgroup_from_task(prev, NULL) != perf_cgroup_from_task(next, NULL)) __perf_sw_event_sched(PERF_COUNT_SW_CGROUP_SWITCHES, 1, 0); #endif if (static_branch_unlikely(&perf_sched_events)) __perf_event_task_sched_out(prev, next); } extern void perf_event_mmap(struct vm_area_struct *vma); extern void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, bool unregister, const char *sym); extern void perf_event_bpf_event(struct bpf_prog *prog, enum perf_bpf_event_type type, u16 flags); #ifdef CONFIG_GUEST_PERF_EVENTS extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs; DECLARE_STATIC_CALL(__perf_guest_state, *perf_guest_cbs->state); DECLARE_STATIC_CALL(__perf_guest_get_ip, *perf_guest_cbs->get_ip); DECLARE_STATIC_CALL(__perf_guest_handle_intel_pt_intr, *perf_guest_cbs->handle_intel_pt_intr); static inline unsigned int perf_guest_state(void) { return static_call(__perf_guest_state)(); } static inline unsigned long 
perf_guest_get_ip(void) { return static_call(__perf_guest_get_ip)(); } static inline unsigned int perf_guest_handle_intel_pt_intr(void) { return static_call(__perf_guest_handle_intel_pt_intr)(); } extern void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs); extern void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs); #else static inline unsigned int perf_guest_state(void) { return 0; } static inline unsigned long perf_guest_get_ip(void) { return 0; } static inline unsigned int perf_guest_handle_intel_pt_intr(void) { return 0; } #endif /* CONFIG_GUEST_PERF_EVENTS */ extern void perf_event_exec(void); extern void perf_event_comm(struct task_struct *tsk, bool exec); extern void perf_event_namespaces(struct task_struct *tsk); extern void perf_event_fork(struct task_struct *tsk); extern void perf_event_text_poke(const void *addr, const void *old_bytes, size_t old_len, const void *new_bytes, size_t new_len); /* Callchains */ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry); extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs); extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs); extern struct perf_callchain_entry * get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, u32 max_stack, bool crosstask, bool add_mark); extern int get_callchain_buffers(int max_stack); extern void put_callchain_buffers(void); extern struct perf_callchain_entry *get_callchain_entry(int *rctx); extern void put_callchain_entry(int rctx); extern int sysctl_perf_event_max_stack; extern int sysctl_perf_event_max_contexts_per_stack; static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip) { if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) { struct perf_callchain_entry *entry = ctx->entry; entry->ip[entry->nr++] = ip; ++ctx->contexts; return 0; } else { ctx->contexts_maxed = true; return -1; /* no more room, stop walking the stack */ } } static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip) { if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) { struct perf_callchain_entry *entry = ctx->entry; entry->ip[entry->nr++] = ip; ++ctx->nr; return 0; } else { return -1; /* no more room, stop walking the stack */ } } extern int sysctl_perf_event_paranoid; extern int sysctl_perf_event_mlock; extern int sysctl_perf_event_sample_rate; extern int sysctl_perf_cpu_time_max_percent; extern void perf_sample_event_took(u64 sample_len_ns); int perf_event_max_sample_rate_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos); int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos); int perf_event_max_stack_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos); /* Access to perf_event_open(2) syscall. */ #define PERF_SECURITY_OPEN 0 /* Finer grained perf_event_open(2) access control. 
*/ #define PERF_SECURITY_CPU 1 #define PERF_SECURITY_KERNEL 2 #define PERF_SECURITY_TRACEPOINT 3 static inline int perf_is_paranoid(void) { return sysctl_perf_event_paranoid > -1; } static inline int perf_allow_kernel(struct perf_event_attr *attr) { if (sysctl_perf_event_paranoid > 1 && !perfmon_capable()) return -EACCES; return security_perf_event_open(attr, PERF_SECURITY_KERNEL); } static inline int perf_allow_cpu(struct perf_event_attr *attr) { if (sysctl_perf_event_paranoid > 0 && !perfmon_capable()) return -EACCES; return security_perf_event_open(attr, PERF_SECURITY_CPU); } static inline int perf_allow_tracepoint(struct perf_event_attr *attr) { if (sysctl_perf_event_paranoid > -1 && !perfmon_capable()) return -EPERM; return security_perf_event_open(attr, PERF_SECURITY_TRACEPOINT); } extern void perf_event_init(void); extern void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size, struct pt_regs *regs, struct hlist_head *head, int rctx, struct task_struct *task); extern void perf_bp_event(struct perf_event *event, void *data); #ifndef perf_misc_flags # define perf_misc_flags(regs) \ (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL) # define perf_instruction_pointer(regs) instruction_pointer(regs) #endif #ifndef perf_arch_bpf_user_pt_regs # define perf_arch_bpf_user_pt_regs(regs) regs #endif static inline bool has_branch_stack(struct perf_event *event) { return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK; } static inline bool needs_branch_stack(struct perf_event *event) { return event->attr.branch_sample_type != 0; } static inline bool has_aux(struct perf_event *event) { return event->pmu->setup_aux; } static inline bool is_write_backward(struct perf_event *event) { return !!event->attr.write_backward; } static inline bool has_addr_filter(struct perf_event *event) { return event->pmu->nr_addr_filters; } /* * An inherited event uses parent's filters */ static inline struct perf_addr_filters_head * perf_event_addr_filters(struct perf_event *event) { struct perf_addr_filters_head *ifh = &event->addr_filters; if (event->parent) ifh = &event->parent->addr_filters; return ifh; } extern void perf_event_addr_filters_sync(struct perf_event *event); extern void perf_report_aux_output_id(struct perf_event *event, u64 hw_id); extern int perf_output_begin(struct perf_output_handle *handle, struct perf_sample_data *data, struct perf_event *event, unsigned int size); extern int perf_output_begin_forward(struct perf_output_handle *handle, struct perf_sample_data *data, struct perf_event *event, unsigned int size); extern int perf_output_begin_backward(struct perf_output_handle *handle, struct perf_sample_data *data, struct perf_event *event, unsigned int size); extern void perf_output_end(struct perf_output_handle *handle); extern unsigned int perf_output_copy(struct perf_output_handle *handle, const void *buf, unsigned int len); extern unsigned int perf_output_skip(struct perf_output_handle *handle, unsigned int len); extern long perf_output_copy_aux(struct perf_output_handle *aux_handle, struct perf_output_handle *handle, unsigned long from, unsigned long to); extern int perf_swevent_get_recursion_context(void); extern void perf_swevent_put_recursion_context(int rctx); extern u64 perf_swevent_set_period(struct perf_event *event); extern void perf_event_enable(struct perf_event *event); extern void perf_event_disable(struct perf_event *event); extern void perf_event_disable_local(struct perf_event *event); extern void perf_event_disable_inatomic(struct 
perf_event *event); extern void perf_event_task_tick(void); extern int perf_event_account_interrupt(struct perf_event *event); extern int perf_event_period(struct perf_event *event, u64 value); extern u64 perf_event_pause(struct perf_event *event, bool reset); #else /* !CONFIG_PERF_EVENTS: */ static inline void * perf_aux_output_begin(struct perf_output_handle *handle, struct perf_event *event) { return NULL; } static inline void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size) { } static inline int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size) { return -EINVAL; } static inline void * perf_get_aux(struct perf_output_handle *handle) { return NULL; } static inline void perf_event_task_migrate(struct task_struct *task) { } static inline void perf_event_task_sched_in(struct task_struct *prev, struct task_struct *task) { } static inline void perf_event_task_sched_out(struct task_struct *prev, struct task_struct *next) { } static inline int perf_event_init_task(struct task_struct *child, u64 clone_flags) { return 0; } static inline void perf_event_exit_task(struct task_struct *child) { } static inline void perf_event_free_task(struct task_struct *task) { } static inline void perf_event_delayed_put(struct task_struct *task) { } static inline struct file *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); } static inline const struct perf_event *perf_get_event(struct file *file) { return ERR_PTR(-EINVAL); } static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event) { return ERR_PTR(-EINVAL); } static inline int perf_event_read_local(struct perf_event *event, u64 *value, u64 *enabled, u64 *running) { return -EINVAL; } static inline void perf_event_print_debug(void) { } static inline int perf_event_task_disable(void) { return -EINVAL; } static inline int perf_event_task_enable(void) { return -EINVAL; } static inline int perf_event_refresh(struct perf_event *event, int refresh) { return -EINVAL; } static inline void perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { } static inline void perf_bp_event(struct perf_event *event, void *data) { } static inline void perf_event_mmap(struct vm_area_struct *vma) { } typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data); static inline void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, bool unregister, const char *sym) { } static inline void perf_event_bpf_event(struct bpf_prog *prog, enum perf_bpf_event_type type, u16 flags) { } static inline void perf_event_exec(void) { } static inline void perf_event_comm(struct task_struct *tsk, bool exec) { } static inline void perf_event_namespaces(struct task_struct *tsk) { } static inline void perf_event_fork(struct task_struct *tsk) { } static inline void perf_event_text_poke(const void *addr, const void *old_bytes, size_t old_len, const void *new_bytes, size_t new_len) { } static inline void perf_event_init(void) { } static inline int perf_swevent_get_recursion_context(void) { return -1; } static inline void perf_swevent_put_recursion_context(int rctx) { } static inline u64 perf_swevent_set_period(struct perf_event *event) { return 0; } static inline void perf_event_enable(struct perf_event *event) { } static inline void perf_event_disable(struct perf_event *event) { } static inline int __perf_event_disable(void *info) { return -1; } static inline void perf_event_task_tick(void) { } static inline int perf_event_release_kernel(struct perf_event *event) { return 0; } static inline 
int perf_event_period(struct perf_event *event, u64 value) { return -EINVAL; } static inline u64 perf_event_pause(struct perf_event *event, bool reset) { return 0; } #endif #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL) extern void perf_restore_debug_store(void); #else static inline void perf_restore_debug_store(void) { } #endif #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x)) struct perf_pmu_events_attr { struct device_attribute attr; u64 id; const char *event_str; }; struct perf_pmu_events_ht_attr { struct device_attribute attr; u64 id; const char *event_str_ht; const char *event_str_noht; }; struct perf_pmu_events_hybrid_attr { struct device_attribute attr; u64 id; const char *event_str; u64 pmu_type; }; struct perf_pmu_format_hybrid_attr { struct device_attribute attr; u64 pmu_type; }; ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr, char *page); #define PMU_EVENT_ATTR(_name, _var, _id, _show) \ static struct perf_pmu_events_attr _var = { \ .attr = __ATTR(_name, 0444, _show, NULL), \ .id = _id, \ }; #define PMU_EVENT_ATTR_STRING(_name, _var, _str) \ static struct perf_pmu_events_attr _var = { \ .attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \ .id = 0, \ .event_str = _str, \ }; #define PMU_EVENT_ATTR_ID(_name, _show, _id) \ (&((struct perf_pmu_events_attr[]) { \ { .attr = __ATTR(_name, 0444, _show, NULL), \ .id = _id, } \ })[0].attr.attr) #define PMU_FORMAT_ATTR_SHOW(_name, _format) \ static ssize_t \ _name##_show(struct device *dev, \ struct device_attribute *attr, \ char *page) \ { \ BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ return sprintf(page, _format "\n"); \ } \ #define PMU_FORMAT_ATTR(_name, _format) \ PMU_FORMAT_ATTR_SHOW(_name, _format) \ \ static struct device_attribute format_attr_##_name = __ATTR_RO(_name) /* Performance counter hotplug functions */ #ifdef CONFIG_PERF_EVENTS int perf_event_init_cpu(unsigned int cpu); int perf_event_exit_cpu(unsigned int cpu); #else #define perf_event_init_cpu NULL #define perf_event_exit_cpu NULL #endif extern void arch_perf_update_userpage(struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now); /* * Snapshot branch stack on software events. * * Branch stack can be very useful in understanding software events. For * example, when a long function, e.g. sys_perf_event_open, returns an * errno, it is not obvious why the function failed. Branch stack could * provide very helpful information in this type of scenario. * * On a software event, it is necessary to stop the hardware branch recorder * fast. Otherwise, the hardware register/buffer will be flushed with * entries of the triggering event. Therefore, static call is used to * stop the hardware recorder. */ /* * cnt is the number of entries allocated for entries. * Return the number of entries copied to entries. */ typedef int (perf_snapshot_branch_stack_t)(struct perf_branch_entry *entries, unsigned int cnt); DECLARE_STATIC_CALL(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t); #ifndef PERF_NEEDS_LOPWR_CB static inline void perf_lopwr_cb(bool mode) { } #endif #endif /* _LINUX_PERF_EVENT_H */
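/*
 * Example (not part of this header): a minimal userspace sketch, assuming a
 * Linux system with the UAPI headers installed, of how the software-event
 * machinery above is normally exercised via perf_event_open(2). It counts
 * PERF_COUNT_SW_CONTEXT_SWITCHES for the calling thread; whether the open
 * succeeds is gated by sysctl_perf_event_paranoid and the
 * security_perf_event_open() checks in perf_allow_*() above. The wrapper
 * name and the printed text are illustrative, not kernel API.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <sched.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

static long sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
				int cpu, int group_fd, unsigned long flags)
{
	/* glibc provides no wrapper; invoke the syscall directly. */
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd, i;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CONTEXT_SWITCHES;
	attr.disabled = 1;

	/* pid == 0, cpu == -1: measure the calling thread on any CPU. */
	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

	for (i = 0; i < 10; i++)
		sched_yield();	/* try to provoke voluntary switches (scheduler-dependent) */

	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) != sizeof(count)) {
		perror("read");
		close(fd);
		return 1;
	}
	printf("context switches: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}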
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2011 IBM Corporation * * Author: * Mimi Zohar <zohar@us.ibm.com> */ #include <linux/module.h> #include <linux/init.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/xattr.h> #include
<linux/magic.h> #include <linux/ima.h> #include <linux/evm.h> #include <linux/fsverity.h> #include <keys/system_keyring.h> #include <uapi/linux/fsverity.h> #include "ima.h" #ifdef CONFIG_IMA_APPRAISE_BOOTPARAM static char *ima_appraise_cmdline_default __initdata; core_param(ima_appraise, ima_appraise_cmdline_default, charp, 0); void __init ima_appraise_parse_cmdline(void) { const char *str = ima_appraise_cmdline_default; bool sb_state = arch_ima_get_secureboot(); int appraisal_state = ima_appraise; if (!str) return; if (strncmp(str, "off", 3) == 0) appraisal_state = 0; else if (strncmp(str, "log", 3) == 0) appraisal_state = IMA_APPRAISE_LOG; else if (strncmp(str, "fix", 3) == 0) appraisal_state = IMA_APPRAISE_FIX; else if (strncmp(str, "enforce", 7) == 0) appraisal_state = IMA_APPRAISE_ENFORCE; else pr_err("invalid \"%s\" appraise option", str); /* If appraisal state was changed, but secure boot is enabled, * keep its default */ if (sb_state) { if (!(appraisal_state & IMA_APPRAISE_ENFORCE)) pr_info("Secure boot enabled: ignoring ima_appraise=%s option", str); } else { ima_appraise = appraisal_state; } } #endif /* * is_ima_appraise_enabled - return appraise status * * Only return enabled, if not in ima_appraise="fix" or "log" modes. */ bool is_ima_appraise_enabled(void) { return ima_appraise & IMA_APPRAISE_ENFORCE; } /* * ima_must_appraise - set appraise flag * * Return 1 to appraise or hash */ int ima_must_appraise(struct mnt_idmap *idmap, struct inode *inode, int mask, enum ima_hooks func) { u32 secid; if (!ima_appraise) return 0; security_current_getsecid_subj(&secid); return ima_match_policy(idmap, inode, current_cred(), secid, func, mask, IMA_APPRAISE | IMA_HASH, NULL, NULL, NULL, NULL); } static int ima_fix_xattr(struct dentry *dentry, struct ima_iint_cache *iint) { int rc, offset; u8 algo = iint->ima_hash->algo; if (algo <= HASH_ALGO_SHA1) { offset = 1; iint->ima_hash->xattr.sha1.type = IMA_XATTR_DIGEST; } else { offset = 0; iint->ima_hash->xattr.ng.type = IMA_XATTR_DIGEST_NG; iint->ima_hash->xattr.ng.algo = algo; } rc = __vfs_setxattr_noperm(&nop_mnt_idmap, dentry, XATTR_NAME_IMA, &iint->ima_hash->xattr.data[offset], (sizeof(iint->ima_hash->xattr) - offset) + iint->ima_hash->length, 0); return rc; } /* Return specific func appraised cached result */ enum integrity_status ima_get_cache_status(struct ima_iint_cache *iint, enum ima_hooks func) { switch (func) { case MMAP_CHECK: case MMAP_CHECK_REQPROT: return iint->ima_mmap_status; case BPRM_CHECK: return iint->ima_bprm_status; case CREDS_CHECK: return iint->ima_creds_status; case FILE_CHECK: case POST_SETATTR: return iint->ima_file_status; case MODULE_CHECK ... MAX_CHECK - 1: default: return iint->ima_read_status; } } static void ima_set_cache_status(struct ima_iint_cache *iint, enum ima_hooks func, enum integrity_status status) { switch (func) { case MMAP_CHECK: case MMAP_CHECK_REQPROT: iint->ima_mmap_status = status; break; case BPRM_CHECK: iint->ima_bprm_status = status; break; case CREDS_CHECK: iint->ima_creds_status = status; break; case FILE_CHECK: case POST_SETATTR: iint->ima_file_status = status; break; case MODULE_CHECK ... 
MAX_CHECK - 1: default: iint->ima_read_status = status; break; } } static void ima_cache_flags(struct ima_iint_cache *iint, enum ima_hooks func) { switch (func) { case MMAP_CHECK: case MMAP_CHECK_REQPROT: iint->flags |= (IMA_MMAP_APPRAISED | IMA_APPRAISED); break; case BPRM_CHECK: iint->flags |= (IMA_BPRM_APPRAISED | IMA_APPRAISED); break; case CREDS_CHECK: iint->flags |= (IMA_CREDS_APPRAISED | IMA_APPRAISED); break; case FILE_CHECK: case POST_SETATTR: iint->flags |= (IMA_FILE_APPRAISED | IMA_APPRAISED); break; case MODULE_CHECK ... MAX_CHECK - 1: default: iint->flags |= (IMA_READ_APPRAISED | IMA_APPRAISED); break; } } enum hash_algo ima_get_hash_algo(const struct evm_ima_xattr_data *xattr_value, int xattr_len) { struct signature_v2_hdr *sig; enum hash_algo ret; if (!xattr_value || xattr_len < 2) /* return default hash algo */ return ima_hash_algo; switch (xattr_value->type) { case IMA_VERITY_DIGSIG: sig = (typeof(sig))xattr_value; if (sig->version != 3 || xattr_len <= sizeof(*sig) || sig->hash_algo >= HASH_ALGO__LAST) return ima_hash_algo; return sig->hash_algo; case EVM_IMA_XATTR_DIGSIG: sig = (typeof(sig))xattr_value; if (sig->version != 2 || xattr_len <= sizeof(*sig) || sig->hash_algo >= HASH_ALGO__LAST) return ima_hash_algo; return sig->hash_algo; case IMA_XATTR_DIGEST_NG: /* first byte contains algorithm id */ ret = xattr_value->data[0]; if (ret < HASH_ALGO__LAST) return ret; break; case IMA_XATTR_DIGEST: /* this is for backward compatibility */ if (xattr_len == 21) { unsigned int zero = 0; if (!memcmp(&xattr_value->data[16], &zero, 4)) return HASH_ALGO_MD5; else return HASH_ALGO_SHA1; } else if (xattr_len == 17) return HASH_ALGO_MD5; break; } /* return default hash algo */ return ima_hash_algo; } int ima_read_xattr(struct dentry *dentry, struct evm_ima_xattr_data **xattr_value, int xattr_len) { int ret; ret = vfs_getxattr_alloc(&nop_mnt_idmap, dentry, XATTR_NAME_IMA, (char **)xattr_value, xattr_len, GFP_NOFS); if (ret == -EOPNOTSUPP) ret = 0; return ret; } /* * calc_file_id_hash - calculate the hash of the ima_file_id struct data * @type: xattr type [enum evm_ima_xattr_type] * @algo: hash algorithm [enum hash_algo] * @digest: pointer to the digest to be hashed * @hash: (out) pointer to the hash * * IMA signature version 3 disambiguates the data that is signed by * indirectly signing the hash of the ima_file_id structure data. * * Signing the ima_file_id struct is currently only supported for * IMA_VERITY_DIGSIG type xattrs. * * Return 0 on success, error code otherwise. */ static int calc_file_id_hash(enum evm_ima_xattr_type type, enum hash_algo algo, const u8 *digest, struct ima_digest_data *hash) { struct ima_file_id file_id = { .hash_type = IMA_VERITY_DIGSIG, .hash_algorithm = algo}; unsigned int unused = HASH_MAX_DIGESTSIZE - hash_digest_size[algo]; if (type != IMA_VERITY_DIGSIG) return -EINVAL; memcpy(file_id.hash, digest, hash_digest_size[algo]); hash->algo = algo; hash->length = hash_digest_size[algo]; return ima_calc_buffer_hash(&file_id, sizeof(file_id) - unused, hash); } /* * xattr_verify - verify xattr digest or signature * * Verify whether the hash or signature matches the file contents. * * Return 0 on success, error code otherwise. 
*/ static int xattr_verify(enum ima_hooks func, struct ima_iint_cache *iint, struct evm_ima_xattr_data *xattr_value, int xattr_len, enum integrity_status *status, const char **cause) { struct ima_max_digest_data hash; struct signature_v2_hdr *sig; int rc = -EINVAL, hash_start = 0; int mask; switch (xattr_value->type) { case IMA_XATTR_DIGEST_NG: /* first byte contains algorithm id */ hash_start = 1; fallthrough; case IMA_XATTR_DIGEST: if (*status != INTEGRITY_PASS_IMMUTABLE) { if (iint->flags & IMA_DIGSIG_REQUIRED) { if (iint->flags & IMA_VERITY_REQUIRED) *cause = "verity-signature-required"; else *cause = "IMA-signature-required"; *status = INTEGRITY_FAIL; break; } clear_bit(IMA_DIGSIG, &iint->atomic_flags); } else { set_bit(IMA_DIGSIG, &iint->atomic_flags); } if (xattr_len - sizeof(xattr_value->type) - hash_start >= iint->ima_hash->length) /* * xattr length may be longer. md5 hash in previous * version occupied 20 bytes in xattr, instead of 16 */ rc = memcmp(&xattr_value->data[hash_start], iint->ima_hash->digest, iint->ima_hash->length); else rc = -EINVAL; if (rc) { *cause = "invalid-hash"; *status = INTEGRITY_FAIL; break; } *status = INTEGRITY_PASS; break; case EVM_IMA_XATTR_DIGSIG: set_bit(IMA_DIGSIG, &iint->atomic_flags); mask = IMA_DIGSIG_REQUIRED | IMA_VERITY_REQUIRED; if ((iint->flags & mask) == mask) { *cause = "verity-signature-required"; *status = INTEGRITY_FAIL; break; } sig = (typeof(sig))xattr_value; if (sig->version >= 3) { *cause = "invalid-signature-version"; *status = INTEGRITY_FAIL; break; } rc = integrity_digsig_verify(INTEGRITY_KEYRING_IMA, (const char *)xattr_value, xattr_len, iint->ima_hash->digest, iint->ima_hash->length); if (rc == -EOPNOTSUPP) { *status = INTEGRITY_UNKNOWN; break; } if (IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING) && rc && func == KEXEC_KERNEL_CHECK) rc = integrity_digsig_verify(INTEGRITY_KEYRING_PLATFORM, (const char *)xattr_value, xattr_len, iint->ima_hash->digest, iint->ima_hash->length); if (rc) { *cause = "invalid-signature"; *status = INTEGRITY_FAIL; } else { *status = INTEGRITY_PASS; } break; case IMA_VERITY_DIGSIG: set_bit(IMA_DIGSIG, &iint->atomic_flags); if (iint->flags & IMA_DIGSIG_REQUIRED) { if (!(iint->flags & IMA_VERITY_REQUIRED)) { *cause = "IMA-signature-required"; *status = INTEGRITY_FAIL; break; } } sig = (typeof(sig))xattr_value; if (sig->version != 3) { *cause = "invalid-signature-version"; *status = INTEGRITY_FAIL; break; } rc = calc_file_id_hash(IMA_VERITY_DIGSIG, iint->ima_hash->algo, iint->ima_hash->digest, &hash.hdr); if (rc) { *cause = "sigv3-hashing-error"; *status = INTEGRITY_FAIL; break; } rc = integrity_digsig_verify(INTEGRITY_KEYRING_IMA, (const char *)xattr_value, xattr_len, hash.digest, hash.hdr.length); if (rc) { *cause = "invalid-verity-signature"; *status = INTEGRITY_FAIL; } else { *status = INTEGRITY_PASS; } break; default: *status = INTEGRITY_UNKNOWN; *cause = "unknown-ima-data"; break; } return rc; } /* * modsig_verify - verify modsig signature * * Verify whether the signature matches the file contents. * * Return 0 on success, error code otherwise. 
*/ static int modsig_verify(enum ima_hooks func, const struct modsig *modsig, enum integrity_status *status, const char **cause) { int rc; rc = integrity_modsig_verify(INTEGRITY_KEYRING_IMA, modsig); if (IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING) && rc && func == KEXEC_KERNEL_CHECK) rc = integrity_modsig_verify(INTEGRITY_KEYRING_PLATFORM, modsig); if (rc) { *cause = "invalid-signature"; *status = INTEGRITY_FAIL; } else { *status = INTEGRITY_PASS; } return rc; } /* * ima_check_blacklist - determine if the binary is blacklisted. * * Add the hash of the blacklisted binary to the measurement list, based * on policy. * * Returns -EPERM if the hash is blacklisted. */ int ima_check_blacklist(struct ima_iint_cache *iint, const struct modsig *modsig, int pcr) { enum hash_algo hash_algo; const u8 *digest = NULL; u32 digestsize = 0; int rc = 0; if (!(iint->flags & IMA_CHECK_BLACKLIST)) return 0; if (iint->flags & IMA_MODSIG_ALLOWED && modsig) { ima_get_modsig_digest(modsig, &hash_algo, &digest, &digestsize); rc = is_binary_blacklisted(digest, digestsize); } else if (iint->flags & IMA_DIGSIG_REQUIRED && iint->ima_hash) rc = is_binary_blacklisted(iint->ima_hash->digest, iint->ima_hash->length); if ((rc == -EPERM) && (iint->flags & IMA_MEASURE)) process_buffer_measurement(&nop_mnt_idmap, NULL, digest, digestsize, "blacklisted-hash", NONE, pcr, NULL, false, NULL, 0); return rc; } /* * ima_appraise_measurement - appraise file measurement * * Call evm_verifyxattr() to verify the integrity of 'security.ima'. * Assuming success, compare the xattr hash with the collected measurement. * * Return 0 on success, error code otherwise */ int ima_appraise_measurement(enum ima_hooks func, struct ima_iint_cache *iint, struct file *file, const unsigned char *filename, struct evm_ima_xattr_data *xattr_value, int xattr_len, const struct modsig *modsig) { static const char op[] = "appraise_data"; const char *cause = "unknown"; struct dentry *dentry = file_dentry(file); struct inode *inode = d_backing_inode(dentry); enum integrity_status status = INTEGRITY_UNKNOWN; int rc = xattr_len; bool try_modsig = iint->flags & IMA_MODSIG_ALLOWED && modsig; /* If not appraising a modsig, we need an xattr. */ if (!(inode->i_opflags & IOP_XATTR) && !try_modsig) return INTEGRITY_UNKNOWN; /* If reading the xattr failed and there's no modsig, error out. */ if (rc <= 0 && !try_modsig) { if (rc && rc != -ENODATA) goto out; if (iint->flags & IMA_DIGSIG_REQUIRED) { if (iint->flags & IMA_VERITY_REQUIRED) cause = "verity-signature-required"; else cause = "IMA-signature-required"; } else { cause = "missing-hash"; } status = INTEGRITY_NOLABEL; if (file->f_mode & FMODE_CREATED) iint->flags |= IMA_NEW_FILE; if ((iint->flags & IMA_NEW_FILE) && (!(iint->flags & IMA_DIGSIG_REQUIRED) || (inode->i_size == 0))) status = INTEGRITY_PASS; goto out; } status = evm_verifyxattr(dentry, XATTR_NAME_IMA, xattr_value, rc < 0 ? 0 : rc); switch (status) { case INTEGRITY_PASS: case INTEGRITY_PASS_IMMUTABLE: case INTEGRITY_UNKNOWN: break; case INTEGRITY_NOXATTRS: /* No EVM protected xattrs. */ /* It's fine not to have xattrs when using a modsig. */ if (try_modsig) break; fallthrough; case INTEGRITY_NOLABEL: /* No security.evm xattr. */ cause = "missing-HMAC"; goto out; case INTEGRITY_FAIL_IMMUTABLE: set_bit(IMA_DIGSIG, &iint->atomic_flags); cause = "invalid-fail-immutable"; goto out; case INTEGRITY_FAIL: /* Invalid HMAC/signature. 
*/ cause = "invalid-HMAC"; goto out; default: WARN_ONCE(true, "Unexpected integrity status %d\n", status); } if (xattr_value) rc = xattr_verify(func, iint, xattr_value, xattr_len, &status, &cause); /* * If we have a modsig and either no imasig or the imasig's key isn't * known, then try verifying the modsig. */ if (try_modsig && (!xattr_value || xattr_value->type == IMA_XATTR_DIGEST_NG || rc == -ENOKEY)) rc = modsig_verify(func, modsig, &status, &cause); out: /* * File signatures on some filesystems can not be properly verified. * When such filesystems are mounted by an untrusted mounter or on a * system not willing to accept such a risk, fail the file signature * verification. */ if ((inode->i_sb->s_iflags & SB_I_IMA_UNVERIFIABLE_SIGNATURE) && ((inode->i_sb->s_iflags & SB_I_UNTRUSTED_MOUNTER) || (iint->flags & IMA_FAIL_UNVERIFIABLE_SIGS))) { status = INTEGRITY_FAIL; cause = "unverifiable-signature"; integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, filename, op, cause, rc, 0); } else if (status != INTEGRITY_PASS) { /* Fix mode, but don't replace file signatures. */ if ((ima_appraise & IMA_APPRAISE_FIX) && !try_modsig && (!xattr_value || xattr_value->type != EVM_IMA_XATTR_DIGSIG)) { if (!ima_fix_xattr(dentry, iint)) status = INTEGRITY_PASS; } /* * Permit new files with file/EVM portable signatures, but * without data. */ if (inode->i_size == 0 && iint->flags & IMA_NEW_FILE && test_bit(IMA_DIGSIG, &iint->atomic_flags)) { status = INTEGRITY_PASS; } integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, filename, op, cause, rc, 0); } else { ima_cache_flags(iint, func); } ima_set_cache_status(iint, func, status); return status; } /* * ima_update_xattr - update 'security.ima' hash value */ void ima_update_xattr(struct ima_iint_cache *iint, struct file *file) { struct dentry *dentry = file_dentry(file); int rc = 0; /* do not collect and update hash for digital signatures */ if (test_bit(IMA_DIGSIG, &iint->atomic_flags)) return; if ((iint->ima_file_status != INTEGRITY_PASS) && !(iint->flags & IMA_HASH)) return; rc = ima_collect_measurement(iint, file, NULL, 0, ima_hash_algo, NULL); if (rc < 0) return; inode_lock(file_inode(file)); ima_fix_xattr(dentry, iint); inode_unlock(file_inode(file)); } /** * ima_inode_post_setattr - reflect file metadata changes * @idmap: idmap of the mount the inode was found from * @dentry: pointer to the affected dentry * @ia_valid: for the UID and GID status * * Changes to a dentry's metadata might result in needing to appraise. * * This function is called from notify_change(), which expects the caller * to lock the inode's i_mutex. */ static void ima_inode_post_setattr(struct mnt_idmap *idmap, struct dentry *dentry, int ia_valid) { struct inode *inode = d_backing_inode(dentry); struct ima_iint_cache *iint; int action; if (!(ima_policy_flag & IMA_APPRAISE) || !S_ISREG(inode->i_mode) || !(inode->i_opflags & IOP_XATTR)) return; action = ima_must_appraise(idmap, inode, MAY_ACCESS, POST_SETATTR); iint = ima_iint_find(inode); if (iint) { set_bit(IMA_CHANGE_ATTR, &iint->atomic_flags); if (!action) clear_bit(IMA_UPDATE_XATTR, &iint->atomic_flags); } } /* * ima_protect_xattr - protect 'security.ima' * * Ensure that not just anyone can modify or remove 'security.ima'. 
*/ static int ima_protect_xattr(struct dentry *dentry, const char *xattr_name, const void *xattr_value, size_t xattr_value_len) { if (strcmp(xattr_name, XATTR_NAME_IMA) == 0) { if (!capable(CAP_SYS_ADMIN)) return -EPERM; return 1; } return 0; } static void ima_reset_appraise_flags(struct inode *inode, int digsig) { struct ima_iint_cache *iint; if (!(ima_policy_flag & IMA_APPRAISE) || !S_ISREG(inode->i_mode)) return; iint = ima_iint_find(inode); if (!iint) return; iint->measured_pcrs = 0; set_bit(IMA_CHANGE_XATTR, &iint->atomic_flags); if (digsig) set_bit(IMA_DIGSIG, &iint->atomic_flags); else clear_bit(IMA_DIGSIG, &iint->atomic_flags); } /** * validate_hash_algo() - Block setxattr with unsupported hash algorithms * @dentry: object of the setxattr() * @xattr_value: userland supplied xattr value * @xattr_value_len: length of xattr_value * * The xattr value is mapped to its hash algorithm, and this algorithm * must be built in the kernel for the setxattr to be allowed. * * Emit an audit message when the algorithm is invalid. * * Return: 0 on success, else an error. */ static int validate_hash_algo(struct dentry *dentry, const struct evm_ima_xattr_data *xattr_value, size_t xattr_value_len) { char *path = NULL, *pathbuf = NULL; enum hash_algo xattr_hash_algo; const char *errmsg = "unavailable-hash-algorithm"; unsigned int allowed_hashes; xattr_hash_algo = ima_get_hash_algo(xattr_value, xattr_value_len); allowed_hashes = atomic_read(&ima_setxattr_allowed_hash_algorithms); if (allowed_hashes) { /* success if the algorithm is allowed in the ima policy */ if (allowed_hashes & (1U << xattr_hash_algo)) return 0; /* * We use a different audit message when the hash algorithm * is denied by a policy rule, instead of not being built * in the kernel image */ errmsg = "denied-hash-algorithm"; } else { if (likely(xattr_hash_algo == ima_hash_algo)) return 0; /* allow any xattr using an algorithm built in the kernel */ if (crypto_has_alg(hash_algo_name[xattr_hash_algo], 0, 0)) return 0; } pathbuf = kmalloc(PATH_MAX, GFP_KERNEL); if (!pathbuf) return -EACCES; path = dentry_path(dentry, pathbuf, PATH_MAX); integrity_audit_msg(AUDIT_INTEGRITY_DATA, d_inode(dentry), path, "set_data", errmsg, -EACCES, 0); kfree(pathbuf); return -EACCES; } static int ima_inode_setxattr(struct mnt_idmap *idmap, struct dentry *dentry, const char *xattr_name, const void *xattr_value, size_t xattr_value_len, int flags) { const struct evm_ima_xattr_data *xvalue = xattr_value; int digsig = 0; int result; int err; result = ima_protect_xattr(dentry, xattr_name, xattr_value, xattr_value_len); if (result == 1) { if (!xattr_value_len || (xvalue->type >= IMA_XATTR_LAST)) return -EINVAL; err = validate_hash_algo(dentry, xvalue, xattr_value_len); if (err) return err; digsig = (xvalue->type == EVM_IMA_XATTR_DIGSIG); } else if (!strcmp(xattr_name, XATTR_NAME_EVM) && xattr_value_len > 0) { digsig = (xvalue->type == EVM_XATTR_PORTABLE_DIGSIG); } if (result == 1 || evm_revalidate_status(xattr_name)) { ima_reset_appraise_flags(d_backing_inode(dentry), digsig); if (result == 1) result = 0; } return result; } static int ima_inode_set_acl(struct mnt_idmap *idmap, struct dentry *dentry, const char *acl_name, struct posix_acl *kacl) { if (evm_revalidate_status(acl_name)) ima_reset_appraise_flags(d_backing_inode(dentry), 0); return 0; } static int ima_inode_removexattr(struct mnt_idmap *idmap, struct dentry *dentry, const char *xattr_name) { int result; result = ima_protect_xattr(dentry, xattr_name, NULL, 0); if (result == 1 || 
evm_revalidate_status(xattr_name)) { ima_reset_appraise_flags(d_backing_inode(dentry), 0); if (result == 1) result = 0; } return result; } static int ima_inode_remove_acl(struct mnt_idmap *idmap, struct dentry *dentry, const char *acl_name) { return ima_inode_set_acl(idmap, dentry, acl_name, NULL); } static struct security_hook_list ima_appraise_hooks[] __ro_after_init = { LSM_HOOK_INIT(inode_post_setattr, ima_inode_post_setattr), LSM_HOOK_INIT(inode_setxattr, ima_inode_setxattr), LSM_HOOK_INIT(inode_set_acl, ima_inode_set_acl), LSM_HOOK_INIT(inode_removexattr, ima_inode_removexattr), LSM_HOOK_INIT(inode_remove_acl, ima_inode_remove_acl), }; void __init init_ima_appraise_lsm(const struct lsm_id *lsmid) { security_add_hooks(ima_appraise_hooks, ARRAY_SIZE(ima_appraise_hooks), lsmid); }
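/*
 * Example (not part of this file): a minimal userspace sketch that dumps a
 * file's 'security.ima' xattr so the layout handled by ima_get_hash_algo()
 * and xattr_verify() above can be inspected by hand. Byte 0 is the
 * evm_ima_xattr_type; for IMA_XATTR_DIGEST_NG the next byte is the hash
 * algorithm id, followed by the digest (see ima_get_hash_algo() above).
 * The output format is illustrative only; writing the xattr is gated by
 * ima_protect_xattr()/validate_hash_algo() above and normally requires
 * CAP_SYS_ADMIN. getfattr(1) with '-n security.ima -e hex' shows the same
 * bytes.
 */
#include <sys/types.h>
#include <sys/xattr.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	unsigned char buf[1024];
	ssize_t len, i;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}

	len = getxattr(argv[1], "security.ima", buf, sizeof(buf));
	if (len < 0) {
		perror("getxattr(security.ima)");
		return 1;
	}

	/* buf[0] is the evm_ima_xattr_type; the rest is type-specific data. */
	printf("type=0x%02x, %zd bytes of data:\n", buf[0], len - 1);
	for (i = 1; i < len; i++)
		printf("%02x%c", buf[i], (i % 16) ? ' ' : '\n');
	printf("\n");
	return 0;
}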
// SPDX-License-Identifier: GPL-2.0+ /* * Transport & Protocol Driver for In-System Design, Inc.
ISD200 ASIC * * Current development and maintenance: * (C) 2001-2002 Björn Stenberg (bjorn@haxx.se) * * Developed with the assistance of: * (C) 2002 Alan Stern <stern@rowland.org> * * Initial work: * (C) 2000 In-System Design, Inc. (support@in-system.com) * * The ISD200 ASIC does not natively support ATA devices. The chip * does implement an interface, the ATA Command Block (ATACB) which provides * a means of passing ATA commands and ATA register accesses to a device. * * History: * * 2002-10-19: Removed the specialized transfer routines. * (Alan Stern <stern@rowland.harvard.edu>) * 2001-02-24: Removed lots of duplicate code and simplified the structure. * (bjorn@haxx.se) * 2002-01-16: Fixed endianness bug so it works on the ppc arch. * (Luc Saillard <luc@saillard.org>) * 2002-01-17: All bitfields removed. * (bjorn@haxx.se) */ /* Include files */ #include <linux/jiffies.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/ata.h> #include <linux/hdreg.h> #include <linux/scatterlist.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include "usb.h" #include "transport.h" #include "protocol.h" #include "debug.h" #include "scsiglue.h" #define DRV_NAME "ums-isd200" MODULE_DESCRIPTION("Driver for In-System Design, Inc. ISD200 ASIC"); MODULE_AUTHOR("Björn Stenberg <bjorn@haxx.se>"); MODULE_LICENSE("GPL"); MODULE_IMPORT_NS(USB_STORAGE); static int isd200_Initialization(struct us_data *us); /* * The table of devices */ #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ vendorName, productName, useProtocol, useTransport, \ initFunction, flags) \ { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags) } static struct usb_device_id isd200_usb_ids[] = { # include "unusual_isd200.h" { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, isd200_usb_ids); #undef UNUSUAL_DEV /* * The flags table */ #define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \ vendor_name, product_name, use_protocol, use_transport, \ init_function, Flags) \ { \ .vendorName = vendor_name, \ .productName = product_name, \ .useProtocol = use_protocol, \ .useTransport = use_transport, \ .initFunction = init_function, \ } static struct us_unusual_dev isd200_unusual_dev_list[] = { # include "unusual_isd200.h" { } /* Terminating entry */ }; #undef UNUSUAL_DEV /* Timeout defines (in Seconds) */ #define ISD200_ENUM_BSY_TIMEOUT 35 #define ISD200_ENUM_DETECT_TIMEOUT 30 #define ISD200_DEFAULT_TIMEOUT 30 /* device flags */ #define DF_ATA_DEVICE 0x0001 #define DF_MEDIA_STATUS_ENABLED 0x0002 #define DF_REMOVABLE_MEDIA 0x0004 /* capability bit definitions */ #define CAPABILITY_DMA 0x01 #define CAPABILITY_LBA 0x02 /* command_setX bit definitions */ #define COMMANDSET_REMOVABLE 0x02 #define COMMANDSET_MEDIA_STATUS 0x10 /* ATA Vendor Specific defines */ #define ATA_ADDRESS_DEVHEAD_STD 0xa0 #define ATA_ADDRESS_DEVHEAD_LBA_MODE 0x40 #define ATA_ADDRESS_DEVHEAD_SLAVE 0x10 /* Action Select bits */ #define ACTION_SELECT_0 0x01 #define ACTION_SELECT_1 0x02 #define ACTION_SELECT_2 0x04 #define ACTION_SELECT_3 0x08 #define ACTION_SELECT_4 0x10 #define ACTION_SELECT_5 0x20 #define ACTION_SELECT_6 0x40 #define ACTION_SELECT_7 0x80 /* Register Select bits */ #define REG_ALTERNATE_STATUS 0x01 #define REG_DEVICE_CONTROL 0x01 #define REG_ERROR 0x02 #define REG_FEATURES 0x02 #define REG_SECTOR_COUNT 0x04 #define REG_SECTOR_NUMBER 0x08 #define REG_CYLINDER_LOW 0x10 #define REG_CYLINDER_HIGH 0x20 #define 
REG_DEVICE_HEAD 0x40 #define REG_STATUS 0x80 #define REG_COMMAND 0x80 /* ATA registers offset definitions */ #define ATA_REG_ERROR_OFFSET 1 #define ATA_REG_LCYL_OFFSET 4 #define ATA_REG_HCYL_OFFSET 5 #define ATA_REG_STATUS_OFFSET 7 /* ATA error definitions not in <linux/hdreg.h> */ #define ATA_ERROR_MEDIA_CHANGE 0x20 /* ATA command definitions not in <linux/hdreg.h> */ #define ATA_COMMAND_GET_MEDIA_STATUS 0xDA #define ATA_COMMAND_MEDIA_EJECT 0xED /* ATA drive control definitions */ #define ATA_DC_DISABLE_INTERRUPTS 0x02 #define ATA_DC_RESET_CONTROLLER 0x04 #define ATA_DC_REENABLE_CONTROLLER 0x00 /* * General purpose return codes */ #define ISD200_ERROR -1 #define ISD200_GOOD 0 /* * Transport return codes */ #define ISD200_TRANSPORT_GOOD 0 /* Transport good, command good */ #define ISD200_TRANSPORT_FAILED 1 /* Transport good, command failed */ #define ISD200_TRANSPORT_ERROR 2 /* Transport bad (i.e. device dead) */ /* driver action codes */ #define ACTION_READ_STATUS 0 #define ACTION_RESET 1 #define ACTION_REENABLE 2 #define ACTION_SOFT_RESET 3 #define ACTION_ENUM 4 #define ACTION_IDENTIFY 5 /* * ata_cdb struct */ union ata_cdb { struct { unsigned char SignatureByte0; unsigned char SignatureByte1; unsigned char ActionSelect; unsigned char RegisterSelect; unsigned char TransferBlockSize; unsigned char WriteData3F6; unsigned char WriteData1F1; unsigned char WriteData1F2; unsigned char WriteData1F3; unsigned char WriteData1F4; unsigned char WriteData1F5; unsigned char WriteData1F6; unsigned char WriteData1F7; unsigned char Reserved[3]; } generic; struct { unsigned char SignatureByte0; unsigned char SignatureByte1; unsigned char ActionSelect; unsigned char RegisterSelect; unsigned char TransferBlockSize; unsigned char AlternateStatusByte; unsigned char ErrorByte; unsigned char SectorCountByte; unsigned char SectorNumberByte; unsigned char CylinderLowByte; unsigned char CylinderHighByte; unsigned char DeviceHeadByte; unsigned char StatusByte; unsigned char Reserved[3]; } read; struct { unsigned char SignatureByte0; unsigned char SignatureByte1; unsigned char ActionSelect; unsigned char RegisterSelect; unsigned char TransferBlockSize; unsigned char DeviceControlByte; unsigned char FeaturesByte; unsigned char SectorCountByte; unsigned char SectorNumberByte; unsigned char CylinderLowByte; unsigned char CylinderHighByte; unsigned char DeviceHeadByte; unsigned char CommandByte; unsigned char Reserved[3]; } write; }; /* * Inquiry data structure. This is the data returned from the target * after it receives an inquiry. * * This structure may be extended by the number of bytes specified * in the field AdditionalLength. The defined size constant only * includes fields through ProductRevisionLevel. 
*/ /* * DeviceType field */ #define DIRECT_ACCESS_DEVICE 0x00 /* disks */ #define DEVICE_REMOVABLE 0x80 struct inquiry_data { unsigned char DeviceType; unsigned char DeviceTypeModifier; unsigned char Versions; unsigned char Format; unsigned char AdditionalLength; unsigned char Reserved[2]; unsigned char Capability; unsigned char VendorId[8]; unsigned char ProductId[16]; unsigned char ProductRevisionLevel[4]; unsigned char VendorSpecific[20]; unsigned char Reserved3[40]; } __attribute__ ((packed)); /* * INQUIRY data buffer size */ #define INQUIRYDATABUFFERSIZE 36 /* * ISD200 CONFIG data struct */ #define ATACFG_TIMING 0x0f #define ATACFG_ATAPI_RESET 0x10 #define ATACFG_MASTER 0x20 #define ATACFG_BLOCKSIZE 0xa0 #define ATACFGE_LAST_LUN 0x07 #define ATACFGE_DESC_OVERRIDE 0x08 #define ATACFGE_STATE_SUSPEND 0x10 #define ATACFGE_SKIP_BOOT 0x20 #define ATACFGE_CONF_DESC2 0x40 #define ATACFGE_INIT_STATUS 0x80 #define CFG_CAPABILITY_SRST 0x01 struct isd200_config { unsigned char EventNotification; unsigned char ExternalClock; unsigned char ATAInitTimeout; unsigned char ATAConfig; unsigned char ATAMajorCommand; unsigned char ATAMinorCommand; unsigned char ATAExtraConfig; unsigned char Capability; }__attribute__ ((packed)); /* * ISD200 driver information struct */ struct isd200_info { struct inquiry_data InquiryData; u16 *id; struct isd200_config ConfigData; unsigned char *RegsBuf; unsigned char ATARegs[8]; unsigned char DeviceHead; unsigned char DeviceFlags; /* maximum number of LUNs supported */ unsigned char MaxLUNs; unsigned char cmnd[MAX_COMMAND_SIZE]; struct scsi_cmnd srb; struct scatterlist sg; }; /* * Read Capacity Data - returned in Big Endian format */ struct read_capacity_data { __be32 LogicalBlockAddress; __be32 BytesPerBlock; }; /* * Read Block Limits Data - returned in Big Endian format * This structure returns the maximum and minimum block * size for a TAPE device. */ struct read_block_limits { unsigned char Reserved; unsigned char BlockMaximumSize[3]; unsigned char BlockMinimumSize[2]; }; /* * Sense Data Format */ #define SENSE_ERRCODE 0x7f #define SENSE_ERRCODE_VALID 0x80 #define SENSE_FLAG_SENSE_KEY 0x0f #define SENSE_FLAG_BAD_LENGTH 0x20 #define SENSE_FLAG_END_OF_MEDIA 0x40 #define SENSE_FLAG_FILE_MARK 0x80 struct sense_data { unsigned char ErrorCode; unsigned char SegmentNumber; unsigned char Flags; unsigned char Information[4]; unsigned char AdditionalSenseLength; unsigned char CommandSpecificInformation[4]; unsigned char AdditionalSenseCode; unsigned char AdditionalSenseCodeQualifier; unsigned char FieldReplaceableUnitCode; unsigned char SenseKeySpecific[3]; } __attribute__ ((packed)); /* * Default request sense buffer size */ #define SENSE_BUFFER_SIZE 18 /*********************************************************************** * Helper routines ***********************************************************************/ /************************************************************************** * isd200_build_sense * * Builds an artificial sense buffer to report the results of a * failed command. 
* * RETURNS: * void */ static void isd200_build_sense(struct us_data *us, struct scsi_cmnd *srb) { struct isd200_info *info = (struct isd200_info *)us->extra; struct sense_data *buf = (struct sense_data *) &srb->sense_buffer[0]; unsigned char error = info->ATARegs[ATA_REG_ERROR_OFFSET]; if(error & ATA_ERROR_MEDIA_CHANGE) { buf->ErrorCode = 0x70 | SENSE_ERRCODE_VALID; buf->AdditionalSenseLength = 0xb; buf->Flags = UNIT_ATTENTION; buf->AdditionalSenseCode = 0; buf->AdditionalSenseCodeQualifier = 0; } else if (error & ATA_MCR) { buf->ErrorCode = 0x70 | SENSE_ERRCODE_VALID; buf->AdditionalSenseLength = 0xb; buf->Flags = UNIT_ATTENTION; buf->AdditionalSenseCode = 0; buf->AdditionalSenseCodeQualifier = 0; } else if (error & ATA_TRK0NF) { buf->ErrorCode = 0x70 | SENSE_ERRCODE_VALID; buf->AdditionalSenseLength = 0xb; buf->Flags = NOT_READY; buf->AdditionalSenseCode = 0; buf->AdditionalSenseCodeQualifier = 0; } else if (error & ATA_UNC) { buf->ErrorCode = 0x70 | SENSE_ERRCODE_VALID; buf->AdditionalSenseLength = 0xb; buf->Flags = DATA_PROTECT; buf->AdditionalSenseCode = 0; buf->AdditionalSenseCodeQualifier = 0; } else { buf->ErrorCode = 0; buf->AdditionalSenseLength = 0; buf->Flags = 0; buf->AdditionalSenseCode = 0; buf->AdditionalSenseCodeQualifier = 0; } } /*********************************************************************** * Transport routines ***********************************************************************/ /************************************************************************** * isd200_set_srb(), isd200_srb_set_bufflen() * * Two helpers to facilitate in initialization of scsi_cmnd structure * Will need to change when struct scsi_cmnd changes */ static void isd200_set_srb(struct isd200_info *info, enum dma_data_direction dir, void* buff, unsigned bufflen) { struct scsi_cmnd *srb = &info->srb; if (buff) sg_init_one(&info->sg, buff, bufflen); srb->sc_data_direction = dir; srb->sdb.table.sgl = buff ? &info->sg : NULL; srb->sdb.length = bufflen; srb->sdb.table.nents = buff ? 
1 : 0; } static void isd200_srb_set_bufflen(struct scsi_cmnd *srb, unsigned bufflen) { srb->sdb.length = bufflen; } /************************************************************************** * isd200_action * * Routine for sending commands to the isd200 * * RETURNS: * ISD status code */ static int isd200_action( struct us_data *us, int action, void* pointer, int value ) { union ata_cdb ata; /* static to prevent this large struct being placed on the valuable stack */ static struct scsi_device srb_dev; struct isd200_info *info = (struct isd200_info *)us->extra; struct scsi_cmnd *srb = &info->srb; int status; memset(&ata, 0, sizeof(ata)); memcpy(srb->cmnd, info->cmnd, MAX_COMMAND_SIZE); srb->device = &srb_dev; ata.generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ata.generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ata.generic.TransferBlockSize = 1; switch ( action ) { case ACTION_READ_STATUS: usb_stor_dbg(us, " isd200_action(READ_STATUS)\n"); ata.generic.ActionSelect = ACTION_SELECT_0|ACTION_SELECT_2; ata.generic.RegisterSelect = REG_CYLINDER_LOW | REG_CYLINDER_HIGH | REG_STATUS | REG_ERROR; isd200_set_srb(info, DMA_FROM_DEVICE, pointer, value); break; case ACTION_ENUM: usb_stor_dbg(us, " isd200_action(ENUM,0x%02x)\n", value); ata.generic.ActionSelect = ACTION_SELECT_1|ACTION_SELECT_2| ACTION_SELECT_3|ACTION_SELECT_4| ACTION_SELECT_5; ata.generic.RegisterSelect = REG_DEVICE_HEAD; ata.write.DeviceHeadByte = value; isd200_set_srb(info, DMA_NONE, NULL, 0); break; case ACTION_RESET: usb_stor_dbg(us, " isd200_action(RESET)\n"); ata.generic.ActionSelect = ACTION_SELECT_1|ACTION_SELECT_2| ACTION_SELECT_3|ACTION_SELECT_4; ata.generic.RegisterSelect = REG_DEVICE_CONTROL; ata.write.DeviceControlByte = ATA_DC_RESET_CONTROLLER; isd200_set_srb(info, DMA_NONE, NULL, 0); break; case ACTION_REENABLE: usb_stor_dbg(us, " isd200_action(REENABLE)\n"); ata.generic.ActionSelect = ACTION_SELECT_1|ACTION_SELECT_2| ACTION_SELECT_3|ACTION_SELECT_4; ata.generic.RegisterSelect = REG_DEVICE_CONTROL; ata.write.DeviceControlByte = ATA_DC_REENABLE_CONTROLLER; isd200_set_srb(info, DMA_NONE, NULL, 0); break; case ACTION_SOFT_RESET: usb_stor_dbg(us, " isd200_action(SOFT_RESET)\n"); ata.generic.ActionSelect = ACTION_SELECT_1|ACTION_SELECT_5; ata.generic.RegisterSelect = REG_DEVICE_HEAD | REG_COMMAND; ata.write.DeviceHeadByte = info->DeviceHead; ata.write.CommandByte = ATA_CMD_DEV_RESET; isd200_set_srb(info, DMA_NONE, NULL, 0); break; case ACTION_IDENTIFY: usb_stor_dbg(us, " isd200_action(IDENTIFY)\n"); ata.generic.RegisterSelect = REG_COMMAND; ata.write.CommandByte = ATA_CMD_ID_ATA; isd200_set_srb(info, DMA_FROM_DEVICE, info->id, ATA_ID_WORDS * 2); break; default: usb_stor_dbg(us, "Error: Undefined action %d\n", action); return ISD200_ERROR; } memcpy(srb->cmnd, &ata, sizeof(ata.generic)); srb->cmd_len = sizeof(ata.generic); status = usb_stor_Bulk_transport(srb, us); if (status == USB_STOR_TRANSPORT_GOOD) status = ISD200_GOOD; else { usb_stor_dbg(us, " isd200_action(0x%02x) error: %d\n", action, status); status = ISD200_ERROR; /* need to reset device here */ } return status; } /************************************************************************** * isd200_read_regs * * Read ATA Registers * * RETURNS: * ISD status code */ static int isd200_read_regs( struct us_data *us ) { struct isd200_info *info = (struct isd200_info *)us->extra; int retStatus = ISD200_GOOD; int transferStatus; usb_stor_dbg(us, "Entering isd200_IssueATAReadRegs\n"); transferStatus = isd200_action( us, ACTION_READ_STATUS, info->RegsBuf, 
sizeof(info->ATARegs) ); if (transferStatus != ISD200_TRANSPORT_GOOD) { usb_stor_dbg(us, " Error reading ATA registers\n"); retStatus = ISD200_ERROR; } else { memcpy(info->ATARegs, info->RegsBuf, sizeof(info->ATARegs)); usb_stor_dbg(us, " Got ATA Register[ATA_REG_ERROR_OFFSET] = 0x%x\n", info->ATARegs[ATA_REG_ERROR_OFFSET]); } return retStatus; } /************************************************************************** * Invoke the transport and basic error-handling/recovery methods * * This is used by the protocol layers to actually send the message to * the device and receive the response. */ static void isd200_invoke_transport( struct us_data *us, struct scsi_cmnd *srb, union ata_cdb *ataCdb ) { int need_auto_sense = 0; int transferStatus; int result; /* send the command to the transport layer */ memcpy(srb->cmnd, ataCdb, sizeof(ataCdb->generic)); srb->cmd_len = sizeof(ataCdb->generic); transferStatus = usb_stor_Bulk_transport(srb, us); /* * if the command gets aborted by the higher layers, we need to * short-circuit all other processing */ if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) { usb_stor_dbg(us, "-- command was aborted\n"); goto Handle_Abort; } switch (transferStatus) { case USB_STOR_TRANSPORT_GOOD: /* Indicate a good result */ srb->result = SAM_STAT_GOOD; break; case USB_STOR_TRANSPORT_NO_SENSE: usb_stor_dbg(us, "-- transport indicates protocol failure\n"); srb->result = SAM_STAT_CHECK_CONDITION; return; case USB_STOR_TRANSPORT_FAILED: usb_stor_dbg(us, "-- transport indicates command failure\n"); need_auto_sense = 1; break; case USB_STOR_TRANSPORT_ERROR: usb_stor_dbg(us, "-- transport indicates transport error\n"); srb->result = DID_ERROR << 16; /* Need reset here */ return; default: usb_stor_dbg(us, "-- transport indicates unknown error\n"); srb->result = DID_ERROR << 16; /* Need reset here */ return; } if ((scsi_get_resid(srb) > 0) && !((srb->cmnd[0] == REQUEST_SENSE) || (srb->cmnd[0] == INQUIRY) || (srb->cmnd[0] == MODE_SENSE) || (srb->cmnd[0] == LOG_SENSE) || (srb->cmnd[0] == MODE_SENSE_10))) { usb_stor_dbg(us, "-- unexpectedly short transfer\n"); need_auto_sense = 1; } if (need_auto_sense) { result = isd200_read_regs(us); if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) { usb_stor_dbg(us, "-- auto-sense aborted\n"); goto Handle_Abort; } if (result == ISD200_GOOD) { isd200_build_sense(us, srb); srb->result = SAM_STAT_CHECK_CONDITION; /* If things are really okay, then let's show that */ if ((srb->sense_buffer[2] & 0xf) == 0x0) srb->result = SAM_STAT_GOOD; } else { srb->result = DID_ERROR << 16; /* Need reset here */ } } /* * Regardless of auto-sense, if we _know_ we have an error * condition, show that in the result code */ if (transferStatus == USB_STOR_TRANSPORT_FAILED) srb->result = SAM_STAT_CHECK_CONDITION; return; /* * abort processing: the bulk-only transport requires a reset * following an abort */ Handle_Abort: srb->result = DID_ABORT << 16; /* permit the reset transfer to take place */ clear_bit(US_FLIDX_ABORTING, &us->dflags); /* Need reset here */ } #ifdef CONFIG_USB_STORAGE_DEBUG static void isd200_log_config(struct us_data *us, struct isd200_info *info) { usb_stor_dbg(us, " Event Notification: 0x%x\n", info->ConfigData.EventNotification); usb_stor_dbg(us, " External Clock: 0x%x\n", info->ConfigData.ExternalClock); usb_stor_dbg(us, " ATA Init Timeout: 0x%x\n", info->ConfigData.ATAInitTimeout); usb_stor_dbg(us, " ATAPI Command Block Size: 0x%x\n", (info->ConfigData.ATAConfig & ATACFG_BLOCKSIZE) >> 6); usb_stor_dbg(us, " Master/Slave Selection: 0x%x\n", 
info->ConfigData.ATAConfig & ATACFG_MASTER); usb_stor_dbg(us, " ATAPI Reset: 0x%x\n", info->ConfigData.ATAConfig & ATACFG_ATAPI_RESET); usb_stor_dbg(us, " ATA Timing: 0x%x\n", info->ConfigData.ATAConfig & ATACFG_TIMING); usb_stor_dbg(us, " ATA Major Command: 0x%x\n", info->ConfigData.ATAMajorCommand); usb_stor_dbg(us, " ATA Minor Command: 0x%x\n", info->ConfigData.ATAMinorCommand); usb_stor_dbg(us, " Init Status: 0x%x\n", info->ConfigData.ATAExtraConfig & ATACFGE_INIT_STATUS); usb_stor_dbg(us, " Config Descriptor 2: 0x%x\n", info->ConfigData.ATAExtraConfig & ATACFGE_CONF_DESC2); usb_stor_dbg(us, " Skip Device Boot: 0x%x\n", info->ConfigData.ATAExtraConfig & ATACFGE_SKIP_BOOT); usb_stor_dbg(us, " ATA 3 State Suspend: 0x%x\n", info->ConfigData.ATAExtraConfig & ATACFGE_STATE_SUSPEND); usb_stor_dbg(us, " Descriptor Override: 0x%x\n", info->ConfigData.ATAExtraConfig & ATACFGE_DESC_OVERRIDE); usb_stor_dbg(us, " Last LUN Identifier: 0x%x\n", info->ConfigData.ATAExtraConfig & ATACFGE_LAST_LUN); usb_stor_dbg(us, " SRST Enable: 0x%x\n", info->ConfigData.ATAExtraConfig & CFG_CAPABILITY_SRST); } #endif /************************************************************************** * isd200_write_config * * Write the ISD200 Configuration data * * RETURNS: * ISD status code */ static int isd200_write_config( struct us_data *us ) { struct isd200_info *info = (struct isd200_info *)us->extra; int retStatus = ISD200_GOOD; int result; #ifdef CONFIG_USB_STORAGE_DEBUG usb_stor_dbg(us, "Entering isd200_write_config\n"); usb_stor_dbg(us, " Writing the following ISD200 Config Data:\n"); isd200_log_config(us, info); #endif /* let's send the command via the control pipe */ result = usb_stor_ctrl_transfer( us, us->send_ctrl_pipe, 0x01, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0x0000, 0x0002, (void *) &info->ConfigData, sizeof(info->ConfigData)); if (result >= 0) { usb_stor_dbg(us, " ISD200 Config Data was written successfully\n"); } else { usb_stor_dbg(us, " Request to write ISD200 Config Data failed!\n"); retStatus = ISD200_ERROR; } usb_stor_dbg(us, "Leaving isd200_write_config %08X\n", retStatus); return retStatus; } /************************************************************************** * isd200_read_config * * Reads the ISD200 Configuration data * * RETURNS: * ISD status code */ static int isd200_read_config( struct us_data *us ) { struct isd200_info *info = (struct isd200_info *)us->extra; int retStatus = ISD200_GOOD; int result; usb_stor_dbg(us, "Entering isd200_read_config\n"); /* read the configuration information from ISD200. Use this to */ /* determine what the special ATA CDB bytes are. 
*/ result = usb_stor_ctrl_transfer( us, us->recv_ctrl_pipe, 0x02, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 0x0000, 0x0002, (void *) &info->ConfigData, sizeof(info->ConfigData)); if (result >= 0) { usb_stor_dbg(us, " Retrieved the following ISD200 Config Data:\n"); #ifdef CONFIG_USB_STORAGE_DEBUG isd200_log_config(us, info); #endif } else { usb_stor_dbg(us, " Request to get ISD200 Config Data failed!\n"); retStatus = ISD200_ERROR; } usb_stor_dbg(us, "Leaving isd200_read_config %08X\n", retStatus); return retStatus; } /************************************************************************** * isd200_atapi_soft_reset * * Perform an Atapi Soft Reset on the device * * RETURNS: * NT status code */ static int isd200_atapi_soft_reset( struct us_data *us ) { int retStatus = ISD200_GOOD; int transferStatus; usb_stor_dbg(us, "Entering isd200_atapi_soft_reset\n"); transferStatus = isd200_action( us, ACTION_SOFT_RESET, NULL, 0 ); if (transferStatus != ISD200_TRANSPORT_GOOD) { usb_stor_dbg(us, " Error issuing Atapi Soft Reset\n"); retStatus = ISD200_ERROR; } usb_stor_dbg(us, "Leaving isd200_atapi_soft_reset %08X\n", retStatus); return retStatus; } /************************************************************************** * isd200_srst * * Perform an SRST on the device * * RETURNS: * ISD status code */ static int isd200_srst( struct us_data *us ) { int retStatus = ISD200_GOOD; int transferStatus; usb_stor_dbg(us, "Entering isd200_SRST\n"); transferStatus = isd200_action( us, ACTION_RESET, NULL, 0 ); /* check to see if this request failed */ if (transferStatus != ISD200_TRANSPORT_GOOD) { usb_stor_dbg(us, " Error issuing SRST\n"); retStatus = ISD200_ERROR; } else { /* delay 10ms to give the drive a chance to see it */ msleep(10); transferStatus = isd200_action( us, ACTION_REENABLE, NULL, 0 ); if (transferStatus != ISD200_TRANSPORT_GOOD) { usb_stor_dbg(us, " Error taking drive out of reset\n"); retStatus = ISD200_ERROR; } else { /* delay 50ms to give the drive a chance to recover after SRST */ msleep(50); } } usb_stor_dbg(us, "Leaving isd200_srst %08X\n", retStatus); return retStatus; } /************************************************************************** * isd200_try_enum * * Helper function for isd200_manual_enum(). Does ENUM and READ_STATUS * and tries to analyze the status registers * * RETURNS: * ISD status code */ static int isd200_try_enum(struct us_data *us, unsigned char master_slave, int detect ) { int status = ISD200_GOOD; unsigned long endTime; struct isd200_info *info = (struct isd200_info *)us->extra; unsigned char *regs = info->RegsBuf; int recheckAsMaster = 0; if ( detect ) endTime = jiffies + ISD200_ENUM_DETECT_TIMEOUT * HZ; else endTime = jiffies + ISD200_ENUM_BSY_TIMEOUT * HZ; /* loop until we detect !BSY or timeout */ while(1) { status = isd200_action( us, ACTION_ENUM, NULL, master_slave ); if ( status != ISD200_GOOD ) break; status = isd200_action( us, ACTION_READ_STATUS, regs, 8 ); if ( status != ISD200_GOOD ) break; if (!detect) { if (regs[ATA_REG_STATUS_OFFSET] & ATA_BUSY) { usb_stor_dbg(us, " %s status is still BSY, try again...\n", master_slave == ATA_ADDRESS_DEVHEAD_STD ? "Master" : "Slave"); } else { usb_stor_dbg(us, " %s status !BSY, continue with next operation\n", master_slave == ATA_ADDRESS_DEVHEAD_STD ? 
"Master" : "Slave"); break; } } /* check for ATA_BUSY and */ /* ATA_DF (workaround ATA Zip drive) and */ /* ATA_ERR (workaround for Archos CD-ROM) */ else if (regs[ATA_REG_STATUS_OFFSET] & (ATA_BUSY | ATA_DF | ATA_ERR)) { usb_stor_dbg(us, " Status indicates it is not ready, try again...\n"); } /* check for DRDY, ATA devices set DRDY after SRST */ else if (regs[ATA_REG_STATUS_OFFSET] & ATA_DRDY) { usb_stor_dbg(us, " Identified ATA device\n"); info->DeviceFlags |= DF_ATA_DEVICE; info->DeviceHead = master_slave; break; } /* * check Cylinder High/Low to * determine if it is an ATAPI device */ else if (regs[ATA_REG_HCYL_OFFSET] == 0xEB && regs[ATA_REG_LCYL_OFFSET] == 0x14) { /* * It seems that the RICOH * MP6200A CD/RW drive will * report itself okay as a * slave when it is really a * master. So this check again * as a master device just to * make sure it doesn't report * itself okay as a master also */ if ((master_slave & ATA_ADDRESS_DEVHEAD_SLAVE) && !recheckAsMaster) { usb_stor_dbg(us, " Identified ATAPI device as slave. Rechecking again as master\n"); recheckAsMaster = 1; master_slave = ATA_ADDRESS_DEVHEAD_STD; } else { usb_stor_dbg(us, " Identified ATAPI device\n"); info->DeviceHead = master_slave; status = isd200_atapi_soft_reset(us); break; } } else { usb_stor_dbg(us, " Not ATA, not ATAPI - Weird\n"); break; } /* check for timeout on this request */ if (time_after_eq(jiffies, endTime)) { if (!detect) usb_stor_dbg(us, " BSY check timeout, just continue with next operation...\n"); else usb_stor_dbg(us, " Device detect timeout!\n"); break; } } return status; } /************************************************************************** * isd200_manual_enum * * Determines if the drive attached is an ATA or ATAPI and if it is a * master or slave. * * RETURNS: * ISD status code */ static int isd200_manual_enum(struct us_data *us) { struct isd200_info *info = (struct isd200_info *)us->extra; int retStatus = ISD200_GOOD; usb_stor_dbg(us, "Entering isd200_manual_enum\n"); retStatus = isd200_read_config(us); if (retStatus == ISD200_GOOD) { int isslave; /* master or slave? */ retStatus = isd200_try_enum( us, ATA_ADDRESS_DEVHEAD_STD, 0); if (retStatus == ISD200_GOOD) retStatus = isd200_try_enum( us, ATA_ADDRESS_DEVHEAD_SLAVE, 0); if (retStatus == ISD200_GOOD) { retStatus = isd200_srst(us); if (retStatus == ISD200_GOOD) /* ata or atapi? */ retStatus = isd200_try_enum( us, ATA_ADDRESS_DEVHEAD_STD, 1); } isslave = (info->DeviceHead & ATA_ADDRESS_DEVHEAD_SLAVE) ? 
1 : 0; if (!(info->ConfigData.ATAConfig & ATACFG_MASTER)) { usb_stor_dbg(us, " Setting Master/Slave selection to %d\n", isslave); info->ConfigData.ATAConfig &= 0x3f; info->ConfigData.ATAConfig |= (isslave<<6); retStatus = isd200_write_config(us); } } usb_stor_dbg(us, "Leaving isd200_manual_enum %08X\n", retStatus); return(retStatus); } static void isd200_fix_driveid(u16 *id) { #ifndef __LITTLE_ENDIAN # ifdef __BIG_ENDIAN int i; for (i = 0; i < ATA_ID_WORDS; i++) id[i] = __le16_to_cpu(id[i]); # else # error "Please fix <asm/byteorder.h>" # endif #endif } static void isd200_dump_driveid(struct us_data *us, u16 *id) { usb_stor_dbg(us, " Identify Data Structure:\n"); usb_stor_dbg(us, " config = 0x%x\n", id[ATA_ID_CONFIG]); usb_stor_dbg(us, " cyls = 0x%x\n", id[ATA_ID_CYLS]); usb_stor_dbg(us, " heads = 0x%x\n", id[ATA_ID_HEADS]); usb_stor_dbg(us, " track_bytes = 0x%x\n", id[4]); usb_stor_dbg(us, " sector_bytes = 0x%x\n", id[5]); usb_stor_dbg(us, " sectors = 0x%x\n", id[ATA_ID_SECTORS]); usb_stor_dbg(us, " serial_no[0] = 0x%x\n", *(char *)&id[ATA_ID_SERNO]); usb_stor_dbg(us, " buf_type = 0x%x\n", id[20]); usb_stor_dbg(us, " buf_size = 0x%x\n", id[ATA_ID_BUF_SIZE]); usb_stor_dbg(us, " ecc_bytes = 0x%x\n", id[22]); usb_stor_dbg(us, " fw_rev[0] = 0x%x\n", *(char *)&id[ATA_ID_FW_REV]); usb_stor_dbg(us, " model[0] = 0x%x\n", *(char *)&id[ATA_ID_PROD]); usb_stor_dbg(us, " max_multsect = 0x%x\n", id[ATA_ID_MAX_MULTSECT] & 0xff); usb_stor_dbg(us, " dword_io = 0x%x\n", id[ATA_ID_DWORD_IO]); usb_stor_dbg(us, " capability = 0x%x\n", id[ATA_ID_CAPABILITY] >> 8); usb_stor_dbg(us, " tPIO = 0x%x\n", id[ATA_ID_OLD_PIO_MODES] >> 8); usb_stor_dbg(us, " tDMA = 0x%x\n", id[ATA_ID_OLD_DMA_MODES] >> 8); usb_stor_dbg(us, " field_valid = 0x%x\n", id[ATA_ID_FIELD_VALID]); usb_stor_dbg(us, " cur_cyls = 0x%x\n", id[ATA_ID_CUR_CYLS]); usb_stor_dbg(us, " cur_heads = 0x%x\n", id[ATA_ID_CUR_HEADS]); usb_stor_dbg(us, " cur_sectors = 0x%x\n", id[ATA_ID_CUR_SECTORS]); usb_stor_dbg(us, " cur_capacity = 0x%x\n", ata_id_u32(id, 57)); usb_stor_dbg(us, " multsect = 0x%x\n", id[ATA_ID_MULTSECT] & 0xff); usb_stor_dbg(us, " lba_capacity = 0x%x\n", ata_id_u32(id, ATA_ID_LBA_CAPACITY)); usb_stor_dbg(us, " command_set_1 = 0x%x\n", id[ATA_ID_COMMAND_SET_1]); usb_stor_dbg(us, " command_set_2 = 0x%x\n", id[ATA_ID_COMMAND_SET_2]); } /************************************************************************** * isd200_get_inquiry_data * * Get inquiry data * * RETURNS: * ISD status code */ static int isd200_get_inquiry_data( struct us_data *us ) { struct isd200_info *info = (struct isd200_info *)us->extra; int retStatus; u16 *id = info->id; usb_stor_dbg(us, "Entering isd200_get_inquiry_data\n"); /* set default to Master */ info->DeviceHead = ATA_ADDRESS_DEVHEAD_STD; /* attempt to manually enumerate this device */ retStatus = isd200_manual_enum(us); if (retStatus == ISD200_GOOD) { int transferStatus; /* check for an ATA device */ if (info->DeviceFlags & DF_ATA_DEVICE) { /* this must be an ATA device */ /* perform an ATA Command Identify */ transferStatus = isd200_action( us, ACTION_IDENTIFY, id, ATA_ID_WORDS * 2); if (transferStatus != ISD200_TRANSPORT_GOOD) { /* Error issuing ATA Command Identify */ usb_stor_dbg(us, " Error issuing ATA Command Identify\n"); retStatus = ISD200_ERROR; } else { /* ATA Command Identify successful */ int i; __be16 *src; __u16 *dest; isd200_fix_driveid(id); isd200_dump_driveid(us, id); /* Prevent division by 0 in isd200_scsi_to_ata() */ if (id[ATA_ID_HEADS] == 0 || id[ATA_ID_SECTORS] == 0) { usb_stor_dbg(us, " Invalid 
ATA Identify data\n"); retStatus = ISD200_ERROR; goto Done; } memset(&info->InquiryData, 0, sizeof(info->InquiryData)); /* Standard IDE interface only supports disks */ info->InquiryData.DeviceType = DIRECT_ACCESS_DEVICE; /* The length must be at least 36 (5 + 31) */ info->InquiryData.AdditionalLength = 0x1F; if (id[ATA_ID_COMMAND_SET_1] & COMMANDSET_MEDIA_STATUS) { /* set the removable bit */ info->InquiryData.DeviceTypeModifier = DEVICE_REMOVABLE; info->DeviceFlags |= DF_REMOVABLE_MEDIA; } /* Fill in vendor identification fields */ src = (__be16 *)&id[ATA_ID_PROD]; dest = (__u16*)info->InquiryData.VendorId; for (i = 0; i < 4; i++) dest[i] = be16_to_cpu(src[i]); src = (__be16 *)&id[ATA_ID_PROD + 8/2]; dest = (__u16*)info->InquiryData.ProductId; for (i=0;i<8;i++) dest[i] = be16_to_cpu(src[i]); src = (__be16 *)&id[ATA_ID_FW_REV]; dest = (__u16*)info->InquiryData.ProductRevisionLevel; for (i=0;i<2;i++) dest[i] = be16_to_cpu(src[i]); /* determine if it supports Media Status Notification */ if (id[ATA_ID_COMMAND_SET_2] & COMMANDSET_MEDIA_STATUS) { usb_stor_dbg(us, " Device supports Media Status Notification\n"); /* * Indicate that it is enabled, even * though it is not. * This allows the lock/unlock of the * media to work correctly. */ info->DeviceFlags |= DF_MEDIA_STATUS_ENABLED; } else info->DeviceFlags &= ~DF_MEDIA_STATUS_ENABLED; } } else { /* * this must be an ATAPI device * use an ATAPI protocol (Transparent SCSI) */ us->protocol_name = "Transparent SCSI"; us->proto_handler = usb_stor_transparent_scsi_command; usb_stor_dbg(us, "Protocol changed to: %s\n", us->protocol_name); /* Free driver structure */ us->extra_destructor(info); kfree(info); us->extra = NULL; us->extra_destructor = NULL; } } Done: usb_stor_dbg(us, "Leaving isd200_get_inquiry_data %08X\n", retStatus); return(retStatus); } /************************************************************************** * isd200_scsi_to_ata * * Translate SCSI commands to ATA commands. 
* * RETURNS: * 1 if the command needs to be sent to the transport layer * 0 otherwise */ static int isd200_scsi_to_ata(struct scsi_cmnd *srb, struct us_data *us, union ata_cdb * ataCdb) { struct isd200_info *info = (struct isd200_info *)us->extra; u16 *id = info->id; int sendToTransport = 1; unsigned char sectnum, head; unsigned short cylinder; unsigned long lba; unsigned long blockCount; unsigned char senseData[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; memset(ataCdb, 0, sizeof(union ata_cdb)); /* SCSI Command */ switch (srb->cmnd[0]) { case INQUIRY: usb_stor_dbg(us, " ATA OUT - INQUIRY\n"); /* copy InquiryData */ usb_stor_set_xfer_buf((unsigned char *) &info->InquiryData, sizeof(info->InquiryData), srb); srb->result = SAM_STAT_GOOD; sendToTransport = 0; break; case MODE_SENSE: usb_stor_dbg(us, " ATA OUT - SCSIOP_MODE_SENSE\n"); /* Initialize the return buffer */ usb_stor_set_xfer_buf(senseData, sizeof(senseData), srb); if (info->DeviceFlags & DF_MEDIA_STATUS_ENABLED) { ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ataCdb->generic.TransferBlockSize = 1; ataCdb->generic.RegisterSelect = REG_COMMAND; ataCdb->write.CommandByte = ATA_COMMAND_GET_MEDIA_STATUS; isd200_srb_set_bufflen(srb, 0); } else { usb_stor_dbg(us, " Media Status not supported, just report okay\n"); srb->result = SAM_STAT_GOOD; sendToTransport = 0; } break; case TEST_UNIT_READY: usb_stor_dbg(us, " ATA OUT - SCSIOP_TEST_UNIT_READY\n"); if (info->DeviceFlags & DF_MEDIA_STATUS_ENABLED) { ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ataCdb->generic.TransferBlockSize = 1; ataCdb->generic.RegisterSelect = REG_COMMAND; ataCdb->write.CommandByte = ATA_COMMAND_GET_MEDIA_STATUS; isd200_srb_set_bufflen(srb, 0); } else { usb_stor_dbg(us, " Media Status not supported, just report okay\n"); srb->result = SAM_STAT_GOOD; sendToTransport = 0; } break; case READ_CAPACITY: { unsigned long capacity; struct read_capacity_data readCapacityData; usb_stor_dbg(us, " ATA OUT - SCSIOP_READ_CAPACITY\n"); if (ata_id_has_lba(id)) capacity = ata_id_u32(id, ATA_ID_LBA_CAPACITY) - 1; else capacity = (id[ATA_ID_HEADS] * id[ATA_ID_CYLS] * id[ATA_ID_SECTORS]) - 1; readCapacityData.LogicalBlockAddress = cpu_to_be32(capacity); readCapacityData.BytesPerBlock = cpu_to_be32(0x200); usb_stor_set_xfer_buf((unsigned char *) &readCapacityData, sizeof(readCapacityData), srb); srb->result = SAM_STAT_GOOD; sendToTransport = 0; } break; case READ_10: usb_stor_dbg(us, " ATA OUT - SCSIOP_READ\n"); lba = be32_to_cpu(*(__be32 *)&srb->cmnd[2]); blockCount = (unsigned long)srb->cmnd[7]<<8 | (unsigned long)srb->cmnd[8]; if (ata_id_has_lba(id)) { sectnum = (unsigned char)(lba); cylinder = (unsigned short)(lba>>8); head = ATA_ADDRESS_DEVHEAD_LBA_MODE | (unsigned char)(lba>>24 & 0x0F); } else { sectnum = (u8)((lba % id[ATA_ID_SECTORS]) + 1); cylinder = (u16)(lba / (id[ATA_ID_SECTORS] * id[ATA_ID_HEADS])); head = (u8)((lba / id[ATA_ID_SECTORS]) % id[ATA_ID_HEADS]); } ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ataCdb->generic.TransferBlockSize = 1; ataCdb->generic.RegisterSelect = REG_SECTOR_COUNT | REG_SECTOR_NUMBER | REG_CYLINDER_LOW | REG_CYLINDER_HIGH | REG_DEVICE_HEAD | REG_COMMAND; ataCdb->write.SectorCountByte = (unsigned char)blockCount; ataCdb->write.SectorNumberByte = sectnum; ataCdb->write.CylinderHighByte = (unsigned 
char)(cylinder>>8); ataCdb->write.CylinderLowByte = (unsigned char)cylinder; ataCdb->write.DeviceHeadByte = (head | ATA_ADDRESS_DEVHEAD_STD); ataCdb->write.CommandByte = ATA_CMD_PIO_READ; break; case WRITE_10: usb_stor_dbg(us, " ATA OUT - SCSIOP_WRITE\n"); lba = be32_to_cpu(*(__be32 *)&srb->cmnd[2]); blockCount = (unsigned long)srb->cmnd[7]<<8 | (unsigned long)srb->cmnd[8]; if (ata_id_has_lba(id)) { sectnum = (unsigned char)(lba); cylinder = (unsigned short)(lba>>8); head = ATA_ADDRESS_DEVHEAD_LBA_MODE | (unsigned char)(lba>>24 & 0x0F); } else { sectnum = (u8)((lba % id[ATA_ID_SECTORS]) + 1); cylinder = (u16)(lba / (id[ATA_ID_SECTORS] * id[ATA_ID_HEADS])); head = (u8)((lba / id[ATA_ID_SECTORS]) % id[ATA_ID_HEADS]); } ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ataCdb->generic.TransferBlockSize = 1; ataCdb->generic.RegisterSelect = REG_SECTOR_COUNT | REG_SECTOR_NUMBER | REG_CYLINDER_LOW | REG_CYLINDER_HIGH | REG_DEVICE_HEAD | REG_COMMAND; ataCdb->write.SectorCountByte = (unsigned char)blockCount; ataCdb->write.SectorNumberByte = sectnum; ataCdb->write.CylinderHighByte = (unsigned char)(cylinder>>8); ataCdb->write.CylinderLowByte = (unsigned char)cylinder; ataCdb->write.DeviceHeadByte = (head | ATA_ADDRESS_DEVHEAD_STD); ataCdb->write.CommandByte = ATA_CMD_PIO_WRITE; break; case ALLOW_MEDIUM_REMOVAL: usb_stor_dbg(us, " ATA OUT - SCSIOP_MEDIUM_REMOVAL\n"); if (info->DeviceFlags & DF_REMOVABLE_MEDIA) { usb_stor_dbg(us, " srb->cmnd[4] = 0x%X\n", srb->cmnd[4]); ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ataCdb->generic.TransferBlockSize = 1; ataCdb->generic.RegisterSelect = REG_COMMAND; ataCdb->write.CommandByte = (srb->cmnd[4] & 0x1) ? ATA_CMD_MEDIA_LOCK : ATA_CMD_MEDIA_UNLOCK; isd200_srb_set_bufflen(srb, 0); } else { usb_stor_dbg(us, " Not removable media, just report okay\n"); srb->result = SAM_STAT_GOOD; sendToTransport = 0; } break; case START_STOP: usb_stor_dbg(us, " ATA OUT - SCSIOP_START_STOP_UNIT\n"); usb_stor_dbg(us, " srb->cmnd[4] = 0x%X\n", srb->cmnd[4]); if ((srb->cmnd[4] & 0x3) == 0x2) { usb_stor_dbg(us, " Media Eject\n"); ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ataCdb->generic.TransferBlockSize = 0; ataCdb->generic.RegisterSelect = REG_COMMAND; ataCdb->write.CommandByte = ATA_COMMAND_MEDIA_EJECT; } else if ((srb->cmnd[4] & 0x3) == 0x1) { usb_stor_dbg(us, " Get Media Status\n"); ataCdb->generic.SignatureByte0 = info->ConfigData.ATAMajorCommand; ataCdb->generic.SignatureByte1 = info->ConfigData.ATAMinorCommand; ataCdb->generic.TransferBlockSize = 1; ataCdb->generic.RegisterSelect = REG_COMMAND; ataCdb->write.CommandByte = ATA_COMMAND_GET_MEDIA_STATUS; isd200_srb_set_bufflen(srb, 0); } else { usb_stor_dbg(us, " Nothing to do, just report okay\n"); srb->result = SAM_STAT_GOOD; sendToTransport = 0; } break; default: usb_stor_dbg(us, "Unsupported SCSI command - 0x%X\n", srb->cmnd[0]); srb->result = DID_ERROR << 16; sendToTransport = 0; break; } return(sendToTransport); } /************************************************************************** * isd200_free_info * * Frees the driver structure. 
*/ static void isd200_free_info_ptrs(void *info_) { struct isd200_info *info = (struct isd200_info *) info_; if (info) { kfree(info->id); kfree(info->RegsBuf); kfree(info->srb.sense_buffer); } } /************************************************************************** * isd200_init_info * * Allocates (if necessary) and initializes the driver structure. * * RETURNS: * error status code */ static int isd200_init_info(struct us_data *us) { struct isd200_info *info; info = kzalloc(sizeof(struct isd200_info), GFP_KERNEL); if (!info) return -ENOMEM; info->id = kzalloc(ATA_ID_WORDS * 2, GFP_KERNEL); info->RegsBuf = kmalloc(sizeof(info->ATARegs), GFP_KERNEL); info->srb.sense_buffer = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); if (!info->id || !info->RegsBuf || !info->srb.sense_buffer) { isd200_free_info_ptrs(info); kfree(info); return -ENOMEM; } us->extra = info; us->extra_destructor = isd200_free_info_ptrs; return 0; } /************************************************************************** * Initialization for the ISD200 */ static int isd200_Initialization(struct us_data *us) { int rc = 0; usb_stor_dbg(us, "ISD200 Initialization...\n"); /* Initialize ISD200 info struct */ if (isd200_init_info(us) < 0) { usb_stor_dbg(us, "ERROR Initializing ISD200 Info struct\n"); rc = -ENOMEM; } else { /* Get device specific data */ if (isd200_get_inquiry_data(us) != ISD200_GOOD) { usb_stor_dbg(us, "ISD200 Initialization Failure\n"); rc = -EINVAL; } else { usb_stor_dbg(us, "ISD200 Initialization complete\n"); } } return rc; } /************************************************************************** * Protocol and Transport for the ISD200 ASIC * * This protocol and transport are for ATA devices connected to an ISD200 * ASIC. An ATAPI device that is connected as a slave device will be * detected in the driver initialization function and the protocol will * be changed to an ATAPI protocol (Transparent SCSI). * */ static void isd200_ata_command(struct scsi_cmnd *srb, struct us_data *us) { int sendToTransport, orig_bufflen; union ata_cdb ataCdb; /* Make sure driver was initialized */ if (us->extra == NULL) { usb_stor_dbg(us, "ERROR Driver not initialized\n"); srb->result = DID_ERROR << 16; return; } scsi_set_resid(srb, 0); /* scsi_bufflen might change in protocol translation to ata */ orig_bufflen = scsi_bufflen(srb); sendToTransport = isd200_scsi_to_ata(srb, us, &ataCdb); /* send the command to the transport layer */ if (sendToTransport) isd200_invoke_transport(us, srb, &ataCdb); isd200_srb_set_bufflen(srb, orig_bufflen); } static struct scsi_host_template isd200_host_template; static int isd200_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct us_data *us; int result; result = usb_stor_probe1(&us, intf, id, (id - isd200_usb_ids) + isd200_unusual_dev_list, &isd200_host_template); if (result) return result; us->protocol_name = "ISD200 ATA/ATAPI"; us->proto_handler = isd200_ata_command; result = usb_stor_probe2(us); return result; } static struct usb_driver isd200_driver = { .name = DRV_NAME, .probe = isd200_probe, .disconnect = usb_stor_disconnect, .suspend = usb_stor_suspend, .resume = usb_stor_resume, .reset_resume = usb_stor_reset_resume, .pre_reset = usb_stor_pre_reset, .post_reset = usb_stor_post_reset, .id_table = isd200_usb_ids, .soft_unbind = 1, .no_dynamic_id = 1, }; module_usb_stor_driver(isd200_driver, isd200_host_template, DRV_NAME);
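/*
 * Editor's note -- a minimal, self-contained sketch (not part of the driver)
 * of the sector-address translation that isd200_scsi_to_ata() performs for
 * READ_10/WRITE_10 above. The geometry (63 sectors/track, 16 heads) is
 * hypothetical; the driver takes it from the IDENTIFY data in info->id, and
 * it additionally ORs ATA_ADDRESS_DEVHEAD_LBA_MODE/ATA_ADDRESS_DEVHEAD_STD
 * into the device/head byte, which is omitted here.
 */
#include <stdio.h>

static void split_lba(unsigned long lba, unsigned int sectors_per_track,
		      unsigned int heads, int has_lba)
{
	unsigned char sectnum, head;
	unsigned short cylinder;

	if (has_lba) {
		/* LBA mode: spread the 28-bit LBA across the ATA task file */
		sectnum  = (unsigned char)lba;                  /* bits  0..7  */
		cylinder = (unsigned short)(lba >> 8);          /* bits  8..23 */
		head     = (unsigned char)((lba >> 24) & 0x0F); /* bits 24..27 */
	} else {
		/* CHS fallback, mirroring the math in isd200_scsi_to_ata() */
		sectnum  = (unsigned char)((lba % sectors_per_track) + 1);
		cylinder = (unsigned short)(lba / (sectors_per_track * heads));
		head     = (unsigned char)((lba / sectors_per_track) % heads);
	}
	printf("lba %lu -> cylinder %u, head %u, sector %u\n",
	       lba, (unsigned int)cylinder, (unsigned int)head,
	       (unsigned int)sectnum);
}

int main(void)
{
	split_lba(1000000, 63, 16, 0);	/* pre-LBA drive: CHS translation */
	split_lba(1000000, 63, 16, 1);	/* LBA-capable drive */
	return 0;
}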
// SPDX-License-Identifier: GPL-2.0 #include <linux/file.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/utime.h> #include <linux/syscalls.h> #include <linux/uaccess.h> #include <linux/compat.h> #include <asm/unistd.h> #include <linux/filelock.h> static bool nsec_valid(long nsec) { if (nsec == UTIME_OMIT || nsec == UTIME_NOW) return true; return nsec >= 0 && nsec <= 999999999; } int vfs_utimes(const struct path *path, struct timespec64 *times) { int error; struct iattr newattrs; struct inode *inode = path->dentry->d_inode; struct inode *delegated_inode = NULL; if (times) { if (!nsec_valid(times[0].tv_nsec) || !nsec_valid(times[1].tv_nsec)) return -EINVAL; if (times[0].tv_nsec == UTIME_NOW && times[1].tv_nsec == UTIME_NOW) times = NULL; } error = mnt_want_write(path->mnt); if (error) goto out; newattrs.ia_valid = ATTR_CTIME | ATTR_MTIME | ATTR_ATIME; if (times) { if (times[0].tv_nsec == UTIME_OMIT) newattrs.ia_valid &= ~ATTR_ATIME; else if (times[0].tv_nsec != UTIME_NOW) { newattrs.ia_atime = times[0]; newattrs.ia_valid |= ATTR_ATIME_SET; } if (times[1].tv_nsec == UTIME_OMIT) newattrs.ia_valid &= ~ATTR_MTIME; else if (times[1].tv_nsec != UTIME_NOW) { newattrs.ia_mtime = times[1]; newattrs.ia_valid |= ATTR_MTIME_SET; } /* * Tell setattr_prepare(), that this is an explicit time * update, even if neither ATTR_ATIME_SET nor ATTR_MTIME_SET * were used.
*/ newattrs.ia_valid |= ATTR_TIMES_SET; } else { newattrs.ia_valid |= ATTR_TOUCH; } retry_deleg: inode_lock(inode); error = notify_change(mnt_idmap(path->mnt), path->dentry, &newattrs, &delegated_inode); inode_unlock(inode); if (delegated_inode) { error = break_deleg_wait(&delegated_inode); if (!error) goto retry_deleg; } mnt_drop_write(path->mnt); out: return error; } static int do_utimes_path(int dfd, const char __user *filename, struct timespec64 *times, int flags) { struct path path; int lookup_flags = 0, error; if (flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) return -EINVAL; if (!(flags & AT_SYMLINK_NOFOLLOW)) lookup_flags |= LOOKUP_FOLLOW; if (flags & AT_EMPTY_PATH) lookup_flags |= LOOKUP_EMPTY; retry: error = user_path_at(dfd, filename, lookup_flags, &path); if (error) return error; error = vfs_utimes(&path, times); path_put(&path); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } return error; } static int do_utimes_fd(int fd, struct timespec64 *times, int flags) { struct fd f; int error; if (flags) return -EINVAL; f = fdget(fd); if (!f.file) return -EBADF; error = vfs_utimes(&f.file->f_path, times); fdput(f); return error; } /* * do_utimes - change times on filename or file descriptor * @dfd: open file descriptor, -1 or AT_FDCWD * @filename: path name or NULL * @times: new times or NULL * @flags: zero or more flags (only AT_SYMLINK_NOFOLLOW for the moment) * * If filename is NULL and dfd refers to an open file, then operate on * the file. Otherwise look up filename, possibly using dfd as a * starting point. * * If times==NULL, set access and modification to current time, * must be owner or have write permission. * Else, update from *times, must be owner or super user. */ long do_utimes(int dfd, const char __user *filename, struct timespec64 *times, int flags) { if (filename == NULL && dfd != AT_FDCWD) return do_utimes_fd(dfd, times, flags); return do_utimes_path(dfd, filename, times, flags); } SYSCALL_DEFINE4(utimensat, int, dfd, const char __user *, filename, struct __kernel_timespec __user *, utimes, int, flags) { struct timespec64 tstimes[2]; if (utimes) { if ((get_timespec64(&tstimes[0], &utimes[0]) || get_timespec64(&tstimes[1], &utimes[1]))) return -EFAULT; /* Nothing to do, we must not even check the path. */ if (tstimes[0].tv_nsec == UTIME_OMIT && tstimes[1].tv_nsec == UTIME_OMIT) return 0; } return do_utimes(dfd, filename, utimes ? tstimes : NULL, flags); } #ifdef __ARCH_WANT_SYS_UTIME /* * futimesat(), utimes() and utime() are older versions of utimensat() * that are provided for compatibility with traditional C libraries. * On modern architectures, we always use libc wrappers around * utimensat() instead. */ static long do_futimesat(int dfd, const char __user *filename, struct __kernel_old_timeval __user *utimes) { struct __kernel_old_timeval times[2]; struct timespec64 tstimes[2]; if (utimes) { if (copy_from_user(&times, utimes, sizeof(times))) return -EFAULT; /* This test is needed to catch all invalid values. If we would test only in do_utimes we would miss those invalid values truncated by the multiplication with 1000. Note that we also catch UTIME_{NOW,OMIT} here which are only valid for utimensat. 
*/ if (times[0].tv_usec >= 1000000 || times[0].tv_usec < 0 || times[1].tv_usec >= 1000000 || times[1].tv_usec < 0) return -EINVAL; tstimes[0].tv_sec = times[0].tv_sec; tstimes[0].tv_nsec = 1000 * times[0].tv_usec; tstimes[1].tv_sec = times[1].tv_sec; tstimes[1].tv_nsec = 1000 * times[1].tv_usec; } return do_utimes(dfd, filename, utimes ? tstimes : NULL, 0); } SYSCALL_DEFINE3(futimesat, int, dfd, const char __user *, filename, struct __kernel_old_timeval __user *, utimes) { return do_futimesat(dfd, filename, utimes); } SYSCALL_DEFINE2(utimes, char __user *, filename, struct __kernel_old_timeval __user *, utimes) { return do_futimesat(AT_FDCWD, filename, utimes); } SYSCALL_DEFINE2(utime, char __user *, filename, struct utimbuf __user *, times) { struct timespec64 tv[2]; if (times) { if (get_user(tv[0].tv_sec, &times->actime) || get_user(tv[1].tv_sec, &times->modtime)) return -EFAULT; tv[0].tv_nsec = 0; tv[1].tv_nsec = 0; } return do_utimes(AT_FDCWD, filename, times ? tv : NULL, 0); } #endif #ifdef CONFIG_COMPAT_32BIT_TIME /* * Not all architectures have sys_utime, so implement this in terms * of sys_utimes. */ #ifdef __ARCH_WANT_SYS_UTIME32 SYSCALL_DEFINE2(utime32, const char __user *, filename, struct old_utimbuf32 __user *, t) { struct timespec64 tv[2]; if (t) { if (get_user(tv[0].tv_sec, &t->actime) || get_user(tv[1].tv_sec, &t->modtime)) return -EFAULT; tv[0].tv_nsec = 0; tv[1].tv_nsec = 0; } return do_utimes(AT_FDCWD, filename, t ? tv : NULL, 0); } #endif SYSCALL_DEFINE4(utimensat_time32, unsigned int, dfd, const char __user *, filename, struct old_timespec32 __user *, t, int, flags) { struct timespec64 tv[2]; if (t) { if (get_old_timespec32(&tv[0], &t[0]) || get_old_timespec32(&tv[1], &t[1])) return -EFAULT; if (tv[0].tv_nsec == UTIME_OMIT && tv[1].tv_nsec == UTIME_OMIT) return 0; } return do_utimes(dfd, filename, t ? tv : NULL, flags); } #ifdef __ARCH_WANT_SYS_UTIME32 static long do_compat_futimesat(unsigned int dfd, const char __user *filename, struct old_timeval32 __user *t) { struct timespec64 tv[2]; if (t) { if (get_user(tv[0].tv_sec, &t[0].tv_sec) || get_user(tv[0].tv_nsec, &t[0].tv_usec) || get_user(tv[1].tv_sec, &t[1].tv_sec) || get_user(tv[1].tv_nsec, &t[1].tv_usec)) return -EFAULT; if (tv[0].tv_nsec >= 1000000 || tv[0].tv_nsec < 0 || tv[1].tv_nsec >= 1000000 || tv[1].tv_nsec < 0) return -EINVAL; tv[0].tv_nsec *= 1000; tv[1].tv_nsec *= 1000; } return do_utimes(dfd, filename, t ? tv : NULL, 0); } SYSCALL_DEFINE3(futimesat_time32, unsigned int, dfd, const char __user *, filename, struct old_timeval32 __user *, t) { return do_compat_futimesat(dfd, filename, t); } SYSCALL_DEFINE2(utimes_time32, const char __user *, filename, struct old_timeval32 __user *, t) { return do_compat_futimesat(AT_FDCWD, filename, t); } #endif #endif
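/*
 * Editor's note -- a minimal userspace sketch (not part of the kernel source)
 * showing how the utimensat() system call implemented above is typically
 * invoked. The file name "example.txt" is hypothetical. UTIME_OMIT leaves the
 * access time untouched (the branch in vfs_utimes() that clears ATTR_ATIME),
 * while UTIME_NOW sets the modification time to the current time; if both
 * fields were UTIME_OMIT, the call would return 0 without even looking up the
 * path, as in the SYSCALL_DEFINE4(utimensat, ...) handler above.
 */
#include <fcntl.h>     /* AT_FDCWD */
#include <sys/stat.h>  /* utimensat(), UTIME_NOW, UTIME_OMIT */
#include <time.h>      /* struct timespec */
#include <stdio.h>

int main(void)
{
	struct timespec times[2];

	times[0].tv_sec  = 0;
	times[0].tv_nsec = UTIME_OMIT;  /* do not touch atime */
	times[1].tv_sec  = 0;
	times[1].tv_nsec = UTIME_NOW;   /* mtime = current time */

	if (utimensat(AT_FDCWD, "example.txt", times, 0) < 0) {
		perror("utimensat");
		return 1;
	}
	return 0;
}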
// SPDX-License-Identifier: GPL-2.0-only #include <linux/export.h> #include <linux/bvec.h> #include <linux/fault-inject-usercopy.h> #include <linux/uio.h> #include <linux/pagemap.h> #include <linux/highmem.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/splice.h> #include <linux/compat.h> #include <linux/scatterlist.h> #include <linux/instrumented.h> #include <linux/iov_iter.h> static __always_inline size_t copy_to_user_iter(void __user *iter_to, size_t progress, size_t len, void *from, void *priv2) { if (should_fail_usercopy()) return len; if (access_ok(iter_to, len)) { from += progress; instrument_copy_to_user(iter_to, from, len); len = raw_copy_to_user(iter_to, from, len); } return len; } static __always_inline size_t copy_to_user_iter_nofault(void __user *iter_to, size_t progress, size_t len, void *from, void *priv2) { ssize_t res; if (should_fail_usercopy()) return len; from += progress; res = copy_to_user_nofault(iter_to, from, len); return res < 0 ? len : res; } static __always_inline size_t copy_from_user_iter(void __user *iter_from, size_t progress, size_t len, void *to, void *priv2) { size_t res = len; if (should_fail_usercopy()) return len; if (access_ok(iter_from, len)) { to += progress; instrument_copy_from_user_before(to, iter_from, len); res = raw_copy_from_user(to, iter_from, len); instrument_copy_from_user_after(to, iter_from, len, res); } return res; } static __always_inline size_t memcpy_to_iter(void *iter_to, size_t progress, size_t len, void *from, void *priv2) { memcpy(iter_to, from + progress, len); return 0; } static __always_inline size_t memcpy_from_iter(void *iter_from, size_t progress, size_t len, void *to, void *priv2) { memcpy(to + progress, iter_from, len); return 0; } /* * fault_in_iov_iter_readable - fault in iov iterator for reading * @i: iterator * @size: maximum length * * Fault in one or more iovecs of the given iov_iter, to a maximum length of * @size. For each iovec, fault in each page that constitutes the iovec. * * Returns the number of bytes not faulted in (like copy_to_user() and * copy_from_user()). * * Always returns 0 for non-userspace iterators.
*/ size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size) { if (iter_is_ubuf(i)) { size_t n = min(size, iov_iter_count(i)); n -= fault_in_readable(i->ubuf + i->iov_offset, n); return size - n; } else if (iter_is_iovec(i)) { size_t count = min(size, iov_iter_count(i)); const struct iovec *p; size_t skip; size -= count; for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) { size_t len = min(count, p->iov_len - skip); size_t ret; if (unlikely(!len)) continue; ret = fault_in_readable(p->iov_base + skip, len); count -= len - ret; if (ret) break; } return count + size; } return 0; } EXPORT_SYMBOL(fault_in_iov_iter_readable); /* * fault_in_iov_iter_writeable - fault in iov iterator for writing * @i: iterator * @size: maximum length * * Faults in the iterator using get_user_pages(), i.e., without triggering * hardware page faults. This is primarily useful when we already know that * some or all of the pages in @i aren't in memory. * * Returns the number of bytes not faulted in, like copy_to_user() and * copy_from_user(). * * Always returns 0 for non-user-space iterators. */ size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size) { if (iter_is_ubuf(i)) { size_t n = min(size, iov_iter_count(i)); n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n); return size - n; } else if (iter_is_iovec(i)) { size_t count = min(size, iov_iter_count(i)); const struct iovec *p; size_t skip; size -= count; for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) { size_t len = min(count, p->iov_len - skip); size_t ret; if (unlikely(!len)) continue; ret = fault_in_safe_writeable(p->iov_base + skip, len); count -= len - ret; if (ret) break; } return count + size; } return 0; } EXPORT_SYMBOL(fault_in_iov_iter_writeable); void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov, unsigned long nr_segs, size_t count) { WARN_ON(direction & ~(READ | WRITE)); *i = (struct iov_iter) { .iter_type = ITER_IOVEC, .nofault = false, .data_source = direction, .__iov = iov, .nr_segs = nr_segs, .iov_offset = 0, .count = count }; } EXPORT_SYMBOL(iov_iter_init); size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) { if (WARN_ON_ONCE(i->data_source)) return 0; if (user_backed_iter(i)) might_fault(); return iterate_and_advance(i, bytes, (void *)addr, copy_to_user_iter, memcpy_to_iter); } EXPORT_SYMBOL(_copy_to_iter); #ifdef CONFIG_ARCH_HAS_COPY_MC static __always_inline size_t copy_to_user_iter_mc(void __user *iter_to, size_t progress, size_t len, void *from, void *priv2) { if (access_ok(iter_to, len)) { from += progress; instrument_copy_to_user(iter_to, from, len); len = copy_mc_to_user(iter_to, from, len); } return len; } static __always_inline size_t memcpy_to_iter_mc(void *iter_to, size_t progress, size_t len, void *from, void *priv2) { return copy_mc_to_kernel(iter_to, from + progress, len); } /** * _copy_mc_to_iter - copy to iter with source memory error exception handling * @addr: source kernel address * @bytes: total transfer length * @i: destination iterator * * The pmem driver deploys this for the dax operation * (dax_copy_to_iter()) for dax reads (bypass page-cache and the * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes * successfully copied. * * The main differences between this and typical _copy_to_iter(). * * * Typical tail/residue handling after a fault retries the copy * byte-by-byte until the fault happens again. 
Re-triggering machine * checks is potentially fatal so the implementation uses source * alignment and poison alignment assumptions to avoid re-triggering * hardware exceptions. * * * ITER_KVEC and ITER_BVEC can return short copies. Compare to * copy_to_iter() where only ITER_IOVEC attempts might return a short copy. * * Return: number of bytes copied (may be %0) */ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i) { if (WARN_ON_ONCE(i->data_source)) return 0; if (user_backed_iter(i)) might_fault(); return iterate_and_advance(i, bytes, (void *)addr, copy_to_user_iter_mc, memcpy_to_iter_mc); } EXPORT_SYMBOL_GPL(_copy_mc_to_iter); #endif /* CONFIG_ARCH_HAS_COPY_MC */ static __always_inline size_t __copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) { return iterate_and_advance(i, bytes, addr, copy_from_user_iter, memcpy_from_iter); } size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) { if (WARN_ON_ONCE(!i->data_source)) return 0; if (user_backed_iter(i)) might_fault(); return __copy_from_iter(addr, bytes, i); } EXPORT_SYMBOL(_copy_from_iter); static __always_inline size_t copy_from_user_iter_nocache(void __user *iter_from, size_t progress, size_t len, void *to, void *priv2) { return __copy_from_user_inatomic_nocache(to + progress, iter_from, len); } size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) { if (WARN_ON_ONCE(!i->data_source)) return 0; return iterate_and_advance(i, bytes, addr, copy_from_user_iter_nocache, memcpy_from_iter); } EXPORT_SYMBOL(_copy_from_iter_nocache); #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE static __always_inline size_t copy_from_user_iter_flushcache(void __user *iter_from, size_t progress, size_t len, void *to, void *priv2) { return __copy_from_user_flushcache(to + progress, iter_from, len); } static __always_inline size_t memcpy_from_iter_flushcache(void *iter_from, size_t progress, size_t len, void *to, void *priv2) { memcpy_flushcache(to + progress, iter_from, len); return 0; } /** * _copy_from_iter_flushcache - write destination through cpu cache * @addr: destination kernel address * @bytes: total transfer length * @i: source iterator * * The pmem driver arranges for filesystem-dax to use this facility via * dax_copy_from_iter() for ensuring that writes to persistent memory * are flushed through the CPU cache. It is differentiated from * _copy_from_iter_nocache() in that guarantees all data is flushed for * all iterator types. The _copy_from_iter_nocache() only attempts to * bypass the cache for the ITER_IOVEC case, and on some archs may use * instructions that strand dirty-data in the cache. * * Return: number of bytes copied (may be %0) */ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) { if (WARN_ON_ONCE(!i->data_source)) return 0; return iterate_and_advance(i, bytes, addr, copy_from_user_iter_flushcache, memcpy_from_iter_flushcache); } EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache); #endif static inline bool page_copy_sane(struct page *page, size_t offset, size_t n) { struct page *head; size_t v = n + offset; /* * The general case needs to access the page order in order * to compute the page size. * However, we mostly deal with order-0 pages and thus can * avoid a possible cache line miss for requests that fit all * page orders. 
*/ if (n <= v && v <= PAGE_SIZE) return true; head = compound_head(page); v += (page - head) << PAGE_SHIFT; if (WARN_ON(n > v || v > page_size(head))) return false; return true; } size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i) { size_t res = 0; if (!page_copy_sane(page, offset, bytes)) return 0; if (WARN_ON_ONCE(i->data_source)) return 0; page += offset / PAGE_SIZE; // first subpage offset %= PAGE_SIZE; while (1) { void *kaddr = kmap_local_page(page); size_t n = min(bytes, (size_t)PAGE_SIZE - offset); n = _copy_to_iter(kaddr + offset, n, i); kunmap_local(kaddr); res += n; bytes -= n; if (!bytes || !n) break; offset += n; if (offset == PAGE_SIZE) { page++; offset = 0; } } return res; } EXPORT_SYMBOL(copy_page_to_iter); size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t bytes, struct iov_iter *i) { size_t res = 0; if (!page_copy_sane(page, offset, bytes)) return 0; if (WARN_ON_ONCE(i->data_source)) return 0; page += offset / PAGE_SIZE; // first subpage offset %= PAGE_SIZE; while (1) { void *kaddr = kmap_local_page(page); size_t n = min(bytes, (size_t)PAGE_SIZE - offset); n = iterate_and_advance(i, n, kaddr + offset, copy_to_user_iter_nofault, memcpy_to_iter); kunmap_local(kaddr); res += n; bytes -= n; if (!bytes || !n) break; offset += n; if (offset == PAGE_SIZE) { page++; offset = 0; } } return res; } EXPORT_SYMBOL(copy_page_to_iter_nofault); size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i) { size_t res = 0; if (!page_copy_sane(page, offset, bytes)) return 0; page += offset / PAGE_SIZE; // first subpage offset %= PAGE_SIZE; while (1) { void *kaddr = kmap_local_page(page); size_t n = min(bytes, (size_t)PAGE_SIZE - offset); n = _copy_from_iter(kaddr + offset, n, i); kunmap_local(kaddr); res += n; bytes -= n; if (!bytes || !n) break; offset += n; if (offset == PAGE_SIZE) { page++; offset = 0; } } return res; } EXPORT_SYMBOL(copy_page_from_iter); static __always_inline size_t zero_to_user_iter(void __user *iter_to, size_t progress, size_t len, void *priv, void *priv2) { return clear_user(iter_to, len); } static __always_inline size_t zero_to_iter(void *iter_to, size_t progress, size_t len, void *priv, void *priv2) { memset(iter_to, 0, len); return 0; } size_t iov_iter_zero(size_t bytes, struct iov_iter *i) { return iterate_and_advance(i, bytes, NULL, zero_to_user_iter, zero_to_iter); } EXPORT_SYMBOL(iov_iter_zero); size_t copy_page_from_iter_atomic(struct page *page, size_t offset, size_t bytes, struct iov_iter *i) { size_t n, copied = 0; if (!page_copy_sane(page, offset, bytes)) return 0; if (WARN_ON_ONCE(!i->data_source)) return 0; do { char *p; n = bytes - copied; if (PageHighMem(page)) { page += offset / PAGE_SIZE; offset %= PAGE_SIZE; n = min_t(size_t, n, PAGE_SIZE - offset); } p = kmap_atomic(page) + offset; n = __copy_from_iter(p, n, i); kunmap_atomic(p); copied += n; offset += n; } while (PageHighMem(page) && copied != bytes && n > 0); return copied; } EXPORT_SYMBOL(copy_page_from_iter_atomic); static void iov_iter_bvec_advance(struct iov_iter *i, size_t size) { const struct bio_vec *bvec, *end; if (!i->count) return; i->count -= size; size += i->iov_offset; for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) { if (likely(size < bvec->bv_len)) break; size -= bvec->bv_len; } i->iov_offset = size; i->nr_segs -= bvec - i->bvec; i->bvec = bvec; } static void iov_iter_iovec_advance(struct iov_iter *i, size_t size) { const struct iovec *iov, *end; if (!i->count) 
return; i->count -= size; size += i->iov_offset; // from beginning of current segment for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) { if (likely(size < iov->iov_len)) break; size -= iov->iov_len; } i->iov_offset = size; i->nr_segs -= iov - iter_iov(i); i->__iov = iov; } void iov_iter_advance(struct iov_iter *i, size_t size) { if (unlikely(i->count < size)) size = i->count; if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) { i->iov_offset += size; i->count -= size; } else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) { /* iovec and kvec have identical layouts */ iov_iter_iovec_advance(i, size); } else if (iov_iter_is_bvec(i)) { iov_iter_bvec_advance(i, size); } else if (iov_iter_is_discard(i)) { i->count -= size; } } EXPORT_SYMBOL(iov_iter_advance); void iov_iter_revert(struct iov_iter *i, size_t unroll) { if (!unroll) return; if (WARN_ON(unroll > MAX_RW_COUNT)) return; i->count += unroll; if (unlikely(iov_iter_is_discard(i))) return; if (unroll <= i->iov_offset) { i->iov_offset -= unroll; return; } unroll -= i->iov_offset; if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) { BUG(); /* We should never go beyond the start of the specified * range since we might then be straying into pages that * aren't pinned. */ } else if (iov_iter_is_bvec(i)) { const struct bio_vec *bvec = i->bvec; while (1) { size_t n = (--bvec)->bv_len; i->nr_segs++; if (unroll <= n) { i->bvec = bvec; i->iov_offset = n - unroll; return; } unroll -= n; } } else { /* same logics for iovec and kvec */ const struct iovec *iov = iter_iov(i); while (1) { size_t n = (--iov)->iov_len; i->nr_segs++; if (unroll <= n) { i->__iov = iov; i->iov_offset = n - unroll; return; } unroll -= n; } } } EXPORT_SYMBOL(iov_iter_revert); /* * Return the count of just the current iov_iter segment. */ size_t iov_iter_single_seg_count(const struct iov_iter *i) { if (i->nr_segs > 1) { if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) return min(i->count, iter_iov(i)->iov_len - i->iov_offset); if (iov_iter_is_bvec(i)) return min(i->count, i->bvec->bv_len - i->iov_offset); } return i->count; } EXPORT_SYMBOL(iov_iter_single_seg_count); void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec, unsigned long nr_segs, size_t count) { WARN_ON(direction & ~(READ | WRITE)); *i = (struct iov_iter){ .iter_type = ITER_KVEC, .data_source = direction, .kvec = kvec, .nr_segs = nr_segs, .iov_offset = 0, .count = count }; } EXPORT_SYMBOL(iov_iter_kvec); void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec, unsigned long nr_segs, size_t count) { WARN_ON(direction & ~(READ | WRITE)); *i = (struct iov_iter){ .iter_type = ITER_BVEC, .data_source = direction, .bvec = bvec, .nr_segs = nr_segs, .iov_offset = 0, .count = count }; } EXPORT_SYMBOL(iov_iter_bvec); /** * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray * @i: The iterator to initialise. * @direction: The direction of the transfer. * @xarray: The xarray to access. * @start: The start file position. * @count: The size of the I/O buffer in bytes. * * Set up an I/O iterator to either draw data out of the pages attached to an * inode or to inject data into those pages. The pages *must* be prevented * from evaporation, either by taking a ref on them or locking them by the * caller. 
*/ void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray, loff_t start, size_t count) { BUG_ON(direction & ~1); *i = (struct iov_iter) { .iter_type = ITER_XARRAY, .data_source = direction, .xarray = xarray, .xarray_start = start, .count = count, .iov_offset = 0 }; } EXPORT_SYMBOL(iov_iter_xarray); /** * iov_iter_discard - Initialise an I/O iterator that discards data * @i: The iterator to initialise. * @direction: The direction of the transfer. * @count: The size of the I/O buffer in bytes. * * Set up an I/O iterator that just discards everything that's written to it. * It's only available as a READ iterator. */ void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count) { BUG_ON(direction != READ); *i = (struct iov_iter){ .iter_type = ITER_DISCARD, .data_source = false, .count = count, .iov_offset = 0 }; } EXPORT_SYMBOL(iov_iter_discard); static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask, unsigned len_mask) { const struct iovec *iov = iter_iov(i); size_t size = i->count; size_t skip = i->iov_offset; do { size_t len = iov->iov_len - skip; if (len > size) len = size; if (len & len_mask) return false; if ((unsigned long)(iov->iov_base + skip) & addr_mask) return false; iov++; size -= len; skip = 0; } while (size); return true; } static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask, unsigned len_mask) { const struct bio_vec *bvec = i->bvec; unsigned skip = i->iov_offset; size_t size = i->count; do { size_t len = bvec->bv_len; if (len > size) len = size; if (len & len_mask) return false; if ((unsigned long)(bvec->bv_offset + skip) & addr_mask) return false; bvec++; size -= len; skip = 0; } while (size); return true; } /** * iov_iter_is_aligned() - Check if the addresses and lengths of each segments * are aligned to the parameters. 
* * @i: &struct iov_iter to restore * @addr_mask: bit mask to check against the iov element's addresses * @len_mask: bit mask to check against the iov element's lengths * * Return: false if any addresses or lengths intersect with the provided masks */ bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask, unsigned len_mask) { if (likely(iter_is_ubuf(i))) { if (i->count & len_mask) return false; if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask) return false; return true; } if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) return iov_iter_aligned_iovec(i, addr_mask, len_mask); if (iov_iter_is_bvec(i)) return iov_iter_aligned_bvec(i, addr_mask, len_mask); if (iov_iter_is_xarray(i)) { if (i->count & len_mask) return false; if ((i->xarray_start + i->iov_offset) & addr_mask) return false; } return true; } EXPORT_SYMBOL_GPL(iov_iter_is_aligned); static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i) { const struct iovec *iov = iter_iov(i); unsigned long res = 0; size_t size = i->count; size_t skip = i->iov_offset; do { size_t len = iov->iov_len - skip; if (len) { res |= (unsigned long)iov->iov_base + skip; if (len > size) len = size; res |= len; size -= len; } iov++; skip = 0; } while (size); return res; } static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i) { const struct bio_vec *bvec = i->bvec; unsigned res = 0; size_t size = i->count; unsigned skip = i->iov_offset; do { size_t len = bvec->bv_len - skip; res |= (unsigned long)bvec->bv_offset + skip; if (len > size) len = size; res |= len; bvec++; size -= len; skip = 0; } while (size); return res; } unsigned long iov_iter_alignment(const struct iov_iter *i) { if (likely(iter_is_ubuf(i))) { size_t size = i->count; if (size) return ((unsigned long)i->ubuf + i->iov_offset) | size; return 0; } /* iovec and kvec have identical layouts */ if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) return iov_iter_alignment_iovec(i); if (iov_iter_is_bvec(i)) return iov_iter_alignment_bvec(i); if (iov_iter_is_xarray(i)) return (i->xarray_start + i->iov_offset) | i->count; return 0; } EXPORT_SYMBOL(iov_iter_alignment); unsigned long iov_iter_gap_alignment(const struct iov_iter *i) { unsigned long res = 0; unsigned long v = 0; size_t size = i->count; unsigned k; if (iter_is_ubuf(i)) return 0; if (WARN_ON(!iter_is_iovec(i))) return ~0U; for (k = 0; k < i->nr_segs; k++) { const struct iovec *iov = iter_iov(i) + k; if (iov->iov_len) { unsigned long base = (unsigned long)iov->iov_base; if (v) // if not the first one res |= base | v; // this start | previous end v = base + iov->iov_len; if (size <= iov->iov_len) break; size -= iov->iov_len; } } return res; } EXPORT_SYMBOL(iov_iter_gap_alignment); static int want_pages_array(struct page ***res, size_t size, size_t start, unsigned int maxpages) { unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE); if (count > maxpages) count = maxpages; WARN_ON(!count); // caller should've prevented that if (!*res) { *res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL); if (!*res) return 0; } return count; } static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa, pgoff_t index, unsigned int nr_pages) { XA_STATE(xas, xa, index); struct page *page; unsigned int ret = 0; rcu_read_lock(); for (page = xas_load(&xas); page; page = xas_next(&xas)) { if (xas_retry(&xas, page)) continue; /* Has the page moved or been split? 
*/ if (unlikely(page != xas_reload(&xas))) { xas_reset(&xas); continue; } pages[ret] = find_subpage(page, xas.xa_index); get_page(pages[ret]); if (++ret == nr_pages) break; } rcu_read_unlock(); return ret; } static ssize_t iter_xarray_get_pages(struct iov_iter *i, struct page ***pages, size_t maxsize, unsigned maxpages, size_t *_start_offset) { unsigned nr, offset, count; pgoff_t index; loff_t pos; pos = i->xarray_start + i->iov_offset; index = pos >> PAGE_SHIFT; offset = pos & ~PAGE_MASK; *_start_offset = offset; count = want_pages_array(pages, maxsize, offset, maxpages); if (!count) return -ENOMEM; nr = iter_xarray_populate_pages(*pages, i->xarray, index, count); if (nr == 0) return 0; maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize); i->iov_offset += maxsize; i->count -= maxsize; return maxsize; } /* must be done on non-empty ITER_UBUF or ITER_IOVEC one */ static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size) { size_t skip; long k; if (iter_is_ubuf(i)) return (unsigned long)i->ubuf + i->iov_offset; for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) { const struct iovec *iov = iter_iov(i) + k; size_t len = iov->iov_len - skip; if (unlikely(!len)) continue; if (*size > len) *size = len; return (unsigned long)iov->iov_base + skip; } BUG(); // if it had been empty, we wouldn't get called } /* must be done on non-empty ITER_BVEC one */ static struct page *first_bvec_segment(const struct iov_iter *i, size_t *size, size_t *start) { struct page *page; size_t skip = i->iov_offset, len; len = i->bvec->bv_len - skip; if (*size > len) *size = len; skip += i->bvec->bv_offset; page = i->bvec->bv_page + skip / PAGE_SIZE; *start = skip % PAGE_SIZE; return page; } static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, size_t maxsize, unsigned int maxpages, size_t *start) { unsigned int n, gup_flags = 0; if (maxsize > i->count) maxsize = i->count; if (!maxsize) return 0; if (maxsize > MAX_RW_COUNT) maxsize = MAX_RW_COUNT; if (likely(user_backed_iter(i))) { unsigned long addr; int res; if (iov_iter_rw(i) != WRITE) gup_flags |= FOLL_WRITE; if (i->nofault) gup_flags |= FOLL_NOFAULT; addr = first_iovec_segment(i, &maxsize); *start = addr % PAGE_SIZE; addr &= PAGE_MASK; n = want_pages_array(pages, maxsize, *start, maxpages); if (!n) return -ENOMEM; res = get_user_pages_fast(addr, n, gup_flags, *pages); if (unlikely(res <= 0)) return res; maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start); iov_iter_advance(i, maxsize); return maxsize; } if (iov_iter_is_bvec(i)) { struct page **p; struct page *page; page = first_bvec_segment(i, &maxsize, start); n = want_pages_array(pages, maxsize, *start, maxpages); if (!n) return -ENOMEM; p = *pages; for (int k = 0; k < n; k++) get_page(p[k] = page + k); maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start); i->count -= maxsize; i->iov_offset += maxsize; if (i->iov_offset == i->bvec->bv_len) { i->iov_offset = 0; i->bvec++; i->nr_segs--; } return maxsize; } if (iov_iter_is_xarray(i)) return iter_xarray_get_pages(i, pages, maxsize, maxpages, start); return -EFAULT; } ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages, size_t maxsize, unsigned maxpages, size_t *start) { if (!maxpages) return 0; BUG_ON(!pages); return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages, start); } EXPORT_SYMBOL(iov_iter_get_pages2); ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages, size_t maxsize, size_t *start) { ssize_t len; *pages = NULL; len = 
__iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start); if (len <= 0) { kvfree(*pages); *pages = NULL; } return len; } EXPORT_SYMBOL(iov_iter_get_pages_alloc2); static int iov_npages(const struct iov_iter *i, int maxpages) { size_t skip = i->iov_offset, size = i->count; const struct iovec *p; int npages = 0; for (p = iter_iov(i); size; skip = 0, p++) { unsigned offs = offset_in_page(p->iov_base + skip); size_t len = min(p->iov_len - skip, size); if (len) { size -= len; npages += DIV_ROUND_UP(offs + len, PAGE_SIZE); if (unlikely(npages > maxpages)) return maxpages; } } return npages; } static int bvec_npages(const struct iov_iter *i, int maxpages) { size_t skip = i->iov_offset, size = i->count; const struct bio_vec *p; int npages = 0; for (p = i->bvec; size; skip = 0, p++) { unsigned offs = (p->bv_offset + skip) % PAGE_SIZE; size_t len = min(p->bv_len - skip, size); size -= len; npages += DIV_ROUND_UP(offs + len, PAGE_SIZE); if (unlikely(npages > maxpages)) return maxpages; } return npages; } int iov_iter_npages(const struct iov_iter *i, int maxpages) { if (unlikely(!i->count)) return 0; if (likely(iter_is_ubuf(i))) { unsigned offs = offset_in_page(i->ubuf + i->iov_offset); int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE); return min(npages, maxpages); } /* iovec and kvec have identical layouts */ if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) return iov_npages(i, maxpages); if (iov_iter_is_bvec(i)) return bvec_npages(i, maxpages); if (iov_iter_is_xarray(i)) { unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE; int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE); return min(npages, maxpages); } return 0; } EXPORT_SYMBOL(iov_iter_npages); const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags) { *new = *old; if (iov_iter_is_bvec(new)) return new->bvec = kmemdup(new->bvec, new->nr_segs * sizeof(struct bio_vec), flags); else if (iov_iter_is_kvec(new) || iter_is_iovec(new)) /* iovec and kvec have identical layout */ return new->__iov = kmemdup(new->__iov, new->nr_segs * sizeof(struct iovec), flags); return NULL; } EXPORT_SYMBOL(dup_iter); static __noclone int copy_compat_iovec_from_user(struct iovec *iov, const struct iovec __user *uvec, u32 nr_segs) { const struct compat_iovec __user *uiov = (const struct compat_iovec __user *)uvec; int ret = -EFAULT; u32 i; if (!user_access_begin(uiov, nr_segs * sizeof(*uiov))) return -EFAULT; for (i = 0; i < nr_segs; i++) { compat_uptr_t buf; compat_ssize_t len; unsafe_get_user(len, &uiov[i].iov_len, uaccess_end); unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end); /* check for compat_size_t not fitting in compat_ssize_t .. */ if (len < 0) { ret = -EINVAL; goto uaccess_end; } iov[i].iov_base = compat_ptr(buf); iov[i].iov_len = len; } ret = 0; uaccess_end: user_access_end(); return ret; } static __noclone int copy_iovec_from_user(struct iovec *iov, const struct iovec __user *uiov, unsigned long nr_segs) { int ret = -EFAULT; if (!user_access_begin(uiov, nr_segs * sizeof(*uiov))) return -EFAULT; do { void __user *buf; ssize_t len; unsafe_get_user(len, &uiov->iov_len, uaccess_end); unsafe_get_user(buf, &uiov->iov_base, uaccess_end); /* check for size_t not fitting in ssize_t .. 
*/ if (unlikely(len < 0)) { ret = -EINVAL; goto uaccess_end; } iov->iov_base = buf; iov->iov_len = len; uiov++; iov++; } while (--nr_segs); ret = 0; uaccess_end: user_access_end(); return ret; } struct iovec *iovec_from_user(const struct iovec __user *uvec, unsigned long nr_segs, unsigned long fast_segs, struct iovec *fast_iov, bool compat) { struct iovec *iov = fast_iov; int ret; /* * SuS says "The readv() function *may* fail if the iovcnt argument was * less than or equal to 0, or greater than {IOV_MAX}". Linux has * traditionally returned zero for zero segments, so... */ if (nr_segs == 0) return iov; if (nr_segs > UIO_MAXIOV) return ERR_PTR(-EINVAL); if (nr_segs > fast_segs) { iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL); if (!iov) return ERR_PTR(-ENOMEM); } if (unlikely(compat)) ret = copy_compat_iovec_from_user(iov, uvec, nr_segs); else ret = copy_iovec_from_user(iov, uvec, nr_segs); if (ret) { if (iov != fast_iov) kfree(iov); return ERR_PTR(ret); } return iov; } /* * Single segment iovec supplied by the user, import it as ITER_UBUF. */ static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec, struct iovec **iovp, struct iov_iter *i, bool compat) { struct iovec *iov = *iovp; ssize_t ret; if (compat) ret = copy_compat_iovec_from_user(iov, uvec, 1); else ret = copy_iovec_from_user(iov, uvec, 1); if (unlikely(ret)) return ret; ret = import_ubuf(type, iov->iov_base, iov->iov_len, i); if (unlikely(ret)) return ret; *iovp = NULL; return i->count; } ssize_t __import_iovec(int type, const struct iovec __user *uvec, unsigned nr_segs, unsigned fast_segs, struct iovec **iovp, struct iov_iter *i, bool compat) { ssize_t total_len = 0; unsigned long seg; struct iovec *iov; if (nr_segs == 1) return __import_iovec_ubuf(type, uvec, iovp, i, compat); iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat); if (IS_ERR(iov)) { *iovp = NULL; return PTR_ERR(iov); } /* * According to the Single Unix Specification we should return EINVAL if * an element length is < 0 when cast to ssize_t or if the total length * would overflow the ssize_t return value of the system call. * * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the * overflow case. */ for (seg = 0; seg < nr_segs; seg++) { ssize_t len = (ssize_t)iov[seg].iov_len; if (!access_ok(iov[seg].iov_base, len)) { if (iov != *iovp) kfree(iov); *iovp = NULL; return -EFAULT; } if (len > MAX_RW_COUNT - total_len) { len = MAX_RW_COUNT - total_len; iov[seg].iov_len = len; } total_len += len; } iov_iter_init(i, type, iov, nr_segs, total_len); if (iov == *iovp) *iovp = NULL; else *iovp = iov; return total_len; } /** * import_iovec() - Copy an array of &struct iovec from userspace * into the kernel, check that it is valid, and initialize a new * &struct iov_iter iterator to access it. * * @type: One of %READ or %WRITE. * @uvec: Pointer to the userspace array. * @nr_segs: Number of elements in userspace array. * @fast_segs: Number of elements in *@iovp. * @iovp: (input and output parameter) Pointer to pointer to (usually small * on-stack) kernel array. * @i: Pointer to iterator that will be initialized on success. * * If the array pointed to by *@iovp is large enough to hold all @nr_segs, * then this function places %NULL in *@iovp on return. Otherwise, a new * array will be allocated and the result placed in *@iovp. This means that * the caller may call kfree() on *@iovp regardless of whether the small * on-stack array was used or not (and regardless of whether this function * returns an error or not).
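 *
 * A minimal usage sketch from a hypothetical caller (variable names here are
 * illustrative, not taken from this file):
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(ITER_SOURCE, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	... consume &iter ...
 *	kfree(iov);
 *
 * The kfree() at the end is safe whether or not the on-stack array was used.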
* * Return: Negative error code on error, bytes imported on success */ ssize_t import_iovec(int type, const struct iovec __user *uvec, unsigned nr_segs, unsigned fast_segs, struct iovec **iovp, struct iov_iter *i) { return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i, in_compat_syscall()); } EXPORT_SYMBOL(import_iovec); int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i) { if (len > MAX_RW_COUNT) len = MAX_RW_COUNT; if (unlikely(!access_ok(buf, len))) return -EFAULT; iov_iter_ubuf(i, rw, buf, len); return 0; } EXPORT_SYMBOL_GPL(import_ubuf); /** * iov_iter_restore() - Restore a &struct iov_iter to the same state as when * iov_iter_save_state() was called. * * @i: &struct iov_iter to restore * @state: state to restore from * * Used after iov_iter_save_state() to restore @i, if operations may * have advanced it. * * Note: only works on ITER_IOVEC, ITER_BVEC, ITER_KVEC and ITER_UBUF */ void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state) { if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) && !iter_is_ubuf(i)) && !iov_iter_is_kvec(i)) return; i->iov_offset = state->iov_offset; i->count = state->count; if (iter_is_ubuf(i)) return; /* * For the *vec iters, nr_segs + iov is constant - if we increment * the vec, then we also decrement the nr_segs count. Hence we don't * need to track both of these, just one is enough and we can deduce * the other from that. ITER_KVEC and ITER_IOVEC are the same struct * size, so we can just increment the iov pointer as they are unionized. * ITER_BVEC _may_ be the same size on some archs, but on others it is * not. Be safe and handle it separately. */ BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec)); if (iov_iter_is_bvec(i)) i->bvec -= state->nr_segs - i->nr_segs; else i->__iov -= state->nr_segs - i->nr_segs; i->nr_segs = state->nr_segs; } /* * Extract a list of contiguous pages from an ITER_XARRAY iterator. This does not * get references on the pages, nor does it get a pin on them. */ static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i, struct page ***pages, size_t maxsize, unsigned int maxpages, iov_iter_extraction_t extraction_flags, size_t *offset0) { struct page *page, **p; unsigned int nr = 0, offset; loff_t pos = i->xarray_start + i->iov_offset; pgoff_t index = pos >> PAGE_SHIFT; XA_STATE(xas, i->xarray, index); offset = pos & ~PAGE_MASK; *offset0 = offset; maxpages = want_pages_array(pages, maxsize, offset, maxpages); if (!maxpages) return -ENOMEM; p = *pages; rcu_read_lock(); for (page = xas_load(&xas); page; page = xas_next(&xas)) { if (xas_retry(&xas, page)) continue; /* Has the page moved or been split? */ if (unlikely(page != xas_reload(&xas))) { xas_reset(&xas); continue; } p[nr++] = find_subpage(page, xas.xa_index); if (nr == maxpages) break; } rcu_read_unlock(); maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize); iov_iter_advance(i, maxsize); return maxsize; } /* * Extract a list of contiguous pages from an ITER_BVEC iterator. This does * not get references on the pages, nor does it get a pin on them.
*/ static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i, struct page ***pages, size_t maxsize, unsigned int maxpages, iov_iter_extraction_t extraction_flags, size_t *offset0) { struct page **p, *page; size_t skip = i->iov_offset, offset, size; int k; for (;;) { if (i->nr_segs == 0) return 0; size = min(maxsize, i->bvec->bv_len - skip); if (size) break; i->iov_offset = 0; i->nr_segs--; i->bvec++; skip = 0; } skip += i->bvec->bv_offset; page = i->bvec->bv_page + skip / PAGE_SIZE; offset = skip % PAGE_SIZE; *offset0 = offset; maxpages = want_pages_array(pages, size, offset, maxpages); if (!maxpages) return -ENOMEM; p = *pages; for (k = 0; k < maxpages; k++) p[k] = page + k; size = min_t(size_t, size, maxpages * PAGE_SIZE - offset); iov_iter_advance(i, size); return size; } /* * Extract a list of virtually contiguous pages from an ITER_KVEC iterator. * This does not get references on the pages, nor does it get a pin on them. */ static ssize_t iov_iter_extract_kvec_pages(struct iov_iter *i, struct page ***pages, size_t maxsize, unsigned int maxpages, iov_iter_extraction_t extraction_flags, size_t *offset0) { struct page **p, *page; const void *kaddr; size_t skip = i->iov_offset, offset, len, size; int k; for (;;) { if (i->nr_segs == 0) return 0; size = min(maxsize, i->kvec->iov_len - skip); if (size) break; i->iov_offset = 0; i->nr_segs--; i->kvec++; skip = 0; } kaddr = i->kvec->iov_base + skip; offset = (unsigned long)kaddr & ~PAGE_MASK; *offset0 = offset; maxpages = want_pages_array(pages, size, offset, maxpages); if (!maxpages) return -ENOMEM; p = *pages; kaddr -= offset; len = offset + size; for (k = 0; k < maxpages; k++) { size_t seg = min_t(size_t, len, PAGE_SIZE); if (is_vmalloc_or_module_addr(kaddr)) page = vmalloc_to_page(kaddr); else page = virt_to_page(kaddr); p[k] = page; len -= seg; kaddr += PAGE_SIZE; } size = min_t(size_t, size, maxpages * PAGE_SIZE - offset); iov_iter_advance(i, size); return size; } /* * Extract a list of contiguous pages from a user iterator and get a pin on * each of them. This should only be used if the iterator is user-backed * (ITER_IOVEC/ITER_UBUF). * * It does not get refs on the pages, but the pages must be unpinned by the * caller once the transfer is complete. * * This is safe to use where background IO/DMA *is* going to be modifying * the buffer; using a pin rather than a ref forces fork() to give the * child a copy of the page.
*/ static ssize_t iov_iter_extract_user_pages(struct iov_iter *i, struct page ***pages, size_t maxsize, unsigned int maxpages, iov_iter_extraction_t extraction_flags, size_t *offset0) { unsigned long addr; unsigned int gup_flags = 0; size_t offset; int res; if (i->data_source == ITER_DEST) gup_flags |= FOLL_WRITE; if (extraction_flags & ITER_ALLOW_P2PDMA) gup_flags |= FOLL_PCI_P2PDMA; if (i->nofault) gup_flags |= FOLL_NOFAULT; addr = first_iovec_segment(i, &maxsize); *offset0 = offset = addr % PAGE_SIZE; addr &= PAGE_MASK; maxpages = want_pages_array(pages, maxsize, offset, maxpages); if (!maxpages) return -ENOMEM; res = pin_user_pages_fast(addr, maxpages, gup_flags, *pages); if (unlikely(res <= 0)) return res; maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - offset); iov_iter_advance(i, maxsize); return maxsize; } /** * iov_iter_extract_pages - Extract a list of contiguous pages from an iterator * @i: The iterator to extract from * @pages: Where to return the list of pages * @maxsize: The maximum amount of iterator to extract * @maxpages: The maximum size of the list of pages * @extraction_flags: Flags to qualify request * @offset0: Where to return the starting offset into (*@pages)[0] * * Extract a list of contiguous pages from the current point of the iterator, * advancing the iterator. The maximum number of pages and the maximum amount * of page contents can be set. * * If *@pages is NULL, a page list will be allocated to the required size and * *@pages will be set to its base. If *@pages is not NULL, it will be assumed * that the caller allocated a page list at least @maxpages in size and this * will be filled in. * * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA * be allowed on the pages extracted. * * The iov_iter_extract_will_pin() function can be used to query how cleanup * should be performed. * * Extra refs or pins on the pages may be obtained as follows: * * (*) If the iterator is user-backed (ITER_IOVEC/ITER_UBUF), pins will be * added to the pages, but refs will not be taken. * iov_iter_extract_will_pin() will return true. * * (*) If the iterator is ITER_KVEC, ITER_BVEC or ITER_XARRAY, the pages are * merely listed; no extra refs or pins are obtained. * iov_iter_extract_will_pin() will return 0. * * Note also: * * (*) Use with ITER_DISCARD is not supported as that has no content. * * On success, the function sets *@pages to the new pagelist, if allocated, and * sets *offset0 to the offset into the first page. * * It may also return -ENOMEM and -EFAULT. */ ssize_t iov_iter_extract_pages(struct iov_iter *i, struct page ***pages, size_t maxsize, unsigned int maxpages, iov_iter_extraction_t extraction_flags, size_t *offset0) { maxsize = min_t(size_t, min_t(size_t, maxsize, i->count), MAX_RW_COUNT); if (!maxsize) return 0; if (likely(user_backed_iter(i))) return iov_iter_extract_user_pages(i, pages, maxsize, maxpages, extraction_flags, offset0); if (iov_iter_is_kvec(i)) return iov_iter_extract_kvec_pages(i, pages, maxsize, maxpages, extraction_flags, offset0); if (iov_iter_is_bvec(i)) return iov_iter_extract_bvec_pages(i, pages, maxsize, maxpages, extraction_flags, offset0); if (iov_iter_is_xarray(i)) return iov_iter_extract_xarray_pages(i, pages, maxsize, maxpages, extraction_flags, offset0); return -EFAULT; } EXPORT_SYMBOL_GPL(iov_iter_extract_pages);
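/*
 * A minimal caller sketch for iov_iter_extract_pages() (illustrative only,
 * not part of the original file): extract up to @maxpages pages, perform the
 * transfer, then release them as indicated by iov_iter_extract_will_pin().
 */
static ssize_t __maybe_unused example_extract_and_release(struct iov_iter *iter,
							   struct page **pages,
							   unsigned int maxpages)
{
	size_t offset, npages, k;
	ssize_t len;

	len = iov_iter_extract_pages(iter, &pages, MAX_RW_COUNT, maxpages,
				     0, &offset);
	if (len <= 0)
		return len;
	npages = DIV_ROUND_UP(offset + len, PAGE_SIZE);

	/* ... do the I/O against pages[0..npages-1], starting at @offset ... */

	if (iov_iter_extract_will_pin(iter))
		for (k = 0; k < npages; k++)
			unpin_user_page(pages[k]);
	return len;
}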
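/*
 * A minimal sketch (not part of the original file) of the save/advance/restore
 * pattern supported by iov_iter_save_state() and iov_iter_restore(); the kvec
 * array and length are assumed to be supplied by the caller.
 */
static void __maybe_unused example_save_restore(const struct kvec *kv,
						unsigned long nr_segs,
						size_t len)
{
	struct iov_iter iter;
	struct iov_iter_state state;

	iov_iter_kvec(&iter, ITER_SOURCE, kv, nr_segs, len);
	iov_iter_save_state(&iter, &state);
	iov_iter_advance(&iter, len / 2);	/* e.g. after a short transfer */
	iov_iter_restore(&iter, &state);	/* rewind to the saved position */
}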
// SPDX-License-Identifier: GPL-2.0-or-later /* * TUN - Universal TUN/TAP device driver. * Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com> * * $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $ */ /* * Changes: * * Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14 * Add TUNSETLINK ioctl to set the link encapsulation * * Mark Smith <markzzzsmith@yahoo.com.au> * Use eth_random_addr() for tap MAC address. * * Harald Roelle <harald.roelle@ifi.lmu.de> 2004/04/20 * Fixes in packet dropping, queue length setting and queue wakeup. * Increased default tx queue length. * Added ethtool API. * Minor cleanups * * Daniel Podlejski <underley@underley.eu.org> * Modifications for 2.3.99-pre5 kernel.
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define DRV_NAME "tun" #define DRV_VERSION "1.6" #define DRV_DESCRIPTION "Universal TUN/TAP device driver" #define DRV_COPYRIGHT "(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>" #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/sched/signal.h> #include <linux/major.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/fcntl.h> #include <linux/init.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/miscdevice.h> #include <linux/ethtool.h> #include <linux/rtnetlink.h> #include <linux/compat.h> #include <linux/if.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/if_tun.h> #include <linux/if_vlan.h> #include <linux/crc32.h> #include <linux/math.h> #include <linux/nsproxy.h> #include <linux/virtio_net.h> #include <linux/rcupdate.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/rtnetlink.h> #include <net/sock.h> #include <net/xdp.h> #include <net/ip_tunnels.h> #include <linux/seq_file.h> #include <linux/uio.h> #include <linux/skb_array.h> #include <linux/bpf.h> #include <linux/bpf_trace.h> #include <linux/mutex.h> #include <linux/ieee802154.h> #include <linux/if_ltalk.h> #include <uapi/linux/if_fddi.h> #include <uapi/linux/if_hippi.h> #include <uapi/linux/if_fc.h> #include <net/ax25.h> #include <net/rose.h> #include <net/6lowpan.h> #include <net/rps.h> #include <linux/uaccess.h> #include <linux/proc_fs.h> static void tun_default_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd); #define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) /* TUN device flags */ /* IFF_ATTACH_QUEUE is never stored in device flags, * overload it to mean fasync when stored there. */ #define TUN_FASYNC IFF_ATTACH_QUEUE /* High bits in flags field are unused. */ #define TUN_VNET_LE 0x80000000 #define TUN_VNET_BE 0x40000000 #define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \ IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS) #define GOODCOPY_LEN 128 #define FLT_EXACT_COUNT 8 struct tap_filter { unsigned int count; /* Number of addrs. Zero means disabled */ u32 mask[2]; /* Mask of the hashed addrs */ unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN]; }; /* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal * to max number of VCPUs in guest. */ #define MAX_TAP_QUEUES 256 #define MAX_TAP_FLOWS 4096 #define TUN_FLOW_EXPIRE (3 * HZ) /* A tun_file connects an open character device to a tuntap netdevice. It * also contains all socket related structures (except sock_fprog and tap_filter) * to serve as one transmit queue for tuntap device. The sock_fprog and * tap_filter were kept in tun_struct since they were used for filtering for the * netdevice not for a specific queue (at least I didn't see the requirement for * this). * * RCU usage: * The tun_file and tun_struct are loosely coupled, the pointer from one to the * other can only be read while rcu_read_lock or rtnl_lock is held. 
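 *
 * A typical reader pattern (sketch; it mirrors tun_get() further below):
 *
 *	rcu_read_lock();
 *	tun = rcu_dereference(tfile->tun);
 *	if (tun)
 *		dev_hold(tun->dev);
 *	rcu_read_unlock();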
*/ struct tun_file { struct sock sk; struct socket socket; struct tun_struct __rcu *tun; struct fasync_struct *fasync; /* only used for fasync */ unsigned int flags; union { u16 queue_index; unsigned int ifindex; }; struct napi_struct napi; bool napi_enabled; bool napi_frags_enabled; struct mutex napi_mutex; /* Protects access to the above napi */ struct list_head next; struct tun_struct *detached; struct ptr_ring tx_ring; struct xdp_rxq_info xdp_rxq; }; struct tun_page { struct page *page; int count; }; struct tun_flow_entry { struct hlist_node hash_link; struct rcu_head rcu; struct tun_struct *tun; u32 rxhash; u32 rps_rxhash; int queue_index; unsigned long updated ____cacheline_aligned_in_smp; }; #define TUN_NUM_FLOW_ENTRIES 1024 #define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1) struct tun_prog { struct rcu_head rcu; struct bpf_prog *prog; }; /* Since the socket was moved to tun_file, to preserve the behavior of a persistent * device, the socket filter, sndbuf and vnet header size are restored when the * file is attached to a persistent device. */ struct tun_struct { struct tun_file __rcu *tfiles[MAX_TAP_QUEUES]; unsigned int numqueues; unsigned int flags; kuid_t owner; kgid_t group; struct net_device *dev; netdev_features_t set_features; #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \ NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4) int align; int vnet_hdr_sz; int sndbuf; struct tap_filter txflt; struct sock_fprog fprog; /* protected by rtnl lock */ bool filter_attached; u32 msg_enable; spinlock_t lock; struct hlist_head flows[TUN_NUM_FLOW_ENTRIES]; struct timer_list flow_gc_timer; unsigned long ageing_time; unsigned int numdisabled; struct list_head disabled; void *security; u32 flow_count; u32 rx_batched; atomic_long_t rx_frame_errors; struct bpf_prog __rcu *xdp_prog; struct tun_prog __rcu *steering_prog; struct tun_prog __rcu *filter_prog; struct ethtool_link_ksettings link_ksettings; /* init args */ struct file *file; struct ifreq *ifr; }; struct veth { __be16 h_vlan_proto; __be16 h_vlan_TCI; }; static void tun_flow_init(struct tun_struct *tun); static void tun_flow_uninit(struct tun_struct *tun); static int tun_napi_receive(struct napi_struct *napi, int budget) { struct tun_file *tfile = container_of(napi, struct tun_file, napi); struct sk_buff_head *queue = &tfile->sk.sk_write_queue; struct sk_buff_head process_queue; struct sk_buff *skb; int received = 0; __skb_queue_head_init(&process_queue); spin_lock(&queue->lock); skb_queue_splice_tail_init(queue, &process_queue); spin_unlock(&queue->lock); while (received < budget && (skb = __skb_dequeue(&process_queue))) { napi_gro_receive(napi, skb); ++received; } if (!skb_queue_empty(&process_queue)) { spin_lock(&queue->lock); skb_queue_splice(&process_queue, queue); spin_unlock(&queue->lock); } return received; } static int tun_napi_poll(struct napi_struct *napi, int budget) { unsigned int received; received = tun_napi_receive(napi, budget); if (received < budget) napi_complete_done(napi, received); return received; } static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile, bool napi_en, bool napi_frags) { tfile->napi_enabled = napi_en; tfile->napi_frags_enabled = napi_en && napi_frags; if (napi_en) { netif_napi_add_tx(tun->dev, &tfile->napi, tun_napi_poll); napi_enable(&tfile->napi); } } static void tun_napi_enable(struct tun_file *tfile) { if (tfile->napi_enabled) napi_enable(&tfile->napi); } static void tun_napi_disable(struct tun_file *tfile) { if (tfile->napi_enabled) napi_disable(&tfile->napi); } static
void tun_napi_del(struct tun_file *tfile) { if (tfile->napi_enabled) netif_napi_del(&tfile->napi); } static bool tun_napi_frags_enabled(const struct tun_file *tfile) { return tfile->napi_frags_enabled; } #ifdef CONFIG_TUN_VNET_CROSS_LE static inline bool tun_legacy_is_little_endian(struct tun_struct *tun) { return tun->flags & TUN_VNET_BE ? false : virtio_legacy_is_little_endian(); } static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp) { int be = !!(tun->flags & TUN_VNET_BE); if (put_user(be, argp)) return -EFAULT; return 0; } static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp) { int be; if (get_user(be, argp)) return -EFAULT; if (be) tun->flags |= TUN_VNET_BE; else tun->flags &= ~TUN_VNET_BE; return 0; } #else static inline bool tun_legacy_is_little_endian(struct tun_struct *tun) { return virtio_legacy_is_little_endian(); } static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp) { return -EINVAL; } static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp) { return -EINVAL; } #endif /* CONFIG_TUN_VNET_CROSS_LE */ static inline bool tun_is_little_endian(struct tun_struct *tun) { return tun->flags & TUN_VNET_LE || tun_legacy_is_little_endian(tun); } static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val) { return __virtio16_to_cpu(tun_is_little_endian(tun), val); } static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val) { return __cpu_to_virtio16(tun_is_little_endian(tun), val); } static inline u32 tun_hashfn(u32 rxhash) { return rxhash & TUN_MASK_FLOW_ENTRIES; } static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash) { struct tun_flow_entry *e; hlist_for_each_entry_rcu(e, head, hash_link) { if (e->rxhash == rxhash) return e; } return NULL; } static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun, struct hlist_head *head, u32 rxhash, u16 queue_index) { struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC); if (e) { netif_info(tun, tx_queued, tun->dev, "create flow: hash %u index %u\n", rxhash, queue_index); e->updated = jiffies; e->rxhash = rxhash; e->rps_rxhash = 0; e->queue_index = queue_index; e->tun = tun; hlist_add_head_rcu(&e->hash_link, head); ++tun->flow_count; } return e; } static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e) { netif_info(tun, tx_queued, tun->dev, "delete flow: hash %u index %u\n", e->rxhash, e->queue_index); hlist_del_rcu(&e->hash_link); kfree_rcu(e, rcu); --tun->flow_count; } static void tun_flow_flush(struct tun_struct *tun) { int i; spin_lock_bh(&tun->lock); for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) { struct tun_flow_entry *e; struct hlist_node *n; hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) tun_flow_delete(tun, e); } spin_unlock_bh(&tun->lock); } static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index) { int i; spin_lock_bh(&tun->lock); for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) { struct tun_flow_entry *e; struct hlist_node *n; hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) { if (e->queue_index == queue_index) tun_flow_delete(tun, e); } } spin_unlock_bh(&tun->lock); } static void tun_flow_cleanup(struct timer_list *t) { struct tun_struct *tun = from_timer(tun, t, flow_gc_timer); unsigned long delay = tun->ageing_time; unsigned long next_timer = jiffies + delay; unsigned long count = 0; int i; spin_lock(&tun->lock); for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) { struct tun_flow_entry *e; struct hlist_node *n; hlist_for_each_entry_safe(e, n, &tun->flows[i], 
hash_link) { unsigned long this_timer; this_timer = e->updated + delay; if (time_before_eq(this_timer, jiffies)) { tun_flow_delete(tun, e); continue; } count++; if (time_before(this_timer, next_timer)) next_timer = this_timer; } } if (count) mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer)); spin_unlock(&tun->lock); } static void tun_flow_update(struct tun_struct *tun, u32 rxhash, struct tun_file *tfile) { struct hlist_head *head; struct tun_flow_entry *e; unsigned long delay = tun->ageing_time; u16 queue_index = tfile->queue_index; head = &tun->flows[tun_hashfn(rxhash)]; rcu_read_lock(); e = tun_flow_find(head, rxhash); if (likely(e)) { /* TODO: keep queueing to old queue until it's empty? */ if (READ_ONCE(e->queue_index) != queue_index) WRITE_ONCE(e->queue_index, queue_index); if (e->updated != jiffies) e->updated = jiffies; sock_rps_record_flow_hash(e->rps_rxhash); } else { spin_lock_bh(&tun->lock); if (!tun_flow_find(head, rxhash) && tun->flow_count < MAX_TAP_FLOWS) tun_flow_create(tun, head, rxhash, queue_index); if (!timer_pending(&tun->flow_gc_timer)) mod_timer(&tun->flow_gc_timer, round_jiffies_up(jiffies + delay)); spin_unlock_bh(&tun->lock); } rcu_read_unlock(); } /* Save the hash received in the stack receive path and update the * flow_hash table accordingly. */ static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash) { if (unlikely(e->rps_rxhash != hash)) e->rps_rxhash = hash; } /* We try to identify a flow through its rxhash. The reason that * we do not check rxq no. is that some cards (e.g. 82599) choose * the rxq based on the txq where the last packet of the flow comes. As * the userspace application moves between processors, we may get a * different rxq no. here. */ static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb) { struct tun_flow_entry *e; u32 txq, numqueues; numqueues = READ_ONCE(tun->numqueues); txq = __skb_get_hash_symmetric(skb); e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq); if (e) { tun_flow_save_rps_rxhash(e, txq); txq = e->queue_index; } else { txq = reciprocal_scale(txq, numqueues); } return txq; } static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb) { struct tun_prog *prog; u32 numqueues; u16 ret = 0; numqueues = READ_ONCE(tun->numqueues); if (!numqueues) return 0; prog = rcu_dereference(tun->steering_prog); if (prog) ret = bpf_prog_run_clear_cb(prog->prog, skb); return ret % numqueues; } static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev) { struct tun_struct *tun = netdev_priv(dev); u16 ret; rcu_read_lock(); if (rcu_dereference(tun->steering_prog)) ret = tun_ebpf_select_queue(tun, skb); else ret = tun_automq_select_queue(tun, skb); rcu_read_unlock(); return ret; } static inline bool tun_not_capable(struct tun_struct *tun) { const struct cred *cred = current_cred(); struct net *net = dev_net(tun->dev); return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) || (gid_valid(tun->group) && !in_egroup_p(tun->group))) && !ns_capable(net->user_ns, CAP_NET_ADMIN); } static void tun_set_real_num_queues(struct tun_struct *tun) { netif_set_real_num_tx_queues(tun->dev, tun->numqueues); netif_set_real_num_rx_queues(tun->dev, tun->numqueues); } static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile) { tfile->detached = tun; list_add_tail(&tfile->next, &tun->disabled); ++tun->numdisabled; } static struct tun_struct *tun_enable_queue(struct tun_file *tfile) { struct tun_struct *tun = tfile->detached;
tfile->detached = NULL; list_del_init(&tfile->next); --tun->numdisabled; return tun; } void tun_ptr_free(void *ptr) { if (!ptr) return; if (tun_is_xdp_frame(ptr)) { struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); xdp_return_frame(xdpf); } else { __skb_array_destroy_skb(ptr); } } EXPORT_SYMBOL_GPL(tun_ptr_free); static void tun_queue_purge(struct tun_file *tfile) { void *ptr; while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL) tun_ptr_free(ptr); skb_queue_purge(&tfile->sk.sk_write_queue); skb_queue_purge(&tfile->sk.sk_error_queue); } static void __tun_detach(struct tun_file *tfile, bool clean) { struct tun_file *ntfile; struct tun_struct *tun; tun = rtnl_dereference(tfile->tun); if (tun && clean) { if (!tfile->detached) tun_napi_disable(tfile); tun_napi_del(tfile); } if (tun && !tfile->detached) { u16 index = tfile->queue_index; BUG_ON(index >= tun->numqueues); rcu_assign_pointer(tun->tfiles[index], tun->tfiles[tun->numqueues - 1]); ntfile = rtnl_dereference(tun->tfiles[index]); ntfile->queue_index = index; ntfile->xdp_rxq.queue_index = index; rcu_assign_pointer(tun->tfiles[tun->numqueues - 1], NULL); --tun->numqueues; if (clean) { RCU_INIT_POINTER(tfile->tun, NULL); sock_put(&tfile->sk); } else { tun_disable_queue(tun, tfile); tun_napi_disable(tfile); } synchronize_net(); tun_flow_delete_by_queue(tun, tun->numqueues + 1); /* Drop read queue */ tun_queue_purge(tfile); tun_set_real_num_queues(tun); } else if (tfile->detached && clean) { tun = tun_enable_queue(tfile); sock_put(&tfile->sk); } if (clean) { if (tun && tun->numqueues == 0 && tun->numdisabled == 0) { netif_carrier_off(tun->dev); if (!(tun->flags & IFF_PERSIST) && tun->dev->reg_state == NETREG_REGISTERED) unregister_netdevice(tun->dev); } if (tun) xdp_rxq_info_unreg(&tfile->xdp_rxq); ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free); } } static void tun_detach(struct tun_file *tfile, bool clean) { struct tun_struct *tun; struct net_device *dev; rtnl_lock(); tun = rtnl_dereference(tfile->tun); dev = tun ? 
tun->dev : NULL; __tun_detach(tfile, clean); if (dev) netdev_state_change(dev); rtnl_unlock(); if (clean) sock_put(&tfile->sk); } static void tun_detach_all(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); struct tun_file *tfile, *tmp; int i, n = tun->numqueues; for (i = 0; i < n; i++) { tfile = rtnl_dereference(tun->tfiles[i]); BUG_ON(!tfile); tun_napi_disable(tfile); tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN; tfile->socket.sk->sk_data_ready(tfile->socket.sk); RCU_INIT_POINTER(tfile->tun, NULL); --tun->numqueues; } list_for_each_entry(tfile, &tun->disabled, next) { tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN; tfile->socket.sk->sk_data_ready(tfile->socket.sk); RCU_INIT_POINTER(tfile->tun, NULL); } BUG_ON(tun->numqueues != 0); synchronize_net(); for (i = 0; i < n; i++) { tfile = rtnl_dereference(tun->tfiles[i]); tun_napi_del(tfile); /* Drop read queue */ tun_queue_purge(tfile); xdp_rxq_info_unreg(&tfile->xdp_rxq); sock_put(&tfile->sk); } list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { tun_napi_del(tfile); tun_enable_queue(tfile); tun_queue_purge(tfile); xdp_rxq_info_unreg(&tfile->xdp_rxq); sock_put(&tfile->sk); } BUG_ON(tun->numdisabled != 0); if (tun->flags & IFF_PERSIST) module_put(THIS_MODULE); } static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter, bool napi, bool napi_frags, bool publish_tun) { struct tun_file *tfile = file->private_data; struct net_device *dev = tun->dev; int err; err = security_tun_dev_attach(tfile->socket.sk, tun->security); if (err < 0) goto out; err = -EINVAL; if (rtnl_dereference(tfile->tun) && !tfile->detached) goto out; err = -EBUSY; if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1) goto out; err = -E2BIG; if (!tfile->detached && tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES) goto out; err = 0; /* Re-attach the filter to persist device */ if (!skip_filter && (tun->filter_attached == true)) { lock_sock(tfile->socket.sk); err = sk_attach_filter(&tun->fprog, tfile->socket.sk); release_sock(tfile->socket.sk); if (!err) goto out; } if (!tfile->detached && ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL, tun_ptr_free)) { err = -ENOMEM; goto out; } tfile->queue_index = tun->numqueues; tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN; if (tfile->detached) { /* Re-attach detached tfile, updating XDP queue_index */ WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq)); if (tfile->xdp_rxq.queue_index != tfile->queue_index) tfile->xdp_rxq.queue_index = tfile->queue_index; } else { /* Setup XDP RX-queue info, for new tfile getting attached */ err = xdp_rxq_info_reg(&tfile->xdp_rxq, tun->dev, tfile->queue_index, 0); if (err < 0) goto out; err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL); if (err < 0) { xdp_rxq_info_unreg(&tfile->xdp_rxq); goto out; } err = 0; } if (tfile->detached) { tun_enable_queue(tfile); tun_napi_enable(tfile); } else { sock_hold(&tfile->sk); tun_napi_init(tun, tfile, napi, napi_frags); } if (rtnl_dereference(tun->xdp_prog)) sock_set_flag(&tfile->sk, SOCK_XDP); /* device is allowed to go away first, so no need to hold extra * refcnt. */ /* Publish tfile->tun and tun->tfiles only after we've fully * initialized tfile; otherwise we risk using half-initialized * object. 
*/ if (publish_tun) rcu_assign_pointer(tfile->tun, tun); rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); tun->numqueues++; tun_set_real_num_queues(tun); out: return err; } static struct tun_struct *tun_get(struct tun_file *tfile) { struct tun_struct *tun; rcu_read_lock(); tun = rcu_dereference(tfile->tun); if (tun) dev_hold(tun->dev); rcu_read_unlock(); return tun; } static void tun_put(struct tun_struct *tun) { dev_put(tun->dev); } /* TAP filtering */ static void addr_hash_set(u32 *mask, const u8 *addr) { int n = ether_crc(ETH_ALEN, addr) >> 26; mask[n >> 5] |= (1 << (n & 31)); } static unsigned int addr_hash_test(const u32 *mask, const u8 *addr) { int n = ether_crc(ETH_ALEN, addr) >> 26; return mask[n >> 5] & (1 << (n & 31)); } static int update_filter(struct tap_filter *filter, void __user *arg) { struct { u8 u[ETH_ALEN]; } *addr; struct tun_filter uf; int err, alen, n, nexact; if (copy_from_user(&uf, arg, sizeof(uf))) return -EFAULT; if (!uf.count) { /* Disabled */ filter->count = 0; return 0; } alen = ETH_ALEN * uf.count; addr = memdup_user(arg + sizeof(uf), alen); if (IS_ERR(addr)) return PTR_ERR(addr); /* The filter is updated without holding any locks. Which is * perfectly safe. We disable it first and in the worst * case we'll accept a few undesired packets. */ filter->count = 0; wmb(); /* Use first set of addresses as an exact filter */ for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++) memcpy(filter->addr[n], addr[n].u, ETH_ALEN); nexact = n; /* Remaining multicast addresses are hashed, * unicast will leave the filter disabled. */ memset(filter->mask, 0, sizeof(filter->mask)); for (; n < uf.count; n++) { if (!is_multicast_ether_addr(addr[n].u)) { err = 0; /* no filter */ goto free_addr; } addr_hash_set(filter->mask, addr[n].u); } /* For ALLMULTI just set the mask to all ones. * This overrides the mask populated above. */ if ((uf.flags & TUN_FLT_ALLMULTI)) memset(filter->mask, ~0, sizeof(filter->mask)); /* Now enable the filter */ wmb(); filter->count = nexact; /* Return the number of exact filters */ err = nexact; free_addr: kfree(addr); return err; } /* Returns: 0 - drop, !=0 - accept */ static int run_filter(struct tap_filter *filter, const struct sk_buff *skb) { /* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect * at this point. */ struct ethhdr *eh = (struct ethhdr *) skb->data; int i; /* Exact match */ for (i = 0; i < filter->count; i++) if (ether_addr_equal(eh->h_dest, filter->addr[i])) return 1; /* Inexact match (multicast only) */ if (is_multicast_ether_addr(eh->h_dest)) return addr_hash_test(filter->mask, eh->h_dest); return 0; } /* * Checks whether the packet is accepted or not. 
* Returns: 0 - drop, !=0 - accept */ static int check_filter(struct tap_filter *filter, const struct sk_buff *skb) { if (!filter->count) return 1; return run_filter(filter, skb); } /* Network device part of the driver */ static const struct ethtool_ops tun_ethtool_ops; static int tun_net_init(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); struct ifreq *ifr = tun->ifr; int err; spin_lock_init(&tun->lock); err = security_tun_dev_alloc_security(&tun->security); if (err < 0) return err; tun_flow_init(tun); dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; dev->features = dev->hw_features | NETIF_F_LLTX; dev->vlan_features = dev->features & ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX); tun->flags = (tun->flags & ~TUN_FEATURES) | (ifr->ifr_flags & TUN_FEATURES); INIT_LIST_HEAD(&tun->disabled); err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI, ifr->ifr_flags & IFF_NAPI_FRAGS, false); if (err < 0) { tun_flow_uninit(tun); security_tun_dev_free_security(tun->security); return err; } return 0; } /* Net device detach from fd. */ static void tun_net_uninit(struct net_device *dev) { tun_detach_all(dev); } /* Net device open. */ static int tun_net_open(struct net_device *dev) { netif_tx_start_all_queues(dev); return 0; } /* Net device close. */ static int tun_net_close(struct net_device *dev) { netif_tx_stop_all_queues(dev); return 0; } /* Net device start xmit */ static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb) { #ifdef CONFIG_RPS if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) { /* Select queue was not called for the skbuff, so we extract the * RPS hash and save it into the flow_table here. */ struct tun_flow_entry *e; __u32 rxhash; rxhash = __skb_get_hash_symmetric(skb); e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash); if (e) tun_flow_save_rps_rxhash(e, rxhash); } #endif } static unsigned int run_ebpf_filter(struct tun_struct *tun, struct sk_buff *skb, int len) { struct tun_prog *prog = rcu_dereference(tun->filter_prog); if (prog) len = bpf_prog_run_clear_cb(prog->prog, skb); return len; } /* Net device start xmit */ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); enum skb_drop_reason drop_reason; int txq = skb->queue_mapping; struct netdev_queue *queue; struct tun_file *tfile; int len = skb->len; rcu_read_lock(); tfile = rcu_dereference(tun->tfiles[txq]); /* Drop packet if interface is not attached */ if (!tfile) { drop_reason = SKB_DROP_REASON_DEV_READY; goto drop; } if (!rcu_dereference(tun->steering_prog)) tun_automq_xmit(tun, skb); netif_info(tun, tx_queued, tun->dev, "%s %d\n", __func__, skb->len); /* Drop if the filter does not like it. * This is a noop if the filter is disabled. * Filter can be enabled only for the TAP devices. 
*/ if (!check_filter(&tun->txflt, skb)) { drop_reason = SKB_DROP_REASON_TAP_TXFILTER; goto drop; } if (tfile->socket.sk->sk_filter && sk_filter(tfile->socket.sk, skb)) { drop_reason = SKB_DROP_REASON_SOCKET_FILTER; goto drop; } len = run_ebpf_filter(tun, skb, len); if (len == 0) { drop_reason = SKB_DROP_REASON_TAP_FILTER; goto drop; } if (pskb_trim(skb, len)) { drop_reason = SKB_DROP_REASON_NOMEM; goto drop; } if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) { drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT; goto drop; } skb_tx_timestamp(skb); /* Orphan the skb - required as we might hang on to it * for indefinite time. */ skb_orphan(skb); nf_reset_ct(skb); if (ptr_ring_produce(&tfile->tx_ring, skb)) { drop_reason = SKB_DROP_REASON_FULL_RING; goto drop; } /* NETIF_F_LLTX requires to do our own update of trans_start */ queue = netdev_get_tx_queue(dev, txq); txq_trans_cond_update(queue); /* Notify and wake up reader process */ if (tfile->flags & TUN_FASYNC) kill_fasync(&tfile->fasync, SIGIO, POLL_IN); tfile->socket.sk->sk_data_ready(tfile->socket.sk); rcu_read_unlock(); return NETDEV_TX_OK; drop: dev_core_stats_tx_dropped_inc(dev); skb_tx_error(skb); kfree_skb_reason(skb, drop_reason); rcu_read_unlock(); return NET_XMIT_DROP; } static void tun_net_mclist(struct net_device *dev) { /* * This callback is supposed to deal with mc filter in * _rx_ path and has nothing to do with the _tx_ path. * In rx path we always accept everything userspace gives us. */ } static netdev_features_t tun_net_fix_features(struct net_device *dev, netdev_features_t features) { struct tun_struct *tun = netdev_priv(dev); return (features & tun->set_features) | (features & ~TUN_USER_FEATURES); } static void tun_set_headroom(struct net_device *dev, int new_hr) { struct tun_struct *tun = netdev_priv(dev); if (new_hr < NET_SKB_PAD) new_hr = NET_SKB_PAD; tun->align = new_hr; } static void tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct tun_struct *tun = netdev_priv(dev); dev_get_tstats64(dev, stats); stats->rx_frame_errors += (unsigned long)atomic_long_read(&tun->rx_frame_errors); } static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog, struct netlink_ext_ack *extack) { struct tun_struct *tun = netdev_priv(dev); struct tun_file *tfile; struct bpf_prog *old_prog; int i; old_prog = rtnl_dereference(tun->xdp_prog); rcu_assign_pointer(tun->xdp_prog, prog); if (old_prog) bpf_prog_put(old_prog); for (i = 0; i < tun->numqueues; i++) { tfile = rtnl_dereference(tun->tfiles[i]); if (prog) sock_set_flag(&tfile->sk, SOCK_XDP); else sock_reset_flag(&tfile->sk, SOCK_XDP); } list_for_each_entry(tfile, &tun->disabled, next) { if (prog) sock_set_flag(&tfile->sk, SOCK_XDP); else sock_reset_flag(&tfile->sk, SOCK_XDP); } return 0; } static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp) { switch (xdp->command) { case XDP_SETUP_PROG: return tun_xdp_set(dev, xdp->prog, xdp->extack); default: return -EINVAL; } } static int tun_net_change_carrier(struct net_device *dev, bool new_carrier) { if (new_carrier) { struct tun_struct *tun = netdev_priv(dev); if (!tun->numqueues) return -EPERM; netif_carrier_on(dev); } else { netif_carrier_off(dev); } return 0; } static const struct net_device_ops tun_netdev_ops = { .ndo_init = tun_net_init, .ndo_uninit = tun_net_uninit, .ndo_open = tun_net_open, .ndo_stop = tun_net_close, .ndo_start_xmit = tun_net_xmit, .ndo_fix_features = tun_net_fix_features, .ndo_select_queue = tun_select_queue, .ndo_set_rx_headroom = tun_set_headroom, .ndo_get_stats64 = 
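/*
 * Minimal userspace sketch to go with the transmit path above:
 * tun_net_xmit() queues into the per-queue ring and wakes the reader via
 * sk_data_ready()/SIGIO, so a simple consumer just poll()s the tun fd and
 * read()s one frame per call.  Assumes a fd already set up with TUNSETIFF
 * and IFF_NO_PI; rx_loop() is an invented name.
 *
 *	#include <poll.h>
 *	#include <unistd.h>
 *
 *	static void rx_loop(int tunfd)
 *	{
 *		unsigned char frame[2048];
 *		struct pollfd pfd = { .fd = tunfd, .events = POLLIN };
 *
 *		for (;;) {
 *			if (poll(&pfd, 1, -1) <= 0)
 *				break;
 *			if (read(tunfd, frame, sizeof(frame)) <= 0)
 *				break;
 *			// each successful read() returns exactly one packet
 *		}
 *	}
 */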
tun_net_get_stats64, .ndo_change_carrier = tun_net_change_carrier, }; static void __tun_xdp_flush_tfile(struct tun_file *tfile) { /* Notify and wake up reader process */ if (tfile->flags & TUN_FASYNC) kill_fasync(&tfile->fasync, SIGIO, POLL_IN); tfile->socket.sk->sk_data_ready(tfile->socket.sk); } static int tun_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags) { struct tun_struct *tun = netdev_priv(dev); struct tun_file *tfile; u32 numqueues; int nxmit = 0; int i; if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) return -EINVAL; rcu_read_lock(); resample: numqueues = READ_ONCE(tun->numqueues); if (!numqueues) { rcu_read_unlock(); return -ENXIO; /* Caller will free/return all frames */ } tfile = rcu_dereference(tun->tfiles[smp_processor_id() % numqueues]); if (unlikely(!tfile)) goto resample; spin_lock(&tfile->tx_ring.producer_lock); for (i = 0; i < n; i++) { struct xdp_frame *xdp = frames[i]; /* Encode the XDP flag into lowest bit for consumer to differ * XDP buffer from sk_buff. */ void *frame = tun_xdp_to_ptr(xdp); if (__ptr_ring_produce(&tfile->tx_ring, frame)) { dev_core_stats_tx_dropped_inc(dev); break; } nxmit++; } spin_unlock(&tfile->tx_ring.producer_lock); if (flags & XDP_XMIT_FLUSH) __tun_xdp_flush_tfile(tfile); rcu_read_unlock(); return nxmit; } static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp) { struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp); int nxmit; if (unlikely(!frame)) return -EOVERFLOW; nxmit = tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH); if (!nxmit) xdp_return_frame_rx_napi(frame); return nxmit; } static const struct net_device_ops tap_netdev_ops = { .ndo_init = tun_net_init, .ndo_uninit = tun_net_uninit, .ndo_open = tun_net_open, .ndo_stop = tun_net_close, .ndo_start_xmit = tun_net_xmit, .ndo_fix_features = tun_net_fix_features, .ndo_set_rx_mode = tun_net_mclist, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_select_queue = tun_select_queue, .ndo_features_check = passthru_features_check, .ndo_set_rx_headroom = tun_set_headroom, .ndo_bpf = tun_xdp, .ndo_xdp_xmit = tun_xdp_xmit, .ndo_change_carrier = tun_net_change_carrier, }; static void tun_flow_init(struct tun_struct *tun) { int i; for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) INIT_HLIST_HEAD(&tun->flows[i]); tun->ageing_time = TUN_FLOW_EXPIRE; timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0); mod_timer(&tun->flow_gc_timer, round_jiffies_up(jiffies + tun->ageing_time)); } static void tun_flow_uninit(struct tun_struct *tun) { del_timer_sync(&tun->flow_gc_timer); tun_flow_flush(tun); } #define MIN_MTU 68 #define MAX_MTU 65535 /* Initialize net device. */ static void tun_net_initialize(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); switch (tun->flags & TUN_TYPE_MASK) { case IFF_TUN: dev->netdev_ops = &tun_netdev_ops; dev->header_ops = &ip_tunnel_header_ops; /* Point-to-Point TUN Device */ dev->hard_header_len = 0; dev->addr_len = 0; dev->mtu = 1500; /* Zero header length */ dev->type = ARPHRD_NONE; dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; break; case IFF_TAP: dev->netdev_ops = &tap_netdev_ops; /* Ethernet TAP Device */ ether_setup(dev); dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; eth_hw_addr_random(dev); /* Currently tun does not support XDP, only tap does. 
*/ dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | NETDEV_XDP_ACT_NDO_XMIT; break; } dev->min_mtu = MIN_MTU; dev->max_mtu = MAX_MTU - dev->hard_header_len; } static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile) { struct sock *sk = tfile->socket.sk; return (tun->dev->flags & IFF_UP) && sock_writeable(sk); } /* Character device part */ /* Poll */ static __poll_t tun_chr_poll(struct file *file, poll_table *wait) { struct tun_file *tfile = file->private_data; struct tun_struct *tun = tun_get(tfile); struct sock *sk; __poll_t mask = 0; if (!tun) return EPOLLERR; sk = tfile->socket.sk; poll_wait(file, sk_sleep(sk), wait); if (!ptr_ring_empty(&tfile->tx_ring)) mask |= EPOLLIN | EPOLLRDNORM; /* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to * guarantee EPOLLOUT to be raised by either here or * tun_sock_write_space(). Then process could get notification * after it writes to a down device and meets -EIO. */ if (tun_sock_writeable(tun, tfile) || (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && tun_sock_writeable(tun, tfile))) mask |= EPOLLOUT | EPOLLWRNORM; if (tun->dev->reg_state != NETREG_REGISTERED) mask = EPOLLERR; tun_put(tun); return mask; } static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile, size_t len, const struct iov_iter *it) { struct sk_buff *skb; size_t linear; int err; int i; if (it->nr_segs > MAX_SKB_FRAGS + 1 || len > (ETH_MAX_MTU - NET_SKB_PAD - NET_IP_ALIGN)) return ERR_PTR(-EMSGSIZE); local_bh_disable(); skb = napi_get_frags(&tfile->napi); local_bh_enable(); if (!skb) return ERR_PTR(-ENOMEM); linear = iov_iter_single_seg_count(it); err = __skb_grow(skb, linear); if (err) goto free; skb->len = len; skb->data_len = len - linear; skb->truesize += skb->data_len; for (i = 1; i < it->nr_segs; i++) { const struct iovec *iov = iter_iov(it); size_t fragsz = iov->iov_len; struct page *page; void *frag; if (fragsz == 0 || fragsz > PAGE_SIZE) { err = -EINVAL; goto free; } frag = netdev_alloc_frag(fragsz); if (!frag) { err = -ENOMEM; goto free; } page = virt_to_head_page(frag); skb_fill_page_desc(skb, i - 1, page, frag - page_address(page), fragsz); } return skb; free: /* frees skb and all frags allocated with napi_alloc_frag() */ napi_free_frags(&tfile->napi); return ERR_PTR(err); } /* prepad is the amount to reserve at front. len is length after that. * linear is a hint as to how much to copy (usually headers). */ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile, size_t prepad, size_t len, size_t linear, int noblock) { struct sock *sk = tfile->socket.sk; struct sk_buff *skb; int err; /* Under a page? Don't bother with paged skb. 
*/ if (prepad + len < PAGE_SIZE) linear = len; if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER); skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, &err, PAGE_ALLOC_COSTLY_ORDER); if (!skb) return ERR_PTR(err); skb_reserve(skb, prepad); skb_put(skb, linear); skb->data_len = len - linear; skb->len += len - linear; return skb; } static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, struct sk_buff *skb, int more) { struct sk_buff_head *queue = &tfile->sk.sk_write_queue; struct sk_buff_head process_queue; u32 rx_batched = tun->rx_batched; bool rcv = false; if (!rx_batched || (!more && skb_queue_empty(queue))) { local_bh_disable(); skb_record_rx_queue(skb, tfile->queue_index); netif_receive_skb(skb); local_bh_enable(); return; } spin_lock(&queue->lock); if (!more || skb_queue_len(queue) == rx_batched) { __skb_queue_head_init(&process_queue); skb_queue_splice_tail_init(queue, &process_queue); rcv = true; } else { __skb_queue_tail(queue, skb); } spin_unlock(&queue->lock); if (rcv) { struct sk_buff *nskb; local_bh_disable(); while ((nskb = __skb_dequeue(&process_queue))) { skb_record_rx_queue(nskb, tfile->queue_index); netif_receive_skb(nskb); } skb_record_rx_queue(skb, tfile->queue_index); netif_receive_skb(skb); local_bh_enable(); } } static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, int len, int noblock, bool zerocopy) { if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) return false; if (tfile->socket.sk->sk_sndbuf != INT_MAX) return false; if (!noblock) return false; if (zerocopy) return false; if (SKB_DATA_ALIGN(len + TUN_RX_PAD + XDP_PACKET_HEADROOM) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE) return false; return true; } static struct sk_buff *__tun_build_skb(struct tun_file *tfile, struct page_frag *alloc_frag, char *buf, int buflen, int len, int pad) { struct sk_buff *skb = build_skb(buf, buflen); if (!skb) return ERR_PTR(-ENOMEM); skb_reserve(skb, pad); skb_put(skb, len); skb_set_owner_w(skb, tfile->socket.sk); get_page(alloc_frag->page); alloc_frag->offset += buflen; return skb; } static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog, struct xdp_buff *xdp, u32 act) { int err; switch (act) { case XDP_REDIRECT: err = xdp_do_redirect(tun->dev, xdp, xdp_prog); if (err) { dev_core_stats_rx_dropped_inc(tun->dev); return err; } dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data); break; case XDP_TX: err = tun_xdp_tx(tun->dev, xdp); if (err < 0) { dev_core_stats_rx_dropped_inc(tun->dev); return err; } dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data); break; case XDP_PASS: break; default: bpf_warn_invalid_xdp_action(tun->dev, xdp_prog, act); fallthrough; case XDP_ABORTED: trace_xdp_exception(tun->dev, xdp_prog, act); fallthrough; case XDP_DROP: dev_core_stats_rx_dropped_inc(tun->dev); break; } return act; } static struct sk_buff *tun_build_skb(struct tun_struct *tun, struct tun_file *tfile, struct iov_iter *from, struct virtio_net_hdr *hdr, int len, int *skb_xdp) { struct page_frag *alloc_frag = &current->task_frag; struct bpf_prog *xdp_prog; int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); char *buf; size_t copied; int pad = TUN_RX_PAD; int err = 0; rcu_read_lock(); xdp_prog = rcu_dereference(tun->xdp_prog); if (xdp_prog) pad += XDP_PACKET_HEADROOM; buflen += SKB_DATA_ALIGN(len + pad); rcu_read_unlock(); alloc_frag->offset = ALIGN((u64)alloc_frag->offset, 
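/*
 * Illustrative sketch: tun_xdp_act() above handles the standard XDP
 * verdicts (PASS/TX/REDIRECT/ABORTED/DROP) for a program attached through
 * ndo_bpf, which only tap mode wires up.  A minimal program of the kind
 * that handler would see; section name and loading method depend on the
 * loader (libbpf assumed here).
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	SEC("xdp")
 *	int xdp_min_len(struct xdp_md *ctx)
 *	{
 *		void *data     = (void *)(long)ctx->data;
 *		void *data_end = (void *)(long)ctx->data_end;
 *
 *		// drop runt frames, pass everything else
 *		if (data + 14 > data_end)
 *			return XDP_DROP;
 *		return XDP_PASS;
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 */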
SMP_CACHE_BYTES); if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL))) return ERR_PTR(-ENOMEM); buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; copied = copy_page_from_iter(alloc_frag->page, alloc_frag->offset + pad, len, from); if (copied != len) return ERR_PTR(-EFAULT); /* There's a small window that XDP may be set after the check * of xdp_prog above, this should be rare and for simplicity * we do XDP on skb in case the headroom is not enough. */ if (hdr->gso_type || !xdp_prog) { *skb_xdp = 1; return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad); } *skb_xdp = 0; local_bh_disable(); rcu_read_lock(); xdp_prog = rcu_dereference(tun->xdp_prog); if (xdp_prog) { struct xdp_buff xdp; u32 act; xdp_init_buff(&xdp, buflen, &tfile->xdp_rxq); xdp_prepare_buff(&xdp, buf, pad, len, false); act = bpf_prog_run_xdp(xdp_prog, &xdp); if (act == XDP_REDIRECT || act == XDP_TX) { get_page(alloc_frag->page); alloc_frag->offset += buflen; } err = tun_xdp_act(tun, xdp_prog, &xdp, act); if (err < 0) { if (act == XDP_REDIRECT || act == XDP_TX) put_page(alloc_frag->page); goto out; } if (err == XDP_REDIRECT) xdp_do_flush(); if (err != XDP_PASS) goto out; pad = xdp.data - xdp.data_hard_start; len = xdp.data_end - xdp.data; } rcu_read_unlock(); local_bh_enable(); return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad); out: rcu_read_unlock(); local_bh_enable(); return NULL; } /* Get packet from user space buffer */ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, void *msg_control, struct iov_iter *from, int noblock, bool more) { struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; struct sk_buff *skb; size_t total_len = iov_iter_count(from); size_t len = total_len, align = tun->align, linear; struct virtio_net_hdr gso = { 0 }; int good_linear; int copylen; bool zerocopy = false; int err; u32 rxhash = 0; int skb_xdp = 1; bool frags = tun_napi_frags_enabled(tfile); enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED; if (!(tun->flags & IFF_NO_PI)) { if (len < sizeof(pi)) return -EINVAL; len -= sizeof(pi); if (!copy_from_iter_full(&pi, sizeof(pi), from)) return -EFAULT; } if (tun->flags & IFF_VNET_HDR) { int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); if (len < vnet_hdr_sz) return -EINVAL; len -= vnet_hdr_sz; if (!copy_from_iter_full(&gso, sizeof(gso), from)) return -EFAULT; if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len)) gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2); if (tun16_to_cpu(tun, gso.hdr_len) > len) return -EINVAL; iov_iter_advance(from, vnet_hdr_sz - sizeof(gso)); } if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) { align += NET_IP_ALIGN; if (unlikely(len < ETH_HLEN || (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN))) return -EINVAL; } good_linear = SKB_MAX_HEAD(align); if (msg_control) { struct iov_iter i = *from; /* There are 256 bytes to be copied in skb, so there is * enough room for skb expand head in case it is used. * The rest of the buffer is mapped from userspace. */ copylen = gso.hdr_len ? 
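/*
 * Userspace sketch of the write layout parsed by tun_get_user() above:
 * with IFF_VNET_HDR every write starts with a virtio_net_hdr of
 * tun->vnet_hdr_sz bytes (preceded by a struct tun_pi unless IFF_NO_PI).
 * Assumes an IFF_TAP | IFF_NO_PI | IFF_VNET_HDR fd whose vnet header size
 * is the default sizeof(struct virtio_net_hdr); send_frame() is an
 * invented name.
 *
 *	#include <string.h>
 *	#include <sys/uio.h>
 *	#include <linux/virtio_net.h>
 *
 *	static ssize_t send_frame(int tapfd, const void *eth_frame, size_t len)
 *	{
 *		struct virtio_net_hdr vnet;	// zeroed: no offloads requested
 *		struct iovec iov[2];
 *
 *		memset(&vnet, 0, sizeof(vnet));
 *		iov[0].iov_base = &vnet;
 *		iov[0].iov_len  = sizeof(vnet);
 *		iov[1].iov_base = (void *)eth_frame;
 *		iov[1].iov_len  = len;
 *		return writev(tapfd, iov, 2);
 *	}
 */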
tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN; if (copylen > good_linear) copylen = good_linear; linear = copylen; iov_iter_advance(&i, copylen); if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS) zerocopy = true; } if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) { /* For the packet that is not easy to be processed * (e.g gso or jumbo packet), we will do it at after * skb was created with generic XDP routine. */ skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp); err = PTR_ERR_OR_ZERO(skb); if (err) goto drop; if (!skb) return total_len; } else { if (!zerocopy) { copylen = len; if (tun16_to_cpu(tun, gso.hdr_len) > good_linear) linear = good_linear; else linear = tun16_to_cpu(tun, gso.hdr_len); } if (frags) { mutex_lock(&tfile->napi_mutex); skb = tun_napi_alloc_frags(tfile, copylen, from); /* tun_napi_alloc_frags() enforces a layout for the skb. * If zerocopy is enabled, then this layout will be * overwritten by zerocopy_sg_from_iter(). */ zerocopy = false; } else { if (!linear) linear = min_t(size_t, good_linear, copylen); skb = tun_alloc_skb(tfile, align, copylen, linear, noblock); } err = PTR_ERR_OR_ZERO(skb); if (err) goto drop; if (zerocopy) err = zerocopy_sg_from_iter(skb, from); else err = skb_copy_datagram_from_iter(skb, 0, from, len); if (err) { err = -EFAULT; drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT; goto drop; } } if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) { atomic_long_inc(&tun->rx_frame_errors); err = -EINVAL; goto free_skb; } switch (tun->flags & TUN_TYPE_MASK) { case IFF_TUN: if (tun->flags & IFF_NO_PI) { u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0; switch (ip_version) { case 4: pi.proto = htons(ETH_P_IP); break; case 6: pi.proto = htons(ETH_P_IPV6); break; default: err = -EINVAL; goto drop; } } skb_reset_mac_header(skb); skb->protocol = pi.proto; skb->dev = tun->dev; break; case IFF_TAP: if (frags && !pskb_may_pull(skb, ETH_HLEN)) { err = -ENOMEM; drop_reason = SKB_DROP_REASON_HDR_TRUNC; goto drop; } skb->protocol = eth_type_trans(skb, tun->dev); break; } /* copy skb_ubuf_info for callback when skb has no error */ if (zerocopy) { skb_zcopy_init(skb, msg_control); } else if (msg_control) { struct ubuf_info *uarg = msg_control; uarg->callback(NULL, uarg, false); } skb_reset_network_header(skb); skb_probe_transport_header(skb); skb_record_rx_queue(skb, tfile->queue_index); if (skb_xdp) { struct bpf_prog *xdp_prog; int ret; local_bh_disable(); rcu_read_lock(); xdp_prog = rcu_dereference(tun->xdp_prog); if (xdp_prog) { ret = do_xdp_generic(xdp_prog, &skb); if (ret != XDP_PASS) { rcu_read_unlock(); local_bh_enable(); goto unlock_frags; } } rcu_read_unlock(); local_bh_enable(); } /* Compute the costly rx hash only if needed for flow updates. * We may get a very small possibility of OOO during switching, not * worth to optimize. */ if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 && !tfile->detached) rxhash = __skb_get_hash_symmetric(skb); rcu_read_lock(); if (unlikely(!(tun->dev->flags & IFF_UP))) { err = -EIO; rcu_read_unlock(); drop_reason = SKB_DROP_REASON_DEV_READY; goto drop; } if (frags) { u32 headlen; /* Exercise flow dissector code path. 
*/ skb_push(skb, ETH_HLEN); headlen = eth_get_headlen(tun->dev, skb->data, skb_headlen(skb)); if (unlikely(headlen > skb_headlen(skb))) { WARN_ON_ONCE(1); err = -ENOMEM; dev_core_stats_rx_dropped_inc(tun->dev); napi_busy: napi_free_frags(&tfile->napi); rcu_read_unlock(); mutex_unlock(&tfile->napi_mutex); return err; } if (likely(napi_schedule_prep(&tfile->napi))) { local_bh_disable(); napi_gro_frags(&tfile->napi); napi_complete(&tfile->napi); local_bh_enable(); } else { err = -EBUSY; goto napi_busy; } mutex_unlock(&tfile->napi_mutex); } else if (tfile->napi_enabled) { struct sk_buff_head *queue = &tfile->sk.sk_write_queue; int queue_len; spin_lock_bh(&queue->lock); if (unlikely(tfile->detached)) { spin_unlock_bh(&queue->lock); rcu_read_unlock(); err = -EBUSY; goto free_skb; } __skb_queue_tail(queue, skb); queue_len = skb_queue_len(queue); spin_unlock(&queue->lock); if (!more || queue_len > NAPI_POLL_WEIGHT) napi_schedule(&tfile->napi); local_bh_enable(); } else if (!IS_ENABLED(CONFIG_4KSTACKS)) { tun_rx_batched(tun, tfile, skb, more); } else { netif_rx(skb); } rcu_read_unlock(); preempt_disable(); dev_sw_netstats_rx_add(tun->dev, len); preempt_enable(); if (rxhash) tun_flow_update(tun, rxhash, tfile); return total_len; drop: if (err != -EAGAIN) dev_core_stats_rx_dropped_inc(tun->dev); free_skb: if (!IS_ERR_OR_NULL(skb)) kfree_skb_reason(skb, drop_reason); unlock_frags: if (frags) { tfile->napi.skb = NULL; mutex_unlock(&tfile->napi_mutex); } return err ?: total_len; } static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct tun_file *tfile = file->private_data; struct tun_struct *tun = tun_get(tfile); ssize_t result; int noblock = 0; if (!tun) return -EBADFD; if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT)) noblock = 1; result = tun_get_user(tun, tfile, NULL, from, noblock, false); tun_put(tun); return result; } static ssize_t tun_put_user_xdp(struct tun_struct *tun, struct tun_file *tfile, struct xdp_frame *xdp_frame, struct iov_iter *iter) { int vnet_hdr_sz = 0; size_t size = xdp_frame->len; size_t ret; if (tun->flags & IFF_VNET_HDR) { struct virtio_net_hdr gso = { 0 }; vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); if (unlikely(iov_iter_count(iter) < vnet_hdr_sz)) return -EINVAL; if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))) return -EFAULT; iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); } ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz; preempt_disable(); dev_sw_netstats_tx_add(tun->dev, 1, ret); preempt_enable(); return ret; } /* Put packet to the user space buffer */ static ssize_t tun_put_user(struct tun_struct *tun, struct tun_file *tfile, struct sk_buff *skb, struct iov_iter *iter) { struct tun_pi pi = { 0, skb->protocol }; ssize_t total; int vlan_offset = 0; int vlan_hlen = 0; int vnet_hdr_sz = 0; if (skb_vlan_tag_present(skb)) vlan_hlen = VLAN_HLEN; if (tun->flags & IFF_VNET_HDR) vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); total = skb->len + vlan_hlen + vnet_hdr_sz; if (!(tun->flags & IFF_NO_PI)) { if (iov_iter_count(iter) < sizeof(pi)) return -EINVAL; total += sizeof(pi); if (iov_iter_count(iter) < total) { /* Packet will be striped */ pi.flags |= TUN_PKT_STRIP; } if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi)) return -EFAULT; } if (vnet_hdr_sz) { struct virtio_net_hdr gso; if (iov_iter_count(iter) < vnet_hdr_sz) return -EINVAL; if (virtio_net_hdr_from_skb(skb, &gso, tun_is_little_endian(tun), true, vlan_hlen)) { struct skb_shared_info *sinfo = 
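/*
 * Userspace sketch of the read layout produced by tun_put_user() above:
 * unless IFF_NO_PI was requested, every read() is prefixed with a
 * struct tun_pi, and TUN_PKT_STRIP in pi.flags means the supplied buffer
 * was too small for the packet.  recv_frame() is an invented name.
 *
 *	#include <stdio.h>
 *	#include <sys/uio.h>
 *	#include <linux/if_tun.h>
 *
 *	static ssize_t recv_frame(int tunfd, void *buf, size_t len)
 *	{
 *		struct tun_pi pi;
 *		struct iovec iov[2] = {
 *			{ .iov_base = &pi, .iov_len = sizeof(pi) },
 *			{ .iov_base = buf, .iov_len = len },
 *		};
 *		ssize_t n = readv(tunfd, iov, 2);
 *
 *		if (n > 0 && (pi.flags & TUN_PKT_STRIP))
 *			fprintf(stderr, "packet truncated\n");
 *		return n;
 *	}
 */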
skb_shinfo(skb); if (net_ratelimit()) { netdev_err(tun->dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n", sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size), tun16_to_cpu(tun, gso.hdr_len)); print_hex_dump(KERN_ERR, "tun: ", DUMP_PREFIX_NONE, 16, 1, skb->head, min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true); } WARN_ON_ONCE(1); return -EINVAL; } if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso)) return -EFAULT; iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); } if (vlan_hlen) { int ret; struct veth veth; veth.h_vlan_proto = skb->vlan_proto; veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb)); vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset); if (ret || !iov_iter_count(iter)) goto done; ret = copy_to_iter(&veth, sizeof(veth), iter); if (ret != sizeof(veth) || !iov_iter_count(iter)) goto done; } skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset); done: /* caller is in process context, */ preempt_disable(); dev_sw_netstats_tx_add(tun->dev, 1, skb->len + vlan_hlen); preempt_enable(); return total; } static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err) { DECLARE_WAITQUEUE(wait, current); void *ptr = NULL; int error = 0; ptr = ptr_ring_consume(&tfile->tx_ring); if (ptr) goto out; if (noblock) { error = -EAGAIN; goto out; } add_wait_queue(&tfile->socket.wq.wait, &wait); while (1) { set_current_state(TASK_INTERRUPTIBLE); ptr = ptr_ring_consume(&tfile->tx_ring); if (ptr) break; if (signal_pending(current)) { error = -ERESTARTSYS; break; } if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) { error = -EFAULT; break; } schedule(); } __set_current_state(TASK_RUNNING); remove_wait_queue(&tfile->socket.wq.wait, &wait); out: *err = error; return ptr; } static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, struct iov_iter *to, int noblock, void *ptr) { ssize_t ret; int err; if (!iov_iter_count(to)) { tun_ptr_free(ptr); return 0; } if (!ptr) { /* Read frames from ring */ ptr = tun_ring_recv(tfile, noblock, &err); if (!ptr) return err; } if (tun_is_xdp_frame(ptr)) { struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); ret = tun_put_user_xdp(tun, tfile, xdpf, to); xdp_return_frame(xdpf); } else { struct sk_buff *skb = ptr; ret = tun_put_user(tun, tfile, skb, to); if (unlikely(ret < 0)) kfree_skb(skb); else consume_skb(skb); } return ret; } static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct file *file = iocb->ki_filp; struct tun_file *tfile = file->private_data; struct tun_struct *tun = tun_get(tfile); ssize_t len = iov_iter_count(to), ret; int noblock = 0; if (!tun) return -EBADFD; if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT)) noblock = 1; ret = tun_do_read(tun, tfile, to, noblock, NULL); ret = min_t(ssize_t, ret, len); if (ret > 0) iocb->ki_pos = ret; tun_put(tun); return ret; } static void tun_prog_free(struct rcu_head *rcu) { struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu); bpf_prog_destroy(prog->prog); kfree(prog); } static int __tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p, struct bpf_prog *prog) { struct tun_prog *old, *new = NULL; if (prog) { new = kmalloc(sizeof(*new), GFP_KERNEL); if (!new) return -ENOMEM; new->prog = prog; } spin_lock_bh(&tun->lock); old = rcu_dereference_protected(*prog_p, lockdep_is_held(&tun->lock)); rcu_assign_pointer(*prog_p, new); spin_unlock_bh(&tun->lock); if (old) call_rcu(&old->rcu, tun_prog_free); return 0; } static void tun_free_netdev(struct 
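/*
 * Note on the blocking behaviour of tun_ring_recv() above: it only sleeps
 * when noblock is zero, so with O_NONBLOCK (or IOCB_NOWAIT) an empty ring
 * surfaces to userspace as -EAGAIN.  Small sketch, set_nonblock() being an
 * invented helper:
 *
 *	#include <fcntl.h>
 *
 *	static int set_nonblock(int tunfd)
 *	{
 *		int fl = fcntl(tunfd, F_GETFL, 0);
 *
 *		if (fl < 0)
 *			return -1;
 *		return fcntl(tunfd, F_SETFL, fl | O_NONBLOCK);
 *	}
 *
 *	// read() now fails with EAGAIN instead of sleeping when the
 *	// per-queue ring is empty
 */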
net_device *dev) { struct tun_struct *tun = netdev_priv(dev); BUG_ON(!(list_empty(&tun->disabled))); tun_flow_uninit(tun); security_tun_dev_free_security(tun->security); __tun_set_ebpf(tun, &tun->steering_prog, NULL); __tun_set_ebpf(tun, &tun->filter_prog, NULL); } static void tun_setup(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); tun->owner = INVALID_UID; tun->group = INVALID_GID; tun_default_link_ksettings(dev, &tun->link_ksettings); dev->ethtool_ops = &tun_ethtool_ops; dev->needs_free_netdev = true; dev->priv_destructor = tun_free_netdev; /* We prefer our own queue length */ dev->tx_queue_len = TUN_READQ_SIZE; } /* Trivial set of netlink ops to allow deleting tun or tap * device with netlink. */ static int tun_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { NL_SET_ERR_MSG(extack, "tun/tap creation via rtnetlink is not supported."); return -EOPNOTSUPP; } static size_t tun_get_size(const struct net_device *dev) { BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t)); BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t)); return nla_total_size(sizeof(uid_t)) + /* OWNER */ nla_total_size(sizeof(gid_t)) + /* GROUP */ nla_total_size(sizeof(u8)) + /* TYPE */ nla_total_size(sizeof(u8)) + /* PI */ nla_total_size(sizeof(u8)) + /* VNET_HDR */ nla_total_size(sizeof(u8)) + /* PERSIST */ nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */ nla_total_size(sizeof(u32)) + /* NUM_QUEUES */ nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */ 0; } static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK)) goto nla_put_failure; if (uid_valid(tun->owner) && nla_put_u32(skb, IFLA_TUN_OWNER, from_kuid_munged(current_user_ns(), tun->owner))) goto nla_put_failure; if (gid_valid(tun->group) && nla_put_u32(skb, IFLA_TUN_GROUP, from_kgid_munged(current_user_ns(), tun->group))) goto nla_put_failure; if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI))) goto nla_put_failure; if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR))) goto nla_put_failure; if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST))) goto nla_put_failure; if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE, !!(tun->flags & IFF_MULTI_QUEUE))) goto nla_put_failure; if (tun->flags & IFF_MULTI_QUEUE) { if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues)) goto nla_put_failure; if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES, tun->numdisabled)) goto nla_put_failure; } return 0; nla_put_failure: return -EMSGSIZE; } static struct rtnl_link_ops tun_link_ops __read_mostly = { .kind = DRV_NAME, .priv_size = sizeof(struct tun_struct), .setup = tun_setup, .validate = tun_validate, .get_size = tun_get_size, .fill_info = tun_fill_info, }; static void tun_sock_write_space(struct sock *sk) { struct tun_file *tfile; wait_queue_head_t *wqueue; if (!sock_writeable(sk)) return; if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags)) return; wqueue = sk_sleep(sk); if (wqueue && waitqueue_active(wqueue)) wake_up_interruptible_sync_poll(wqueue, EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND); tfile = container_of(sk, struct tun_file, sk); kill_fasync(&tfile->fasync, SIGIO, POLL_OUT); } static void tun_put_page(struct tun_page *tpage) { if (tpage->page) __page_frag_cache_drain(tpage->page, tpage->count); } static int tun_xdp_one(struct tun_struct *tun, struct tun_file *tfile, struct xdp_buff *xdp, int *flush, struct tun_page *tpage) { unsigned int datasize = xdp->data_end 
- xdp->data; struct tun_xdp_hdr *hdr = xdp->data_hard_start; struct virtio_net_hdr *gso = &hdr->gso; struct bpf_prog *xdp_prog; struct sk_buff *skb = NULL; struct sk_buff_head *queue; u32 rxhash = 0, act; int buflen = hdr->buflen; int ret = 0; bool skb_xdp = false; struct page *page; xdp_prog = rcu_dereference(tun->xdp_prog); if (xdp_prog) { if (gso->gso_type) { skb_xdp = true; goto build; } xdp_init_buff(xdp, buflen, &tfile->xdp_rxq); xdp_set_data_meta_invalid(xdp); act = bpf_prog_run_xdp(xdp_prog, xdp); ret = tun_xdp_act(tun, xdp_prog, xdp, act); if (ret < 0) { put_page(virt_to_head_page(xdp->data)); return ret; } switch (ret) { case XDP_REDIRECT: *flush = true; fallthrough; case XDP_TX: return 0; case XDP_PASS: break; default: page = virt_to_head_page(xdp->data); if (tpage->page == page) { ++tpage->count; } else { tun_put_page(tpage); tpage->page = page; tpage->count = 1; } return 0; } } build: skb = build_skb(xdp->data_hard_start, buflen); if (!skb) { ret = -ENOMEM; goto out; } skb_reserve(skb, xdp->data - xdp->data_hard_start); skb_put(skb, xdp->data_end - xdp->data); if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) { atomic_long_inc(&tun->rx_frame_errors); kfree_skb(skb); ret = -EINVAL; goto out; } skb->protocol = eth_type_trans(skb, tun->dev); skb_reset_network_header(skb); skb_probe_transport_header(skb); skb_record_rx_queue(skb, tfile->queue_index); if (skb_xdp) { ret = do_xdp_generic(xdp_prog, &skb); if (ret != XDP_PASS) { ret = 0; goto out; } } if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 && !tfile->detached) rxhash = __skb_get_hash_symmetric(skb); if (tfile->napi_enabled) { queue = &tfile->sk.sk_write_queue; spin_lock(&queue->lock); if (unlikely(tfile->detached)) { spin_unlock(&queue->lock); kfree_skb(skb); return -EBUSY; } __skb_queue_tail(queue, skb); spin_unlock(&queue->lock); ret = 1; } else { netif_receive_skb(skb); ret = 0; } /* No need to disable preemption here since this function is * always called with bh disabled */ dev_sw_netstats_rx_add(tun->dev, datasize); if (rxhash) tun_flow_update(tun, rxhash, tfile); out: return ret; } static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) { int ret, i; struct tun_file *tfile = container_of(sock, struct tun_file, socket); struct tun_struct *tun = tun_get(tfile); struct tun_msg_ctl *ctl = m->msg_control; struct xdp_buff *xdp; if (!tun) return -EBADFD; if (m->msg_controllen == sizeof(struct tun_msg_ctl) && ctl && ctl->type == TUN_MSG_PTR) { struct tun_page tpage; int n = ctl->num; int flush = 0, queued = 0; memset(&tpage, 0, sizeof(tpage)); local_bh_disable(); rcu_read_lock(); for (i = 0; i < n; i++) { xdp = &((struct xdp_buff *)ctl->ptr)[i]; ret = tun_xdp_one(tun, tfile, xdp, &flush, &tpage); if (ret > 0) queued += ret; } if (flush) xdp_do_flush(); if (tfile->napi_enabled && queued > 0) napi_schedule(&tfile->napi); rcu_read_unlock(); local_bh_enable(); tun_put_page(&tpage); ret = total_len; goto out; } ret = tun_get_user(tun, tfile, ctl ? 
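/*
 * Rough sketch of how an in-kernel caller (vhost-net is the user of this
 * path) hands tun_sendmsg() above a whole batch of XDP buffers at once via
 * TUN_MSG_PTR.  Array construction is elided and the exact control-block
 * layout should be checked against include/linux/if_tun.h; the identifiers
 * batch_size, xdp_array, sock, ret and total_len belong to the assumed
 * caller.
 *
 *	struct tun_msg_ctl ctl = {
 *		.type = TUN_MSG_PTR,
 *		.num  = batch_size,	// entries in xdp_array
 *		.ptr  = xdp_array,	// struct xdp_buff array
 *	};
 *	struct msghdr msg = {
 *		.msg_control    = &ctl,
 *		.msg_controllen = sizeof(ctl),
 *	};
 *
 *	// each buffer is fed through tun_xdp_one() in one sendmsg() call
 *	ret = sock->ops->sendmsg(sock, &msg, total_len);
 */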
ctl->ptr : NULL, &m->msg_iter, m->msg_flags & MSG_DONTWAIT, m->msg_flags & MSG_MORE); out: tun_put(tun); return ret; } static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, int flags) { struct tun_file *tfile = container_of(sock, struct tun_file, socket); struct tun_struct *tun = tun_get(tfile); void *ptr = m->msg_control; int ret; if (!tun) { ret = -EBADFD; goto out_free; } if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) { ret = -EINVAL; goto out_put_tun; } if (flags & MSG_ERRQUEUE) { ret = sock_recv_errqueue(sock->sk, m, total_len, SOL_PACKET, TUN_TX_TIMESTAMP); goto out; } ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr); if (ret > (ssize_t)total_len) { m->msg_flags |= MSG_TRUNC; ret = flags & MSG_TRUNC ? ret : total_len; } out: tun_put(tun); return ret; out_put_tun: tun_put(tun); out_free: tun_ptr_free(ptr); return ret; } static int tun_ptr_peek_len(void *ptr) { if (likely(ptr)) { if (tun_is_xdp_frame(ptr)) { struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); return xdpf->len; } return __skb_array_len_with_tag(ptr); } else { return 0; } } static int tun_peek_len(struct socket *sock) { struct tun_file *tfile = container_of(sock, struct tun_file, socket); struct tun_struct *tun; int ret = 0; tun = tun_get(tfile); if (!tun) return 0; ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len); tun_put(tun); return ret; } /* Ops structure to mimic raw sockets with tun */ static const struct proto_ops tun_socket_ops = { .peek_len = tun_peek_len, .sendmsg = tun_sendmsg, .recvmsg = tun_recvmsg, }; static struct proto tun_proto = { .name = "tun", .owner = THIS_MODULE, .obj_size = sizeof(struct tun_file), }; static int tun_flags(struct tun_struct *tun) { return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP); } static ssize_t tun_flags_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tun_struct *tun = netdev_priv(to_net_dev(dev)); return sysfs_emit(buf, "0x%x\n", tun_flags(tun)); } static ssize_t owner_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tun_struct *tun = netdev_priv(to_net_dev(dev)); return uid_valid(tun->owner)? sysfs_emit(buf, "%u\n", from_kuid_munged(current_user_ns(), tun->owner)) : sysfs_emit(buf, "-1\n"); } static ssize_t group_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tun_struct *tun = netdev_priv(to_net_dev(dev)); return gid_valid(tun->group) ? 
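/*
 * Userspace sketch for the tun_flags/owner/group sysfs attributes
 * implemented by the show callbacks above and registered just below: they
 * appear under the net device's sysfs directory, so the settings can be
 * inspected without the ioctl interface.  dump_tun_flags() is an invented
 * name.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	static void dump_tun_flags(const char *ifname)
 *	{
 *		char path[128], buf[32];
 *		ssize_t n;
 *		int fd;
 *
 *		snprintf(path, sizeof(path),
 *			 "/sys/class/net/%s/tun_flags", ifname);
 *		fd = open(path, O_RDONLY);
 *		if (fd < 0)
 *			return;
 *		n = read(fd, buf, sizeof(buf) - 1);
 *		if (n > 0) {
 *			buf[n] = '\0';
 *			printf("%s tun_flags: %s", ifname, buf);
 *		}
 *		close(fd);
 *	}
 */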
sysfs_emit(buf, "%u\n", from_kgid_munged(current_user_ns(), tun->group)) : sysfs_emit(buf, "-1\n"); } static DEVICE_ATTR_RO(tun_flags); static DEVICE_ATTR_RO(owner); static DEVICE_ATTR_RO(group); static struct attribute *tun_dev_attrs[] = { &dev_attr_tun_flags.attr, &dev_attr_owner.attr, &dev_attr_group.attr, NULL }; static const struct attribute_group tun_attr_group = { .attrs = tun_dev_attrs }; static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) { struct tun_struct *tun; struct tun_file *tfile = file->private_data; struct net_device *dev; int err; if (tfile->detached) return -EINVAL; if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) { if (!capable(CAP_NET_ADMIN)) return -EPERM; if (!(ifr->ifr_flags & IFF_NAPI) || (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP) return -EINVAL; } dev = __dev_get_by_name(net, ifr->ifr_name); if (dev) { if (ifr->ifr_flags & IFF_TUN_EXCL) return -EBUSY; if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops) tun = netdev_priv(dev); else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops) tun = netdev_priv(dev); else return -EINVAL; if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) != !!(tun->flags & IFF_MULTI_QUEUE)) return -EINVAL; if (tun_not_capable(tun)) return -EPERM; err = security_tun_dev_open(tun->security); if (err < 0) return err; err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, ifr->ifr_flags & IFF_NAPI, ifr->ifr_flags & IFF_NAPI_FRAGS, true); if (err < 0) return err; if (tun->flags & IFF_MULTI_QUEUE && (tun->numqueues + tun->numdisabled > 1)) { /* One or more queue has already been attached, no need * to initialize the device again. */ netdev_state_change(dev); return 0; } tun->flags = (tun->flags & ~TUN_FEATURES) | (ifr->ifr_flags & TUN_FEATURES); netdev_state_change(dev); } else { char *name; unsigned long flags = 0; int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ? MAX_TAP_QUEUES : 1; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; err = security_tun_dev_create(); if (err < 0) return err; /* Set dev type */ if (ifr->ifr_flags & IFF_TUN) { /* TUN device */ flags |= IFF_TUN; name = "tun%d"; } else if (ifr->ifr_flags & IFF_TAP) { /* TAP device */ flags |= IFF_TAP; name = "tap%d"; } else return -EINVAL; if (*ifr->ifr_name) name = ifr->ifr_name; dev = alloc_netdev_mqs(sizeof(struct tun_struct), name, NET_NAME_UNKNOWN, tun_setup, queues, queues); if (!dev) return -ENOMEM; dev_net_set(dev, net); dev->rtnl_link_ops = &tun_link_ops; dev->ifindex = tfile->ifindex; dev->sysfs_groups[0] = &tun_attr_group; tun = netdev_priv(dev); tun->dev = dev; tun->flags = flags; tun->txflt.count = 0; tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr); tun->align = NET_SKB_PAD; tun->filter_attached = false; tun->sndbuf = tfile->socket.sk->sk_sndbuf; tun->rx_batched = 0; RCU_INIT_POINTER(tun->steering_prog, NULL); tun->ifr = ifr; tun->file = file; tun_net_initialize(dev); err = register_netdevice(tun->dev); if (err < 0) { free_netdev(dev); return err; } /* free_netdev() won't check refcnt, to avoid race * with dev_put() we need publish tun after registration. */ rcu_assign_pointer(tfile->tun, tun); } if (ifr->ifr_flags & IFF_NO_CARRIER) netif_carrier_off(tun->dev); else netif_carrier_on(tun->dev); /* Make sure persistent devices do not get stuck in * xoff state. 
*/ if (netif_running(tun->dev)) netif_tx_wake_all_queues(tun->dev); strcpy(ifr->ifr_name, tun->dev->name); return 0; } static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr) { strcpy(ifr->ifr_name, tun->dev->name); ifr->ifr_flags = tun_flags(tun); } /* This is like a cut-down ethtool ops, except done via tun fd so no * privs required. */ static int set_offload(struct tun_struct *tun, unsigned long arg) { netdev_features_t features = 0; if (arg & TUN_F_CSUM) { features |= NETIF_F_HW_CSUM; arg &= ~TUN_F_CSUM; if (arg & (TUN_F_TSO4|TUN_F_TSO6)) { if (arg & TUN_F_TSO_ECN) { features |= NETIF_F_TSO_ECN; arg &= ~TUN_F_TSO_ECN; } if (arg & TUN_F_TSO4) features |= NETIF_F_TSO; if (arg & TUN_F_TSO6) features |= NETIF_F_TSO6; arg &= ~(TUN_F_TSO4|TUN_F_TSO6); } arg &= ~TUN_F_UFO; /* TODO: for now USO4 and USO6 should work simultaneously */ if (arg & TUN_F_USO4 && arg & TUN_F_USO6) { features |= NETIF_F_GSO_UDP_L4; arg &= ~(TUN_F_USO4 | TUN_F_USO6); } } /* This gives the user a way to test for new features in future by * trying to set them. */ if (arg) return -EINVAL; tun->set_features = features; tun->dev->wanted_features &= ~TUN_USER_FEATURES; tun->dev->wanted_features |= features; netdev_update_features(tun->dev); return 0; } static void tun_detach_filter(struct tun_struct *tun, int n) { int i; struct tun_file *tfile; for (i = 0; i < n; i++) { tfile = rtnl_dereference(tun->tfiles[i]); lock_sock(tfile->socket.sk); sk_detach_filter(tfile->socket.sk); release_sock(tfile->socket.sk); } tun->filter_attached = false; } static int tun_attach_filter(struct tun_struct *tun) { int i, ret = 0; struct tun_file *tfile; for (i = 0; i < tun->numqueues; i++) { tfile = rtnl_dereference(tun->tfiles[i]); lock_sock(tfile->socket.sk); ret = sk_attach_filter(&tun->fprog, tfile->socket.sk); release_sock(tfile->socket.sk); if (ret) { tun_detach_filter(tun, i); return ret; } } tun->filter_attached = true; return ret; } static void tun_set_sndbuf(struct tun_struct *tun) { struct tun_file *tfile; int i; for (i = 0; i < tun->numqueues; i++) { tfile = rtnl_dereference(tun->tfiles[i]); tfile->socket.sk->sk_sndbuf = tun->sndbuf; } } static int tun_set_queue(struct file *file, struct ifreq *ifr) { struct tun_file *tfile = file->private_data; struct tun_struct *tun; int ret = 0; rtnl_lock(); if (ifr->ifr_flags & IFF_ATTACH_QUEUE) { tun = tfile->detached; if (!tun) { ret = -EINVAL; goto unlock; } ret = security_tun_dev_attach_queue(tun->security); if (ret < 0) goto unlock; ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI, tun->flags & IFF_NAPI_FRAGS, true); } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { tun = rtnl_dereference(tfile->tun); if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached) ret = -EINVAL; else __tun_detach(tfile, false); } else ret = -EINVAL; if (ret >= 0) netdev_state_change(tun->dev); unlock: rtnl_unlock(); return ret; } static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p, void __user *data) { struct bpf_prog *prog; int fd; if (copy_from_user(&fd, data, sizeof(fd))) return -EFAULT; if (fd == -1) { prog = NULL; } else { prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); if (IS_ERR(prog)) return PTR_ERR(prog); } return __tun_set_ebpf(tun, prog_p, prog); } /* Return correct value for tun->dev->addr_len based on tun->dev->type. 
*/ static unsigned char tun_get_addr_len(unsigned short type) { switch (type) { case ARPHRD_IP6GRE: case ARPHRD_TUNNEL6: return sizeof(struct in6_addr); case ARPHRD_IPGRE: case ARPHRD_TUNNEL: case ARPHRD_SIT: return 4; case ARPHRD_ETHER: return ETH_ALEN; case ARPHRD_IEEE802154: case ARPHRD_IEEE802154_MONITOR: return IEEE802154_EXTENDED_ADDR_LEN; case ARPHRD_PHONET_PIPE: case ARPHRD_PPP: case ARPHRD_NONE: return 0; case ARPHRD_6LOWPAN: return EUI64_ADDR_LEN; case ARPHRD_FDDI: return FDDI_K_ALEN; case ARPHRD_HIPPI: return HIPPI_ALEN; case ARPHRD_IEEE802: return FC_ALEN; case ARPHRD_ROSE: return ROSE_ADDR_LEN; case ARPHRD_NETROM: return AX25_ADDR_LEN; case ARPHRD_LOCALTLK: return LTALK_ALEN; default: return 0; } } static long __tun_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg, int ifreq_len) { struct tun_file *tfile = file->private_data; struct net *net = sock_net(&tfile->sk); struct tun_struct *tun; void __user* argp = (void __user*)arg; unsigned int carrier; struct ifreq ifr; kuid_t owner; kgid_t group; int ifindex; int sndbuf; int vnet_hdr_sz; int le; int ret; bool do_notify = false; if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) { if (copy_from_user(&ifr, argp, ifreq_len)) return -EFAULT; } else { memset(&ifr, 0, sizeof(ifr)); } if (cmd == TUNGETFEATURES) { /* Currently this just means: "what IFF flags are valid?". * This is needed because we never checked for invalid flags on * TUNSETIFF. */ return put_user(IFF_TUN | IFF_TAP | IFF_NO_CARRIER | TUN_FEATURES, (unsigned int __user*)argp); } else if (cmd == TUNSETQUEUE) { return tun_set_queue(file, &ifr); } else if (cmd == SIOCGSKNS) { if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; return open_related_ns(&net->ns, get_net_ns); } rtnl_lock(); tun = tun_get(tfile); if (cmd == TUNSETIFF) { ret = -EEXIST; if (tun) goto unlock; ifr.ifr_name[IFNAMSIZ-1] = '\0'; ret = tun_set_iff(net, file, &ifr); if (ret) goto unlock; if (copy_to_user(argp, &ifr, ifreq_len)) ret = -EFAULT; goto unlock; } if (cmd == TUNSETIFINDEX) { ret = -EPERM; if (tun) goto unlock; ret = -EFAULT; if (copy_from_user(&ifindex, argp, sizeof(ifindex))) goto unlock; ret = -EINVAL; if (ifindex < 0) goto unlock; ret = 0; tfile->ifindex = ifindex; goto unlock; } ret = -EBADFD; if (!tun) goto unlock; netif_info(tun, drv, tun->dev, "tun_chr_ioctl cmd %u\n", cmd); net = dev_net(tun->dev); ret = 0; switch (cmd) { case TUNGETIFF: tun_get_iff(tun, &ifr); if (tfile->detached) ifr.ifr_flags |= IFF_DETACH_QUEUE; if (!tfile->socket.sk->sk_filter) ifr.ifr_flags |= IFF_NOFILTER; if (copy_to_user(argp, &ifr, ifreq_len)) ret = -EFAULT; break; case TUNSETNOCSUM: /* Disable/Enable checksum */ /* [unimplemented] */ netif_info(tun, drv, tun->dev, "ignored: set checksum %s\n", arg ? "disabled" : "enabled"); break; case TUNSETPERSIST: /* Disable/Enable persist mode. Keep an extra reference to the * module to prevent the module being unprobed. */ if (arg && !(tun->flags & IFF_PERSIST)) { tun->flags |= IFF_PERSIST; __module_get(THIS_MODULE); do_notify = true; } if (!arg && (tun->flags & IFF_PERSIST)) { tun->flags &= ~IFF_PERSIST; module_put(THIS_MODULE); do_notify = true; } netif_info(tun, drv, tun->dev, "persist %s\n", arg ? 
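/*
 * Minimal userspace sketch for the ioctl path above: open /dev/net/tun,
 * pick the mode and name with TUNSETIFF, and optionally TUNSETPERSIST so
 * the interface survives the fd.  open_tun() is an invented name.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/if.h>
 *	#include <linux/if_tun.h>
 *
 *	static int open_tun(const char *name, int persist)
 *	{
 *		struct ifreq ifr;
 *		int fd = open("/dev/net/tun", O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&ifr, 0, sizeof(ifr));
 *		ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
 *		strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
 *		if (ioctl(fd, TUNSETIFF, &ifr) < 0 ||
 *		    (persist && ioctl(fd, TUNSETPERSIST, 1) < 0)) {
 *			close(fd);
 *			return -1;
 *		}
 *		return fd;	// ifr.ifr_name now holds the final name
 *	}
 */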
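/*
 * Sketch of the offload negotiation handled by the TUNSETOFFLOAD and
 * TUNSETVNETHDRSZ cases a bit further down: a virtio-style user enables
 * checksum plus TSO and grows the vnet header if it wants the extended
 * layout.  The flag set is only an example; enable_offloads() is an
 * invented name.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/if_tun.h>
 *	#include <linux/virtio_net.h>
 *
 *	static int enable_offloads(int tapfd)
 *	{
 *		unsigned int offload = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;
 *		int hdrsz = sizeof(struct virtio_net_hdr_v1);
 *
 *		if (ioctl(tapfd, TUNSETVNETHDRSZ, &hdrsz) < 0)
 *			return -1;
 *		// TUNSETOFFLOAD takes the flag word as the argument itself
 *		return ioctl(tapfd, TUNSETOFFLOAD, offload);
 *	}
 */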
"enabled" : "disabled"); break; case TUNSETOWNER: /* Set owner of the device */ owner = make_kuid(current_user_ns(), arg); if (!uid_valid(owner)) { ret = -EINVAL; break; } tun->owner = owner; do_notify = true; netif_info(tun, drv, tun->dev, "owner set to %u\n", from_kuid(&init_user_ns, tun->owner)); break; case TUNSETGROUP: /* Set group of the device */ group = make_kgid(current_user_ns(), arg); if (!gid_valid(group)) { ret = -EINVAL; break; } tun->group = group; do_notify = true; netif_info(tun, drv, tun->dev, "group set to %u\n", from_kgid(&init_user_ns, tun->group)); break; case TUNSETLINK: /* Only allow setting the type when the interface is down */ if (tun->dev->flags & IFF_UP) { netif_info(tun, drv, tun->dev, "Linktype set failed because interface is up\n"); ret = -EBUSY; } else { ret = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, tun->dev); ret = notifier_to_errno(ret); if (ret) { netif_info(tun, drv, tun->dev, "Refused to change device type\n"); break; } tun->dev->type = (int) arg; tun->dev->addr_len = tun_get_addr_len(tun->dev->type); netif_info(tun, drv, tun->dev, "linktype set to %d\n", tun->dev->type); call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, tun->dev); } break; case TUNSETDEBUG: tun->msg_enable = (u32)arg; break; case TUNSETOFFLOAD: ret = set_offload(tun, arg); break; case TUNSETTXFILTER: /* Can be set only for TAPs */ ret = -EINVAL; if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) break; ret = update_filter(&tun->txflt, (void __user *)arg); break; case SIOCGIFHWADDR: /* Get hw address */ dev_get_mac_address(&ifr.ifr_hwaddr, net, tun->dev->name); if (copy_to_user(argp, &ifr, ifreq_len)) ret = -EFAULT; break; case SIOCSIFHWADDR: /* Set hw address */ ret = dev_set_mac_address_user(tun->dev, &ifr.ifr_hwaddr, NULL); break; case TUNGETSNDBUF: sndbuf = tfile->socket.sk->sk_sndbuf; if (copy_to_user(argp, &sndbuf, sizeof(sndbuf))) ret = -EFAULT; break; case TUNSETSNDBUF: if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) { ret = -EFAULT; break; } if (sndbuf <= 0) { ret = -EINVAL; break; } tun->sndbuf = sndbuf; tun_set_sndbuf(tun); break; case TUNGETVNETHDRSZ: vnet_hdr_sz = tun->vnet_hdr_sz; if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz))) ret = -EFAULT; break; case TUNSETVNETHDRSZ: if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) { ret = -EFAULT; break; } if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) { ret = -EINVAL; break; } tun->vnet_hdr_sz = vnet_hdr_sz; break; case TUNGETVNETLE: le = !!(tun->flags & TUN_VNET_LE); if (put_user(le, (int __user *)argp)) ret = -EFAULT; break; case TUNSETVNETLE: if (get_user(le, (int __user *)argp)) { ret = -EFAULT; break; } if (le) tun->flags |= TUN_VNET_LE; else tun->flags &= ~TUN_VNET_LE; break; case TUNGETVNETBE: ret = tun_get_vnet_be(tun, argp); break; case TUNSETVNETBE: ret = tun_set_vnet_be(tun, argp); break; case TUNATTACHFILTER: /* Can be set only for TAPs */ ret = -EINVAL; if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) break; ret = -EFAULT; if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog))) break; ret = tun_attach_filter(tun); break; case TUNDETACHFILTER: /* Can be set only for TAPs */ ret = -EINVAL; if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) break; ret = 0; tun_detach_filter(tun, tun->numqueues); break; case TUNGETFILTER: ret = -EINVAL; if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) break; ret = -EFAULT; if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog))) break; ret = 0; break; case TUNSETSTEERINGEBPF: ret = tun_set_ebpf(tun, &tun->steering_prog, argp); break; case TUNSETFILTEREBPF: ret = 
tun_set_ebpf(tun, &tun->filter_prog, argp); break; case TUNSETCARRIER: ret = -EFAULT; if (copy_from_user(&carrier, argp, sizeof(carrier))) goto unlock; ret = tun_net_change_carrier(tun->dev, (bool)carrier); break; case TUNGETDEVNETNS: ret = -EPERM; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) goto unlock; ret = open_related_ns(&net->ns, get_net_ns); break; default: ret = -EINVAL; break; } if (do_notify) netdev_state_change(tun->dev); unlock: rtnl_unlock(); if (tun) tun_put(tun); return ret; } static long tun_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq)); } #ifdef CONFIG_COMPAT static long tun_chr_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { switch (cmd) { case TUNSETIFF: case TUNGETIFF: case TUNSETTXFILTER: case TUNGETSNDBUF: case TUNSETSNDBUF: case SIOCGIFHWADDR: case SIOCSIFHWADDR: arg = (unsigned long)compat_ptr(arg); break; default: arg = (compat_ulong_t)arg; break; } /* * compat_ifreq is shorter than ifreq, so we must not access beyond * the end of that structure. All fields that are used in this * driver are compatible though, we don't need to convert the * contents. */ return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq)); } #endif /* CONFIG_COMPAT */ static int tun_chr_fasync(int fd, struct file *file, int on) { struct tun_file *tfile = file->private_data; int ret; if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0) goto out; if (on) { __f_setown(file, task_pid(current), PIDTYPE_TGID, 0); tfile->flags |= TUN_FASYNC; } else tfile->flags &= ~TUN_FASYNC; ret = 0; out: return ret; } static int tun_chr_open(struct inode *inode, struct file * file) { struct net *net = current->nsproxy->net_ns; struct tun_file *tfile; tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &tun_proto, 0); if (!tfile) return -ENOMEM; if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) { sk_free(&tfile->sk); return -ENOMEM; } mutex_init(&tfile->napi_mutex); RCU_INIT_POINTER(tfile->tun, NULL); tfile->flags = 0; tfile->ifindex = 0; init_waitqueue_head(&tfile->socket.wq.wait); tfile->socket.file = file; tfile->socket.ops = &tun_socket_ops; sock_init_data_uid(&tfile->socket, &tfile->sk, current_fsuid()); tfile->sk.sk_write_space = tun_sock_write_space; tfile->sk.sk_sndbuf = INT_MAX; file->private_data = tfile; INIT_LIST_HEAD(&tfile->next); sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); /* tun groks IOCB_NOWAIT just fine, mark it as such */ file->f_mode |= FMODE_NOWAIT; return 0; } static int tun_chr_close(struct inode *inode, struct file *file) { struct tun_file *tfile = file->private_data; tun_detach(tfile, true); return 0; } #ifdef CONFIG_PROC_FS static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file) { struct tun_file *tfile = file->private_data; struct tun_struct *tun; struct ifreq ifr; memset(&ifr, 0, sizeof(ifr)); rtnl_lock(); tun = tun_get(tfile); if (tun) tun_get_iff(tun, &ifr); rtnl_unlock(); if (tun) tun_put(tun); seq_printf(m, "iff:\t%s\n", ifr.ifr_name); } #endif static const struct file_operations tun_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read_iter = tun_chr_read_iter, .write_iter = tun_chr_write_iter, .poll = tun_chr_poll, .unlocked_ioctl = tun_chr_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = tun_chr_compat_ioctl, #endif .open = tun_chr_open, .release = tun_chr_close, .fasync = tun_chr_fasync, #ifdef CONFIG_PROC_FS .show_fdinfo = tun_chr_show_fdinfo, #endif }; static struct miscdevice tun_miscdev = { .minor = TUN_MINOR, .name = "tun", 
.nodename = "net/tun", .fops = &tun_fops, }; /* ethtool interface */ static void tun_default_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { ethtool_link_ksettings_zero_link_mode(cmd, supported); ethtool_link_ksettings_zero_link_mode(cmd, advertising); cmd->base.speed = SPEED_10000; cmd->base.duplex = DUPLEX_FULL; cmd->base.port = PORT_TP; cmd->base.phy_address = 0; cmd->base.autoneg = AUTONEG_DISABLE; } static int tun_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct tun_struct *tun = netdev_priv(dev); memcpy(cmd, &tun->link_ksettings, sizeof(*cmd)); return 0; } static int tun_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { struct tun_struct *tun = netdev_priv(dev); memcpy(&tun->link_ksettings, cmd, sizeof(*cmd)); return 0; } static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct tun_struct *tun = netdev_priv(dev); strscpy(info->driver, DRV_NAME, sizeof(info->driver)); strscpy(info->version, DRV_VERSION, sizeof(info->version)); switch (tun->flags & TUN_TYPE_MASK) { case IFF_TUN: strscpy(info->bus_info, "tun", sizeof(info->bus_info)); break; case IFF_TAP: strscpy(info->bus_info, "tap", sizeof(info->bus_info)); break; } } static u32 tun_get_msglevel(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); return tun->msg_enable; } static void tun_set_msglevel(struct net_device *dev, u32 value) { struct tun_struct *tun = netdev_priv(dev); tun->msg_enable = value; } static int tun_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct tun_struct *tun = netdev_priv(dev); ec->rx_max_coalesced_frames = tun->rx_batched; return 0; } static int tun_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kernel_coal, struct netlink_ext_ack *extack) { struct tun_struct *tun = netdev_priv(dev); if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT) tun->rx_batched = NAPI_POLL_WEIGHT; else tun->rx_batched = ec->rx_max_coalesced_frames; return 0; } static void tun_get_channels(struct net_device *dev, struct ethtool_channels *channels) { struct tun_struct *tun = netdev_priv(dev); channels->combined_count = tun->numqueues; channels->max_combined = tun->flags & IFF_MULTI_QUEUE ? 
MAX_TAP_QUEUES : 1; } static const struct ethtool_ops tun_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES, .get_drvinfo = tun_get_drvinfo, .get_msglevel = tun_get_msglevel, .set_msglevel = tun_set_msglevel, .get_link = ethtool_op_get_link, .get_channels = tun_get_channels, .get_ts_info = ethtool_op_get_ts_info, .get_coalesce = tun_get_coalesce, .set_coalesce = tun_set_coalesce, .get_link_ksettings = tun_get_link_ksettings, .set_link_ksettings = tun_set_link_ksettings, }; static int tun_queue_resize(struct tun_struct *tun) { struct net_device *dev = tun->dev; struct tun_file *tfile; struct ptr_ring **rings; int n = tun->numqueues + tun->numdisabled; int ret, i; rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL); if (!rings) return -ENOMEM; for (i = 0; i < tun->numqueues; i++) { tfile = rtnl_dereference(tun->tfiles[i]); rings[i] = &tfile->tx_ring; } list_for_each_entry(tfile, &tun->disabled, next) rings[i++] = &tfile->tx_ring; ret = ptr_ring_resize_multiple(rings, n, dev->tx_queue_len, GFP_KERNEL, tun_ptr_free); kfree(rings); return ret; } static int tun_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct tun_struct *tun = netdev_priv(dev); int i; if (dev->rtnl_link_ops != &tun_link_ops) return NOTIFY_DONE; switch (event) { case NETDEV_CHANGE_TX_QUEUE_LEN: if (tun_queue_resize(tun)) return NOTIFY_BAD; break; case NETDEV_UP: for (i = 0; i < tun->numqueues; i++) { struct tun_file *tfile; tfile = rtnl_dereference(tun->tfiles[i]); tfile->socket.sk->sk_write_space(tfile->socket.sk); } break; default: break; } return NOTIFY_DONE; } static struct notifier_block tun_notifier_block __read_mostly = { .notifier_call = tun_device_event, }; static int __init tun_init(void) { int ret = 0; pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); ret = rtnl_link_register(&tun_link_ops); if (ret) { pr_err("Can't register link_ops\n"); goto err_linkops; } ret = misc_register(&tun_miscdev); if (ret) { pr_err("Can't register misc device %d\n", TUN_MINOR); goto err_misc; } ret = register_netdevice_notifier(&tun_notifier_block); if (ret) { pr_err("Can't register netdevice notifier\n"); goto err_notifier; } return 0; err_notifier: misc_deregister(&tun_miscdev); err_misc: rtnl_link_unregister(&tun_link_ops); err_linkops: return ret; } static void __exit tun_cleanup(void) { misc_deregister(&tun_miscdev); rtnl_link_unregister(&tun_link_ops); unregister_netdevice_notifier(&tun_notifier_block); } /* Get an underlying socket object from tun file. Returns error unless file is * attached to a device. The returned object works like a packet socket, it * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for * holding a reference to the file for as long as the socket is in use. */ struct socket *tun_get_socket(struct file *file) { struct tun_file *tfile; if (file->f_op != &tun_fops) return ERR_PTR(-EINVAL); tfile = file->private_data; if (!tfile) return ERR_PTR(-EBADFD); return &tfile->socket; } EXPORT_SYMBOL_GPL(tun_get_socket); struct ptr_ring *tun_get_tx_ring(struct file *file) { struct tun_file *tfile; if (file->f_op != &tun_fops) return ERR_PTR(-EINVAL); tfile = file->private_data; if (!tfile) return ERR_PTR(-EBADFD); return &tfile->tx_ring; } EXPORT_SYMBOL_GPL(tun_get_tx_ring); module_init(tun_init); module_exit(tun_cleanup); MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_AUTHOR(DRV_COPYRIGHT); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(TUN_MINOR); MODULE_ALIAS("devname:net/tun");
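/*
 * Illustrative userspace sketch (not part of the driver): a minimal example of
 * how the TUNSETIFF, TUNSETPERSIST and TUNSETOWNER ioctls handled by
 * __tun_chr_ioctl() above are typically driven from userspace. The helper name
 * tun_alloc_example(), the choice of IFF_TAP | IFF_NO_PI and the owner uid
 * 1000 are assumptions made for the example, not requirements of the driver.
 */
#if 0 /* example only, never compiled into the module */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int tun_alloc_example(const char *name)
{
	struct ifreq ifr;
	int fd, err;

	/* Open the clone device registered above as misc device "net/tun". */
	fd = open("/dev/net/tun", O_RDWR);
	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	/* IFF_TAP: Ethernet frames; IFF_NO_PI: no packet information header. */
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
	if (name && *name)
		strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);

	/* Create or attach the interface: the TUNSETIFF branch above. */
	err = ioctl(fd, TUNSETIFF, &ifr);
	if (err < 0) {
		close(fd);
		return err;
	}

	/* Keep the interface after this fd is closed (TUNSETPERSIST). */
	ioctl(fd, TUNSETPERSIST, 1);
	/* Allow uid 1000 to reattach to the device later (TUNSETOWNER). */
	ioctl(fd, TUNSETOWNER, 1000);

	printf("created %s\n", ifr.ifr_name);
	return fd;
}
#endif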
// SPDX-License-Identifier: GPL-2.0-or-later /* * uvc_ctrl.c -- USB Video Class driver - Controls * * Copyright (C) 2005-2010 * Laurent Pinchart (laurent.pinchart@ideasonboard.com) */ #include <asm/barrier.h> #include <linux/bitops.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/usb/uvc.h> #include <linux/videodev2.h> #include <linux/vmalloc.h> #include <linux/wait.h> #include <linux/workqueue.h> #include <linux/atomic.h> #include <media/v4l2-ctrls.h> #include "uvcvideo.h" #define UVC_CTRL_DATA_CURRENT 0 #define UVC_CTRL_DATA_BACKUP 1 #define UVC_CTRL_DATA_MIN 2 #define UVC_CTRL_DATA_MAX 3 #define UVC_CTRL_DATA_RES 4 #define UVC_CTRL_DATA_DEF 5 #define UVC_CTRL_DATA_LAST 6 /* ------------------------------------------------------------------------ * Controls */ static const struct uvc_control_info uvc_ctrls[] = { { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_BRIGHTNESS_CONTROL, .index = 0, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_CONTRAST_CONTROL, .index = 1, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector =
UVC_PU_HUE_CONTROL, .index = 2, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE | UVC_CTRL_FLAG_AUTO_UPDATE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_SATURATION_CONTROL, .index = 3, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_SHARPNESS_CONTROL, .index = 4, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_GAMMA_CONTROL, .index = 5, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_WHITE_BALANCE_TEMPERATURE_CONTROL, .index = 6, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE | UVC_CTRL_FLAG_AUTO_UPDATE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_WHITE_BALANCE_COMPONENT_CONTROL, .index = 7, .size = 4, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE | UVC_CTRL_FLAG_AUTO_UPDATE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_BACKLIGHT_COMPENSATION_CONTROL, .index = 8, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_GAIN_CONTROL, .index = 9, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL, .index = 10, .size = 1, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_CUR | UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_HUE_AUTO_CONTROL, .index = 11, .size = 1, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_CUR | UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_WHITE_BALANCE_TEMPERATURE_AUTO_CONTROL, .index = 12, .size = 1, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_CUR | UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_WHITE_BALANCE_COMPONENT_AUTO_CONTROL, .index = 13, .size = 1, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_CUR | UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_DIGITAL_MULTIPLIER_CONTROL, .index = 14, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_DIGITAL_MULTIPLIER_LIMIT_CONTROL, .index = 15, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_ANALOG_VIDEO_STANDARD_CONTROL, .index = 16, .size = 1, .flags = UVC_CTRL_FLAG_GET_CUR, }, { .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_ANALOG_LOCK_STATUS_CONTROL, .index = 17, .size = 1, .flags = UVC_CTRL_FLAG_GET_CUR, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_SCANNING_MODE_CONTROL, .index = 0, .size = 1, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_CUR | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_AE_MODE_CONTROL, .index = 1, .size = 1, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_CUR | UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_GET_RES | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = 
UVC_CT_AE_PRIORITY_CONTROL, .index = 2, .size = 1, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_CUR | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_EXPOSURE_TIME_ABSOLUTE_CONTROL, .index = 3, .size = 4, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE | UVC_CTRL_FLAG_AUTO_UPDATE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_EXPOSURE_TIME_RELATIVE_CONTROL, .index = 4, .size = 1, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_FOCUS_ABSOLUTE_CONTROL, .index = 5, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE | UVC_CTRL_FLAG_AUTO_UPDATE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_FOCUS_RELATIVE_CONTROL, .index = 6, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_MIN | UVC_CTRL_FLAG_GET_MAX | UVC_CTRL_FLAG_GET_RES | UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_AUTO_UPDATE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_IRIS_ABSOLUTE_CONTROL, .index = 7, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE | UVC_CTRL_FLAG_AUTO_UPDATE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_IRIS_RELATIVE_CONTROL, .index = 8, .size = 1, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_AUTO_UPDATE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_ZOOM_ABSOLUTE_CONTROL, .index = 9, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE | UVC_CTRL_FLAG_AUTO_UPDATE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_ZOOM_RELATIVE_CONTROL, .index = 10, .size = 3, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_MIN | UVC_CTRL_FLAG_GET_MAX | UVC_CTRL_FLAG_GET_RES | UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_AUTO_UPDATE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_PANTILT_ABSOLUTE_CONTROL, .index = 11, .size = 8, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE | UVC_CTRL_FLAG_AUTO_UPDATE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_PANTILT_RELATIVE_CONTROL, .index = 12, .size = 4, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_AUTO_UPDATE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_ROLL_ABSOLUTE_CONTROL, .index = 13, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_RANGE | UVC_CTRL_FLAG_RESTORE | UVC_CTRL_FLAG_AUTO_UPDATE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_ROLL_RELATIVE_CONTROL, .index = 14, .size = 2, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_MIN | UVC_CTRL_FLAG_GET_MAX | UVC_CTRL_FLAG_GET_RES | UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_AUTO_UPDATE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_FOCUS_AUTO_CONTROL, .index = 17, .size = 1, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_CUR | UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_RESTORE, }, { .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_PRIVACY_CONTROL, .index = 18, .size = 1, .flags = UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_GET_CUR | UVC_CTRL_FLAG_RESTORE | UVC_CTRL_FLAG_AUTO_UPDATE, }, { .entity = UVC_GUID_EXT_GPIO_CONTROLLER, .selector = UVC_CT_PRIVACY_CONTROL, .index = 0, .size = 1, .flags = UVC_CTRL_FLAG_GET_CUR | UVC_CTRL_FLAG_AUTO_UPDATE, }, }; static const u32 uvc_control_classes[] = { V4L2_CID_CAMERA_CLASS, V4L2_CID_USER_CLASS, }; static const int exposure_auto_mapping[] = { 2, 1, 4, 8 }; /* * This function translates the V4L2 menu index @idx, as exposed to userspace as * the V4L2 control value, to the 
corresponding UVC control value used by the * device. The custom menu_mapping in the control @mapping is used when * available, otherwise the function assumes that the V4L2 and UVC values are * identical. * * For controls of type UVC_CTRL_DATA_TYPE_BITMASK, the UVC control value is * expressed as a bitmask and is thus guaranteed to have a single bit set. * * The function returns -EINVAL if the V4L2 menu index @idx isn't valid for the * control, which includes all controls whose type isn't UVC_CTRL_DATA_TYPE_ENUM * or UVC_CTRL_DATA_TYPE_BITMASK. */ static int uvc_mapping_get_menu_value(const struct uvc_control_mapping *mapping, u32 idx) { if (!test_bit(idx, &mapping->menu_mask)) return -EINVAL; if (mapping->menu_mapping) return mapping->menu_mapping[idx]; return idx; } static const char * uvc_mapping_get_menu_name(const struct uvc_control_mapping *mapping, u32 idx) { if (!test_bit(idx, &mapping->menu_mask)) return NULL; if (mapping->menu_names) return mapping->menu_names[idx]; return v4l2_ctrl_get_menu(mapping->id)[idx]; } static s32 uvc_ctrl_get_zoom(struct uvc_control_mapping *mapping, u8 query, const u8 *data) { s8 zoom = (s8)data[0]; switch (query) { case UVC_GET_CUR: return (zoom == 0) ? 0 : (zoom > 0 ? data[2] : -data[2]); case UVC_GET_MIN: case UVC_GET_MAX: case UVC_GET_RES: case UVC_GET_DEF: default: return data[2]; } } static void uvc_ctrl_set_zoom(struct uvc_control_mapping *mapping, s32 value, u8 *data) { data[0] = value == 0 ? 0 : (value > 0) ? 1 : 0xff; data[2] = min((int)abs(value), 0xff); } static s32 uvc_ctrl_get_rel_speed(struct uvc_control_mapping *mapping, u8 query, const u8 *data) { unsigned int first = mapping->offset / 8; s8 rel = (s8)data[first]; switch (query) { case UVC_GET_CUR: return (rel == 0) ? 0 : (rel > 0 ? data[first+1] : -data[first+1]); case UVC_GET_MIN: return -data[first+1]; case UVC_GET_MAX: case UVC_GET_RES: case UVC_GET_DEF: default: return data[first+1]; } } static void uvc_ctrl_set_rel_speed(struct uvc_control_mapping *mapping, s32 value, u8 *data) { unsigned int first = mapping->offset / 8; data[first] = value == 0 ? 0 : (value > 0) ? 
1 : 0xff; data[first+1] = min_t(int, abs(value), 0xff); } static const struct uvc_control_mapping uvc_ctrl_mappings[] = { { .id = V4L2_CID_BRIGHTNESS, .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_BRIGHTNESS_CONTROL, .size = 16, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_SIGNED, }, { .id = V4L2_CID_CONTRAST, .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_CONTRAST_CONTROL, .size = 16, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, }, { .id = V4L2_CID_HUE, .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_HUE_CONTROL, .size = 16, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_SIGNED, .master_id = V4L2_CID_HUE_AUTO, .master_manual = 0, }, { .id = V4L2_CID_SATURATION, .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_SATURATION_CONTROL, .size = 16, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, }, { .id = V4L2_CID_SHARPNESS, .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_SHARPNESS_CONTROL, .size = 16, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, }, { .id = V4L2_CID_GAMMA, .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_GAMMA_CONTROL, .size = 16, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, }, { .id = V4L2_CID_BACKLIGHT_COMPENSATION, .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_BACKLIGHT_COMPENSATION_CONTROL, .size = 16, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, }, { .id = V4L2_CID_GAIN, .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_GAIN_CONTROL, .size = 16, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, }, { .id = V4L2_CID_HUE_AUTO, .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_HUE_AUTO_CONTROL, .size = 1, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_BOOLEAN, .data_type = UVC_CTRL_DATA_TYPE_BOOLEAN, .slave_ids = { V4L2_CID_HUE, }, }, { .id = V4L2_CID_EXPOSURE_AUTO, .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_AE_MODE_CONTROL, .size = 4, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_MENU, .data_type = UVC_CTRL_DATA_TYPE_BITMASK, .menu_mapping = exposure_auto_mapping, .menu_mask = GENMASK(V4L2_EXPOSURE_APERTURE_PRIORITY, V4L2_EXPOSURE_AUTO), .slave_ids = { V4L2_CID_EXPOSURE_ABSOLUTE, }, }, { .id = V4L2_CID_EXPOSURE_AUTO_PRIORITY, .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_AE_PRIORITY_CONTROL, .size = 1, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_BOOLEAN, .data_type = UVC_CTRL_DATA_TYPE_BOOLEAN, }, { .id = V4L2_CID_EXPOSURE_ABSOLUTE, .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_EXPOSURE_TIME_ABSOLUTE_CONTROL, .size = 32, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, .master_id = V4L2_CID_EXPOSURE_AUTO, .master_manual = V4L2_EXPOSURE_MANUAL, }, { .id = V4L2_CID_AUTO_WHITE_BALANCE, .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_WHITE_BALANCE_TEMPERATURE_AUTO_CONTROL, .size = 1, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_BOOLEAN, .data_type = UVC_CTRL_DATA_TYPE_BOOLEAN, .slave_ids = { V4L2_CID_WHITE_BALANCE_TEMPERATURE, }, }, { .id = V4L2_CID_WHITE_BALANCE_TEMPERATURE, .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_WHITE_BALANCE_TEMPERATURE_CONTROL, .size = 16, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, .master_id = V4L2_CID_AUTO_WHITE_BALANCE, .master_manual = 0, }, { .id = 
V4L2_CID_AUTO_WHITE_BALANCE, .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_WHITE_BALANCE_COMPONENT_AUTO_CONTROL, .size = 1, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_BOOLEAN, .data_type = UVC_CTRL_DATA_TYPE_BOOLEAN, .slave_ids = { V4L2_CID_BLUE_BALANCE, V4L2_CID_RED_BALANCE }, }, { .id = V4L2_CID_BLUE_BALANCE, .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_WHITE_BALANCE_COMPONENT_CONTROL, .size = 16, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_SIGNED, .master_id = V4L2_CID_AUTO_WHITE_BALANCE, .master_manual = 0, }, { .id = V4L2_CID_RED_BALANCE, .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_WHITE_BALANCE_COMPONENT_CONTROL, .size = 16, .offset = 16, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_SIGNED, .master_id = V4L2_CID_AUTO_WHITE_BALANCE, .master_manual = 0, }, { .id = V4L2_CID_FOCUS_ABSOLUTE, .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_FOCUS_ABSOLUTE_CONTROL, .size = 16, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, .master_id = V4L2_CID_FOCUS_AUTO, .master_manual = 0, }, { .id = V4L2_CID_FOCUS_AUTO, .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_FOCUS_AUTO_CONTROL, .size = 1, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_BOOLEAN, .data_type = UVC_CTRL_DATA_TYPE_BOOLEAN, .slave_ids = { V4L2_CID_FOCUS_ABSOLUTE, }, }, { .id = V4L2_CID_IRIS_ABSOLUTE, .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_IRIS_ABSOLUTE_CONTROL, .size = 16, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, }, { .id = V4L2_CID_IRIS_RELATIVE, .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_IRIS_RELATIVE_CONTROL, .size = 8, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_SIGNED, }, { .id = V4L2_CID_ZOOM_ABSOLUTE, .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_ZOOM_ABSOLUTE_CONTROL, .size = 16, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_UNSIGNED, }, { .id = V4L2_CID_ZOOM_CONTINUOUS, .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_ZOOM_RELATIVE_CONTROL, .size = 0, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_SIGNED, .get = uvc_ctrl_get_zoom, .set = uvc_ctrl_set_zoom, }, { .id = V4L2_CID_PAN_ABSOLUTE, .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_PANTILT_ABSOLUTE_CONTROL, .size = 32, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_SIGNED, }, { .id = V4L2_CID_TILT_ABSOLUTE, .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_PANTILT_ABSOLUTE_CONTROL, .size = 32, .offset = 32, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_SIGNED, }, { .id = V4L2_CID_PAN_SPEED, .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_PANTILT_RELATIVE_CONTROL, .size = 16, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_SIGNED, .get = uvc_ctrl_get_rel_speed, .set = uvc_ctrl_set_rel_speed, }, { .id = V4L2_CID_TILT_SPEED, .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_PANTILT_RELATIVE_CONTROL, .size = 16, .offset = 16, .v4l2_type = V4L2_CTRL_TYPE_INTEGER, .data_type = UVC_CTRL_DATA_TYPE_SIGNED, .get = uvc_ctrl_get_rel_speed, .set = uvc_ctrl_set_rel_speed, }, { .id = V4L2_CID_PRIVACY, .entity = UVC_GUID_UVC_CAMERA, .selector = UVC_CT_PRIVACY_CONTROL, .size = 1, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_BOOLEAN, .data_type = UVC_CTRL_DATA_TYPE_BOOLEAN, }, { .id = V4L2_CID_PRIVACY, .entity = UVC_GUID_EXT_GPIO_CONTROLLER, .selector = UVC_CT_PRIVACY_CONTROL, .size = 1, 
.offset = 0, .v4l2_type = V4L2_CTRL_TYPE_BOOLEAN, .data_type = UVC_CTRL_DATA_TYPE_BOOLEAN, }, }; const struct uvc_control_mapping uvc_ctrl_power_line_mapping_limited = { .id = V4L2_CID_POWER_LINE_FREQUENCY, .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL, .size = 2, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_MENU, .data_type = UVC_CTRL_DATA_TYPE_ENUM, .menu_mask = GENMASK(V4L2_CID_POWER_LINE_FREQUENCY_60HZ, V4L2_CID_POWER_LINE_FREQUENCY_50HZ), }; const struct uvc_control_mapping uvc_ctrl_power_line_mapping_uvc11 = { .id = V4L2_CID_POWER_LINE_FREQUENCY, .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL, .size = 2, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_MENU, .data_type = UVC_CTRL_DATA_TYPE_ENUM, .menu_mask = GENMASK(V4L2_CID_POWER_LINE_FREQUENCY_60HZ, V4L2_CID_POWER_LINE_FREQUENCY_DISABLED), }; static const struct uvc_control_mapping *uvc_ctrl_mappings_uvc11[] = { &uvc_ctrl_power_line_mapping_uvc11, NULL, /* Sentinel */ }; static const struct uvc_control_mapping uvc_ctrl_power_line_mapping_uvc15 = { .id = V4L2_CID_POWER_LINE_FREQUENCY, .entity = UVC_GUID_UVC_PROCESSING, .selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL, .size = 2, .offset = 0, .v4l2_type = V4L2_CTRL_TYPE_MENU, .data_type = UVC_CTRL_DATA_TYPE_ENUM, .menu_mask = GENMASK(V4L2_CID_POWER_LINE_FREQUENCY_AUTO, V4L2_CID_POWER_LINE_FREQUENCY_DISABLED), }; static const struct uvc_control_mapping *uvc_ctrl_mappings_uvc15[] = { &uvc_ctrl_power_line_mapping_uvc15, NULL, /* Sentinel */ }; /* ------------------------------------------------------------------------ * Utility functions */ static inline u8 *uvc_ctrl_data(struct uvc_control *ctrl, int id) { return ctrl->uvc_data + id * ctrl->info.size; } static inline int uvc_test_bit(const u8 *data, int bit) { return (data[bit >> 3] >> (bit & 7)) & 1; } static inline void uvc_clear_bit(u8 *data, int bit) { data[bit >> 3] &= ~(1 << (bit & 7)); } /* * Extract the bit string specified by mapping->offset and mapping->size * from the little-endian data stored at 'data' and return the result as * a signed 32bit integer. Sign extension will be performed if the mapping * references a signed data type. */ static s32 uvc_get_le_value(struct uvc_control_mapping *mapping, u8 query, const u8 *data) { int bits = mapping->size; int offset = mapping->offset; s32 value = 0; u8 mask; data += offset / 8; offset &= 7; mask = ((1LL << bits) - 1) << offset; while (1) { u8 byte = *data & mask; value |= offset > 0 ? (byte >> offset) : (byte << (-offset)); bits -= 8 - (offset > 0 ? offset : 0); if (bits <= 0) break; offset -= 8; mask = (1 << bits) - 1; data++; } /* Sign-extend the value if needed. */ if (mapping->data_type == UVC_CTRL_DATA_TYPE_SIGNED) value |= -(value & (1 << (mapping->size - 1))); return value; } /* * Set the bit string specified by mapping->offset and mapping->size * in the little-endian data stored at 'data' to the value 'value'. */ static void uvc_set_le_value(struct uvc_control_mapping *mapping, s32 value, u8 *data) { int bits = mapping->size; int offset = mapping->offset; u8 mask; /* * According to the v4l2 spec, writing any value to a button control * should result in the action belonging to the button control being * triggered. UVC devices however want to see a 1 written -> override * value. */ if (mapping->v4l2_type == V4L2_CTRL_TYPE_BUTTON) value = -1; data += offset / 8; offset &= 7; for (; bits > 0; data++) { mask = ((1LL << bits) - 1) << offset; *data = (*data & ~mask) | ((value << offset) & mask); value >>= offset ? 
offset : 8; bits -= 8 - offset; offset = 0; } } /* ------------------------------------------------------------------------ * Terminal and unit management */ static int uvc_entity_match_guid(const struct uvc_entity *entity, const u8 guid[16]) { return memcmp(entity->guid, guid, sizeof(entity->guid)) == 0; } /* ------------------------------------------------------------------------ * UVC Controls */ static void __uvc_find_control(struct uvc_entity *entity, u32 v4l2_id, struct uvc_control_mapping **mapping, struct uvc_control **control, int next) { struct uvc_control *ctrl; struct uvc_control_mapping *map; unsigned int i; if (entity == NULL) return; for (i = 0; i < entity->ncontrols; ++i) { ctrl = &entity->controls[i]; if (!ctrl->initialized) continue; list_for_each_entry(map, &ctrl->info.mappings, list) { if ((map->id == v4l2_id) && !next) { *control = ctrl; *mapping = map; return; } if ((*mapping == NULL || (*mapping)->id > map->id) && (map->id > v4l2_id) && next) { *control = ctrl; *mapping = map; } } } } static struct uvc_control *uvc_find_control(struct uvc_video_chain *chain, u32 v4l2_id, struct uvc_control_mapping **mapping) { struct uvc_control *ctrl = NULL; struct uvc_entity *entity; int next = v4l2_id & V4L2_CTRL_FLAG_NEXT_CTRL; *mapping = NULL; /* Mask the query flags. */ v4l2_id &= V4L2_CTRL_ID_MASK; /* Find the control. */ list_for_each_entry(entity, &chain->entities, chain) { __uvc_find_control(entity, v4l2_id, mapping, &ctrl, next); if (ctrl && !next) return ctrl; } if (ctrl == NULL && !next) uvc_dbg(chain->dev, CONTROL, "Control 0x%08x not found\n", v4l2_id); return ctrl; } static int uvc_ctrl_populate_cache(struct uvc_video_chain *chain, struct uvc_control *ctrl) { int ret; if (ctrl->info.flags & UVC_CTRL_FLAG_GET_DEF) { ret = uvc_query_ctrl(chain->dev, UVC_GET_DEF, ctrl->entity->id, chain->dev->intfnum, ctrl->info.selector, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_DEF), ctrl->info.size); if (ret < 0) return ret; } if (ctrl->info.flags & UVC_CTRL_FLAG_GET_MIN) { ret = uvc_query_ctrl(chain->dev, UVC_GET_MIN, ctrl->entity->id, chain->dev->intfnum, ctrl->info.selector, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MIN), ctrl->info.size); if (ret < 0) return ret; } if (ctrl->info.flags & UVC_CTRL_FLAG_GET_MAX) { ret = uvc_query_ctrl(chain->dev, UVC_GET_MAX, ctrl->entity->id, chain->dev->intfnum, ctrl->info.selector, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX), ctrl->info.size); if (ret < 0) return ret; } if (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES) { ret = uvc_query_ctrl(chain->dev, UVC_GET_RES, ctrl->entity->id, chain->dev->intfnum, ctrl->info.selector, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES), ctrl->info.size); if (ret < 0) { if (UVC_ENTITY_TYPE(ctrl->entity) != UVC_VC_EXTENSION_UNIT) return ret; /* * GET_RES is mandatory for XU controls, but some * cameras still choke on it. Ignore errors and set the * resolution value to zero. */ uvc_warn_once(chain->dev, UVC_WARN_XU_GET_RES, "UVC non compliance - GET_RES failed on " "an XU control. 
Enabling workaround.\n"); memset(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES), 0, ctrl->info.size); } } ctrl->cached = 1; return 0; } static s32 __uvc_ctrl_get_value(struct uvc_control_mapping *mapping, const u8 *data) { s32 value = mapping->get(mapping, UVC_GET_CUR, data); if (mapping->v4l2_type == V4L2_CTRL_TYPE_MENU) { unsigned int i; for (i = 0; BIT(i) <= mapping->menu_mask; ++i) { u32 menu_value; if (!test_bit(i, &mapping->menu_mask)) continue; menu_value = uvc_mapping_get_menu_value(mapping, i); if (menu_value == value) { value = i; break; } } } return value; } static int __uvc_ctrl_load_cur(struct uvc_video_chain *chain, struct uvc_control *ctrl) { u8 *data; int ret; if (ctrl->loaded) return 0; data = uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT); if ((ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR) == 0) { memset(data, 0, ctrl->info.size); ctrl->loaded = 1; return 0; } if (ctrl->entity->get_cur) ret = ctrl->entity->get_cur(chain->dev, ctrl->entity, ctrl->info.selector, data, ctrl->info.size); else ret = uvc_query_ctrl(chain->dev, UVC_GET_CUR, ctrl->entity->id, chain->dev->intfnum, ctrl->info.selector, data, ctrl->info.size); if (ret < 0) return ret; ctrl->loaded = 1; return ret; } static int __uvc_ctrl_get(struct uvc_video_chain *chain, struct uvc_control *ctrl, struct uvc_control_mapping *mapping, s32 *value) { int ret; if ((ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR) == 0) return -EACCES; ret = __uvc_ctrl_load_cur(chain, ctrl); if (ret < 0) return ret; *value = __uvc_ctrl_get_value(mapping, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT)); return 0; } static int __uvc_query_v4l2_class(struct uvc_video_chain *chain, u32 req_id, u32 found_id) { bool find_next = req_id & V4L2_CTRL_FLAG_NEXT_CTRL; unsigned int i; req_id &= V4L2_CTRL_ID_MASK; for (i = 0; i < ARRAY_SIZE(uvc_control_classes); i++) { if (!(chain->ctrl_class_bitmap & BIT(i))) continue; if (!find_next) { if (uvc_control_classes[i] == req_id) return i; continue; } if (uvc_control_classes[i] > req_id && uvc_control_classes[i] < found_id) return i; } return -ENODEV; } static int uvc_query_v4l2_class(struct uvc_video_chain *chain, u32 req_id, u32 found_id, struct v4l2_queryctrl *v4l2_ctrl) { int idx; idx = __uvc_query_v4l2_class(chain, req_id, found_id); if (idx < 0) return -ENODEV; memset(v4l2_ctrl, 0, sizeof(*v4l2_ctrl)); v4l2_ctrl->id = uvc_control_classes[idx]; strscpy(v4l2_ctrl->name, v4l2_ctrl_get_name(v4l2_ctrl->id), sizeof(v4l2_ctrl->name)); v4l2_ctrl->type = V4L2_CTRL_TYPE_CTRL_CLASS; v4l2_ctrl->flags = V4L2_CTRL_FLAG_WRITE_ONLY | V4L2_CTRL_FLAG_READ_ONLY; return 0; } /* * Check if control @v4l2_id can be accessed by the given control @ioctl * (VIDIOC_G_EXT_CTRLS, VIDIOC_TRY_EXT_CTRLS or VIDIOC_S_EXT_CTRLS). * * For set operations on slave controls, check if the master's value is set to * manual, either in the others controls set in the same ioctl call, or from * the master's current value. This catches VIDIOC_S_EXT_CTRLS calls that set * both the master and slave control, such as for instance setting * auto_exposure=1, exposure_time_absolute=251. 
*/ int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id, const struct v4l2_ext_controls *ctrls, unsigned long ioctl) { struct uvc_control_mapping *master_map = NULL; struct uvc_control *master_ctrl = NULL; struct uvc_control_mapping *mapping; struct uvc_control *ctrl; bool read = ioctl == VIDIOC_G_EXT_CTRLS; s32 val; int ret; int i; if (__uvc_query_v4l2_class(chain, v4l2_id, 0) >= 0) return -EACCES; ctrl = uvc_find_control(chain, v4l2_id, &mapping); if (!ctrl) return -EINVAL; if (!(ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR) && read) return -EACCES; if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR) && !read) return -EACCES; if (ioctl != VIDIOC_S_EXT_CTRLS || !mapping->master_id) return 0; /* * Iterate backwards in cases where the master control is accessed * multiple times in the same ioctl. We want the last value. */ for (i = ctrls->count - 1; i >= 0; i--) { if (ctrls->controls[i].id == mapping->master_id) return ctrls->controls[i].value == mapping->master_manual ? 0 : -EACCES; } __uvc_find_control(ctrl->entity, mapping->master_id, &master_map, &master_ctrl, 0); if (!master_ctrl || !(master_ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR)) return 0; ret = __uvc_ctrl_get(chain, master_ctrl, master_map, &val); if (ret >= 0 && val != mapping->master_manual) return -EACCES; return 0; } static const char *uvc_map_get_name(const struct uvc_control_mapping *map) { const char *name; if (map->name) return map->name; name = v4l2_ctrl_get_name(map->id); if (name) return name; return "Unknown Control"; } static u32 uvc_get_ctrl_bitmap(struct uvc_control *ctrl, struct uvc_control_mapping *mapping) { /* * Some controls, like CT_AE_MODE_CONTROL, use GET_RES to represent * the number of bits supported. Those controls do not list GET_MAX * as supported. */ if (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES) return mapping->get(mapping, UVC_GET_RES, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES)); if (ctrl->info.flags & UVC_CTRL_FLAG_GET_MAX) return mapping->get(mapping, UVC_GET_MAX, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX)); return ~0; } static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain, struct uvc_control *ctrl, struct uvc_control_mapping *mapping, struct v4l2_queryctrl *v4l2_ctrl) { struct uvc_control_mapping *master_map = NULL; struct uvc_control *master_ctrl = NULL; unsigned int i; memset(v4l2_ctrl, 0, sizeof(*v4l2_ctrl)); v4l2_ctrl->id = mapping->id; v4l2_ctrl->type = mapping->v4l2_type; strscpy(v4l2_ctrl->name, uvc_map_get_name(mapping), sizeof(v4l2_ctrl->name)); v4l2_ctrl->flags = 0; if (!(ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR)) v4l2_ctrl->flags |= V4L2_CTRL_FLAG_WRITE_ONLY; if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR)) v4l2_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY; if (mapping->master_id) __uvc_find_control(ctrl->entity, mapping->master_id, &master_map, &master_ctrl, 0); if (master_ctrl && (master_ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR)) { s32 val; int ret = __uvc_ctrl_get(chain, master_ctrl, master_map, &val); if (ret < 0) return ret; if (val != mapping->master_manual) v4l2_ctrl->flags |= V4L2_CTRL_FLAG_INACTIVE; } if (!ctrl->cached) { int ret = uvc_ctrl_populate_cache(chain, ctrl); if (ret < 0) return ret; } if (ctrl->info.flags & UVC_CTRL_FLAG_GET_DEF) { v4l2_ctrl->default_value = mapping->get(mapping, UVC_GET_DEF, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_DEF)); } switch (mapping->v4l2_type) { case V4L2_CTRL_TYPE_MENU: v4l2_ctrl->minimum = ffs(mapping->menu_mask) - 1; v4l2_ctrl->maximum = fls(mapping->menu_mask) - 1; v4l2_ctrl->step = 1; for (i = 0; BIT(i) <= mapping->menu_mask; ++i) { u32 
menu_value; if (!test_bit(i, &mapping->menu_mask)) continue; menu_value = uvc_mapping_get_menu_value(mapping, i); if (menu_value == v4l2_ctrl->default_value) { v4l2_ctrl->default_value = i; break; } } return 0; case V4L2_CTRL_TYPE_BOOLEAN: v4l2_ctrl->minimum = 0; v4l2_ctrl->maximum = 1; v4l2_ctrl->step = 1; return 0; case V4L2_CTRL_TYPE_BUTTON: v4l2_ctrl->minimum = 0; v4l2_ctrl->maximum = 0; v4l2_ctrl->step = 0; return 0; case V4L2_CTRL_TYPE_BITMASK: v4l2_ctrl->minimum = 0; v4l2_ctrl->maximum = uvc_get_ctrl_bitmap(ctrl, mapping); v4l2_ctrl->step = 0; return 0; default: break; } if (ctrl->info.flags & UVC_CTRL_FLAG_GET_MIN) v4l2_ctrl->minimum = mapping->get(mapping, UVC_GET_MIN, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MIN)); if (ctrl->info.flags & UVC_CTRL_FLAG_GET_MAX) v4l2_ctrl->maximum = mapping->get(mapping, UVC_GET_MAX, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX)); if (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES) v4l2_ctrl->step = mapping->get(mapping, UVC_GET_RES, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES)); return 0; } int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain, struct v4l2_queryctrl *v4l2_ctrl) { struct uvc_control *ctrl; struct uvc_control_mapping *mapping; int ret; ret = mutex_lock_interruptible(&chain->ctrl_mutex); if (ret < 0) return -ERESTARTSYS; /* Check if the ctrl is a know class */ if (!(v4l2_ctrl->id & V4L2_CTRL_FLAG_NEXT_CTRL)) { ret = uvc_query_v4l2_class(chain, v4l2_ctrl->id, 0, v4l2_ctrl); if (!ret) goto done; } ctrl = uvc_find_control(chain, v4l2_ctrl->id, &mapping); if (ctrl == NULL) { ret = -EINVAL; goto done; } /* * If we're enumerating control with V4L2_CTRL_FLAG_NEXT_CTRL, check if * a class should be inserted between the previous control and the one * we have just found. */ if (v4l2_ctrl->id & V4L2_CTRL_FLAG_NEXT_CTRL) { ret = uvc_query_v4l2_class(chain, v4l2_ctrl->id, mapping->id, v4l2_ctrl); if (!ret) goto done; } ret = __uvc_query_v4l2_ctrl(chain, ctrl, mapping, v4l2_ctrl); done: mutex_unlock(&chain->ctrl_mutex); return ret; } /* * Mapping V4L2 controls to UVC controls can be straightforward if done well. * Most of the UVC controls exist in V4L2, and can be mapped directly. Some * must be grouped (for instance the Red Balance, Blue Balance and Do White * Balance V4L2 controls use the White Balance Component UVC control) or * otherwise translated. The approach we take here is to use a translation * table for the controls that can be mapped directly, and handle the others * manually. 
*/ int uvc_query_v4l2_menu(struct uvc_video_chain *chain, struct v4l2_querymenu *query_menu) { struct uvc_control_mapping *mapping; struct uvc_control *ctrl; u32 index = query_menu->index; u32 id = query_menu->id; const char *name; int ret; memset(query_menu, 0, sizeof(*query_menu)); query_menu->id = id; query_menu->index = index; if (index >= BITS_PER_TYPE(mapping->menu_mask)) return -EINVAL; ret = mutex_lock_interruptible(&chain->ctrl_mutex); if (ret < 0) return -ERESTARTSYS; ctrl = uvc_find_control(chain, query_menu->id, &mapping); if (ctrl == NULL || mapping->v4l2_type != V4L2_CTRL_TYPE_MENU) { ret = -EINVAL; goto done; } if (!test_bit(query_menu->index, &mapping->menu_mask)) { ret = -EINVAL; goto done; } if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK) { int mask; if (!ctrl->cached) { ret = uvc_ctrl_populate_cache(chain, ctrl); if (ret < 0) goto done; } mask = uvc_mapping_get_menu_value(mapping, query_menu->index); if (mask < 0) { ret = mask; goto done; } if (!(uvc_get_ctrl_bitmap(ctrl, mapping) & mask)) { ret = -EINVAL; goto done; } } name = uvc_mapping_get_menu_name(mapping, query_menu->index); if (!name) { ret = -EINVAL; goto done; } strscpy(query_menu->name, name, sizeof(query_menu->name)); done: mutex_unlock(&chain->ctrl_mutex); return ret; } /* -------------------------------------------------------------------------- * Ctrl event handling */ static void uvc_ctrl_fill_event(struct uvc_video_chain *chain, struct v4l2_event *ev, struct uvc_control *ctrl, struct uvc_control_mapping *mapping, s32 value, u32 changes) { struct v4l2_queryctrl v4l2_ctrl; __uvc_query_v4l2_ctrl(chain, ctrl, mapping, &v4l2_ctrl); memset(ev, 0, sizeof(*ev)); ev->type = V4L2_EVENT_CTRL; ev->id = v4l2_ctrl.id; ev->u.ctrl.value = value; ev->u.ctrl.changes = changes; ev->u.ctrl.type = v4l2_ctrl.type; ev->u.ctrl.flags = v4l2_ctrl.flags; ev->u.ctrl.minimum = v4l2_ctrl.minimum; ev->u.ctrl.maximum = v4l2_ctrl.maximum; ev->u.ctrl.step = v4l2_ctrl.step; ev->u.ctrl.default_value = v4l2_ctrl.default_value; } /* * Send control change events to all subscribers for the @ctrl control. By * default the subscriber that generated the event, as identified by @handle, * is not notified unless it has set the V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK flag. * @handle can be NULL for asynchronous events related to auto-update controls, * in which case all subscribers are notified. */ static void uvc_ctrl_send_event(struct uvc_video_chain *chain, struct uvc_fh *handle, struct uvc_control *ctrl, struct uvc_control_mapping *mapping, s32 value, u32 changes) { struct v4l2_fh *originator = handle ? &handle->vfh : NULL; struct v4l2_subscribed_event *sev; struct v4l2_event ev; if (list_empty(&mapping->ev_subs)) return; uvc_ctrl_fill_event(chain, &ev, ctrl, mapping, value, changes); list_for_each_entry(sev, &mapping->ev_subs, node) { if (sev->fh != originator || (sev->flags & V4L2_EVENT_SUB_FL_ALLOW_FEEDBACK) || (changes & V4L2_EVENT_CTRL_CH_FLAGS)) v4l2_event_queue_fh(sev->fh, &ev); } } /* * Send control change events for the slave of the @master control identified * by the V4L2 ID @slave_id. The @handle identifies the event subscriber that * generated the event and may be NULL for auto-update events. 
*/ static void uvc_ctrl_send_slave_event(struct uvc_video_chain *chain, struct uvc_fh *handle, struct uvc_control *master, u32 slave_id) { struct uvc_control_mapping *mapping = NULL; struct uvc_control *ctrl = NULL; u32 changes = V4L2_EVENT_CTRL_CH_FLAGS; s32 val = 0; __uvc_find_control(master->entity, slave_id, &mapping, &ctrl, 0); if (ctrl == NULL) return; if (__uvc_ctrl_get(chain, ctrl, mapping, &val) == 0) changes |= V4L2_EVENT_CTRL_CH_VALUE; uvc_ctrl_send_event(chain, handle, ctrl, mapping, val, changes); } void uvc_ctrl_status_event(struct uvc_video_chain *chain, struct uvc_control *ctrl, const u8 *data) { struct uvc_control_mapping *mapping; struct uvc_fh *handle; unsigned int i; mutex_lock(&chain->ctrl_mutex); handle = ctrl->handle; ctrl->handle = NULL; list_for_each_entry(mapping, &ctrl->info.mappings, list) { s32 value = __uvc_ctrl_get_value(mapping, data); /* * handle may be NULL here if the device sends auto-update * events without a prior related control set from userspace. */ for (i = 0; i < ARRAY_SIZE(mapping->slave_ids); ++i) { if (!mapping->slave_ids[i]) break; uvc_ctrl_send_slave_event(chain, handle, ctrl, mapping->slave_ids[i]); } uvc_ctrl_send_event(chain, handle, ctrl, mapping, value, V4L2_EVENT_CTRL_CH_VALUE); } mutex_unlock(&chain->ctrl_mutex); } static void uvc_ctrl_status_event_work(struct work_struct *work) { struct uvc_device *dev = container_of(work, struct uvc_device, async_ctrl.work); struct uvc_ctrl_work *w = &dev->async_ctrl; int ret; uvc_ctrl_status_event(w->chain, w->ctrl, w->data); /* The barrier is needed to synchronize with uvc_status_stop(). */ if (smp_load_acquire(&dev->flush_status)) return; /* Resubmit the URB. */ w->urb->interval = dev->int_ep->desc.bInterval; ret = usb_submit_urb(w->urb, GFP_KERNEL); if (ret < 0) dev_err(&dev->udev->dev, "Failed to resubmit status URB (%d).\n", ret); } bool uvc_ctrl_status_event_async(struct urb *urb, struct uvc_video_chain *chain, struct uvc_control *ctrl, const u8 *data) { struct uvc_device *dev = chain->dev; struct uvc_ctrl_work *w = &dev->async_ctrl; if (list_empty(&ctrl->info.mappings)) { ctrl->handle = NULL; return false; } w->data = data; w->urb = urb; w->chain = chain; w->ctrl = ctrl; schedule_work(&w->work); return true; } static bool uvc_ctrl_xctrls_has_control(const struct v4l2_ext_control *xctrls, unsigned int xctrls_count, u32 id) { unsigned int i; for (i = 0; i < xctrls_count; ++i) { if (xctrls[i].id == id) return true; } return false; } static void uvc_ctrl_send_events(struct uvc_fh *handle, const struct v4l2_ext_control *xctrls, unsigned int xctrls_count) { struct uvc_control_mapping *mapping; struct uvc_control *ctrl; u32 changes = V4L2_EVENT_CTRL_CH_VALUE; unsigned int i; unsigned int j; for (i = 0; i < xctrls_count; ++i) { ctrl = uvc_find_control(handle->chain, xctrls[i].id, &mapping); if (ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS) /* Notification will be sent from an Interrupt event. */ continue; for (j = 0; j < ARRAY_SIZE(mapping->slave_ids); ++j) { u32 slave_id = mapping->slave_ids[j]; if (!slave_id) break; /* * We can skip sending an event for the slave if the * slave is being modified in the same transaction. */ if (uvc_ctrl_xctrls_has_control(xctrls, xctrls_count, slave_id)) continue; uvc_ctrl_send_slave_event(handle->chain, handle, ctrl, slave_id); } /* * If the master is being modified in the same transaction * flags may change too. 
*/ if (mapping->master_id && uvc_ctrl_xctrls_has_control(xctrls, xctrls_count, mapping->master_id)) changes |= V4L2_EVENT_CTRL_CH_FLAGS; uvc_ctrl_send_event(handle->chain, handle, ctrl, mapping, xctrls[i].value, changes); } } static int uvc_ctrl_add_event(struct v4l2_subscribed_event *sev, unsigned elems) { struct uvc_fh *handle = container_of(sev->fh, struct uvc_fh, vfh); struct uvc_control_mapping *mapping; struct uvc_control *ctrl; int ret; ret = mutex_lock_interruptible(&handle->chain->ctrl_mutex); if (ret < 0) return -ERESTARTSYS; if (__uvc_query_v4l2_class(handle->chain, sev->id, 0) >= 0) { ret = 0; goto done; } ctrl = uvc_find_control(handle->chain, sev->id, &mapping); if (ctrl == NULL) { ret = -EINVAL; goto done; } list_add_tail(&sev->node, &mapping->ev_subs); if (sev->flags & V4L2_EVENT_SUB_FL_SEND_INITIAL) { struct v4l2_event ev; u32 changes = V4L2_EVENT_CTRL_CH_FLAGS; s32 val = 0; if (__uvc_ctrl_get(handle->chain, ctrl, mapping, &val) == 0) changes |= V4L2_EVENT_CTRL_CH_VALUE; uvc_ctrl_fill_event(handle->chain, &ev, ctrl, mapping, val, changes); /* * Mark the queue as active, allowing this initial event to be * accepted. */ sev->elems = elems; v4l2_event_queue_fh(sev->fh, &ev); } done: mutex_unlock(&handle->chain->ctrl_mutex); return ret; } static void uvc_ctrl_del_event(struct v4l2_subscribed_event *sev) { struct uvc_fh *handle = container_of(sev->fh, struct uvc_fh, vfh); mutex_lock(&handle->chain->ctrl_mutex); if (__uvc_query_v4l2_class(handle->chain, sev->id, 0) >= 0) goto done; list_del(&sev->node); done: mutex_unlock(&handle->chain->ctrl_mutex); } const struct v4l2_subscribed_event_ops uvc_ctrl_sub_ev_ops = { .add = uvc_ctrl_add_event, .del = uvc_ctrl_del_event, .replace = v4l2_ctrl_replace, .merge = v4l2_ctrl_merge, }; /* -------------------------------------------------------------------------- * Control transactions * * To make extended set operations as atomic as the hardware allows, controls * are handled using begin/commit/rollback operations. * * At the beginning of a set request, uvc_ctrl_begin should be called to * initialize the request. This function acquires the control lock. * * When setting a control, the new value is stored in the control data field * at position UVC_CTRL_DATA_CURRENT. The control is then marked as dirty for * later processing. If the UVC and V4L2 control sizes differ, the current * value is loaded from the hardware before storing the new value in the data * field. * * After processing all controls in the transaction, uvc_ctrl_commit or * uvc_ctrl_rollback must be called to apply the pending changes to the * hardware or revert them. When applying changes, all controls marked as * dirty will be modified in the UVC device, and the dirty flag will be * cleared. When reverting controls, the control data field * UVC_CTRL_DATA_CURRENT is reverted to its previous value * (UVC_CTRL_DATA_BACKUP) for all dirty controls. Both functions release the * control lock. */ int uvc_ctrl_begin(struct uvc_video_chain *chain) { return mutex_lock_interruptible(&chain->ctrl_mutex) ? 
-ERESTARTSYS : 0; } static int uvc_ctrl_commit_entity(struct uvc_device *dev, struct uvc_entity *entity, int rollback, struct uvc_control **err_ctrl) { struct uvc_control *ctrl; unsigned int i; int ret; if (entity == NULL) return 0; for (i = 0; i < entity->ncontrols; ++i) { ctrl = &entity->controls[i]; if (!ctrl->initialized) continue; /* * Reset the loaded flag for auto-update controls that were * marked as loaded in uvc_ctrl_get/uvc_ctrl_set to prevent * uvc_ctrl_get from using the cached value, and for write-only * controls to prevent uvc_ctrl_set from setting bits not * explicitly set by the user. */ if (ctrl->info.flags & UVC_CTRL_FLAG_AUTO_UPDATE || !(ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR)) ctrl->loaded = 0; if (!ctrl->dirty) continue; if (!rollback) ret = uvc_query_ctrl(dev, UVC_SET_CUR, ctrl->entity->id, dev->intfnum, ctrl->info.selector, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT), ctrl->info.size); else ret = 0; if (rollback || ret < 0) memcpy(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT), uvc_ctrl_data(ctrl, UVC_CTRL_DATA_BACKUP), ctrl->info.size); ctrl->dirty = 0; if (ret < 0) { if (err_ctrl) *err_ctrl = ctrl; return ret; } } return 0; } static int uvc_ctrl_find_ctrl_idx(struct uvc_entity *entity, struct v4l2_ext_controls *ctrls, struct uvc_control *uvc_control) { struct uvc_control_mapping *mapping = NULL; struct uvc_control *ctrl_found = NULL; unsigned int i; if (!entity) return ctrls->count; for (i = 0; i < ctrls->count; i++) { __uvc_find_control(entity, ctrls->controls[i].id, &mapping, &ctrl_found, 0); if (uvc_control == ctrl_found) return i; } return ctrls->count; } int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback, struct v4l2_ext_controls *ctrls) { struct uvc_video_chain *chain = handle->chain; struct uvc_control *err_ctrl; struct uvc_entity *entity; int ret = 0; /* Find the control. */ list_for_each_entry(entity, &chain->entities, chain) { ret = uvc_ctrl_commit_entity(chain->dev, entity, rollback, &err_ctrl); if (ret < 0) goto done; } if (!rollback) uvc_ctrl_send_events(handle, ctrls->controls, ctrls->count); done: if (ret < 0 && ctrls) ctrls->error_idx = uvc_ctrl_find_ctrl_idx(entity, ctrls, err_ctrl); mutex_unlock(&chain->ctrl_mutex); return ret; } int uvc_ctrl_get(struct uvc_video_chain *chain, struct v4l2_ext_control *xctrl) { struct uvc_control *ctrl; struct uvc_control_mapping *mapping; if (__uvc_query_v4l2_class(chain, xctrl->id, 0) >= 0) return -EACCES; ctrl = uvc_find_control(chain, xctrl->id, &mapping); if (ctrl == NULL) return -EINVAL; return __uvc_ctrl_get(chain, ctrl, mapping, &xctrl->value); } int uvc_ctrl_set(struct uvc_fh *handle, struct v4l2_ext_control *xctrl) { struct uvc_video_chain *chain = handle->chain; struct uvc_control *ctrl; struct uvc_control_mapping *mapping; s32 value; u32 step; s32 min; s32 max; int ret; if (__uvc_query_v4l2_class(chain, xctrl->id, 0) >= 0) return -EACCES; ctrl = uvc_find_control(chain, xctrl->id, &mapping); if (ctrl == NULL) return -EINVAL; if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR)) return -EACCES; /* Clamp out of range values. 
*/ switch (mapping->v4l2_type) { case V4L2_CTRL_TYPE_INTEGER: if (!ctrl->cached) { ret = uvc_ctrl_populate_cache(chain, ctrl); if (ret < 0) return ret; } min = mapping->get(mapping, UVC_GET_MIN, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MIN)); max = mapping->get(mapping, UVC_GET_MAX, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX)); step = mapping->get(mapping, UVC_GET_RES, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES)); if (step == 0) step = 1; xctrl->value = min + DIV_ROUND_CLOSEST((u32)(xctrl->value - min), step) * step; if (mapping->data_type == UVC_CTRL_DATA_TYPE_SIGNED) xctrl->value = clamp(xctrl->value, min, max); else xctrl->value = clamp_t(u32, xctrl->value, min, max); value = xctrl->value; break; case V4L2_CTRL_TYPE_BITMASK: if (!ctrl->cached) { ret = uvc_ctrl_populate_cache(chain, ctrl); if (ret < 0) return ret; } xctrl->value &= uvc_get_ctrl_bitmap(ctrl, mapping); value = xctrl->value; break; case V4L2_CTRL_TYPE_BOOLEAN: xctrl->value = clamp(xctrl->value, 0, 1); value = xctrl->value; break; case V4L2_CTRL_TYPE_MENU: if (xctrl->value < (ffs(mapping->menu_mask) - 1) || xctrl->value > (fls(mapping->menu_mask) - 1)) return -ERANGE; if (!test_bit(xctrl->value, &mapping->menu_mask)) return -EINVAL; value = uvc_mapping_get_menu_value(mapping, xctrl->value); /* * Valid menu indices are reported by the GET_RES request for * UVC controls that support it. */ if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK) { if (!ctrl->cached) { ret = uvc_ctrl_populate_cache(chain, ctrl); if (ret < 0) return ret; } if (!(uvc_get_ctrl_bitmap(ctrl, mapping) & value)) return -EINVAL; } break; default: value = xctrl->value; break; } /* * If the mapping doesn't span the whole UVC control, the current value * needs to be loaded from the device to perform the read-modify-write * operation. */ if ((ctrl->info.size * 8) != mapping->size) { ret = __uvc_ctrl_load_cur(chain, ctrl); if (ret < 0) return ret; } /* Backup the current value in case we need to rollback later. */ if (!ctrl->dirty) { memcpy(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_BACKUP), uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT), ctrl->info.size); } mapping->set(mapping, value, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT)); if (ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS) ctrl->handle = handle; ctrl->dirty = 1; ctrl->modified = 1; return 0; } /* -------------------------------------------------------------------------- * Dynamic controls */ /* * Retrieve flags for a given control */ static int uvc_ctrl_get_flags(struct uvc_device *dev, const struct uvc_control *ctrl, struct uvc_control_info *info) { u8 *data; int ret; data = kmalloc(1, GFP_KERNEL); if (data == NULL) return -ENOMEM; if (ctrl->entity->get_info) ret = ctrl->entity->get_info(dev, ctrl->entity, ctrl->info.selector, data); else ret = uvc_query_ctrl(dev, UVC_GET_INFO, ctrl->entity->id, dev->intfnum, info->selector, data, 1); if (!ret) info->flags |= (data[0] & UVC_CONTROL_CAP_GET ? UVC_CTRL_FLAG_GET_CUR : 0) | (data[0] & UVC_CONTROL_CAP_SET ? UVC_CTRL_FLAG_SET_CUR : 0) | (data[0] & UVC_CONTROL_CAP_AUTOUPDATE ? UVC_CTRL_FLAG_AUTO_UPDATE : 0) | (data[0] & UVC_CONTROL_CAP_ASYNCHRONOUS ? 
UVC_CTRL_FLAG_ASYNCHRONOUS : 0); kfree(data); return ret; } static void uvc_ctrl_fixup_xu_info(struct uvc_device *dev, const struct uvc_control *ctrl, struct uvc_control_info *info) { struct uvc_ctrl_fixup { struct usb_device_id id; u8 entity; u8 selector; u8 flags; }; static const struct uvc_ctrl_fixup fixups[] = { { { USB_DEVICE(0x046d, 0x08c2) }, 9, 1, UVC_CTRL_FLAG_GET_MIN | UVC_CTRL_FLAG_GET_MAX | UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_AUTO_UPDATE }, { { USB_DEVICE(0x046d, 0x08cc) }, 9, 1, UVC_CTRL_FLAG_GET_MIN | UVC_CTRL_FLAG_GET_MAX | UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_AUTO_UPDATE }, { { USB_DEVICE(0x046d, 0x0994) }, 9, 1, UVC_CTRL_FLAG_GET_MIN | UVC_CTRL_FLAG_GET_MAX | UVC_CTRL_FLAG_GET_DEF | UVC_CTRL_FLAG_SET_CUR | UVC_CTRL_FLAG_AUTO_UPDATE }, }; unsigned int i; for (i = 0; i < ARRAY_SIZE(fixups); ++i) { if (!usb_match_one_id(dev->intf, &fixups[i].id)) continue; if (fixups[i].entity == ctrl->entity->id && fixups[i].selector == info->selector) { info->flags = fixups[i].flags; return; } } } /* * Query control information (size and flags) for XU controls. */ static int uvc_ctrl_fill_xu_info(struct uvc_device *dev, const struct uvc_control *ctrl, struct uvc_control_info *info) { u8 *data; int ret; data = kmalloc(2, GFP_KERNEL); if (data == NULL) return -ENOMEM; memcpy(info->entity, ctrl->entity->guid, sizeof(info->entity)); info->index = ctrl->index; info->selector = ctrl->index + 1; /* Query and verify the control length (GET_LEN) */ ret = uvc_query_ctrl(dev, UVC_GET_LEN, ctrl->entity->id, dev->intfnum, info->selector, data, 2); if (ret < 0) { uvc_dbg(dev, CONTROL, "GET_LEN failed on control %pUl/%u (%d)\n", info->entity, info->selector, ret); goto done; } info->size = le16_to_cpup((__le16 *)data); info->flags = UVC_CTRL_FLAG_GET_MIN | UVC_CTRL_FLAG_GET_MAX | UVC_CTRL_FLAG_GET_RES | UVC_CTRL_FLAG_GET_DEF; ret = uvc_ctrl_get_flags(dev, ctrl, info); if (ret < 0) { uvc_dbg(dev, CONTROL, "Failed to get flags for control %pUl/%u (%d)\n", info->entity, info->selector, ret); goto done; } uvc_ctrl_fixup_xu_info(dev, ctrl, info); uvc_dbg(dev, CONTROL, "XU control %pUl/%u queried: len %u, flags { get %u set %u auto %u }\n", info->entity, info->selector, info->size, (info->flags & UVC_CTRL_FLAG_GET_CUR) ? 1 : 0, (info->flags & UVC_CTRL_FLAG_SET_CUR) ? 1 : 0, (info->flags & UVC_CTRL_FLAG_AUTO_UPDATE) ? 1 : 0); done: kfree(data); return ret; } static int uvc_ctrl_add_info(struct uvc_device *dev, struct uvc_control *ctrl, const struct uvc_control_info *info); static int uvc_ctrl_init_xu_ctrl(struct uvc_device *dev, struct uvc_control *ctrl) { struct uvc_control_info info; int ret; if (ctrl->initialized) return 0; ret = uvc_ctrl_fill_xu_info(dev, ctrl, &info); if (ret < 0) return ret; ret = uvc_ctrl_add_info(dev, ctrl, &info); if (ret < 0) uvc_dbg(dev, CONTROL, "Failed to initialize control %pUl/%u on device %s entity %u\n", info.entity, info.selector, dev->udev->devpath, ctrl->entity->id); return ret; } int uvc_xu_ctrl_query(struct uvc_video_chain *chain, struct uvc_xu_control_query *xqry) { struct uvc_entity *entity; struct uvc_control *ctrl; unsigned int i; bool found; u32 reqflags; u16 size; u8 *data = NULL; int ret; /* Find the extension unit. 
*/ found = false; list_for_each_entry(entity, &chain->entities, chain) { if (UVC_ENTITY_TYPE(entity) == UVC_VC_EXTENSION_UNIT && entity->id == xqry->unit) { found = true; break; } } if (!found) { uvc_dbg(chain->dev, CONTROL, "Extension unit %u not found\n", xqry->unit); return -ENOENT; } /* Find the control and perform delayed initialization if needed. */ found = false; for (i = 0; i < entity->ncontrols; ++i) { ctrl = &entity->controls[i]; if (ctrl->index == xqry->selector - 1) { found = true; break; } } if (!found) { uvc_dbg(chain->dev, CONTROL, "Control %pUl/%u not found\n", entity->guid, xqry->selector); return -ENOENT; } if (mutex_lock_interruptible(&chain->ctrl_mutex)) return -ERESTARTSYS; ret = uvc_ctrl_init_xu_ctrl(chain->dev, ctrl); if (ret < 0) { ret = -ENOENT; goto done; } /* Validate the required buffer size and flags for the request */ reqflags = 0; size = ctrl->info.size; switch (xqry->query) { case UVC_GET_CUR: reqflags = UVC_CTRL_FLAG_GET_CUR; break; case UVC_GET_MIN: reqflags = UVC_CTRL_FLAG_GET_MIN; break; case UVC_GET_MAX: reqflags = UVC_CTRL_FLAG_GET_MAX; break; case UVC_GET_DEF: reqflags = UVC_CTRL_FLAG_GET_DEF; break; case UVC_GET_RES: reqflags = UVC_CTRL_FLAG_GET_RES; break; case UVC_SET_CUR: reqflags = UVC_CTRL_FLAG_SET_CUR; break; case UVC_GET_LEN: size = 2; break; case UVC_GET_INFO: size = 1; break; default: ret = -EINVAL; goto done; } if (size != xqry->size) { ret = -ENOBUFS; goto done; } if (reqflags && !(ctrl->info.flags & reqflags)) { ret = -EBADRQC; goto done; } data = kmalloc(size, GFP_KERNEL); if (data == NULL) { ret = -ENOMEM; goto done; } if (xqry->query == UVC_SET_CUR && copy_from_user(data, xqry->data, size)) { ret = -EFAULT; goto done; } ret = uvc_query_ctrl(chain->dev, xqry->query, xqry->unit, chain->dev->intfnum, xqry->selector, data, size); if (ret < 0) goto done; if (xqry->query != UVC_SET_CUR && copy_to_user(xqry->data, data, size)) ret = -EFAULT; done: kfree(data); mutex_unlock(&chain->ctrl_mutex); return ret; } /* -------------------------------------------------------------------------- * Suspend/resume */ /* * Restore control values after resume, skipping controls that haven't been * changed. * * TODO * - Don't restore modified controls that are back to their default value. * - Handle restore order (Auto-Exposure Mode should be restored before * Exposure Time). */ int uvc_ctrl_restore_values(struct uvc_device *dev) { struct uvc_control *ctrl; struct uvc_entity *entity; unsigned int i; int ret; /* Walk the entities list and restore controls when possible. */ list_for_each_entry(entity, &dev->entities, list) { for (i = 0; i < entity->ncontrols; ++i) { ctrl = &entity->controls[i]; if (!ctrl->initialized || !ctrl->modified || (ctrl->info.flags & UVC_CTRL_FLAG_RESTORE) == 0) continue; dev_dbg(&dev->udev->dev, "restoring control %pUl/%u/%u\n", ctrl->info.entity, ctrl->info.index, ctrl->info.selector); ctrl->dirty = 1; } ret = uvc_ctrl_commit_entity(dev, entity, 0, NULL); if (ret < 0) return ret; } return 0; } /* -------------------------------------------------------------------------- * Control and mapping handling */ /* * Add control information to a given control. */ static int uvc_ctrl_add_info(struct uvc_device *dev, struct uvc_control *ctrl, const struct uvc_control_info *info) { ctrl->info = *info; INIT_LIST_HEAD(&ctrl->info.mappings); /* Allocate an array to save control values (cur, def, max, etc.) 
*/ ctrl->uvc_data = kzalloc(ctrl->info.size * UVC_CTRL_DATA_LAST + 1, GFP_KERNEL); if (!ctrl->uvc_data) return -ENOMEM; ctrl->initialized = 1; uvc_dbg(dev, CONTROL, "Added control %pUl/%u to device %s entity %u\n", ctrl->info.entity, ctrl->info.selector, dev->udev->devpath, ctrl->entity->id); return 0; } /* * Add a control mapping to a given control. */ static int __uvc_ctrl_add_mapping(struct uvc_video_chain *chain, struct uvc_control *ctrl, const struct uvc_control_mapping *mapping) { struct uvc_control_mapping *map; unsigned int size; unsigned int i; /* * Most mappings come from static kernel data, and need to be duplicated. * Mappings that come from userspace will be unnecessarily duplicated, * this could be optimized. */ map = kmemdup(mapping, sizeof(*mapping), GFP_KERNEL); if (!map) return -ENOMEM; map->name = NULL; map->menu_names = NULL; map->menu_mapping = NULL; /* For UVCIOC_CTRL_MAP custom control */ if (mapping->name) { map->name = kstrdup(mapping->name, GFP_KERNEL); if (!map->name) goto err_nomem; } INIT_LIST_HEAD(&map->ev_subs); if (mapping->menu_mapping && mapping->menu_mask) { size = sizeof(mapping->menu_mapping[0]) * fls(mapping->menu_mask); map->menu_mapping = kmemdup(mapping->menu_mapping, size, GFP_KERNEL); if (!map->menu_mapping) goto err_nomem; } if (mapping->menu_names && mapping->menu_mask) { size = sizeof(mapping->menu_names[0]) * fls(mapping->menu_mask); map->menu_names = kmemdup(mapping->menu_names, size, GFP_KERNEL); if (!map->menu_names) goto err_nomem; } if (map->get == NULL) map->get = uvc_get_le_value; if (map->set == NULL) map->set = uvc_set_le_value; for (i = 0; i < ARRAY_SIZE(uvc_control_classes); i++) { if (V4L2_CTRL_ID2WHICH(uvc_control_classes[i]) == V4L2_CTRL_ID2WHICH(map->id)) { chain->ctrl_class_bitmap |= BIT(i); break; } } list_add_tail(&map->list, &ctrl->info.mappings); uvc_dbg(chain->dev, CONTROL, "Adding mapping '%s' to control %pUl/%u\n", uvc_map_get_name(map), ctrl->info.entity, ctrl->info.selector); return 0; err_nomem: kfree(map->menu_names); kfree(map->menu_mapping); kfree(map->name); kfree(map); return -ENOMEM; } int uvc_ctrl_add_mapping(struct uvc_video_chain *chain, const struct uvc_control_mapping *mapping) { struct uvc_device *dev = chain->dev; struct uvc_control_mapping *map; struct uvc_entity *entity; struct uvc_control *ctrl; int found = 0; int ret; if (mapping->id & ~V4L2_CTRL_ID_MASK) { uvc_dbg(dev, CONTROL, "Can't add mapping '%s', control id 0x%08x is invalid\n", uvc_map_get_name(mapping), mapping->id); return -EINVAL; } /* Search for the matching (GUID/CS) control on the current chain */ list_for_each_entry(entity, &chain->entities, chain) { unsigned int i; if (UVC_ENTITY_TYPE(entity) != UVC_VC_EXTENSION_UNIT || !uvc_entity_match_guid(entity, mapping->entity)) continue; for (i = 0; i < entity->ncontrols; ++i) { ctrl = &entity->controls[i]; if (ctrl->index == mapping->selector - 1) { found = 1; break; } } if (found) break; } if (!found) return -ENOENT; if (mutex_lock_interruptible(&chain->ctrl_mutex)) return -ERESTARTSYS; /* Perform delayed initialization of XU controls */ ret = uvc_ctrl_init_xu_ctrl(dev, ctrl); if (ret < 0) { ret = -ENOENT; goto done; } /* Validate the user-provided bit-size and offset */ if (mapping->size > 32 || mapping->offset + mapping->size > ctrl->info.size * 8) { ret = -EINVAL; goto done; } list_for_each_entry(map, &ctrl->info.mappings, list) { if (mapping->id == map->id) { uvc_dbg(dev, CONTROL, "Can't add mapping '%s', control id 0x%08x already exists\n", uvc_map_get_name(mapping), mapping->id); ret = 
-EEXIST; goto done; } } /* Prevent excess memory consumption */ if (atomic_inc_return(&dev->nmappings) > UVC_MAX_CONTROL_MAPPINGS) { atomic_dec(&dev->nmappings); uvc_dbg(dev, CONTROL, "Can't add mapping '%s', maximum mappings count (%u) exceeded\n", uvc_map_get_name(mapping), UVC_MAX_CONTROL_MAPPINGS); ret = -ENOMEM; goto done; } ret = __uvc_ctrl_add_mapping(chain, ctrl, mapping); if (ret < 0) atomic_dec(&dev->nmappings); done: mutex_unlock(&chain->ctrl_mutex); return ret; } /* * Prune an entity of its bogus controls using a blacklist. Bogus controls * are currently the ones that crash the camera or unconditionally return an * error when queried. */ static void uvc_ctrl_prune_entity(struct uvc_device *dev, struct uvc_entity *entity) { struct uvc_ctrl_blacklist { struct usb_device_id id; u8 index; }; static const struct uvc_ctrl_blacklist processing_blacklist[] = { { { USB_DEVICE(0x13d3, 0x509b) }, 9 }, /* Gain */ { { USB_DEVICE(0x1c4f, 0x3000) }, 6 }, /* WB Temperature */ { { USB_DEVICE(0x5986, 0x0241) }, 2 }, /* Hue */ }; static const struct uvc_ctrl_blacklist camera_blacklist[] = { { { USB_DEVICE(0x06f8, 0x3005) }, 9 }, /* Zoom, Absolute */ }; const struct uvc_ctrl_blacklist *blacklist; unsigned int size; unsigned int count; unsigned int i; u8 *controls; switch (UVC_ENTITY_TYPE(entity)) { case UVC_VC_PROCESSING_UNIT: blacklist = processing_blacklist; count = ARRAY_SIZE(processing_blacklist); controls = entity->processing.bmControls; size = entity->processing.bControlSize; break; case UVC_ITT_CAMERA: blacklist = camera_blacklist; count = ARRAY_SIZE(camera_blacklist); controls = entity->camera.bmControls; size = entity->camera.bControlSize; break; default: return; } for (i = 0; i < count; ++i) { if (!usb_match_one_id(dev->intf, &blacklist[i].id)) continue; if (blacklist[i].index >= 8 * size || !uvc_test_bit(controls, blacklist[i].index)) continue; uvc_dbg(dev, CONTROL, "%u/%u control is black listed, removing it\n", entity->id, blacklist[i].index); uvc_clear_bit(controls, blacklist[i].index); } } /* * Add control information and hardcoded stock control mappings to the given * device. */ static void uvc_ctrl_init_ctrl(struct uvc_video_chain *chain, struct uvc_control *ctrl) { const struct uvc_control_mapping **mappings; unsigned int i; /* * XU controls initialization requires querying the device for control * information. As some buggy UVC devices will crash when queried * repeatedly in a tight loop, delay XU controls initialization until * first use. */ if (UVC_ENTITY_TYPE(ctrl->entity) == UVC_VC_EXTENSION_UNIT) return; for (i = 0; i < ARRAY_SIZE(uvc_ctrls); ++i) { const struct uvc_control_info *info = &uvc_ctrls[i]; if (uvc_entity_match_guid(ctrl->entity, info->entity) && ctrl->index == info->index) { uvc_ctrl_add_info(chain->dev, ctrl, info); /* * Retrieve control flags from the device. Ignore errors * and work with default flag values from the uvc_ctrl * array when the device doesn't properly implement * GET_INFO on standard controls. */ uvc_ctrl_get_flags(chain->dev, ctrl, &ctrl->info); break; } } if (!ctrl->initialized) return; /* * First check if the device provides a custom mapping for this control, * used to override standard mappings for non-conformant devices. Don't * process standard mappings if a custom mapping is found. This * mechanism doesn't support combining standard and custom mappings for * a single control. 
*/ if (chain->dev->info->mappings) { bool custom = false; for (i = 0; chain->dev->info->mappings[i]; ++i) { const struct uvc_control_mapping *mapping = chain->dev->info->mappings[i]; if (uvc_entity_match_guid(ctrl->entity, mapping->entity) && ctrl->info.selector == mapping->selector) { __uvc_ctrl_add_mapping(chain, ctrl, mapping); custom = true; } } if (custom) return; } /* Process common mappings next. */ for (i = 0; i < ARRAY_SIZE(uvc_ctrl_mappings); ++i) { const struct uvc_control_mapping *mapping = &uvc_ctrl_mappings[i]; if (uvc_entity_match_guid(ctrl->entity, mapping->entity) && ctrl->info.selector == mapping->selector) __uvc_ctrl_add_mapping(chain, ctrl, mapping); } /* Finally process version-specific mappings. */ mappings = chain->dev->uvc_version < 0x0150 ? uvc_ctrl_mappings_uvc11 : uvc_ctrl_mappings_uvc15; for (i = 0; mappings[i]; ++i) { const struct uvc_control_mapping *mapping = mappings[i]; if (uvc_entity_match_guid(ctrl->entity, mapping->entity) && ctrl->info.selector == mapping->selector) __uvc_ctrl_add_mapping(chain, ctrl, mapping); } } /* * Initialize device controls. */ static int uvc_ctrl_init_chain(struct uvc_video_chain *chain) { struct uvc_entity *entity; unsigned int i; /* Walk the entities list and instantiate controls */ list_for_each_entry(entity, &chain->entities, chain) { struct uvc_control *ctrl; unsigned int bControlSize = 0, ncontrols; u8 *bmControls = NULL; if (UVC_ENTITY_TYPE(entity) == UVC_VC_EXTENSION_UNIT) { bmControls = entity->extension.bmControls; bControlSize = entity->extension.bControlSize; } else if (UVC_ENTITY_TYPE(entity) == UVC_VC_PROCESSING_UNIT) { bmControls = entity->processing.bmControls; bControlSize = entity->processing.bControlSize; } else if (UVC_ENTITY_TYPE(entity) == UVC_ITT_CAMERA) { bmControls = entity->camera.bmControls; bControlSize = entity->camera.bControlSize; } else if (UVC_ENTITY_TYPE(entity) == UVC_EXT_GPIO_UNIT) { bmControls = entity->gpio.bmControls; bControlSize = entity->gpio.bControlSize; } /* Remove bogus/blacklisted controls */ uvc_ctrl_prune_entity(chain->dev, entity); /* Count supported controls and allocate the controls array */ ncontrols = memweight(bmControls, bControlSize); if (ncontrols == 0) continue; entity->controls = kcalloc(ncontrols, sizeof(*ctrl), GFP_KERNEL); if (entity->controls == NULL) return -ENOMEM; entity->ncontrols = ncontrols; /* Initialize all supported controls */ ctrl = entity->controls; for (i = 0; i < bControlSize * 8; ++i) { if (uvc_test_bit(bmControls, i) == 0) continue; ctrl->entity = entity; ctrl->index = i; uvc_ctrl_init_ctrl(chain, ctrl); ctrl++; } } return 0; } int uvc_ctrl_init_device(struct uvc_device *dev) { struct uvc_video_chain *chain; int ret; INIT_WORK(&dev->async_ctrl.work, uvc_ctrl_status_event_work); list_for_each_entry(chain, &dev->chains, list) { ret = uvc_ctrl_init_chain(chain); if (ret) return ret; } return 0; } /* * Cleanup device controls. */ static void uvc_ctrl_cleanup_mappings(struct uvc_device *dev, struct uvc_control *ctrl) { struct uvc_control_mapping *mapping, *nm; list_for_each_entry_safe(mapping, nm, &ctrl->info.mappings, list) { list_del(&mapping->list); kfree(mapping->menu_names); kfree(mapping->menu_mapping); kfree(mapping->name); kfree(mapping); } } void uvc_ctrl_cleanup_device(struct uvc_device *dev) { struct uvc_entity *entity; unsigned int i; /* Can be uninitialized if we are aborting on probe error. */ if (dev->async_ctrl.work.func) cancel_work_sync(&dev->async_ctrl.work); /* Free controls and control mappings for all entities. 
*/ list_for_each_entry(entity, &dev->entities, list) { for (i = 0; i < entity->ncontrols; ++i) { struct uvc_control *ctrl = &entity->controls[i]; if (!ctrl->initialized) continue; uvc_ctrl_cleanup_mappings(dev, ctrl); kfree(ctrl->uvc_data); } kfree(entity->controls); } }
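/*
 * Example (editor's addition, not part of the driver): the begin/commit/
 * rollback flow described in the "Control transactions" comment above is
 * exercised from userspace through the VIDIOC_S_EXT_CTRLS ioctl, which hands
 * the driver a whole array of controls to apply as one transaction. This is
 * a minimal sketch only, assuming a UVC camera on /dev/video0 that maps
 * V4L2_CID_BRIGHTNESS and V4L2_CID_CONTRAST; the device path, control ids
 * and values are placeholders, and error handling is reduced to perror().
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_ext_control ctrls[2];
	struct v4l2_ext_controls arg;
	int fd;

	fd = open("/dev/video0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(ctrls, 0, sizeof(ctrls));
	ctrls[0].id = V4L2_CID_BRIGHTNESS;
	ctrls[0].value = 128;
	ctrls[1].id = V4L2_CID_CONTRAST;
	ctrls[1].value = 32;

	memset(&arg, 0, sizeof(arg));
	arg.which = V4L2_CTRL_WHICH_CUR_VAL;
	arg.count = 2;
	arg.controls = ctrls;

	/*
	 * Per the transaction comment above, the driver stores each value
	 * under the control lock and then pushes all dirty controls to the
	 * device (or rolls them back on failure); error_idx reports which
	 * control caused the rollback.
	 */
	if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &arg) < 0)
		fprintf(stderr, "S_EXT_CTRLS failed at index %u: %s\n",
			arg.error_idx, strerror(errno));

	close(fd);
	return 0;
}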
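/*
 * Example (editor's addition, not part of the driver): uvc_xu_ctrl_query()
 * above backs the UVCIOC_CTRL_QUERY ioctl for extension unit controls. A
 * minimal userspace sketch that first asks for the control length (GET_LEN
 * always uses a 2-byte little-endian buffer, as enforced above) and then
 * reads the current value. The unit id (4) and selector (1) are placeholders
 * chosen for illustration; real values come from the device's descriptors.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/usb/video.h>
#include <linux/uvcvideo.h>

int main(void)
{
	struct uvc_xu_control_query xqry;
	uint8_t len_buf[2];
	uint8_t data[64];
	uint16_t len;
	int fd;

	fd = open("/dev/video0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Query the control size first: GET_LEN must use a 2-byte buffer. */
	memset(&xqry, 0, sizeof(xqry));
	xqry.unit = 4;			/* placeholder extension unit id */
	xqry.selector = 1;		/* placeholder control selector */
	xqry.query = UVC_GET_LEN;
	xqry.size = sizeof(len_buf);
	xqry.data = len_buf;
	if (ioctl(fd, UVCIOC_CTRL_QUERY, &xqry) < 0) {
		perror("UVCIOC_CTRL_QUERY(GET_LEN)");
		close(fd);
		return 1;
	}
	len = len_buf[0] | (len_buf[1] << 8);	/* little-endian length */
	if (len > sizeof(data)) {
		fprintf(stderr, "control too large (%u bytes)\n", len);
		close(fd);
		return 1;
	}

	/* Read the current value using the exact size the device reported. */
	xqry.query = UVC_GET_CUR;
	xqry.size = len;
	xqry.data = data;
	if (ioctl(fd, UVCIOC_CTRL_QUERY, &xqry) < 0)
		perror("UVCIOC_CTRL_QUERY(GET_CUR)");
	else
		printf("control is %u bytes, first byte 0x%02x\n", len, data[0]);

	close(fd);
	return 0;
}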
// SPDX-License-Identifier: GPL-2.0 /* * fs/f2fs/checkpoint.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. * http://www.samsung.com/ */ #include <linux/fs.h> #include <linux/bio.h> #include <linux/mpage.h> #include <linux/writeback.h> #include <linux/blkdev.h> #include <linux/f2fs_fs.h> #include <linux/pagevec.h> #include <linux/swap.h> #include <linux/kthread.h> #include "f2fs.h" #include "node.h" #include "segment.h" #include "iostat.h" #include <trace/events/f2fs.h> #define DEFAULT_CHECKPOINT_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3)) static struct kmem_cache *ino_entry_slab; struct kmem_cache *f2fs_inode_entry_slab; void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io, unsigned char reason) { f2fs_build_fault_attr(sbi, 0, 0); if (!end_io) f2fs_flush_merged_writes(sbi); f2fs_handle_critical_error(sbi, reason, end_io); } /* * We guarantee no failure on the returned page. */ struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) { struct address_space *mapping = META_MAPPING(sbi); struct page *page; repeat: page = f2fs_grab_cache_page(mapping, index, false); if (!page) { cond_resched(); goto repeat; } f2fs_wait_on_page_writeback(page, META, true, true); if (!PageUptodate(page)) SetPageUptodate(page); return page; } static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index, bool is_meta) { struct address_space *mapping = META_MAPPING(sbi); struct page *page; struct f2fs_io_info fio = { .sbi = sbi, .type = META, .op = REQ_OP_READ, .op_flags = REQ_META | REQ_PRIO, .old_blkaddr = index, .new_blkaddr = index, .encrypted_page = NULL, .is_por = !is_meta ?
1 : 0, }; int err; if (unlikely(!is_meta)) fio.op_flags &= ~REQ_META; repeat: page = f2fs_grab_cache_page(mapping, index, false); if (!page) { cond_resched(); goto repeat; } if (PageUptodate(page)) goto out; fio.page = page; err = f2fs_submit_page_bio(&fio); if (err) { f2fs_put_page(page, 1); return ERR_PTR(err); } f2fs_update_iostat(sbi, NULL, FS_META_READ_IO, F2FS_BLKSIZE); lock_page(page); if (unlikely(page->mapping != mapping)) { f2fs_put_page(page, 1); goto repeat; } if (unlikely(!PageUptodate(page))) { f2fs_handle_page_eio(sbi, page->index, META); f2fs_put_page(page, 1); return ERR_PTR(-EIO); } out: return page; } struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) { return __get_meta_page(sbi, index, true); } struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index) { struct page *page; int count = 0; retry: page = __get_meta_page(sbi, index, true); if (IS_ERR(page)) { if (PTR_ERR(page) == -EIO && ++count <= DEFAULT_RETRY_IO_COUNT) goto retry; f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_META_PAGE); } return page; } /* for POR only */ struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index) { return __get_meta_page(sbi, index, false); } static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr, int type) { struct seg_entry *se; unsigned int segno, offset; bool exist; if (type == DATA_GENERIC) return true; segno = GET_SEGNO(sbi, blkaddr); offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); se = get_seg_entry(sbi, segno); exist = f2fs_test_bit(offset, se->cur_valid_map); /* skip data, if we already have an error in checkpoint. */ if (unlikely(f2fs_cp_error(sbi))) return exist; if ((exist && type == DATA_GENERIC_ENHANCE_UPDATE) || (!exist && type == DATA_GENERIC_ENHANCE)) goto out_err; if (!exist && type != DATA_GENERIC_ENHANCE_UPDATE) goto out_handle; return exist; out_err: f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d", blkaddr, exist); set_sbi_flag(sbi, SBI_NEED_FSCK); dump_stack(); out_handle: f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR); return exist; } static bool __f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type) { switch (type) { case META_NAT: break; case META_SIT: if (unlikely(blkaddr >= SIT_BLK_CNT(sbi))) goto err; break; case META_SSA: if (unlikely(blkaddr >= MAIN_BLKADDR(sbi) || blkaddr < SM_I(sbi)->ssa_blkaddr)) goto err; break; case META_CP: if (unlikely(blkaddr >= SIT_I(sbi)->sit_base_addr || blkaddr < __start_cp_addr(sbi))) goto err; break; case META_POR: if (unlikely(blkaddr >= MAX_BLKADDR(sbi) || blkaddr < MAIN_BLKADDR(sbi))) goto err; break; case DATA_GENERIC: case DATA_GENERIC_ENHANCE: case DATA_GENERIC_ENHANCE_READ: case DATA_GENERIC_ENHANCE_UPDATE: if (unlikely(blkaddr >= MAX_BLKADDR(sbi) || blkaddr < MAIN_BLKADDR(sbi))) { /* Skip to emit an error message. 
*/ if (unlikely(f2fs_cp_error(sbi))) return false; f2fs_warn(sbi, "access invalid blkaddr:%u", blkaddr); set_sbi_flag(sbi, SBI_NEED_FSCK); dump_stack(); goto err; } else { return __is_bitmap_valid(sbi, blkaddr, type); } break; case META_GENERIC: if (unlikely(blkaddr < SEG0_BLKADDR(sbi) || blkaddr >= MAIN_BLKADDR(sbi))) goto err; break; default: BUG(); } return true; err: f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR); return false; } bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type) { if (time_to_inject(sbi, FAULT_BLKADDR_VALIDITY)) return false; return __f2fs_is_valid_blkaddr(sbi, blkaddr, type); } bool f2fs_is_valid_blkaddr_raw(struct f2fs_sb_info *sbi, block_t blkaddr, int type) { return __f2fs_is_valid_blkaddr(sbi, blkaddr, type); } /* * Readahead CP/NAT/SIT/SSA/POR pages */ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, int type, bool sync) { struct page *page; block_t blkno = start; struct f2fs_io_info fio = { .sbi = sbi, .type = META, .op = REQ_OP_READ, .op_flags = sync ? (REQ_META | REQ_PRIO) : REQ_RAHEAD, .encrypted_page = NULL, .in_list = 0, .is_por = (type == META_POR) ? 1 : 0, }; struct blk_plug plug; int err; if (unlikely(type == META_POR)) fio.op_flags &= ~REQ_META; blk_start_plug(&plug); for (; nrpages-- > 0; blkno++) { if (!f2fs_is_valid_blkaddr(sbi, blkno, type)) goto out; switch (type) { case META_NAT: if (unlikely(blkno >= NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid))) blkno = 0; /* get nat block addr */ fio.new_blkaddr = current_nat_addr(sbi, blkno * NAT_ENTRY_PER_BLOCK); break; case META_SIT: if (unlikely(blkno >= TOTAL_SEGS(sbi))) goto out; /* get sit block addr */ fio.new_blkaddr = current_sit_addr(sbi, blkno * SIT_ENTRY_PER_BLOCK); break; case META_SSA: case META_CP: case META_POR: fio.new_blkaddr = blkno; break; default: BUG(); } page = f2fs_grab_cache_page(META_MAPPING(sbi), fio.new_blkaddr, false); if (!page) continue; if (PageUptodate(page)) { f2fs_put_page(page, 1); continue; } fio.page = page; err = f2fs_submit_page_bio(&fio); f2fs_put_page(page, err ? 
1 : 0); if (!err) f2fs_update_iostat(sbi, NULL, FS_META_READ_IO, F2FS_BLKSIZE); } out: blk_finish_plug(&plug); return blkno - start; } void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index, unsigned int ra_blocks) { struct page *page; bool readahead = false; if (ra_blocks == RECOVERY_MIN_RA_BLOCKS) return; page = find_get_page(META_MAPPING(sbi), index); if (!page || !PageUptodate(page)) readahead = true; f2fs_put_page(page, 0); if (readahead) f2fs_ra_meta_pages(sbi, index, ra_blocks, META_POR, true); } static int __f2fs_write_meta_page(struct page *page, struct writeback_control *wbc, enum iostat_type io_type) { struct f2fs_sb_info *sbi = F2FS_P_SB(page); trace_f2fs_writepage(page, META); if (unlikely(f2fs_cp_error(sbi))) { if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) { ClearPageUptodate(page); dec_page_count(sbi, F2FS_DIRTY_META); unlock_page(page); return 0; } goto redirty_out; } if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) goto redirty_out; if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0)) goto redirty_out; f2fs_do_write_meta_page(sbi, page, io_type); dec_page_count(sbi, F2FS_DIRTY_META); if (wbc->for_reclaim) f2fs_submit_merged_write_cond(sbi, NULL, page, 0, META); unlock_page(page); if (unlikely(f2fs_cp_error(sbi))) f2fs_submit_merged_write(sbi, META); return 0; redirty_out: redirty_page_for_writepage(wbc, page); return AOP_WRITEPAGE_ACTIVATE; } static int f2fs_write_meta_page(struct page *page, struct writeback_control *wbc) { return __f2fs_write_meta_page(page, wbc, FS_META_IO); } static int f2fs_write_meta_pages(struct address_space *mapping, struct writeback_control *wbc) { struct f2fs_sb_info *sbi = F2FS_M_SB(mapping); long diff, written; if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) goto skip_write; /* collect a number of dirty meta pages and write together */ if (wbc->sync_mode != WB_SYNC_ALL && get_pages(sbi, F2FS_DIRTY_META) < nr_pages_to_skip(sbi, META)) goto skip_write; /* if locked failed, cp will flush dirty pages instead */ if (!f2fs_down_write_trylock(&sbi->cp_global_sem)) goto skip_write; trace_f2fs_writepages(mapping->host, wbc, META); diff = nr_pages_to_write(sbi, META, wbc); written = f2fs_sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO); f2fs_up_write(&sbi->cp_global_sem); wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff); return 0; skip_write: wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META); trace_f2fs_writepages(mapping->host, wbc, META); return 0; } long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type, long nr_to_write, enum iostat_type io_type) { struct address_space *mapping = META_MAPPING(sbi); pgoff_t index = 0, prev = ULONG_MAX; struct folio_batch fbatch; long nwritten = 0; int nr_folios; struct writeback_control wbc = { .for_reclaim = 0, }; struct blk_plug plug; folio_batch_init(&fbatch); blk_start_plug(&plug); while ((nr_folios = filemap_get_folios_tag(mapping, &index, (pgoff_t)-1, PAGECACHE_TAG_DIRTY, &fbatch))) { int i; for (i = 0; i < nr_folios; i++) { struct folio *folio = fbatch.folios[i]; if (nr_to_write != LONG_MAX && i != 0 && folio->index != prev + folio_nr_pages(fbatch.folios[i-1])) { folio_batch_release(&fbatch); goto stop; } folio_lock(folio); if (unlikely(folio->mapping != mapping)) { continue_unlock: folio_unlock(folio); continue; } if (!folio_test_dirty(folio)) { /* someone wrote it for us */ goto continue_unlock; } f2fs_wait_on_page_writeback(&folio->page, META, true, true); if (!folio_clear_dirty_for_io(folio)) goto continue_unlock; if 
(__f2fs_write_meta_page(&folio->page, &wbc, io_type)) { folio_unlock(folio); break; } nwritten += folio_nr_pages(folio); prev = folio->index; if (unlikely(nwritten >= nr_to_write)) break; } folio_batch_release(&fbatch); cond_resched(); } stop: if (nwritten) f2fs_submit_merged_write(sbi, type); blk_finish_plug(&plug); return nwritten; } static bool f2fs_dirty_meta_folio(struct address_space *mapping, struct folio *folio) { trace_f2fs_set_page_dirty(&folio->page, META); if (!folio_test_uptodate(folio)) folio_mark_uptodate(folio); if (filemap_dirty_folio(mapping, folio)) { inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_META); set_page_private_reference(&folio->page); return true; } return false; } const struct address_space_operations f2fs_meta_aops = { .writepage = f2fs_write_meta_page, .writepages = f2fs_write_meta_pages, .dirty_folio = f2fs_dirty_meta_folio, .invalidate_folio = f2fs_invalidate_folio, .release_folio = f2fs_release_folio, .migrate_folio = filemap_migrate_folio, }; static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, unsigned int devidx, int type) { struct inode_management *im = &sbi->im[type]; struct ino_entry *e = NULL, *new = NULL; if (type == FLUSH_INO) { rcu_read_lock(); e = radix_tree_lookup(&im->ino_root, ino); rcu_read_unlock(); } retry: if (!e) new = f2fs_kmem_cache_alloc(ino_entry_slab, GFP_NOFS, true, NULL); radix_tree_preload(GFP_NOFS | __GFP_NOFAIL); spin_lock(&im->ino_lock); e = radix_tree_lookup(&im->ino_root, ino); if (!e) { if (!new) { spin_unlock(&im->ino_lock); radix_tree_preload_end(); goto retry; } e = new; if (unlikely(radix_tree_insert(&im->ino_root, ino, e))) f2fs_bug_on(sbi, 1); memset(e, 0, sizeof(struct ino_entry)); e->ino = ino; list_add_tail(&e->list, &im->ino_list); if (type != ORPHAN_INO) im->ino_num++; } if (type == FLUSH_INO) f2fs_set_bit(devidx, (char *)&e->dirty_device); spin_unlock(&im->ino_lock); radix_tree_preload_end(); if (new && e != new) kmem_cache_free(ino_entry_slab, new); } static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type) { struct inode_management *im = &sbi->im[type]; struct ino_entry *e; spin_lock(&im->ino_lock); e = radix_tree_lookup(&im->ino_root, ino); if (e) { list_del(&e->list); radix_tree_delete(&im->ino_root, ino); im->ino_num--; spin_unlock(&im->ino_lock); kmem_cache_free(ino_entry_slab, e); return; } spin_unlock(&im->ino_lock); } void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type) { /* add new dirty ino entry into list */ __add_ino_entry(sbi, ino, 0, type); } void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type) { /* remove dirty ino entry from list */ __remove_ino_entry(sbi, ino, type); } /* mode should be APPEND_INO, UPDATE_INO or TRANS_DIR_INO */ bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode) { struct inode_management *im = &sbi->im[mode]; struct ino_entry *e; spin_lock(&im->ino_lock); e = radix_tree_lookup(&im->ino_root, ino); spin_unlock(&im->ino_lock); return e ? true : false; } void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all) { struct ino_entry *e, *tmp; int i; for (i = all ? 
ORPHAN_INO : APPEND_INO; i < MAX_INO_ENTRY; i++) { struct inode_management *im = &sbi->im[i]; spin_lock(&im->ino_lock); list_for_each_entry_safe(e, tmp, &im->ino_list, list) { list_del(&e->list); radix_tree_delete(&im->ino_root, e->ino); kmem_cache_free(ino_entry_slab, e); im->ino_num--; } spin_unlock(&im->ino_lock); } } void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, unsigned int devidx, int type) { __add_ino_entry(sbi, ino, devidx, type); } bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, unsigned int devidx, int type) { struct inode_management *im = &sbi->im[type]; struct ino_entry *e; bool is_dirty = false; spin_lock(&im->ino_lock); e = radix_tree_lookup(&im->ino_root, ino); if (e && f2fs_test_bit(devidx, (char *)&e->dirty_device)) is_dirty = true; spin_unlock(&im->ino_lock); return is_dirty; } int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi) { struct inode_management *im = &sbi->im[ORPHAN_INO]; int err = 0; spin_lock(&im->ino_lock); if (time_to_inject(sbi, FAULT_ORPHAN)) { spin_unlock(&im->ino_lock); return -ENOSPC; } if (unlikely(im->ino_num >= sbi->max_orphans)) err = -ENOSPC; else im->ino_num++; spin_unlock(&im->ino_lock); return err; } void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi) { struct inode_management *im = &sbi->im[ORPHAN_INO]; spin_lock(&im->ino_lock); f2fs_bug_on(sbi, im->ino_num == 0); im->ino_num--; spin_unlock(&im->ino_lock); } void f2fs_add_orphan_inode(struct inode *inode) { /* add new orphan ino entry into list */ __add_ino_entry(F2FS_I_SB(inode), inode->i_ino, 0, ORPHAN_INO); f2fs_update_inode_page(inode); } void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino) { /* remove orphan entry from orphan list */ __remove_ino_entry(sbi, ino, ORPHAN_INO); } static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino) { struct inode *inode; struct node_info ni; int err; inode = f2fs_iget_retry(sbi->sb, ino); if (IS_ERR(inode)) { /* * there should be a bug that we can't find the entry * to orphan inode. */ f2fs_bug_on(sbi, PTR_ERR(inode) == -ENOENT); return PTR_ERR(inode); } err = f2fs_dquot_initialize(inode); if (err) { iput(inode); goto err_out; } clear_nlink(inode); /* truncate all the data during iput */ iput(inode); err = f2fs_get_node_info(sbi, ino, &ni, false); if (err) goto err_out; /* ENOMEM was fully retried in f2fs_evict_inode. 
*/ if (ni.blk_addr != NULL_ADDR) { err = -EIO; goto err_out; } return 0; err_out: set_sbi_flag(sbi, SBI_NEED_FSCK); f2fs_warn(sbi, "%s: orphan failed (ino=%x), run fsck to fix.", __func__, ino); return err; } int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi) { block_t start_blk, orphan_blocks, i, j; int err = 0; if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG)) return 0; if (f2fs_hw_is_readonly(sbi)) { f2fs_info(sbi, "write access unavailable, skipping orphan cleanup"); return 0; } if (is_sbi_flag_set(sbi, SBI_IS_WRITABLE)) f2fs_info(sbi, "orphan cleanup on readonly fs"); start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi); orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi); f2fs_ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true); for (i = 0; i < orphan_blocks; i++) { struct page *page; struct f2fs_orphan_block *orphan_blk; page = f2fs_get_meta_page(sbi, start_blk + i); if (IS_ERR(page)) { err = PTR_ERR(page); goto out; } orphan_blk = (struct f2fs_orphan_block *)page_address(page); for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) { nid_t ino = le32_to_cpu(orphan_blk->ino[j]); err = recover_orphan_inode(sbi, ino); if (err) { f2fs_put_page(page, 1); goto out; } } f2fs_put_page(page, 1); } /* clear Orphan Flag */ clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG); out: set_sbi_flag(sbi, SBI_IS_RECOVERED); return err; } static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk) { struct list_head *head; struct f2fs_orphan_block *orphan_blk = NULL; unsigned int nentries = 0; unsigned short index = 1; unsigned short orphan_blocks; struct page *page = NULL; struct ino_entry *orphan = NULL; struct inode_management *im = &sbi->im[ORPHAN_INO]; orphan_blocks = GET_ORPHAN_BLOCKS(im->ino_num); /* * we don't need to do spin_lock(&im->ino_lock) here, since all the * orphan inode operations are covered under f2fs_lock_op(). * And, spin_lock should be avoided due to page operations below. 
*/ head = &im->ino_list; /* loop for each orphan inode entry and write them in journal block */ list_for_each_entry(orphan, head, list) { if (!page) { page = f2fs_grab_meta_page(sbi, start_blk++); orphan_blk = (struct f2fs_orphan_block *)page_address(page); memset(orphan_blk, 0, sizeof(*orphan_blk)); } orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino); if (nentries == F2FS_ORPHANS_PER_BLOCK) { /* * an orphan block is full of 1020 entries, * then we need to flush current orphan blocks * and bring another one in memory */ orphan_blk->blk_addr = cpu_to_le16(index); orphan_blk->blk_count = cpu_to_le16(orphan_blocks); orphan_blk->entry_count = cpu_to_le32(nentries); set_page_dirty(page); f2fs_put_page(page, 1); index++; nentries = 0; page = NULL; } } if (page) { orphan_blk->blk_addr = cpu_to_le16(index); orphan_blk->blk_count = cpu_to_le16(orphan_blocks); orphan_blk->entry_count = cpu_to_le32(nentries); set_page_dirty(page); f2fs_put_page(page, 1); } } static __u32 f2fs_checkpoint_chksum(struct f2fs_sb_info *sbi, struct f2fs_checkpoint *ckpt) { unsigned int chksum_ofs = le32_to_cpu(ckpt->checksum_offset); __u32 chksum; chksum = f2fs_crc32(sbi, ckpt, chksum_ofs); if (chksum_ofs < CP_CHKSUM_OFFSET) { chksum_ofs += sizeof(chksum); chksum = f2fs_chksum(sbi, chksum, (__u8 *)ckpt + chksum_ofs, F2FS_BLKSIZE - chksum_ofs); } return chksum; } static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr, struct f2fs_checkpoint **cp_block, struct page **cp_page, unsigned long long *version) { size_t crc_offset = 0; __u32 crc; *cp_page = f2fs_get_meta_page(sbi, cp_addr); if (IS_ERR(*cp_page)) return PTR_ERR(*cp_page); *cp_block = (struct f2fs_checkpoint *)page_address(*cp_page); crc_offset = le32_to_cpu((*cp_block)->checksum_offset); if (crc_offset < CP_MIN_CHKSUM_OFFSET || crc_offset > CP_CHKSUM_OFFSET) { f2fs_put_page(*cp_page, 1); f2fs_warn(sbi, "invalid crc_offset: %zu", crc_offset); return -EINVAL; } crc = f2fs_checkpoint_chksum(sbi, *cp_block); if (crc != cur_cp_crc(*cp_block)) { f2fs_put_page(*cp_page, 1); f2fs_warn(sbi, "invalid crc value"); return -EINVAL; } *version = cur_cp_version(*cp_block); return 0; } static struct page *validate_checkpoint(struct f2fs_sb_info *sbi, block_t cp_addr, unsigned long long *version) { struct page *cp_page_1 = NULL, *cp_page_2 = NULL; struct f2fs_checkpoint *cp_block = NULL; unsigned long long cur_version = 0, pre_version = 0; unsigned int cp_blocks; int err; err = get_checkpoint_version(sbi, cp_addr, &cp_block, &cp_page_1, version); if (err) return NULL; cp_blocks = le32_to_cpu(cp_block->cp_pack_total_block_count); if (cp_blocks > BLKS_PER_SEG(sbi) || cp_blocks <= F2FS_CP_PACKS) { f2fs_warn(sbi, "invalid cp_pack_total_block_count:%u", le32_to_cpu(cp_block->cp_pack_total_block_count)); goto invalid_cp; } pre_version = *version; cp_addr += cp_blocks - 1; err = get_checkpoint_version(sbi, cp_addr, &cp_block, &cp_page_2, version); if (err) goto invalid_cp; cur_version = *version; if (cur_version == pre_version) { *version = cur_version; f2fs_put_page(cp_page_2, 1); return cp_page_1; } f2fs_put_page(cp_page_2, 1); invalid_cp: f2fs_put_page(cp_page_1, 1); return NULL; } int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi) { struct f2fs_checkpoint *cp_block; struct f2fs_super_block *fsb = sbi->raw_super; struct page *cp1, *cp2, *cur_page; unsigned long blk_size = sbi->blocksize; unsigned long long cp1_version = 0, cp2_version = 0; unsigned long long cp_start_blk_no; unsigned int cp_blks = 1 + __cp_payload(sbi); block_t cp_blk_no; int i; int err; 
sbi->ckpt = f2fs_kvzalloc(sbi, array_size(blk_size, cp_blks), GFP_KERNEL); if (!sbi->ckpt) return -ENOMEM; /* * Finding out valid cp block involves read both * sets( cp pack 1 and cp pack 2) */ cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr); cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version); /* The second checkpoint pack should start at the next segment */ cp_start_blk_no += ((unsigned long long)1) << le32_to_cpu(fsb->log_blocks_per_seg); cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version); if (cp1 && cp2) { if (ver_after(cp2_version, cp1_version)) cur_page = cp2; else cur_page = cp1; } else if (cp1) { cur_page = cp1; } else if (cp2) { cur_page = cp2; } else { err = -EFSCORRUPTED; goto fail_no_cp; } cp_block = (struct f2fs_checkpoint *)page_address(cur_page); memcpy(sbi->ckpt, cp_block, blk_size); if (cur_page == cp1) sbi->cur_cp_pack = 1; else sbi->cur_cp_pack = 2; /* Sanity checking of checkpoint */ if (f2fs_sanity_check_ckpt(sbi)) { err = -EFSCORRUPTED; goto free_fail_no_cp; } if (cp_blks <= 1) goto done; cp_blk_no = le32_to_cpu(fsb->cp_blkaddr); if (cur_page == cp2) cp_blk_no += BIT(le32_to_cpu(fsb->log_blocks_per_seg)); for (i = 1; i < cp_blks; i++) { void *sit_bitmap_ptr; unsigned char *ckpt = (unsigned char *)sbi->ckpt; cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i); if (IS_ERR(cur_page)) { err = PTR_ERR(cur_page); goto free_fail_no_cp; } sit_bitmap_ptr = page_address(cur_page); memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size); f2fs_put_page(cur_page, 1); } done: f2fs_put_page(cp1, 1); f2fs_put_page(cp2, 1); return 0; free_fail_no_cp: f2fs_put_page(cp1, 1); f2fs_put_page(cp2, 1); fail_no_cp: kvfree(sbi->ckpt); return err; } static void __add_dirty_inode(struct inode *inode, enum inode_type type) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE; if (is_inode_flag_set(inode, flag)) return; set_inode_flag(inode, flag); list_add_tail(&F2FS_I(inode)->dirty_list, &sbi->inode_list[type]); stat_inc_dirty_inode(sbi, type); } static void __remove_dirty_inode(struct inode *inode, enum inode_type type) { int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE; if (get_dirty_pages(inode) || !is_inode_flag_set(inode, flag)) return; list_del_init(&F2FS_I(inode)->dirty_list); clear_inode_flag(inode, flag); stat_dec_dirty_inode(F2FS_I_SB(inode), type); } void f2fs_update_dirty_folio(struct inode *inode, struct folio *folio) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE; if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode)) return; spin_lock(&sbi->inode_lock[type]); if (type != FILE_INODE || test_opt(sbi, DATA_FLUSH)) __add_dirty_inode(inode, type); inode_inc_dirty_pages(inode); spin_unlock(&sbi->inode_lock[type]); set_page_private_reference(&folio->page); } void f2fs_remove_dirty_inode(struct inode *inode) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); enum inode_type type = S_ISDIR(inode->i_mode) ? 
DIR_INODE : FILE_INODE; if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode)) return; if (type == FILE_INODE && !test_opt(sbi, DATA_FLUSH)) return; spin_lock(&sbi->inode_lock[type]); __remove_dirty_inode(inode, type); spin_unlock(&sbi->inode_lock[type]); } int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type, bool from_cp) { struct list_head *head; struct inode *inode; struct f2fs_inode_info *fi; bool is_dir = (type == DIR_INODE); unsigned long ino = 0; trace_f2fs_sync_dirty_inodes_enter(sbi->sb, is_dir, get_pages(sbi, is_dir ? F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA)); retry: if (unlikely(f2fs_cp_error(sbi))) { trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir, get_pages(sbi, is_dir ? F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA)); return -EIO; } spin_lock(&sbi->inode_lock[type]); head = &sbi->inode_list[type]; if (list_empty(head)) { spin_unlock(&sbi->inode_lock[type]); trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir, get_pages(sbi, is_dir ? F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA)); return 0; } fi = list_first_entry(head, struct f2fs_inode_info, dirty_list); inode = igrab(&fi->vfs_inode); spin_unlock(&sbi->inode_lock[type]); if (inode) { unsigned long cur_ino = inode->i_ino; if (from_cp) F2FS_I(inode)->cp_task = current; F2FS_I(inode)->wb_task = current; filemap_fdatawrite(inode->i_mapping); F2FS_I(inode)->wb_task = NULL; if (from_cp) F2FS_I(inode)->cp_task = NULL; iput(inode); /* We need to give cpu to another writers. */ if (ino == cur_ino) cond_resched(); else ino = cur_ino; } else { /* * We should submit bio, since it exists several * writebacking dentry pages in the freeing inode. */ f2fs_submit_merged_write(sbi, DATA); cond_resched(); } goto retry; } static int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi) { struct list_head *head = &sbi->inode_list[DIRTY_META]; struct inode *inode; struct f2fs_inode_info *fi; s64 total = get_pages(sbi, F2FS_DIRTY_IMETA); while (total--) { if (unlikely(f2fs_cp_error(sbi))) return -EIO; spin_lock(&sbi->inode_lock[DIRTY_META]); if (list_empty(head)) { spin_unlock(&sbi->inode_lock[DIRTY_META]); return 0; } fi = list_first_entry(head, struct f2fs_inode_info, gdirty_list); inode = igrab(&fi->vfs_inode); spin_unlock(&sbi->inode_lock[DIRTY_META]); if (inode) { sync_inode_metadata(inode, 0); /* it's on eviction */ if (is_inode_flag_set(inode, FI_DIRTY_INODE)) f2fs_update_inode_page(inode); iput(inode); } } return 0; } static void __prepare_cp_block(struct f2fs_sb_info *sbi) { struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); struct f2fs_nm_info *nm_i = NM_I(sbi); nid_t last_nid = nm_i->next_scan_nid; next_free_nid(sbi, &last_nid); ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi)); ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi)); ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi)); ckpt->next_free_nid = cpu_to_le32(last_nid); } static bool __need_flush_quota(struct f2fs_sb_info *sbi) { bool ret = false; if (!is_journalled_quota(sbi)) return false; if (!f2fs_down_write_trylock(&sbi->quota_sem)) return true; if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) { ret = false; } else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)) { ret = false; } else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_FLUSH)) { clear_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH); ret = true; } else if (get_pages(sbi, F2FS_DIRTY_QDATA)) { ret = true; } f2fs_up_write(&sbi->quota_sem); return ret; } /* * Freeze all the FS-operations for checkpoint. 
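 * block_operations() below flushes inline data, journalled quota (with a capped retry count), dirty dentries, dirty inode metadata and dirty node pages, looping until each class is clean, and returns with f2fs_lock_all() and node_write held so no new changes land while the checkpoint is assembled; unblock_operations() releases them.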
*/ static int block_operations(struct f2fs_sb_info *sbi) { struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .nr_to_write = LONG_MAX, .for_reclaim = 0, }; int err = 0, cnt = 0; /* * Let's flush inline_data in dirty node pages. */ f2fs_flush_inline_data(sbi); retry_flush_quotas: f2fs_lock_all(sbi); if (__need_flush_quota(sbi)) { int locked; if (++cnt > DEFAULT_RETRY_QUOTA_FLUSH_COUNT) { set_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH); set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH); goto retry_flush_dents; } f2fs_unlock_all(sbi); /* only failed during mount/umount/freeze/quotactl */ locked = down_read_trylock(&sbi->sb->s_umount); f2fs_quota_sync(sbi->sb, -1); if (locked) up_read(&sbi->sb->s_umount); cond_resched(); goto retry_flush_quotas; } retry_flush_dents: /* write all the dirty dentry pages */ if (get_pages(sbi, F2FS_DIRTY_DENTS)) { f2fs_unlock_all(sbi); err = f2fs_sync_dirty_inodes(sbi, DIR_INODE, true); if (err) return err; cond_resched(); goto retry_flush_quotas; } /* * POR: we should ensure that there are no dirty node pages * until finishing nat/sit flush. inode->i_blocks can be updated. */ f2fs_down_write(&sbi->node_change); if (get_pages(sbi, F2FS_DIRTY_IMETA)) { f2fs_up_write(&sbi->node_change); f2fs_unlock_all(sbi); err = f2fs_sync_inode_meta(sbi); if (err) return err; cond_resched(); goto retry_flush_quotas; } retry_flush_nodes: f2fs_down_write(&sbi->node_write); if (get_pages(sbi, F2FS_DIRTY_NODES)) { f2fs_up_write(&sbi->node_write); atomic_inc(&sbi->wb_sync_req[NODE]); err = f2fs_sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO); atomic_dec(&sbi->wb_sync_req[NODE]); if (err) { f2fs_up_write(&sbi->node_change); f2fs_unlock_all(sbi); return err; } cond_resched(); goto retry_flush_nodes; } /* * sbi->node_change is used only for AIO write_begin path which produces * dirty node blocks and some checkpoint values by block allocation. 
*/ __prepare_cp_block(sbi); f2fs_up_write(&sbi->node_change); return err; } static void unblock_operations(struct f2fs_sb_info *sbi) { f2fs_up_write(&sbi->node_write); f2fs_unlock_all(sbi); } void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type) { DEFINE_WAIT(wait); for (;;) { if (!get_pages(sbi, type)) break; if (unlikely(f2fs_cp_error(sbi) && !is_sbi_flag_set(sbi, SBI_IS_CLOSE))) break; if (type == F2FS_DIRTY_META) f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO); else if (type == F2FS_WB_CP_DATA) f2fs_submit_merged_write(sbi, DATA); prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE); io_schedule_timeout(DEFAULT_IO_TIMEOUT); } finish_wait(&sbi->cp_wait, &wait); } static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc) { unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num; struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); unsigned long flags; if (cpc->reason & CP_UMOUNT) { if (le32_to_cpu(ckpt->cp_pack_total_block_count) + NM_I(sbi)->nat_bits_blocks > BLKS_PER_SEG(sbi)) { clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG); f2fs_notice(sbi, "Disable nat_bits due to no space"); } else if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG) && f2fs_nat_bitmap_enabled(sbi)) { f2fs_enable_nat_bits(sbi); set_ckpt_flags(sbi, CP_NAT_BITS_FLAG); f2fs_notice(sbi, "Rebuild and enable nat_bits"); } } spin_lock_irqsave(&sbi->cp_lock, flags); if (cpc->reason & CP_TRIMMED) __set_ckpt_flags(ckpt, CP_TRIMMED_FLAG); else __clear_ckpt_flags(ckpt, CP_TRIMMED_FLAG); if (cpc->reason & CP_UMOUNT) __set_ckpt_flags(ckpt, CP_UMOUNT_FLAG); else __clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG); if (cpc->reason & CP_FASTBOOT) __set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG); else __clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG); if (orphan_num) __set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG); else __clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG); if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) __set_ckpt_flags(ckpt, CP_FSCK_FLAG); if (is_sbi_flag_set(sbi, SBI_IS_RESIZEFS)) __set_ckpt_flags(ckpt, CP_RESIZEFS_FLAG); else __clear_ckpt_flags(ckpt, CP_RESIZEFS_FLAG); if (is_sbi_flag_set(sbi, SBI_CP_DISABLED)) __set_ckpt_flags(ckpt, CP_DISABLED_FLAG); else __clear_ckpt_flags(ckpt, CP_DISABLED_FLAG); if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK)) __set_ckpt_flags(ckpt, CP_DISABLED_QUICK_FLAG); else __clear_ckpt_flags(ckpt, CP_DISABLED_QUICK_FLAG); if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) __set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG); else __clear_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG); if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)) __set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG); /* set this flag to activate crc|cp_ver for recovery */ __set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG); __clear_ckpt_flags(ckpt, CP_NOCRC_RECOVERY_FLAG); spin_unlock_irqrestore(&sbi->cp_lock, flags); } static void commit_checkpoint(struct f2fs_sb_info *sbi, void *src, block_t blk_addr) { struct writeback_control wbc = { .for_reclaim = 0, }; /* * filemap_get_folios_tag and lock_page again will take * some extra time. Therefore, f2fs_update_meta_pages and * f2fs_sync_meta_pages are combined in this function. 
*/ struct page *page = f2fs_grab_meta_page(sbi, blk_addr); int err; f2fs_wait_on_page_writeback(page, META, true, true); memcpy(page_address(page), src, PAGE_SIZE); set_page_dirty(page); if (unlikely(!clear_page_dirty_for_io(page))) f2fs_bug_on(sbi, 1); /* writeout cp pack 2 page */ err = __f2fs_write_meta_page(page, &wbc, FS_CP_META_IO); if (unlikely(err && f2fs_cp_error(sbi))) { f2fs_put_page(page, 1); return; } f2fs_bug_on(sbi, err); f2fs_put_page(page, 0); /* submit checkpoint (with barrier if NOBARRIER is not set) */ f2fs_submit_merged_write(sbi, META_FLUSH); } static inline u64 get_sectors_written(struct block_device *bdev) { return (u64)part_stat_read(bdev, sectors[STAT_WRITE]); } u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi) { if (f2fs_is_multi_device(sbi)) { u64 sectors = 0; int i; for (i = 0; i < sbi->s_ndevs; i++) sectors += get_sectors_written(FDEV(i).bdev); return sectors; } return get_sectors_written(sbi->sb->s_bdev); } static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) { struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); struct f2fs_nm_info *nm_i = NM_I(sbi); unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num, flags; block_t start_blk; unsigned int data_sum_blocks, orphan_blocks; __u32 crc32 = 0; int i; int cp_payload_blks = __cp_payload(sbi); struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE); u64 kbytes_written; int err; /* Flush all the NAT/SIT pages */ f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO); /* start to update checkpoint, cp ver is already updated previously */ ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi, true)); ckpt->free_segment_count = cpu_to_le32(free_segments(sbi)); for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) { struct curseg_info *curseg = CURSEG_I(sbi, i + CURSEG_HOT_NODE); ckpt->cur_node_segno[i] = cpu_to_le32(curseg->segno); ckpt->cur_node_blkoff[i] = cpu_to_le16(curseg->next_blkoff); ckpt->alloc_type[i + CURSEG_HOT_NODE] = curseg->alloc_type; } for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) { struct curseg_info *curseg = CURSEG_I(sbi, i + CURSEG_HOT_DATA); ckpt->cur_data_segno[i] = cpu_to_le32(curseg->segno); ckpt->cur_data_blkoff[i] = cpu_to_le16(curseg->next_blkoff); ckpt->alloc_type[i + CURSEG_HOT_DATA] = curseg->alloc_type; } /* 2 cp + n data seg summary + orphan inode blocks */ data_sum_blocks = f2fs_npages_for_summary_flush(sbi, false); spin_lock_irqsave(&sbi->cp_lock, flags); if (data_sum_blocks < NR_CURSEG_DATA_TYPE) __set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG); else __clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG); spin_unlock_irqrestore(&sbi->cp_lock, flags); orphan_blocks = GET_ORPHAN_BLOCKS(orphan_num); ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks + orphan_blocks); if (__remain_node_summaries(cpc->reason)) ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS + cp_payload_blks + data_sum_blocks + orphan_blocks + NR_CURSEG_NODE_TYPE); else ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS + cp_payload_blks + data_sum_blocks + orphan_blocks); /* update ckpt flag for checkpoint */ update_ckpt_flags(sbi, cpc); /* update SIT/NAT bitmap */ get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP)); get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP)); crc32 = f2fs_checkpoint_chksum(sbi, ckpt); *((__le32 *)((unsigned char *)ckpt + le32_to_cpu(ckpt->checksum_offset))) = cpu_to_le32(crc32); start_blk = __start_cp_next_addr(sbi); /* write nat bits */ if ((cpc->reason & CP_UMOUNT) && is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG)) { __u64 cp_ver = cur_cp_version(ckpt); block_t blk; cp_ver |= 
((__u64)crc32 << 32); *(__le64 *)nm_i->nat_bits = cpu_to_le64(cp_ver); blk = start_blk + BLKS_PER_SEG(sbi) - nm_i->nat_bits_blocks; for (i = 0; i < nm_i->nat_bits_blocks; i++) f2fs_update_meta_page(sbi, nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS), blk + i); } /* write out checkpoint buffer at block 0 */ f2fs_update_meta_page(sbi, ckpt, start_blk++); for (i = 1; i < 1 + cp_payload_blks; i++) f2fs_update_meta_page(sbi, (char *)ckpt + i * F2FS_BLKSIZE, start_blk++); if (orphan_num) { write_orphan_inodes(sbi, start_blk); start_blk += orphan_blocks; } f2fs_write_data_summaries(sbi, start_blk); start_blk += data_sum_blocks; /* Record write statistics in the hot node summary */ kbytes_written = sbi->kbytes_written; kbytes_written += (f2fs_get_sectors_written(sbi) - sbi->sectors_written_start) >> 1; seg_i->journal->info.kbytes_written = cpu_to_le64(kbytes_written); if (__remain_node_summaries(cpc->reason)) { f2fs_write_node_summaries(sbi, start_blk); start_blk += NR_CURSEG_NODE_TYPE; } /* update user_block_counts */ sbi->last_valid_block_count = sbi->total_valid_block_count; percpu_counter_set(&sbi->alloc_valid_block_count, 0); percpu_counter_set(&sbi->rf_node_block_count, 0); /* Here, we have one bio having CP pack except cp pack 2 page */ f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO); /* Wait for all dirty meta pages to be submitted for IO */ f2fs_wait_on_all_pages(sbi, F2FS_DIRTY_META); /* wait for previous submitted meta pages writeback */ f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA); /* flush all device cache */ err = f2fs_flush_device_cache(sbi); if (err) return err; /* barrier and flush checkpoint cp pack 2 page if it can */ commit_checkpoint(sbi, ckpt, start_blk); f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA); /* * invalidate intermediate page cache borrowed from meta inode which are * used for migration of encrypted, verity or compressed inode's blocks. */ if (f2fs_sb_has_encrypt(sbi) || f2fs_sb_has_verity(sbi) || f2fs_sb_has_compression(sbi)) f2fs_bug_on(sbi, invalidate_inode_pages2_range(META_MAPPING(sbi), MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1)); f2fs_release_ino_entry(sbi, false); f2fs_reset_fsync_node_info(sbi); clear_sbi_flag(sbi, SBI_IS_DIRTY); clear_sbi_flag(sbi, SBI_NEED_CP); clear_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH); spin_lock(&sbi->stat_lock); sbi->unusable_block_count = 0; spin_unlock(&sbi->stat_lock); __set_cp_next_pack(sbi); /* * redirty superblock if metadata like node page or inode cache is * updated during writing checkpoint. */ if (get_pages(sbi, F2FS_DIRTY_NODES) || get_pages(sbi, F2FS_DIRTY_IMETA)) set_sbi_flag(sbi, SBI_IS_DIRTY); f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_DENTS)); return unlikely(f2fs_cp_error(sbi)) ? 
-EIO : 0; } int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) { struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); unsigned long long ckpt_ver; int err = 0; if (f2fs_readonly(sbi->sb) || f2fs_hw_is_readonly(sbi)) return -EROFS; if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { if (cpc->reason != CP_PAUSE) return 0; f2fs_warn(sbi, "Start checkpoint disabled!"); } if (cpc->reason != CP_RESIZE) f2fs_down_write(&sbi->cp_global_sem); if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) && ((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) || ((cpc->reason & CP_DISCARD) && !sbi->discard_blks))) goto out; if (unlikely(f2fs_cp_error(sbi))) { err = -EIO; goto out; } trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops"); err = block_operations(sbi); if (err) goto out; trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops"); f2fs_flush_merged_writes(sbi); /* this is the case of multiple fstrims without any changes */ if (cpc->reason & CP_DISCARD) { if (!f2fs_exist_trim_candidates(sbi, cpc)) { unblock_operations(sbi); goto out; } if (NM_I(sbi)->nat_cnt[DIRTY_NAT] == 0 && SIT_I(sbi)->dirty_sentries == 0 && prefree_segments(sbi) == 0) { f2fs_flush_sit_entries(sbi, cpc); f2fs_clear_prefree_segments(sbi, cpc); unblock_operations(sbi); goto out; } } /* * update checkpoint pack index * Increase the version number so that * SIT entries and seg summaries are written at correct place */ ckpt_ver = cur_cp_version(ckpt); ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver); /* write cached NAT/SIT entries to NAT/SIT area */ err = f2fs_flush_nat_entries(sbi, cpc); if (err) { f2fs_err(sbi, "f2fs_flush_nat_entries failed err:%d, stop checkpoint", err); f2fs_bug_on(sbi, !f2fs_cp_error(sbi)); goto stop; } f2fs_flush_sit_entries(sbi, cpc); /* save inmem log status */ f2fs_save_inmem_curseg(sbi); err = do_checkpoint(sbi, cpc); if (err) { f2fs_err(sbi, "do_checkpoint failed err:%d, stop checkpoint", err); f2fs_bug_on(sbi, !f2fs_cp_error(sbi)); f2fs_release_discard_addrs(sbi); } else { f2fs_clear_prefree_segments(sbi, cpc); } f2fs_restore_inmem_curseg(sbi); stat_inc_cp_count(sbi); stop: unblock_operations(sbi); if (cpc->reason & CP_RECOVERY) f2fs_notice(sbi, "checkpoint: version = %llx", ckpt_ver); /* update CP_TIME to trigger checkpoint periodically */ f2fs_update_time(sbi, CP_TIME); trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint"); out: if (cpc->reason != CP_RESIZE) f2fs_up_write(&sbi->cp_global_sem); return err; } void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi) { int i; for (i = 0; i < MAX_INO_ENTRY; i++) { struct inode_management *im = &sbi->im[i]; INIT_RADIX_TREE(&im->ino_root, GFP_ATOMIC); spin_lock_init(&im->ino_lock); INIT_LIST_HEAD(&im->ino_list); im->ino_num = 0; } sbi->max_orphans = (BLKS_PER_SEG(sbi) - F2FS_CP_PACKS - NR_CURSEG_PERSIST_TYPE - __cp_payload(sbi)) * F2FS_ORPHANS_PER_BLOCK; } int __init f2fs_create_checkpoint_caches(void) { ino_entry_slab = f2fs_kmem_cache_create("f2fs_ino_entry", sizeof(struct ino_entry)); if (!ino_entry_slab) return -ENOMEM; f2fs_inode_entry_slab = f2fs_kmem_cache_create("f2fs_inode_entry", sizeof(struct inode_entry)); if (!f2fs_inode_entry_slab) { kmem_cache_destroy(ino_entry_slab); return -ENOMEM; } return 0; } void f2fs_destroy_checkpoint_caches(void) { kmem_cache_destroy(ino_entry_slab); kmem_cache_destroy(f2fs_inode_entry_slab); } static int __write_checkpoint_sync(struct f2fs_sb_info *sbi) { struct cp_control cpc = { .reason = CP_SYNC, }; int err; f2fs_down_write(&sbi->gc_lock); err = 
f2fs_write_checkpoint(sbi, &cpc); f2fs_up_write(&sbi->gc_lock); return err; } static void __checkpoint_and_complete_reqs(struct f2fs_sb_info *sbi) { struct ckpt_req_control *cprc = &sbi->cprc_info; struct ckpt_req *req, *next; struct llist_node *dispatch_list; u64 sum_diff = 0, diff, count = 0; int ret; dispatch_list = llist_del_all(&cprc->issue_list); if (!dispatch_list) return; dispatch_list = llist_reverse_order(dispatch_list); ret = __write_checkpoint_sync(sbi); atomic_inc(&cprc->issued_ckpt); llist_for_each_entry_safe(req, next, dispatch_list, llnode) { diff = (u64)ktime_ms_delta(ktime_get(), req->queue_time); req->ret = ret; complete(&req->wait); sum_diff += diff; count++; } atomic_sub(count, &cprc->queued_ckpt); atomic_add(count, &cprc->total_ckpt); spin_lock(&cprc->stat_lock); cprc->cur_time = (unsigned int)div64_u64(sum_diff, count); if (cprc->peak_time < cprc->cur_time) cprc->peak_time = cprc->cur_time; spin_unlock(&cprc->stat_lock); } static int issue_checkpoint_thread(void *data) { struct f2fs_sb_info *sbi = data; struct ckpt_req_control *cprc = &sbi->cprc_info; wait_queue_head_t *q = &cprc->ckpt_wait_queue; repeat: if (kthread_should_stop()) return 0; if (!llist_empty(&cprc->issue_list)) __checkpoint_and_complete_reqs(sbi); wait_event_interruptible(*q, kthread_should_stop() || !llist_empty(&cprc->issue_list)); goto repeat; } static void flush_remained_ckpt_reqs(struct f2fs_sb_info *sbi, struct ckpt_req *wait_req) { struct ckpt_req_control *cprc = &sbi->cprc_info; if (!llist_empty(&cprc->issue_list)) { __checkpoint_and_complete_reqs(sbi); } else { /* already dispatched by issue_checkpoint_thread */ if (wait_req) wait_for_completion(&wait_req->wait); } } static void init_ckpt_req(struct ckpt_req *req) { memset(req, 0, sizeof(struct ckpt_req)); init_completion(&req->wait); req->queue_time = ktime_get(); } int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi) { struct ckpt_req_control *cprc = &sbi->cprc_info; struct ckpt_req req; struct cp_control cpc; cpc.reason = __get_cp_reason(sbi); if (!test_opt(sbi, MERGE_CHECKPOINT) || cpc.reason != CP_SYNC) { int ret; f2fs_down_write(&sbi->gc_lock); ret = f2fs_write_checkpoint(sbi, &cpc); f2fs_up_write(&sbi->gc_lock); return ret; } if (!cprc->f2fs_issue_ckpt) return __write_checkpoint_sync(sbi); init_ckpt_req(&req); llist_add(&req.llnode, &cprc->issue_list); atomic_inc(&cprc->queued_ckpt); /* * update issue_list before we wake up issue_checkpoint thread, * this smp_mb() pairs with another barrier in ___wait_event(), * see more details in comments of waitqueue_active(). 
*/ smp_mb(); if (waitqueue_active(&cprc->ckpt_wait_queue)) wake_up(&cprc->ckpt_wait_queue); if (cprc->f2fs_issue_ckpt) wait_for_completion(&req.wait); else flush_remained_ckpt_reqs(sbi, &req); return req.ret; } int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi) { dev_t dev = sbi->sb->s_bdev->bd_dev; struct ckpt_req_control *cprc = &sbi->cprc_info; if (cprc->f2fs_issue_ckpt) return 0; cprc->f2fs_issue_ckpt = kthread_run(issue_checkpoint_thread, sbi, "f2fs_ckpt-%u:%u", MAJOR(dev), MINOR(dev)); if (IS_ERR(cprc->f2fs_issue_ckpt)) { int err = PTR_ERR(cprc->f2fs_issue_ckpt); cprc->f2fs_issue_ckpt = NULL; return err; } set_task_ioprio(cprc->f2fs_issue_ckpt, cprc->ckpt_thread_ioprio); return 0; } void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi) { struct ckpt_req_control *cprc = &sbi->cprc_info; struct task_struct *ckpt_task; if (!cprc->f2fs_issue_ckpt) return; ckpt_task = cprc->f2fs_issue_ckpt; cprc->f2fs_issue_ckpt = NULL; kthread_stop(ckpt_task); f2fs_flush_ckpt_thread(sbi); } void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi) { struct ckpt_req_control *cprc = &sbi->cprc_info; flush_remained_ckpt_reqs(sbi, NULL); /* Let's wait for the previous dispatched checkpoint. */ while (atomic_read(&cprc->queued_ckpt)) io_schedule_timeout(DEFAULT_IO_TIMEOUT); } void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi) { struct ckpt_req_control *cprc = &sbi->cprc_info; atomic_set(&cprc->issued_ckpt, 0); atomic_set(&cprc->total_ckpt, 0); atomic_set(&cprc->queued_ckpt, 0); cprc->ckpt_thread_ioprio = DEFAULT_CHECKPOINT_IOPRIO; init_waitqueue_head(&cprc->ckpt_wait_queue); init_llist_head(&cprc->issue_list); spin_lock_init(&cprc->stat_lock); }
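/*
 * Illustrative, self-contained sketch (not part of f2fs): the split-range
 * checksum rule used by f2fs_checkpoint_chksum() above. The checksum covers
 * the bytes before the checksum field and, when that field is not stored at
 * the end of the block (CP_CHKSUM_OFFSET), also the bytes after it, so the
 * 4-byte checksum itself is always excluded. demo_hash() is a toy stand-in
 * for the kernel's f2fs_crc32()/f2fs_chksum() helpers; all demo_* names are
 * hypothetical.
 */
#include <stddef.h>
#include <stdint.h>

#define DEMO_BLKSIZE		4096u
#define DEMO_CHKSUM_OFFSET	4092u	/* legacy end-of-block position */

static uint32_t demo_hash(uint32_t h, const uint8_t *p, size_t len)
{
	while (len--)			/* FNV-1a style fold, not real CRC32 */
		h = (h ^ *p++) * 16777619u;
	return h;
}

/* Checksum one checkpoint block, skipping the 4-byte checksum field itself. */
static uint32_t demo_cp_chksum(const uint8_t *blk, uint32_t chksum_ofs)
{
	uint32_t h = demo_hash(2166136261u, blk, chksum_ofs);

	if (chksum_ofs < DEMO_CHKSUM_OFFSET) {
		uint32_t after = chksum_ofs + sizeof(uint32_t);

		h = demo_hash(h, blk + after, DEMO_BLKSIZE - after);
	}
	return h;
}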
// SPDX-License-Identifier: GPL-2.0-only /* * GCM: Galois/Counter Mode. * * Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <mh1@iki.fi> */ #include <crypto/gf128mul.h> #include <crypto/internal/aead.h> #include <crypto/internal/skcipher.h> #include <crypto/internal/hash.h> #include <crypto/null.h> #include <crypto/scatterwalk.h> #include <crypto/gcm.h> #include <crypto/hash.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> struct gcm_instance_ctx { struct crypto_skcipher_spawn ctr; struct crypto_ahash_spawn ghash; }; struct crypto_gcm_ctx { struct crypto_skcipher *ctr; struct crypto_ahash *ghash; }; struct crypto_rfc4106_ctx { struct crypto_aead *child; u8 nonce[4]; }; struct crypto_rfc4106_req_ctx { struct scatterlist src[3]; struct scatterlist dst[3]; struct aead_request subreq; }; struct crypto_rfc4543_instance_ctx { struct crypto_aead_spawn aead; }; struct crypto_rfc4543_ctx { struct crypto_aead *child; struct crypto_sync_skcipher *null; u8 nonce[4]; }; struct crypto_rfc4543_req_ctx { struct aead_request subreq; }; struct crypto_gcm_ghash_ctx { unsigned int cryptlen; struct scatterlist *src; int (*complete)(struct aead_request *req, u32 flags); }; struct crypto_gcm_req_priv_ctx { u8 iv[16]; u8 auth_tag[16]; u8 iauth_tag[16]; struct scatterlist src[3]; struct scatterlist dst[3]; struct scatterlist sg; struct crypto_gcm_ghash_ctx ghash_ctx; union { struct ahash_request ahreq; struct skcipher_request skreq; } u; }; static struct { u8 buf[16]; struct scatterlist sg; } *gcm_zeroes; static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc); static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx( struct aead_request *req) { unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req)); return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); } static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); struct crypto_ahash *ghash = ctx->ghash; struct crypto_skcipher *ctr = ctx->ctr; struct { be128 hash; u8 iv[16]; struct crypto_wait wait; struct scatterlist sg[1]; struct skcipher_request req; } *data; int err; crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK); crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK); err = crypto_skcipher_setkey(ctr, key, keylen); if (err) return err; data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(ctr), GFP_KERNEL); if (!data) return -ENOMEM; crypto_init_wait(&data->wait); sg_init_one(data->sg, &data->hash, sizeof(data->hash)); skcipher_request_set_tfm(&data->req, ctr); skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG, crypto_req_done, &data->wait); skcipher_request_set_crypt(&data->req, data->sg, data->sg, sizeof(data->hash), data->iv); err = crypto_wait_req(crypto_skcipher_encrypt(&data->req), &data->wait); if (err) goto out; crypto_ahash_clear_flags(ghash, CRYPTO_TFM_REQ_MASK); crypto_ahash_set_flags(ghash, crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_MASK); err = crypto_ahash_setkey(ghash, (u8 *)&data->hash, sizeof(be128)); out: kfree_sensitive(data); return err; } static int crypto_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { return crypto_gcm_check_authsize(authsize); } static void crypto_gcm_init_common(struct aead_request *req) { struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); __be32 counter = cpu_to_be32(1); struct scatterlist *sg; memset(pctx->auth_tag, 0, sizeof(pctx->auth_tag)); memcpy(pctx->iv, req->iv, GCM_AES_IV_SIZE); memcpy(pctx->iv + GCM_AES_IV_SIZE, &counter, 4); sg_init_table(pctx->src, 3); sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag)); sg = scatterwalk_ffwd(pctx->src + 1, req->src, req->assoclen); if (sg != pctx->src + 1) sg_chain(pctx->src, 2, sg); if (req->src != req->dst) { sg_init_table(pctx->dst, 3); sg_set_buf(pctx->dst, pctx->auth_tag, sizeof(pctx->auth_tag)); sg = scatterwalk_ffwd(pctx->dst + 1, req->dst, req->assoclen); if (sg != pctx->dst + 1) sg_chain(pctx->dst, 2, sg); } } static void crypto_gcm_init_crypt(struct aead_request *req, unsigned int cryptlen) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead); struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct skcipher_request *skreq = &pctx->u.skreq; struct scatterlist *dst; dst = req->src == req->dst ? pctx->src : pctx->dst; skcipher_request_set_tfm(skreq, ctx->ctr); skcipher_request_set_crypt(skreq, pctx->src, dst, cryptlen + sizeof(pctx->auth_tag), pctx->iv); } static inline unsigned int gcm_remain(unsigned int len) { len &= 0xfU; return len ? 
16 - len : 0; } static void gcm_hash_len_done(void *data, int err); static int gcm_hash_update(struct aead_request *req, crypto_completion_t compl, struct scatterlist *src, unsigned int len, u32 flags) { struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct ahash_request *ahreq = &pctx->u.ahreq; ahash_request_set_callback(ahreq, flags, compl, req); ahash_request_set_crypt(ahreq, src, NULL, len); return crypto_ahash_update(ahreq); } static int gcm_hash_remain(struct aead_request *req, unsigned int remain, crypto_completion_t compl, u32 flags) { return gcm_hash_update(req, compl, &gcm_zeroes->sg, remain, flags); } static int gcm_hash_len(struct aead_request *req, u32 flags) { struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct ahash_request *ahreq = &pctx->u.ahreq; struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; be128 lengths; lengths.a = cpu_to_be64(req->assoclen * 8); lengths.b = cpu_to_be64(gctx->cryptlen * 8); memcpy(pctx->iauth_tag, &lengths, 16); sg_init_one(&pctx->sg, pctx->iauth_tag, 16); ahash_request_set_callback(ahreq, flags, gcm_hash_len_done, req); ahash_request_set_crypt(ahreq, &pctx->sg, pctx->iauth_tag, sizeof(lengths)); return crypto_ahash_finup(ahreq); } static int gcm_hash_len_continue(struct aead_request *req, u32 flags) { struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; return gctx->complete(req, flags); } static void gcm_hash_len_done(void *data, int err) { struct aead_request *req = data; if (err) goto out; err = gcm_hash_len_continue(req, 0); if (err == -EINPROGRESS) return; out: aead_request_complete(req, err); } static int gcm_hash_crypt_remain_continue(struct aead_request *req, u32 flags) { return gcm_hash_len(req, flags) ?: gcm_hash_len_continue(req, flags); } static void gcm_hash_crypt_remain_done(void *data, int err) { struct aead_request *req = data; if (err) goto out; err = gcm_hash_crypt_remain_continue(req, 0); if (err == -EINPROGRESS) return; out: aead_request_complete(req, err); } static int gcm_hash_crypt_continue(struct aead_request *req, u32 flags) { struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; unsigned int remain; remain = gcm_remain(gctx->cryptlen); if (remain) return gcm_hash_remain(req, remain, gcm_hash_crypt_remain_done, flags) ?: gcm_hash_crypt_remain_continue(req, flags); return gcm_hash_crypt_remain_continue(req, flags); } static void gcm_hash_crypt_done(void *data, int err) { struct aead_request *req = data; if (err) goto out; err = gcm_hash_crypt_continue(req, 0); if (err == -EINPROGRESS) return; out: aead_request_complete(req, err); } static int gcm_hash_assoc_remain_continue(struct aead_request *req, u32 flags) { struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; if (gctx->cryptlen) return gcm_hash_update(req, gcm_hash_crypt_done, gctx->src, gctx->cryptlen, flags) ?: gcm_hash_crypt_continue(req, flags); return gcm_hash_crypt_remain_continue(req, flags); } static void gcm_hash_assoc_remain_done(void *data, int err) { struct aead_request *req = data; if (err) goto out; err = gcm_hash_assoc_remain_continue(req, 0); if (err == -EINPROGRESS) return; out: aead_request_complete(req, err); } static int gcm_hash_assoc_continue(struct aead_request *req, u32 flags) { unsigned int remain; remain = gcm_remain(req->assoclen); if (remain) return gcm_hash_remain(req, remain, gcm_hash_assoc_remain_done, flags) ?: 
gcm_hash_assoc_remain_continue(req, flags); return gcm_hash_assoc_remain_continue(req, flags); } static void gcm_hash_assoc_done(void *data, int err) { struct aead_request *req = data; if (err) goto out; err = gcm_hash_assoc_continue(req, 0); if (err == -EINPROGRESS) return; out: aead_request_complete(req, err); } static int gcm_hash_init_continue(struct aead_request *req, u32 flags) { if (req->assoclen) return gcm_hash_update(req, gcm_hash_assoc_done, req->src, req->assoclen, flags) ?: gcm_hash_assoc_continue(req, flags); return gcm_hash_assoc_remain_continue(req, flags); } static void gcm_hash_init_done(void *data, int err) { struct aead_request *req = data; if (err) goto out; err = gcm_hash_init_continue(req, 0); if (err == -EINPROGRESS) return; out: aead_request_complete(req, err); } static int gcm_hash(struct aead_request *req, u32 flags) { struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct ahash_request *ahreq = &pctx->u.ahreq; struct crypto_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); ahash_request_set_tfm(ahreq, ctx->ghash); ahash_request_set_callback(ahreq, flags, gcm_hash_init_done, req); return crypto_ahash_init(ahreq) ?: gcm_hash_init_continue(req, flags); } static int gcm_enc_copy_hash(struct aead_request *req, u32 flags) { struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct crypto_aead *aead = crypto_aead_reqtfm(req); u8 *auth_tag = pctx->auth_tag; crypto_xor(auth_tag, pctx->iauth_tag, 16); scatterwalk_map_and_copy(auth_tag, req->dst, req->assoclen + req->cryptlen, crypto_aead_authsize(aead), 1); return 0; } static int gcm_encrypt_continue(struct aead_request *req, u32 flags) { struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; gctx->src = sg_next(req->src == req->dst ? pctx->src : pctx->dst); gctx->cryptlen = req->cryptlen; gctx->complete = gcm_enc_copy_hash; return gcm_hash(req, flags); } static void gcm_encrypt_done(void *data, int err) { struct aead_request *req = data; if (err) goto out; err = gcm_encrypt_continue(req, 0); if (err == -EINPROGRESS) return; out: aead_request_complete(req, err); } static int crypto_gcm_encrypt(struct aead_request *req) { struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct skcipher_request *skreq = &pctx->u.skreq; u32 flags = aead_request_flags(req); crypto_gcm_init_common(req); crypto_gcm_init_crypt(req, req->cryptlen); skcipher_request_set_callback(skreq, flags, gcm_encrypt_done, req); return crypto_skcipher_encrypt(skreq) ?: gcm_encrypt_continue(req, flags); } static int crypto_gcm_verify(struct aead_request *req) { struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct crypto_aead *aead = crypto_aead_reqtfm(req); u8 *auth_tag = pctx->auth_tag; u8 *iauth_tag = pctx->iauth_tag; unsigned int authsize = crypto_aead_authsize(aead); unsigned int cryptlen = req->cryptlen - authsize; crypto_xor(auth_tag, iauth_tag, 16); scatterwalk_map_and_copy(iauth_tag, req->src, req->assoclen + cryptlen, authsize, 0); return crypto_memneq(iauth_tag, auth_tag, authsize) ? 
-EBADMSG : 0; } static void gcm_decrypt_done(void *data, int err) { struct aead_request *req = data; if (!err) err = crypto_gcm_verify(req); aead_request_complete(req, err); } static int gcm_dec_hash_continue(struct aead_request *req, u32 flags) { struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct skcipher_request *skreq = &pctx->u.skreq; struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; crypto_gcm_init_crypt(req, gctx->cryptlen); skcipher_request_set_callback(skreq, flags, gcm_decrypt_done, req); return crypto_skcipher_decrypt(skreq) ?: crypto_gcm_verify(req); } static int crypto_gcm_decrypt(struct aead_request *req) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx; unsigned int authsize = crypto_aead_authsize(aead); unsigned int cryptlen = req->cryptlen; u32 flags = aead_request_flags(req); cryptlen -= authsize; crypto_gcm_init_common(req); gctx->src = sg_next(pctx->src); gctx->cryptlen = cryptlen; gctx->complete = gcm_dec_hash_continue; return gcm_hash(req, flags); } static int crypto_gcm_init_tfm(struct crypto_aead *tfm) { struct aead_instance *inst = aead_alg_instance(tfm); struct gcm_instance_ctx *ictx = aead_instance_ctx(inst); struct crypto_gcm_ctx *ctx = crypto_aead_ctx(tfm); struct crypto_skcipher *ctr; struct crypto_ahash *ghash; unsigned long align; int err; ghash = crypto_spawn_ahash(&ictx->ghash); if (IS_ERR(ghash)) return PTR_ERR(ghash); ctr = crypto_spawn_skcipher(&ictx->ctr); err = PTR_ERR(ctr); if (IS_ERR(ctr)) goto err_free_hash; ctx->ctr = ctr; ctx->ghash = ghash; align = crypto_aead_alignmask(tfm); align &= ~(crypto_tfm_ctx_alignment() - 1); crypto_aead_set_reqsize(tfm, align + offsetof(struct crypto_gcm_req_priv_ctx, u) + max(sizeof(struct skcipher_request) + crypto_skcipher_reqsize(ctr), sizeof(struct ahash_request) + crypto_ahash_reqsize(ghash))); return 0; err_free_hash: crypto_free_ahash(ghash); return err; } static void crypto_gcm_exit_tfm(struct crypto_aead *tfm) { struct crypto_gcm_ctx *ctx = crypto_aead_ctx(tfm); crypto_free_ahash(ctx->ghash); crypto_free_skcipher(ctx->ctr); } static void crypto_gcm_free(struct aead_instance *inst) { struct gcm_instance_ctx *ctx = aead_instance_ctx(inst); crypto_drop_skcipher(&ctx->ctr); crypto_drop_ahash(&ctx->ghash); kfree(inst); } static int crypto_gcm_create_common(struct crypto_template *tmpl, struct rtattr **tb, const char *ctr_name, const char *ghash_name) { struct skcipher_alg_common *ctr; u32 mask; struct aead_instance *inst; struct gcm_instance_ctx *ctx; struct hash_alg_common *ghash; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); if (err) return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) return -ENOMEM; ctx = aead_instance_ctx(inst); err = crypto_grab_ahash(&ctx->ghash, aead_crypto_instance(inst), ghash_name, 0, mask); if (err) goto err_free_inst; ghash = crypto_spawn_ahash_alg(&ctx->ghash); err = -EINVAL; if (strcmp(ghash->base.cra_name, "ghash") != 0 || ghash->digestsize != 16) goto err_free_inst; err = crypto_grab_skcipher(&ctx->ctr, aead_crypto_instance(inst), ctr_name, 0, mask); if (err) goto err_free_inst; ctr = crypto_spawn_skcipher_alg_common(&ctx->ctr); /* The skcipher algorithm must be CTR mode, using 16-byte blocks. 
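 * In the wrapped skcipher this shows up as ivsize == 16 with cra_blocksize == 1 (CTR acts as a stream cipher), which is exactly what the checks below verify.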
*/ err = -EINVAL; if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 || ctr->ivsize != 16 || ctr->base.cra_blocksize != 1) goto err_free_inst; err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "gcm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)", ctr->base.cra_driver_name, ghash->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; inst->alg.base.cra_priority = (ghash->base.cra_priority + ctr->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = ctr->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct crypto_gcm_ctx); inst->alg.ivsize = GCM_AES_IV_SIZE; inst->alg.chunksize = ctr->chunksize; inst->alg.maxauthsize = 16; inst->alg.init = crypto_gcm_init_tfm; inst->alg.exit = crypto_gcm_exit_tfm; inst->alg.setkey = crypto_gcm_setkey; inst->alg.setauthsize = crypto_gcm_setauthsize; inst->alg.encrypt = crypto_gcm_encrypt; inst->alg.decrypt = crypto_gcm_decrypt; inst->free = crypto_gcm_free; err = aead_register_instance(tmpl, inst); if (err) { err_free_inst: crypto_gcm_free(inst); } return err; } static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb) { const char *cipher_name; char ctr_name[CRYPTO_MAX_ALG_NAME]; cipher_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(cipher_name)) return PTR_ERR(cipher_name); if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)", cipher_name) >= CRYPTO_MAX_ALG_NAME) return -ENAMETOOLONG; return crypto_gcm_create_common(tmpl, tb, ctr_name, "ghash"); } static int crypto_gcm_base_create(struct crypto_template *tmpl, struct rtattr **tb) { const char *ctr_name; const char *ghash_name; ctr_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(ctr_name)) return PTR_ERR(ctr_name); ghash_name = crypto_attr_alg_name(tb[2]); if (IS_ERR(ghash_name)) return PTR_ERR(ghash_name); return crypto_gcm_create_common(tmpl, tb, ctr_name, ghash_name); } static int crypto_rfc4106_setkey(struct crypto_aead *parent, const u8 *key, unsigned int keylen) { struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(parent); struct crypto_aead *child = ctx->child; if (keylen < 4) return -EINVAL; keylen -= 4; memcpy(ctx->nonce, key + keylen, 4); crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK); crypto_aead_set_flags(child, crypto_aead_get_flags(parent) & CRYPTO_TFM_REQ_MASK); return crypto_aead_setkey(child, key, keylen); } static int crypto_rfc4106_setauthsize(struct crypto_aead *parent, unsigned int authsize) { struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(parent); int err; err = crypto_rfc4106_check_authsize(authsize); if (err) return err; return crypto_aead_setauthsize(ctx->child, authsize); } static struct aead_request *crypto_rfc4106_crypt(struct aead_request *req) { struct crypto_rfc4106_req_ctx *rctx = aead_request_ctx(req); struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(aead); struct aead_request *subreq = &rctx->subreq; struct crypto_aead *child = ctx->child; struct scatterlist *sg; u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child), crypto_aead_alignmask(child) + 1); scatterwalk_map_and_copy(iv + GCM_AES_IV_SIZE, req->src, 0, req->assoclen - 8, 0); memcpy(iv, ctx->nonce, 4); memcpy(iv + 4, req->iv, 8); sg_init_table(rctx->src, 3); sg_set_buf(rctx->src, iv + GCM_AES_IV_SIZE, req->assoclen - 8); sg = scatterwalk_ffwd(rctx->src + 1, req->src, req->assoclen); if (sg != rctx->src + 1) sg_chain(rctx->src, 2, sg); if 
(req->src != req->dst) { sg_init_table(rctx->dst, 3); sg_set_buf(rctx->dst, iv + GCM_AES_IV_SIZE, req->assoclen - 8); sg = scatterwalk_ffwd(rctx->dst + 1, req->dst, req->assoclen); if (sg != rctx->dst + 1) sg_chain(rctx->dst, 2, sg); } aead_request_set_tfm(subreq, child); aead_request_set_callback(subreq, req->base.flags, req->base.complete, req->base.data); aead_request_set_crypt(subreq, rctx->src, req->src == req->dst ? rctx->src : rctx->dst, req->cryptlen, iv); aead_request_set_ad(subreq, req->assoclen - 8); return subreq; } static int crypto_rfc4106_encrypt(struct aead_request *req) { int err; err = crypto_ipsec_check_assoclen(req->assoclen); if (err) return err; req = crypto_rfc4106_crypt(req); return crypto_aead_encrypt(req); } static int crypto_rfc4106_decrypt(struct aead_request *req) { int err; err = crypto_ipsec_check_assoclen(req->assoclen); if (err) return err; req = crypto_rfc4106_crypt(req); return crypto_aead_decrypt(req); } static int crypto_rfc4106_init_tfm(struct crypto_aead *tfm) { struct aead_instance *inst = aead_alg_instance(tfm); struct crypto_aead_spawn *spawn = aead_instance_ctx(inst); struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(tfm); struct crypto_aead *aead; unsigned long align; aead = crypto_spawn_aead(spawn); if (IS_ERR(aead)) return PTR_ERR(aead); ctx->child = aead; align = crypto_aead_alignmask(aead); align &= ~(crypto_tfm_ctx_alignment() - 1); crypto_aead_set_reqsize( tfm, sizeof(struct crypto_rfc4106_req_ctx) + ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) + align + 24); return 0; } static void crypto_rfc4106_exit_tfm(struct crypto_aead *tfm) { struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(tfm); crypto_free_aead(ctx->child); } static void crypto_rfc4106_free(struct aead_instance *inst) { crypto_drop_aead(aead_instance_ctx(inst)); kfree(inst); } static int crypto_rfc4106_create(struct crypto_template *tmpl, struct rtattr **tb) { u32 mask; struct aead_instance *inst; struct crypto_aead_spawn *spawn; struct aead_alg *alg; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); if (err) return err; inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) return -ENOMEM; spawn = aead_instance_ctx(inst); err = crypto_grab_aead(spawn, aead_crypto_instance(inst), crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; alg = crypto_spawn_aead_alg(spawn); err = -EINVAL; /* Underlying IV size must be 12. */ if (crypto_aead_alg_ivsize(alg) != GCM_AES_IV_SIZE) goto err_free_inst; /* Not a stream cipher? 
*/ if (alg->base.cra_blocksize != 1) goto err_free_inst; err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "rfc4106(%s)", alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME || snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "rfc4106(%s)", alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4106_ctx); inst->alg.ivsize = GCM_RFC4106_IV_SIZE; inst->alg.chunksize = crypto_aead_alg_chunksize(alg); inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg); inst->alg.init = crypto_rfc4106_init_tfm; inst->alg.exit = crypto_rfc4106_exit_tfm; inst->alg.setkey = crypto_rfc4106_setkey; inst->alg.setauthsize = crypto_rfc4106_setauthsize; inst->alg.encrypt = crypto_rfc4106_encrypt; inst->alg.decrypt = crypto_rfc4106_decrypt; inst->free = crypto_rfc4106_free; err = aead_register_instance(tmpl, inst); if (err) { err_free_inst: crypto_rfc4106_free(inst); } return err; } static int crypto_rfc4543_setkey(struct crypto_aead *parent, const u8 *key, unsigned int keylen) { struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(parent); struct crypto_aead *child = ctx->child; if (keylen < 4) return -EINVAL; keylen -= 4; memcpy(ctx->nonce, key + keylen, 4); crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK); crypto_aead_set_flags(child, crypto_aead_get_flags(parent) & CRYPTO_TFM_REQ_MASK); return crypto_aead_setkey(child, key, keylen); } static int crypto_rfc4543_setauthsize(struct crypto_aead *parent, unsigned int authsize) { struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(parent); if (authsize != 16) return -EINVAL; return crypto_aead_setauthsize(ctx->child, authsize); } static int crypto_rfc4543_crypt(struct aead_request *req, bool enc) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead); struct crypto_rfc4543_req_ctx *rctx = aead_request_ctx(req); struct aead_request *subreq = &rctx->subreq; unsigned int authsize = crypto_aead_authsize(aead); u8 *iv = PTR_ALIGN((u8 *)(rctx + 1) + crypto_aead_reqsize(ctx->child), crypto_aead_alignmask(ctx->child) + 1); int err; if (req->src != req->dst) { err = crypto_rfc4543_copy_src_to_dst(req, enc); if (err) return err; } memcpy(iv, ctx->nonce, 4); memcpy(iv + 4, req->iv, 8); aead_request_set_tfm(subreq, ctx->child); aead_request_set_callback(subreq, req->base.flags, req->base.complete, req->base.data); aead_request_set_crypt(subreq, req->src, req->dst, enc ? 0 : authsize, iv); aead_request_set_ad(subreq, req->assoclen + req->cryptlen - subreq->cryptlen); return enc ? crypto_aead_encrypt(subreq) : crypto_aead_decrypt(subreq); } static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead); unsigned int authsize = crypto_aead_authsize(aead); unsigned int nbytes = req->assoclen + req->cryptlen - (enc ? 
0 : authsize); SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->null); skcipher_request_set_sync_tfm(nreq, ctx->null); skcipher_request_set_callback(nreq, req->base.flags, NULL, NULL); skcipher_request_set_crypt(nreq, req->src, req->dst, nbytes, NULL); return crypto_skcipher_encrypt(nreq); } static int crypto_rfc4543_encrypt(struct aead_request *req) { return crypto_ipsec_check_assoclen(req->assoclen) ?: crypto_rfc4543_crypt(req, true); } static int crypto_rfc4543_decrypt(struct aead_request *req) { return crypto_ipsec_check_assoclen(req->assoclen) ?: crypto_rfc4543_crypt(req, false); } static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm) { struct aead_instance *inst = aead_alg_instance(tfm); struct crypto_rfc4543_instance_ctx *ictx = aead_instance_ctx(inst); struct crypto_aead_spawn *spawn = &ictx->aead; struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm); struct crypto_aead *aead; struct crypto_sync_skcipher *null; unsigned long align; int err = 0; aead = crypto_spawn_aead(spawn); if (IS_ERR(aead)) return PTR_ERR(aead); null = crypto_get_default_null_skcipher(); err = PTR_ERR(null); if (IS_ERR(null)) goto err_free_aead; ctx->child = aead; ctx->null = null; align = crypto_aead_alignmask(aead); align &= ~(crypto_tfm_ctx_alignment() - 1); crypto_aead_set_reqsize( tfm, sizeof(struct crypto_rfc4543_req_ctx) + ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) + align + GCM_AES_IV_SIZE); return 0; err_free_aead: crypto_free_aead(aead); return err; } static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm) { struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm); crypto_free_aead(ctx->child); crypto_put_default_null_skcipher(); } static void crypto_rfc4543_free(struct aead_instance *inst) { struct crypto_rfc4543_instance_ctx *ctx = aead_instance_ctx(inst); crypto_drop_aead(&ctx->aead); kfree(inst); } static int crypto_rfc4543_create(struct crypto_template *tmpl, struct rtattr **tb) { u32 mask; struct aead_instance *inst; struct aead_alg *alg; struct crypto_rfc4543_instance_ctx *ctx; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); if (err) return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) return -ENOMEM; ctx = aead_instance_ctx(inst); err = crypto_grab_aead(&ctx->aead, aead_crypto_instance(inst), crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; alg = crypto_spawn_aead_alg(&ctx->aead); err = -EINVAL; /* Underlying IV size must be 12. */ if (crypto_aead_alg_ivsize(alg) != GCM_AES_IV_SIZE) goto err_free_inst; /* Not a stream cipher? 
*/ if (alg->base.cra_blocksize != 1) goto err_free_inst; err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "rfc4543(%s)", alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME || snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "rfc4543(%s)", alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = alg->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4543_ctx); inst->alg.ivsize = GCM_RFC4543_IV_SIZE; inst->alg.chunksize = crypto_aead_alg_chunksize(alg); inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg); inst->alg.init = crypto_rfc4543_init_tfm; inst->alg.exit = crypto_rfc4543_exit_tfm; inst->alg.setkey = crypto_rfc4543_setkey; inst->alg.setauthsize = crypto_rfc4543_setauthsize; inst->alg.encrypt = crypto_rfc4543_encrypt; inst->alg.decrypt = crypto_rfc4543_decrypt; inst->free = crypto_rfc4543_free; err = aead_register_instance(tmpl, inst); if (err) { err_free_inst: crypto_rfc4543_free(inst); } return err; } static struct crypto_template crypto_gcm_tmpls[] = { { .name = "gcm_base", .create = crypto_gcm_base_create, .module = THIS_MODULE, }, { .name = "gcm", .create = crypto_gcm_create, .module = THIS_MODULE, }, { .name = "rfc4106", .create = crypto_rfc4106_create, .module = THIS_MODULE, }, { .name = "rfc4543", .create = crypto_rfc4543_create, .module = THIS_MODULE, }, }; static int __init crypto_gcm_module_init(void) { int err; gcm_zeroes = kzalloc(sizeof(*gcm_zeroes), GFP_KERNEL); if (!gcm_zeroes) return -ENOMEM; sg_init_one(&gcm_zeroes->sg, gcm_zeroes->buf, sizeof(gcm_zeroes->buf)); err = crypto_register_templates(crypto_gcm_tmpls, ARRAY_SIZE(crypto_gcm_tmpls)); if (err) kfree(gcm_zeroes); return err; } static void __exit crypto_gcm_module_exit(void) { kfree(gcm_zeroes); crypto_unregister_templates(crypto_gcm_tmpls, ARRAY_SIZE(crypto_gcm_tmpls)); } subsys_initcall(crypto_gcm_module_init); module_exit(crypto_gcm_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Galois/Counter Mode"); MODULE_AUTHOR("Mikko Herranen <mh1@iki.fi>"); MODULE_ALIAS_CRYPTO("gcm_base"); MODULE_ALIAS_CRYPTO("rfc4106"); MODULE_ALIAS_CRYPTO("rfc4543"); MODULE_ALIAS_CRYPTO("gcm");
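/*
 * Illustrative, self-contained sketch (not part of this file): how the code
 * above assembles the 16-byte AES-CTR counter block for AES-GCM with a
 * 96-bit IV. crypto_gcm_init_common() copies the 12-byte IV and appends a
 * big-endian 32-bit counter starting at 1, and crypto_rfc4106_crypt() forms
 * that 12-byte IV from the 4-byte nonce kept from the key plus the 8-byte
 * per-request IV. The demo_* helpers are hypothetical.
 */
#include <stdint.h>
#include <string.h>

/* J0 for a 96-bit GCM IV: IV || 0x00000001 (big endian). */
static void demo_gcm_counter_block(uint8_t out[16], const uint8_t iv[12])
{
	memcpy(out, iv, 12);
	out[12] = 0x00;
	out[13] = 0x00;
	out[14] = 0x00;
	out[15] = 0x01;
}

/* RFC 4106: GCM IV = 4-byte salt (taken from the key) || 8-byte ESP IV. */
static void demo_rfc4106_iv(uint8_t out[12], const uint8_t nonce[4],
			    const uint8_t esp_iv[8])
{
	memcpy(out, nonce, 4);
	memcpy(out + 4, esp_iv, 8);
}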
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org> */ /* Kernel module implementing an IP set type: the hash:ip,port,net type */ #include <linux/jhash.h> #include <linux/module.h> #include <linux/ip.h> #include <linux/skbuff.h> #include <linux/errno.h> #include <linux/random.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/netlink.h> #include <net/tcp.h> #include <linux/netfilter.h> #include <linux/netfilter/ipset/pfxlen.h> #include <linux/netfilter/ipset/ip_set.h> #include <linux/netfilter/ipset/ip_set_getport.h> #include <linux/netfilter/ipset/ip_set_hash.h> #define IPSET_TYPE_REV_MIN 0 /* 1 SCTP and UDPLITE support added */ /* 2 Range as input support for IPv4 added */ /* 3 nomatch flag support added */ /* 4 Counters support added */ /* 5 Comments support added */ /* 6 Forceadd support added */ /* 7 skbinfo support added */ #define IPSET_TYPE_REV_MAX 8 /* bucketsize, initval support added */ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>"); IP_SET_MODULE_DESC("hash:ip,port,net", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX); MODULE_ALIAS("ip_set_hash:ip,port,net"); /* Type specific function prefix */ #define HTYPE hash_ipportnet /* We squeeze the "nomatch" flag into cidr: we don't support cidr == 0 * However this way we have to store internally cidr - 1, *
dancing back and forth. */ #define IP_SET_HASH_WITH_NETS_PACKED #define IP_SET_HASH_WITH_PROTO #define IP_SET_HASH_WITH_NETS /* IPv4 variant */ /* Member elements */ struct hash_ipportnet4_elem { __be32 ip; __be32 ip2; __be16 port; u8 cidr:7; u8 nomatch:1; u8 proto; }; /* Common functions */ static bool hash_ipportnet4_data_equal(const struct hash_ipportnet4_elem *ip1, const struct hash_ipportnet4_elem *ip2, u32 *multi) { return ip1->ip == ip2->ip && ip1->ip2 == ip2->ip2 && ip1->cidr == ip2->cidr && ip1->port == ip2->port && ip1->proto == ip2->proto; } static int hash_ipportnet4_do_data_match(const struct hash_ipportnet4_elem *elem) { return elem->nomatch ? -ENOTEMPTY : 1; } static void hash_ipportnet4_data_set_flags(struct hash_ipportnet4_elem *elem, u32 flags) { elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH); } static void hash_ipportnet4_data_reset_flags(struct hash_ipportnet4_elem *elem, u8 *flags) { swap(*flags, elem->nomatch); } static void hash_ipportnet4_data_netmask(struct hash_ipportnet4_elem *elem, u8 cidr) { elem->ip2 &= ip_set_netmask(cidr); elem->cidr = cidr - 1; } static bool hash_ipportnet4_data_list(struct sk_buff *skb, const struct hash_ipportnet4_elem *data) { u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) || nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip2) || nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) || nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || (flags && nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) goto nla_put_failure; return false; nla_put_failure: return true; } static void hash_ipportnet4_data_next(struct hash_ipportnet4_elem *next, const struct hash_ipportnet4_elem *d) { next->ip = d->ip; next->port = d->port; next->ip2 = d->ip2; } #define MTYPE hash_ipportnet4 #define HOST_MASK 32 #include "ip_set_hash_gen.h" static int hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, enum ipset_adt adt, struct ip_set_adt_opt *opt) { const struct hash_ipportnet4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipportnet4_elem e = { .cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK), }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); if (adt == IPSET_TEST) e.cidr = HOST_MASK - 1; if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC, &e.port, &e.proto)) return -EINVAL; ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip); ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip2); e.ip2 &= ip_set_netmask(e.cidr + 1); return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); } static int hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { struct hash_ipportnet4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipportnet4_elem e = { .cidr = HOST_MASK - 1 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 ip = 0, ip_to = 0, p = 0, port, port_to; u32 ip2_from = 0, ip2_to = 0, ip2, i = 0; bool with_ports = false; u8 cidr; int ret; if (tb[IPSET_ATTR_LINENO]) *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) return -IPSET_ERR_PROTOCOL; ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); if (ret) return ret; ret = ip_set_get_extensions(set, tb, &ext); if (ret) 
return ret; ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from); if (ret) return ret; if (tb[IPSET_ATTR_CIDR2]) { cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]); if (!cidr || cidr > HOST_MASK) return -IPSET_ERR_INVALID_CIDR; e.cidr = cidr - 1; } e.port = nla_get_be16(tb[IPSET_ATTR_PORT]); if (tb[IPSET_ATTR_PROTO]) { e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); with_ports = ip_set_proto_with_ports(e.proto); if (e.proto == 0) return -IPSET_ERR_INVALID_PROTO; } else { return -IPSET_ERR_MISSING_PROTO; } if (!(with_ports || e.proto == IPPROTO_ICMP)) e.port = 0; if (tb[IPSET_ATTR_CADT_FLAGS]) { u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); if (cadt_flags & IPSET_FLAG_NOMATCH) flags |= (IPSET_FLAG_NOMATCH << 16); } with_ports = with_ports && tb[IPSET_ATTR_PORT_TO]; if (adt == IPSET_TEST || !(tb[IPSET_ATTR_CIDR] || tb[IPSET_ATTR_IP_TO] || with_ports || tb[IPSET_ATTR_IP2_TO])) { e.ip = htonl(ip); e.ip2 = htonl(ip2_from & ip_set_hostmask(e.cidr + 1)); ret = adtfn(set, &e, &ext, &ext, flags); return ip_set_enomatch(ret, flags, adt, set) ? -ret : ip_set_eexist(ret, flags) ? 0 : ret; } ip_to = ip; if (tb[IPSET_ATTR_IP_TO]) { ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); if (ret) return ret; if (ip > ip_to) swap(ip, ip_to); } else if (tb[IPSET_ATTR_CIDR]) { cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); if (!cidr || cidr > HOST_MASK) return -IPSET_ERR_INVALID_CIDR; ip_set_mask_from_to(ip, ip_to, cidr); } port_to = port = ntohs(e.port); if (tb[IPSET_ATTR_PORT_TO]) { port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); if (port > port_to) swap(port, port_to); } ip2_to = ip2_from; if (tb[IPSET_ATTR_IP2_TO]) { ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to); if (ret) return ret; if (ip2_from > ip2_to) swap(ip2_from, ip2_to); if (ip2_from + UINT_MAX == ip2_to) return -IPSET_ERR_HASH_RANGE; } else { ip_set_mask_from_to(ip2_from, ip2_to, e.cidr + 1); } if (retried) { ip = ntohl(h->next.ip); p = ntohs(h->next.port); ip2 = ntohl(h->next.ip2); } else { p = port; ip2 = ip2_from; } for (; ip <= ip_to; ip++) { e.ip = htonl(ip); for (; p <= port_to; p++) { e.port = htons(p); do { i++; e.ip2 = htonl(ip2); ip2 = ip_set_range_to_cidr(ip2, ip2_to, &cidr); e.cidr = cidr - 1; if (i > IPSET_MAX_RANGE) { hash_ipportnet4_data_next(&h->next, &e); return -ERANGE; } ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) return ret; ret = 0; } while (ip2++ < ip2_to); ip2 = ip2_from; } p = port; } return ret; } /* IPv6 variant */ struct hash_ipportnet6_elem { union nf_inet_addr ip; union nf_inet_addr ip2; __be16 port; u8 cidr:7; u8 nomatch:1; u8 proto; }; /* Common functions */ static bool hash_ipportnet6_data_equal(const struct hash_ipportnet6_elem *ip1, const struct hash_ipportnet6_elem *ip2, u32 *multi) { return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) && ipv6_addr_equal(&ip1->ip2.in6, &ip2->ip2.in6) && ip1->cidr == ip2->cidr && ip1->port == ip2->port && ip1->proto == ip2->proto; } static int hash_ipportnet6_do_data_match(const struct hash_ipportnet6_elem *elem) { return elem->nomatch ? 
-ENOTEMPTY : 1; } static void hash_ipportnet6_data_set_flags(struct hash_ipportnet6_elem *elem, u32 flags) { elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH); } static void hash_ipportnet6_data_reset_flags(struct hash_ipportnet6_elem *elem, u8 *flags) { swap(*flags, elem->nomatch); } static void hash_ipportnet6_data_netmask(struct hash_ipportnet6_elem *elem, u8 cidr) { ip6_netmask(&elem->ip2, cidr); elem->cidr = cidr - 1; } static bool hash_ipportnet6_data_list(struct sk_buff *skb, const struct hash_ipportnet6_elem *data) { u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) || nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) || nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) || nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || (flags && nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) goto nla_put_failure; return false; nla_put_failure: return true; } static void hash_ipportnet6_data_next(struct hash_ipportnet6_elem *next, const struct hash_ipportnet6_elem *d) { next->port = d->port; } #undef MTYPE #undef HOST_MASK #define MTYPE hash_ipportnet6 #define HOST_MASK 128 #define IP_SET_EMIT_CREATE #include "ip_set_hash_gen.h" static int hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, enum ipset_adt adt, struct ip_set_adt_opt *opt) { const struct hash_ipportnet6 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipportnet6_elem e = { .cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK), }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); if (adt == IPSET_TEST) e.cidr = HOST_MASK - 1; if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC, &e.port, &e.proto)) return -EINVAL; ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6); ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip2.in6); ip6_netmask(&e.ip2, e.cidr + 1); return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); } static int hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { const struct hash_ipportnet6 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipportnet6_elem e = { .cidr = HOST_MASK - 1 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 port, port_to; bool with_ports = false; u8 cidr; int ret; if (tb[IPSET_ATTR_LINENO]) *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) return -IPSET_ERR_PROTOCOL; if (unlikely(tb[IPSET_ATTR_IP_TO])) return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; if (unlikely(tb[IPSET_ATTR_CIDR])) { cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); if (cidr != HOST_MASK) return -IPSET_ERR_INVALID_CIDR; } ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip); if (ret) return ret; ret = ip_set_get_extensions(set, tb, &ext); if (ret) return ret; ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip2); if (ret) return ret; if (tb[IPSET_ATTR_CIDR2]) { cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]); if (!cidr || cidr > HOST_MASK) return -IPSET_ERR_INVALID_CIDR; e.cidr = cidr - 1; } ip6_netmask(&e.ip2, e.cidr + 1); e.port = nla_get_be16(tb[IPSET_ATTR_PORT]); if (tb[IPSET_ATTR_PROTO]) { e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); with_ports = ip_set_proto_with_ports(e.proto); if (e.proto == 0) return -IPSET_ERR_INVALID_PROTO; } else 
{ return -IPSET_ERR_MISSING_PROTO; } if (!(with_ports || e.proto == IPPROTO_ICMPV6)) e.port = 0; if (tb[IPSET_ATTR_CADT_FLAGS]) { u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); if (cadt_flags & IPSET_FLAG_NOMATCH) flags |= (IPSET_FLAG_NOMATCH << 16); } if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { ret = adtfn(set, &e, &ext, &ext, flags); return ip_set_enomatch(ret, flags, adt, set) ? -ret : ip_set_eexist(ret, flags) ? 0 : ret; } port = ntohs(e.port); port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); if (port > port_to) swap(port, port_to); if (retried) port = ntohs(h->next.port); for (; port <= port_to; port++) { e.port = htons(port); ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) return ret; ret = 0; } return ret; } static struct ip_set_type hash_ipportnet_type __read_mostly = { .name = "hash:ip,port,net", .protocol = IPSET_PROTOCOL, .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2 | IPSET_TYPE_NOMATCH, .dimension = IPSET_DIM_THREE, .family = NFPROTO_UNSPEC, .revision_min = IPSET_TYPE_REV_MIN, .revision_max = IPSET_TYPE_REV_MAX, .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE, .create = hash_ipportnet_create, .create_policy = { [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 }, [IPSET_ATTR_INITVAL] = { .type = NLA_U32 }, [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 }, [IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, }, .adt_policy = { [IPSET_ATTR_IP] = { .type = NLA_NESTED }, [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, [IPSET_ATTR_IP2] = { .type = NLA_NESTED }, [IPSET_ATTR_IP2_TO] = { .type = NLA_NESTED }, [IPSET_ATTR_PORT] = { .type = NLA_U16 }, [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 }, [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, [IPSET_ATTR_CIDR2] = { .type = NLA_U8 }, [IPSET_ATTR_PROTO] = { .type = NLA_U8 }, [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, [IPSET_ATTR_BYTES] = { .type = NLA_U64 }, [IPSET_ATTR_PACKETS] = { .type = NLA_U64 }, [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING, .len = IPSET_MAX_COMMENT_SIZE }, [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 }, [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 }, [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 }, }, .me = THIS_MODULE, }; static int __init hash_ipportnet_init(void) { return ip_set_type_register(&hash_ipportnet_type); } static void __exit hash_ipportnet_fini(void) { rcu_barrier(); ip_set_type_unregister(&hash_ipportnet_type); } module_init(hash_ipportnet_init); module_exit(hash_ipportnet_fini);
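/*
 * Illustration (plain C, not kernel code) of the cidr/nomatch packing used by
 * the hash_ipportnet[46]_elem structures above: seven bits store cidr - 1, so
 * prefixes up to /32 (or /128 for IPv6) fit and cidr == 0 never occurs, while
 * the eighth bit carries the "nomatch" flag.  The names below are
 * hypothetical and only demonstrate the encode/decode round trip.
 */
#include <assert.h>
#include <stdint.h>

struct packed_cidr {
	uint8_t cidr:7;		/* stores cidr - 1, as the elements do */
	uint8_t nomatch:1;	/* IPSET_FLAG_NOMATCH squeezed into the same byte */
};

static struct packed_cidr pack_cidr(uint8_t cidr, int nomatch)
{
	struct packed_cidr p = { .cidr = (uint8_t)(cidr - 1), .nomatch = !!nomatch };
	return p;
}

static uint8_t unpack_cidr(struct packed_cidr p)
{
	return p.cidr + 1;	/* the "dancing back and forth" from the comment above */
}

int main(void)
{
	struct packed_cidr p = pack_cidr(24, 1);	/* a /24 entry marked nomatch */

	assert(unpack_cidr(p) == 24);
	assert(p.nomatch == 1);
	return 0;
}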
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2015 Microchip Technology */ #include <linux/module.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/usb.h> #include <linux/crc32.h> #include <linux/signal.h> #include <linux/slab.h> #include <linux/if_vlan.h> #include <linux/uaccess.h> #include <linux/linkmode.h> #include <linux/list.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/mdio.h> #include <linux/phy.h> #include <net/ip6_checksum.h> #include <net/vxlan.h> #include <linux/interrupt.h> #include
<linux/irqdomain.h> #include <linux/irq.h> #include <linux/irqchip/chained_irq.h> #include <linux/microchipphy.h> #include <linux/phy_fixed.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include "lan78xx.h" #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>" #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices" #define DRIVER_NAME "lan78xx" #define TX_TIMEOUT_JIFFIES (5 * HZ) #define THROTTLE_JIFFIES (HZ / 8) #define UNLINK_TIMEOUT_MS 3 #define RX_MAX_QUEUE_MEMORY (60 * 1518) #define SS_USB_PKT_SIZE (1024) #define HS_USB_PKT_SIZE (512) #define FS_USB_PKT_SIZE (64) #define MAX_RX_FIFO_SIZE (12 * 1024) #define MAX_TX_FIFO_SIZE (12 * 1024) #define FLOW_THRESHOLD(n) ((((n) + 511) / 512) & 0x7F) #define FLOW_CTRL_THRESHOLD(on, off) ((FLOW_THRESHOLD(on) << 0) | \ (FLOW_THRESHOLD(off) << 8)) /* Flow control turned on when Rx FIFO level rises above this level (bytes) */ #define FLOW_ON_SS 9216 #define FLOW_ON_HS 8704 /* Flow control turned off when Rx FIFO level falls below this level (bytes) */ #define FLOW_OFF_SS 4096 #define FLOW_OFF_HS 1024 #define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE) #define DEFAULT_BULK_IN_DELAY (0x0800) #define MAX_SINGLE_PACKET_SIZE (9000) #define DEFAULT_TX_CSUM_ENABLE (true) #define DEFAULT_RX_CSUM_ENABLE (true) #define DEFAULT_TSO_CSUM_ENABLE (true) #define DEFAULT_VLAN_FILTER_ENABLE (true) #define DEFAULT_VLAN_RX_OFFLOAD (true) #define TX_ALIGNMENT (4) #define RXW_PADDING 2 #define LAN78XX_USB_VENDOR_ID (0x0424) #define LAN7800_USB_PRODUCT_ID (0x7800) #define LAN7850_USB_PRODUCT_ID (0x7850) #define LAN7801_USB_PRODUCT_ID (0x7801) #define LAN78XX_EEPROM_MAGIC (0x78A5) #define LAN78XX_OTP_MAGIC (0x78F3) #define AT29M2AF_USB_VENDOR_ID (0x07C9) #define AT29M2AF_USB_PRODUCT_ID (0x0012) #define MII_READ 1 #define MII_WRITE 0 #define EEPROM_INDICATOR (0xA5) #define EEPROM_MAC_OFFSET (0x01) #define MAX_EEPROM_SIZE 512 #define OTP_INDICATOR_1 (0xF3) #define OTP_INDICATOR_2 (0xF7) #define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \ WAKE_MCAST | WAKE_BCAST | \ WAKE_ARP | WAKE_MAGIC) #define TX_URB_NUM 10 #define TX_SS_URB_NUM TX_URB_NUM #define TX_HS_URB_NUM TX_URB_NUM #define TX_FS_URB_NUM TX_URB_NUM /* A single URB buffer must be large enough to hold a complete jumbo packet */ #define TX_SS_URB_SIZE (32 * 1024) #define TX_HS_URB_SIZE (16 * 1024) #define TX_FS_URB_SIZE (10 * 1024) #define RX_SS_URB_NUM 30 #define RX_HS_URB_NUM 10 #define RX_FS_URB_NUM 10 #define RX_SS_URB_SIZE TX_SS_URB_SIZE #define RX_HS_URB_SIZE TX_HS_URB_SIZE #define RX_FS_URB_SIZE TX_FS_URB_SIZE #define SS_BURST_CAP_SIZE RX_SS_URB_SIZE #define SS_BULK_IN_DELAY 0x2000 #define HS_BURST_CAP_SIZE RX_HS_URB_SIZE #define HS_BULK_IN_DELAY 0x2000 #define FS_BURST_CAP_SIZE RX_FS_URB_SIZE #define FS_BULK_IN_DELAY 0x2000 #define TX_CMD_LEN 8 #define TX_SKB_MIN_LEN (TX_CMD_LEN + ETH_HLEN) #define LAN78XX_TSO_SIZE(dev) ((dev)->tx_urb_size - TX_SKB_MIN_LEN) #define RX_CMD_LEN 10 #define RX_SKB_MIN_LEN (RX_CMD_LEN + ETH_HLEN) #define RX_MAX_FRAME_LEN(mtu) ((mtu) + ETH_HLEN + VLAN_HLEN) /* USB related defines */ #define BULK_IN_PIPE 1 #define BULK_OUT_PIPE 2 /* default autosuspend delay (mSec)*/ #define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000) /* statistic update interval (mSec) */ #define STAT_UPDATE_TIMER (1 * 1000) /* time to wait for MAC or FCT to stop (jiffies) */ #define HW_DISABLE_TIMEOUT (HZ / 10) /* time to wait between polling MAC or FCT state (ms) */ #define HW_DISABLE_DELAY_MS 1 /* defines interrupts from interrupt EP */ #define MAX_INT_EP (32) #define INT_EP_INTEP (31) #define 
INT_EP_OTP_WR_DONE (28) #define INT_EP_EEE_TX_LPI_START (26) #define INT_EP_EEE_TX_LPI_STOP (25) #define INT_EP_EEE_RX_LPI (24) #define INT_EP_MAC_RESET_TIMEOUT (23) #define INT_EP_RDFO (22) #define INT_EP_TXE (21) #define INT_EP_USB_STATUS (20) #define INT_EP_TX_DIS (19) #define INT_EP_RX_DIS (18) #define INT_EP_PHY (17) #define INT_EP_DP (16) #define INT_EP_MAC_ERR (15) #define INT_EP_TDFU (14) #define INT_EP_TDFO (13) #define INT_EP_UTX (12) #define INT_EP_GPIO_11 (11) #define INT_EP_GPIO_10 (10) #define INT_EP_GPIO_9 (9) #define INT_EP_GPIO_8 (8) #define INT_EP_GPIO_7 (7) #define INT_EP_GPIO_6 (6) #define INT_EP_GPIO_5 (5) #define INT_EP_GPIO_4 (4) #define INT_EP_GPIO_3 (3) #define INT_EP_GPIO_2 (2) #define INT_EP_GPIO_1 (1) #define INT_EP_GPIO_0 (0) static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = { "RX FCS Errors", "RX Alignment Errors", "Rx Fragment Errors", "RX Jabber Errors", "RX Undersize Frame Errors", "RX Oversize Frame Errors", "RX Dropped Frames", "RX Unicast Byte Count", "RX Broadcast Byte Count", "RX Multicast Byte Count", "RX Unicast Frames", "RX Broadcast Frames", "RX Multicast Frames", "RX Pause Frames", "RX 64 Byte Frames", "RX 65 - 127 Byte Frames", "RX 128 - 255 Byte Frames", "RX 256 - 511 Bytes Frames", "RX 512 - 1023 Byte Frames", "RX 1024 - 1518 Byte Frames", "RX Greater 1518 Byte Frames", "EEE RX LPI Transitions", "EEE RX LPI Time", "TX FCS Errors", "TX Excess Deferral Errors", "TX Carrier Errors", "TX Bad Byte Count", "TX Single Collisions", "TX Multiple Collisions", "TX Excessive Collision", "TX Late Collisions", "TX Unicast Byte Count", "TX Broadcast Byte Count", "TX Multicast Byte Count", "TX Unicast Frames", "TX Broadcast Frames", "TX Multicast Frames", "TX Pause Frames", "TX 64 Byte Frames", "TX 65 - 127 Byte Frames", "TX 128 - 255 Byte Frames", "TX 256 - 511 Bytes Frames", "TX 512 - 1023 Byte Frames", "TX 1024 - 1518 Byte Frames", "TX Greater 1518 Byte Frames", "EEE TX LPI Transitions", "EEE TX LPI Time", }; struct lan78xx_statstage { u32 rx_fcs_errors; u32 rx_alignment_errors; u32 rx_fragment_errors; u32 rx_jabber_errors; u32 rx_undersize_frame_errors; u32 rx_oversize_frame_errors; u32 rx_dropped_frames; u32 rx_unicast_byte_count; u32 rx_broadcast_byte_count; u32 rx_multicast_byte_count; u32 rx_unicast_frames; u32 rx_broadcast_frames; u32 rx_multicast_frames; u32 rx_pause_frames; u32 rx_64_byte_frames; u32 rx_65_127_byte_frames; u32 rx_128_255_byte_frames; u32 rx_256_511_bytes_frames; u32 rx_512_1023_byte_frames; u32 rx_1024_1518_byte_frames; u32 rx_greater_1518_byte_frames; u32 eee_rx_lpi_transitions; u32 eee_rx_lpi_time; u32 tx_fcs_errors; u32 tx_excess_deferral_errors; u32 tx_carrier_errors; u32 tx_bad_byte_count; u32 tx_single_collisions; u32 tx_multiple_collisions; u32 tx_excessive_collision; u32 tx_late_collisions; u32 tx_unicast_byte_count; u32 tx_broadcast_byte_count; u32 tx_multicast_byte_count; u32 tx_unicast_frames; u32 tx_broadcast_frames; u32 tx_multicast_frames; u32 tx_pause_frames; u32 tx_64_byte_frames; u32 tx_65_127_byte_frames; u32 tx_128_255_byte_frames; u32 tx_256_511_bytes_frames; u32 tx_512_1023_byte_frames; u32 tx_1024_1518_byte_frames; u32 tx_greater_1518_byte_frames; u32 eee_tx_lpi_transitions; u32 eee_tx_lpi_time; }; struct lan78xx_statstage64 { u64 rx_fcs_errors; u64 rx_alignment_errors; u64 rx_fragment_errors; u64 rx_jabber_errors; u64 rx_undersize_frame_errors; u64 rx_oversize_frame_errors; u64 rx_dropped_frames; u64 rx_unicast_byte_count; u64 rx_broadcast_byte_count; u64 rx_multicast_byte_count; u64 rx_unicast_frames; 
u64 rx_broadcast_frames; u64 rx_multicast_frames; u64 rx_pause_frames; u64 rx_64_byte_frames; u64 rx_65_127_byte_frames; u64 rx_128_255_byte_frames; u64 rx_256_511_bytes_frames; u64 rx_512_1023_byte_frames; u64 rx_1024_1518_byte_frames; u64 rx_greater_1518_byte_frames; u64 eee_rx_lpi_transitions; u64 eee_rx_lpi_time; u64 tx_fcs_errors; u64 tx_excess_deferral_errors; u64 tx_carrier_errors; u64 tx_bad_byte_count; u64 tx_single_collisions; u64 tx_multiple_collisions; u64 tx_excessive_collision; u64 tx_late_collisions; u64 tx_unicast_byte_count; u64 tx_broadcast_byte_count; u64 tx_multicast_byte_count; u64 tx_unicast_frames; u64 tx_broadcast_frames; u64 tx_multicast_frames; u64 tx_pause_frames; u64 tx_64_byte_frames; u64 tx_65_127_byte_frames; u64 tx_128_255_byte_frames; u64 tx_256_511_bytes_frames; u64 tx_512_1023_byte_frames; u64 tx_1024_1518_byte_frames; u64 tx_greater_1518_byte_frames; u64 eee_tx_lpi_transitions; u64 eee_tx_lpi_time; }; static u32 lan78xx_regs[] = { ID_REV, INT_STS, HW_CFG, PMT_CTL, E2P_CMD, E2P_DATA, USB_STATUS, VLAN_TYPE, MAC_CR, MAC_RX, MAC_TX, FLOW, ERR_STS, MII_ACC, MII_DATA, EEE_TX_LPI_REQ_DLY, EEE_TW_TX_SYS, EEE_TX_LPI_REM_DLY, WUCSR }; #define PHY_REG_SIZE (32 * sizeof(u32)) struct lan78xx_net; struct lan78xx_priv { struct lan78xx_net *dev; u32 rfe_ctl; u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */ u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */ u32 vlan_table[DP_SEL_VHF_VLAN_LEN]; struct mutex dataport_mutex; /* for dataport access */ spinlock_t rfe_ctl_lock; /* for rfe register access */ struct work_struct set_multicast; struct work_struct set_vlan; u32 wol; }; enum skb_state { illegal = 0, tx_start, tx_done, rx_start, rx_done, rx_cleanup, unlink_start }; struct skb_data { /* skb->cb is one of these */ struct urb *urb; struct lan78xx_net *dev; enum skb_state state; size_t length; int num_of_packet; }; struct usb_context { struct usb_ctrlrequest req; struct lan78xx_net *dev; }; #define EVENT_TX_HALT 0 #define EVENT_RX_HALT 1 #define EVENT_RX_MEMORY 2 #define EVENT_STS_SPLIT 3 #define EVENT_LINK_RESET 4 #define EVENT_RX_PAUSED 5 #define EVENT_DEV_WAKING 6 #define EVENT_DEV_ASLEEP 7 #define EVENT_DEV_OPEN 8 #define EVENT_STAT_UPDATE 9 #define EVENT_DEV_DISCONNECT 10 struct statstage { struct mutex access_lock; /* for stats access */ struct lan78xx_statstage saved; struct lan78xx_statstage rollover_count; struct lan78xx_statstage rollover_max; struct lan78xx_statstage64 curr_stat; }; struct irq_domain_data { struct irq_domain *irqdomain; unsigned int phyirq; struct irq_chip *irqchip; irq_flow_handler_t irq_handler; u32 irqenable; struct mutex irq_lock; /* for irq bus access */ }; struct lan78xx_net { struct net_device *net; struct usb_device *udev; struct usb_interface *intf; void *driver_priv; unsigned int tx_pend_data_len; size_t n_tx_urbs; size_t n_rx_urbs; size_t tx_urb_size; size_t rx_urb_size; struct sk_buff_head rxq_free; struct sk_buff_head rxq; struct sk_buff_head rxq_done; struct sk_buff_head rxq_overflow; struct sk_buff_head txq_free; struct sk_buff_head txq; struct sk_buff_head txq_pend; struct napi_struct napi; struct delayed_work wq; int msg_enable; struct urb *urb_intr; struct usb_anchor deferred; struct mutex dev_mutex; /* serialise open/stop wrt suspend/resume */ struct mutex phy_mutex; /* for phy access */ unsigned int pipe_in, pipe_out, pipe_intr; unsigned int bulk_in_delay; unsigned int burst_cap; unsigned long flags; wait_queue_head_t *wait; unsigned char suspend_count; unsigned int maxpacket; struct timer_list 
stat_monitor; unsigned long data[5]; int link_on; u8 mdix_ctrl; u32 chipid; u32 chiprev; struct mii_bus *mdiobus; phy_interface_t interface; int fc_autoneg; u8 fc_request_control; int delta; struct statstage stats; struct irq_domain_data domain_data; }; /* define external phy id */ #define PHY_LAN8835 (0x0007C130) #define PHY_KSZ9031RNX (0x00221620) /* use ethtool to change the level for any given device */ static int msg_level = -1; module_param(msg_level, int, 0); MODULE_PARM_DESC(msg_level, "Override default message level"); static struct sk_buff *lan78xx_get_buf(struct sk_buff_head *buf_pool) { if (skb_queue_empty(buf_pool)) return NULL; return skb_dequeue(buf_pool); } static void lan78xx_release_buf(struct sk_buff_head *buf_pool, struct sk_buff *buf) { buf->data = buf->head; skb_reset_tail_pointer(buf); buf->len = 0; buf->data_len = 0; skb_queue_tail(buf_pool, buf); } static void lan78xx_free_buf_pool(struct sk_buff_head *buf_pool) { struct skb_data *entry; struct sk_buff *buf; while (!skb_queue_empty(buf_pool)) { buf = skb_dequeue(buf_pool); if (buf) { entry = (struct skb_data *)buf->cb; usb_free_urb(entry->urb); dev_kfree_skb_any(buf); } } } static int lan78xx_alloc_buf_pool(struct sk_buff_head *buf_pool, size_t n_urbs, size_t urb_size, struct lan78xx_net *dev) { struct skb_data *entry; struct sk_buff *buf; struct urb *urb; int i; skb_queue_head_init(buf_pool); for (i = 0; i < n_urbs; i++) { buf = alloc_skb(urb_size, GFP_ATOMIC); if (!buf) goto error; if (skb_linearize(buf) != 0) { dev_kfree_skb_any(buf); goto error; } urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { dev_kfree_skb_any(buf); goto error; } entry = (struct skb_data *)buf->cb; entry->urb = urb; entry->dev = dev; entry->length = 0; entry->num_of_packet = 0; skb_queue_tail(buf_pool, buf); } return 0; error: lan78xx_free_buf_pool(buf_pool); return -ENOMEM; } static struct sk_buff *lan78xx_get_rx_buf(struct lan78xx_net *dev) { return lan78xx_get_buf(&dev->rxq_free); } static void lan78xx_release_rx_buf(struct lan78xx_net *dev, struct sk_buff *rx_buf) { lan78xx_release_buf(&dev->rxq_free, rx_buf); } static void lan78xx_free_rx_resources(struct lan78xx_net *dev) { lan78xx_free_buf_pool(&dev->rxq_free); } static int lan78xx_alloc_rx_resources(struct lan78xx_net *dev) { return lan78xx_alloc_buf_pool(&dev->rxq_free, dev->n_rx_urbs, dev->rx_urb_size, dev); } static struct sk_buff *lan78xx_get_tx_buf(struct lan78xx_net *dev) { return lan78xx_get_buf(&dev->txq_free); } static void lan78xx_release_tx_buf(struct lan78xx_net *dev, struct sk_buff *tx_buf) { lan78xx_release_buf(&dev->txq_free, tx_buf); } static void lan78xx_free_tx_resources(struct lan78xx_net *dev) { lan78xx_free_buf_pool(&dev->txq_free); } static int lan78xx_alloc_tx_resources(struct lan78xx_net *dev) { return lan78xx_alloc_buf_pool(&dev->txq_free, dev->n_tx_urbs, dev->tx_urb_size, dev); } static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data) { u32 *buf; int ret; if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags)) return -ENODEV; buf = kmalloc(sizeof(u32), GFP_KERNEL); if (!buf) return -ENOMEM; ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, buf, 4, USB_CTRL_GET_TIMEOUT); if (likely(ret >= 0)) { le32_to_cpus(buf); *data = *buf; } else if (net_ratelimit()) { netdev_warn(dev->net, "Failed to read register index 0x%08x. 
ret = %d", index, ret); } kfree(buf); return ret; } static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data) { u32 *buf; int ret; if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags)) return -ENODEV; buf = kmalloc(sizeof(u32), GFP_KERNEL); if (!buf) return -ENOMEM; *buf = data; cpu_to_le32s(buf); ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), USB_VENDOR_REQUEST_WRITE_REGISTER, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, index, buf, 4, USB_CTRL_SET_TIMEOUT); if (unlikely(ret < 0) && net_ratelimit()) { netdev_warn(dev->net, "Failed to write register index 0x%08x. ret = %d", index, ret); } kfree(buf); return ret; } static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask, u32 data) { int ret; u32 buf; ret = lan78xx_read_reg(dev, reg, &buf); if (ret < 0) return ret; buf &= ~mask; buf |= (mask & data); ret = lan78xx_write_reg(dev, reg, buf); if (ret < 0) return ret; return 0; } static int lan78xx_read_stats(struct lan78xx_net *dev, struct lan78xx_statstage *data) { int ret = 0; int i; struct lan78xx_statstage *stats; u32 *src; u32 *dst; stats = kmalloc(sizeof(*stats), GFP_KERNEL); if (!stats) return -ENOMEM; ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), USB_VENDOR_REQUEST_GET_STATS, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, (void *)stats, sizeof(*stats), USB_CTRL_SET_TIMEOUT); if (likely(ret >= 0)) { src = (u32 *)stats; dst = (u32 *)data; for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) { le32_to_cpus(&src[i]); dst[i] = src[i]; } } else { netdev_warn(dev->net, "Failed to read stat ret = %d", ret); } kfree(stats); return ret; } #define check_counter_rollover(struct1, dev_stats, member) \ do { \ if ((struct1)->member < (dev_stats).saved.member) \ (dev_stats).rollover_count.member++; \ } while (0) static void lan78xx_check_stat_rollover(struct lan78xx_net *dev, struct lan78xx_statstage *stats) { check_counter_rollover(stats, dev->stats, rx_fcs_errors); check_counter_rollover(stats, dev->stats, rx_alignment_errors); check_counter_rollover(stats, dev->stats, rx_fragment_errors); check_counter_rollover(stats, dev->stats, rx_jabber_errors); check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors); check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors); check_counter_rollover(stats, dev->stats, rx_dropped_frames); check_counter_rollover(stats, dev->stats, rx_unicast_byte_count); check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count); check_counter_rollover(stats, dev->stats, rx_multicast_byte_count); check_counter_rollover(stats, dev->stats, rx_unicast_frames); check_counter_rollover(stats, dev->stats, rx_broadcast_frames); check_counter_rollover(stats, dev->stats, rx_multicast_frames); check_counter_rollover(stats, dev->stats, rx_pause_frames); check_counter_rollover(stats, dev->stats, rx_64_byte_frames); check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames); check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames); check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames); check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames); check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames); check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames); check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions); check_counter_rollover(stats, dev->stats, eee_rx_lpi_time); check_counter_rollover(stats, dev->stats, tx_fcs_errors); check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors); 
check_counter_rollover(stats, dev->stats, tx_carrier_errors); check_counter_rollover(stats, dev->stats, tx_bad_byte_count); check_counter_rollover(stats, dev->stats, tx_single_collisions); check_counter_rollover(stats, dev->stats, tx_multiple_collisions); check_counter_rollover(stats, dev->stats, tx_excessive_collision); check_counter_rollover(stats, dev->stats, tx_late_collisions); check_counter_rollover(stats, dev->stats, tx_unicast_byte_count); check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count); check_counter_rollover(stats, dev->stats, tx_multicast_byte_count); check_counter_rollover(stats, dev->stats, tx_unicast_frames); check_counter_rollover(stats, dev->stats, tx_broadcast_frames); check_counter_rollover(stats, dev->stats, tx_multicast_frames); check_counter_rollover(stats, dev->stats, tx_pause_frames); check_counter_rollover(stats, dev->stats, tx_64_byte_frames); check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames); check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames); check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames); check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames); check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames); check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames); check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions); check_counter_rollover(stats, dev->stats, eee_tx_lpi_time); memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage)); } static void lan78xx_update_stats(struct lan78xx_net *dev) { u32 *p, *count, *max; u64 *data; int i; struct lan78xx_statstage lan78xx_stats; if (usb_autopm_get_interface(dev->intf) < 0) return; p = (u32 *)&lan78xx_stats; count = (u32 *)&dev->stats.rollover_count; max = (u32 *)&dev->stats.rollover_max; data = (u64 *)&dev->stats.curr_stat; mutex_lock(&dev->stats.access_lock); if (lan78xx_read_stats(dev, &lan78xx_stats) > 0) lan78xx_check_stat_rollover(dev, &lan78xx_stats); for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++) data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1)); mutex_unlock(&dev->stats.access_lock); usb_autopm_put_interface(dev->intf); } /* Loop until the read is completed with timeout called with phy_mutex held */ static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev) { unsigned long start_time = jiffies; u32 val; int ret; do { ret = lan78xx_read_reg(dev, MII_ACC, &val); if (unlikely(ret < 0)) return -EIO; if (!(val & MII_ACC_MII_BUSY_)) return 0; } while (!time_after(jiffies, start_time + HZ)); return -EIO; } static inline u32 mii_access(int id, int index, int read) { u32 ret; ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_; ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_; if (read) ret |= MII_ACC_MII_READ_; else ret |= MII_ACC_MII_WRITE_; ret |= MII_ACC_MII_BUSY_; return ret; } static int lan78xx_wait_eeprom(struct lan78xx_net *dev) { unsigned long start_time = jiffies; u32 val; int ret; do { ret = lan78xx_read_reg(dev, E2P_CMD, &val); if (unlikely(ret < 0)) return -EIO; if (!(val & E2P_CMD_EPC_BUSY_) || (val & E2P_CMD_EPC_TIMEOUT_)) break; usleep_range(40, 100); } while (!time_after(jiffies, start_time + HZ)); if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) { netdev_warn(dev->net, "EEPROM read operation timeout"); return -EIO; } return 0; } static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev) { unsigned long start_time = jiffies; u32 val; int ret; do { ret = lan78xx_read_reg(dev, E2P_CMD, &val); if 
(unlikely(ret < 0)) return -EIO; if (!(val & E2P_CMD_EPC_BUSY_)) return 0; usleep_range(40, 100); } while (!time_after(jiffies, start_time + HZ)); netdev_warn(dev->net, "EEPROM is busy"); return -EIO; } static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset, u32 length, u8 *data) { u32 val; u32 saved; int i, ret; int retval; /* depends on chip, some EEPROM pins are muxed with LED function. * disable & restore LED function to access EEPROM. */ ret = lan78xx_read_reg(dev, HW_CFG, &val); saved = val; if (dev->chipid == ID_REV_CHIP_ID_7800_) { val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_); ret = lan78xx_write_reg(dev, HW_CFG, val); } retval = lan78xx_eeprom_confirm_not_busy(dev); if (retval) return retval; for (i = 0; i < length; i++) { val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_; val |= (offset & E2P_CMD_EPC_ADDR_MASK_); ret = lan78xx_write_reg(dev, E2P_CMD, val); if (unlikely(ret < 0)) { retval = -EIO; goto exit; } retval = lan78xx_wait_eeprom(dev); if (retval < 0) goto exit; ret = lan78xx_read_reg(dev, E2P_DATA, &val); if (unlikely(ret < 0)) { retval = -EIO; goto exit; } data[i] = val & 0xFF; offset++; } retval = 0; exit: if (dev->chipid == ID_REV_CHIP_ID_7800_) ret = lan78xx_write_reg(dev, HW_CFG, saved); return retval; } static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset, u32 length, u8 *data) { u8 sig; int ret; ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig); if ((ret == 0) && (sig == EEPROM_INDICATOR)) ret = lan78xx_read_raw_eeprom(dev, offset, length, data); else ret = -EINVAL; return ret; } static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset, u32 length, u8 *data) { u32 val; u32 saved; int i, ret; int retval; /* depends on chip, some EEPROM pins are muxed with LED function. * disable & restore LED function to access EEPROM. 
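 * On the LAN7800 these pins are shared with the LED outputs, so HW_CFG is
 * saved here, the LED enables are cleared while the EEPROM is accessed, and
 * the original value is restored on the exit path (see the
 * ID_REV_CHIP_ID_7800_ checks below).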
*/ ret = lan78xx_read_reg(dev, HW_CFG, &val); saved = val; if (dev->chipid == ID_REV_CHIP_ID_7800_) { val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_); ret = lan78xx_write_reg(dev, HW_CFG, val); } retval = lan78xx_eeprom_confirm_not_busy(dev); if (retval) goto exit; /* Issue write/erase enable command */ val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_; ret = lan78xx_write_reg(dev, E2P_CMD, val); if (unlikely(ret < 0)) { retval = -EIO; goto exit; } retval = lan78xx_wait_eeprom(dev); if (retval < 0) goto exit; for (i = 0; i < length; i++) { /* Fill data register */ val = data[i]; ret = lan78xx_write_reg(dev, E2P_DATA, val); if (ret < 0) { retval = -EIO; goto exit; } /* Send "write" command */ val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_; val |= (offset & E2P_CMD_EPC_ADDR_MASK_); ret = lan78xx_write_reg(dev, E2P_CMD, val); if (ret < 0) { retval = -EIO; goto exit; } retval = lan78xx_wait_eeprom(dev); if (retval < 0) goto exit; offset++; } retval = 0; exit: if (dev->chipid == ID_REV_CHIP_ID_7800_) ret = lan78xx_write_reg(dev, HW_CFG, saved); return retval; } static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset, u32 length, u8 *data) { int i; u32 buf; unsigned long timeout; lan78xx_read_reg(dev, OTP_PWR_DN, &buf); if (buf & OTP_PWR_DN_PWRDN_N_) { /* clear it and wait to be cleared */ lan78xx_write_reg(dev, OTP_PWR_DN, 0); timeout = jiffies + HZ; do { usleep_range(1, 10); lan78xx_read_reg(dev, OTP_PWR_DN, &buf); if (time_after(jiffies, timeout)) { netdev_warn(dev->net, "timeout on OTP_PWR_DN"); return -EIO; } } while (buf & OTP_PWR_DN_PWRDN_N_); } for (i = 0; i < length; i++) { lan78xx_write_reg(dev, OTP_ADDR1, ((offset + i) >> 8) & OTP_ADDR1_15_11); lan78xx_write_reg(dev, OTP_ADDR2, ((offset + i) & OTP_ADDR2_10_3)); lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_); lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_); timeout = jiffies + HZ; do { udelay(1); lan78xx_read_reg(dev, OTP_STATUS, &buf); if (time_after(jiffies, timeout)) { netdev_warn(dev->net, "timeout on OTP_STATUS"); return -EIO; } } while (buf & OTP_STATUS_BUSY_); lan78xx_read_reg(dev, OTP_RD_DATA, &buf); data[i] = (u8)(buf & 0xFF); } return 0; } static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset, u32 length, u8 *data) { int i; u32 buf; unsigned long timeout; lan78xx_read_reg(dev, OTP_PWR_DN, &buf); if (buf & OTP_PWR_DN_PWRDN_N_) { /* clear it and wait to be cleared */ lan78xx_write_reg(dev, OTP_PWR_DN, 0); timeout = jiffies + HZ; do { udelay(1); lan78xx_read_reg(dev, OTP_PWR_DN, &buf); if (time_after(jiffies, timeout)) { netdev_warn(dev->net, "timeout on OTP_PWR_DN completion"); return -EIO; } } while (buf & OTP_PWR_DN_PWRDN_N_); } /* set to BYTE program mode */ lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_); for (i = 0; i < length; i++) { lan78xx_write_reg(dev, OTP_ADDR1, ((offset + i) >> 8) & OTP_ADDR1_15_11); lan78xx_write_reg(dev, OTP_ADDR2, ((offset + i) & OTP_ADDR2_10_3)); lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]); lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_); lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_); timeout = jiffies + HZ; do { udelay(1); lan78xx_read_reg(dev, OTP_STATUS, &buf); if (time_after(jiffies, timeout)) { netdev_warn(dev->net, "Timeout on OTP_STATUS completion"); return -EIO; } } while (buf & OTP_STATUS_BUSY_); } return 0; } static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset, u32 length, u8 *data) { u8 sig; int ret; ret = lan78xx_read_raw_otp(dev, 0, 1, &sig); if (ret == 0) { if (sig == OTP_INDICATOR_2) offset += 0x100; else 
if (sig != OTP_INDICATOR_1) ret = -EINVAL; if (!ret) ret = lan78xx_read_raw_otp(dev, offset, length, data); } return ret; } static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev) { int i, ret; for (i = 0; i < 100; i++) { u32 dp_sel; ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel); if (unlikely(ret < 0)) return -EIO; if (dp_sel & DP_SEL_DPRDY_) return 0; usleep_range(40, 100); } netdev_warn(dev->net, "%s timed out", __func__); return -EIO; } static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select, u32 addr, u32 length, u32 *buf) { struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); u32 dp_sel; int i, ret; if (usb_autopm_get_interface(dev->intf) < 0) return 0; mutex_lock(&pdata->dataport_mutex); ret = lan78xx_dataport_wait_not_busy(dev); if (ret < 0) goto done; ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel); dp_sel &= ~DP_SEL_RSEL_MASK_; dp_sel |= ram_select; ret = lan78xx_write_reg(dev, DP_SEL, dp_sel); for (i = 0; i < length; i++) { ret = lan78xx_write_reg(dev, DP_ADDR, addr + i); ret = lan78xx_write_reg(dev, DP_DATA, buf[i]); ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_); ret = lan78xx_dataport_wait_not_busy(dev); if (ret < 0) goto done; } done: mutex_unlock(&pdata->dataport_mutex); usb_autopm_put_interface(dev->intf); return ret; } static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata, int index, u8 addr[ETH_ALEN]) { u32 temp; if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) { temp = addr[3]; temp = addr[2] | (temp << 8); temp = addr[1] | (temp << 8); temp = addr[0] | (temp << 8); pdata->pfilter_table[index][1] = temp; temp = addr[5]; temp = addr[4] | (temp << 8); temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_; pdata->pfilter_table[index][0] = temp; } } /* returns hash bit number for given MAC address */ static inline u32 lan78xx_hash(char addr[ETH_ALEN]) { return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff; } static void lan78xx_deferred_multicast_write(struct work_struct *param) { struct lan78xx_priv *pdata = container_of(param, struct lan78xx_priv, set_multicast); struct lan78xx_net *dev = pdata->dev; int i; netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n", pdata->rfe_ctl); lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN, DP_SEL_VHF_HASH_LEN, pdata->mchash_table); for (i = 1; i < NUM_OF_MAF; i++) { lan78xx_write_reg(dev, MAF_HI(i), 0); lan78xx_write_reg(dev, MAF_LO(i), pdata->pfilter_table[i][1]); lan78xx_write_reg(dev, MAF_HI(i), pdata->pfilter_table[i][0]); } lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); } static void lan78xx_set_multicast(struct net_device *netdev) { struct lan78xx_net *dev = netdev_priv(netdev); struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); unsigned long flags; int i; spin_lock_irqsave(&pdata->rfe_ctl_lock, flags); pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ | RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_); for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++) pdata->mchash_table[i] = 0; /* pfilter_table[0] has own HW address */ for (i = 1; i < NUM_OF_MAF; i++) { pdata->pfilter_table[i][0] = 0; pdata->pfilter_table[i][1] = 0; } pdata->rfe_ctl |= RFE_CTL_BCAST_EN_; if (dev->net->flags & IFF_PROMISC) { netif_dbg(dev, drv, dev->net, "promiscuous mode enabled"); pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_; } else { if (dev->net->flags & IFF_ALLMULTI) { netif_dbg(dev, drv, dev->net, "receive all multicast enabled"); pdata->rfe_ctl |= RFE_CTL_MCAST_EN_; } } if (netdev_mc_count(dev->net)) { struct netdev_hw_addr *ha; int i; netif_dbg(dev, drv, 
dev->net, "receive multicast hash filter"); pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_; i = 1; netdev_for_each_mc_addr(ha, netdev) { /* set first 32 into Perfect Filter */ if (i < 33) { lan78xx_set_addr_filter(pdata, i, ha->addr); } else { u32 bitnum = lan78xx_hash(ha->addr); pdata->mchash_table[bitnum / 32] |= (1 << (bitnum % 32)); pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_; } i++; } } spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags); /* defer register writes to a sleepable context */ schedule_work(&pdata->set_multicast); } static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex, u16 lcladv, u16 rmtadv) { u32 flow = 0, fct_flow = 0; u8 cap; if (dev->fc_autoneg) cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); else cap = dev->fc_request_control; if (cap & FLOW_CTRL_TX) flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF); if (cap & FLOW_CTRL_RX) flow |= FLOW_CR_RX_FCEN_; if (dev->udev->speed == USB_SPEED_SUPER) fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS); else if (dev->udev->speed == USB_SPEED_HIGH) fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS); netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s", (cap & FLOW_CTRL_RX ? "enabled" : "disabled"), (cap & FLOW_CTRL_TX ? "enabled" : "disabled")); lan78xx_write_reg(dev, FCT_FLOW, fct_flow); /* threshold value should be set before enabling flow */ lan78xx_write_reg(dev, FLOW, flow); return 0; } static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev); static int lan78xx_mac_reset(struct lan78xx_net *dev) { unsigned long start_time = jiffies; u32 val; int ret; mutex_lock(&dev->phy_mutex); /* Resetting the device while there is activity on the MDIO * bus can result in the MAC interface locking up and not * completing register access transactions. */ ret = lan78xx_phy_wait_not_busy(dev); if (ret < 0) goto done; ret = lan78xx_read_reg(dev, MAC_CR, &val); if (ret < 0) goto done; val |= MAC_CR_RST_; ret = lan78xx_write_reg(dev, MAC_CR, val); if (ret < 0) goto done; /* Wait for the reset to complete before allowing any further * MAC register accesses otherwise the MAC may lock up. 
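 * MAC_CR_RST_ clears once the reset sequence finishes; the loop below polls
 * MAC_CR for roughly one second (HZ) and returns -ETIMEDOUT if the bit never
 * clears.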
*/ do { ret = lan78xx_read_reg(dev, MAC_CR, &val); if (ret < 0) goto done; if (!(val & MAC_CR_RST_)) { ret = 0; goto done; } } while (!time_after(jiffies, start_time + HZ)); ret = -ETIMEDOUT; done: mutex_unlock(&dev->phy_mutex); return ret; } static int lan78xx_link_reset(struct lan78xx_net *dev) { struct phy_device *phydev = dev->net->phydev; struct ethtool_link_ksettings ecmd; int ladv, radv, ret, link; u32 buf; /* clear LAN78xx interrupt status */ ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_); if (unlikely(ret < 0)) return ret; mutex_lock(&phydev->lock); phy_read_status(phydev); link = phydev->link; mutex_unlock(&phydev->lock); if (!link && dev->link_on) { dev->link_on = false; /* reset MAC */ ret = lan78xx_mac_reset(dev); if (ret < 0) return ret; del_timer(&dev->stat_monitor); } else if (link && !dev->link_on) { dev->link_on = true; phy_ethtool_ksettings_get(phydev, &ecmd); if (dev->udev->speed == USB_SPEED_SUPER) { if (ecmd.base.speed == 1000) { /* disable U2 */ ret = lan78xx_read_reg(dev, USB_CFG1, &buf); if (ret < 0) return ret; buf &= ~USB_CFG1_DEV_U2_INIT_EN_; ret = lan78xx_write_reg(dev, USB_CFG1, buf); if (ret < 0) return ret; /* enable U1 */ ret = lan78xx_read_reg(dev, USB_CFG1, &buf); if (ret < 0) return ret; buf |= USB_CFG1_DEV_U1_INIT_EN_; ret = lan78xx_write_reg(dev, USB_CFG1, buf); if (ret < 0) return ret; } else { /* enable U1 & U2 */ ret = lan78xx_read_reg(dev, USB_CFG1, &buf); if (ret < 0) return ret; buf |= USB_CFG1_DEV_U2_INIT_EN_; buf |= USB_CFG1_DEV_U1_INIT_EN_; ret = lan78xx_write_reg(dev, USB_CFG1, buf); if (ret < 0) return ret; } } ladv = phy_read(phydev, MII_ADVERTISE); if (ladv < 0) return ladv; radv = phy_read(phydev, MII_LPA); if (radv < 0) return radv; netif_dbg(dev, link, dev->net, "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x", ecmd.base.speed, ecmd.base.duplex, ladv, radv); ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv, radv); if (ret < 0) return ret; if (!timer_pending(&dev->stat_monitor)) { dev->delta = 1; mod_timer(&dev->stat_monitor, jiffies + STAT_UPDATE_TIMER); } lan78xx_rx_urb_submit_all(dev); local_bh_disable(); napi_schedule(&dev->napi); local_bh_enable(); } return 0; } /* some work can't be done in tasklets, so we use keventd * * NOTE: annoying asymmetry: if it's active, schedule_work() fails, * but tasklet_schedule() doesn't. hope the failure is rare. 
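 * lan78xx_defer_kevent() below records the event in dev->flags and kicks the
 * shared delayed work so the event is handled in process context. Illustrative
 * use, mirroring the error paths later in this file:
 *
 *	lan78xx_defer_kevent(dev, EVENT_TX_HALT);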
*/ static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work) { set_bit(work, &dev->flags); if (!schedule_delayed_work(&dev->wq, 0)) netdev_err(dev->net, "kevent %d may have been dropped\n", work); } static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb) { u32 intdata; if (urb->actual_length != 4) { netdev_warn(dev->net, "unexpected urb length %d", urb->actual_length); return; } intdata = get_unaligned_le32(urb->transfer_buffer); if (intdata & INT_ENP_PHY_INT) { netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata); lan78xx_defer_kevent(dev, EVENT_LINK_RESET); if (dev->domain_data.phyirq > 0) generic_handle_irq_safe(dev->domain_data.phyirq); } else { netdev_warn(dev->net, "unexpected interrupt: 0x%08x\n", intdata); } } static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev) { return MAX_EEPROM_SIZE; } static int lan78xx_ethtool_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) { struct lan78xx_net *dev = netdev_priv(netdev); int ret; ret = usb_autopm_get_interface(dev->intf); if (ret) return ret; ee->magic = LAN78XX_EEPROM_MAGIC; ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data); usb_autopm_put_interface(dev->intf); return ret; } static int lan78xx_ethtool_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *ee, u8 *data) { struct lan78xx_net *dev = netdev_priv(netdev); int ret; ret = usb_autopm_get_interface(dev->intf); if (ret) return ret; /* Invalid EEPROM_INDICATOR at offset zero will result in a failure * to load data from EEPROM */ if (ee->magic == LAN78XX_EEPROM_MAGIC) ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data); else if ((ee->magic == LAN78XX_OTP_MAGIC) && (ee->offset == 0) && (ee->len == 512) && (data[0] == OTP_INDICATOR_1)) ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data); usb_autopm_put_interface(dev->intf); return ret; } static void lan78xx_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { if (stringset == ETH_SS_STATS) memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings)); } static int lan78xx_get_sset_count(struct net_device *netdev, int sset) { if (sset == ETH_SS_STATS) return ARRAY_SIZE(lan78xx_gstrings); else return -EOPNOTSUPP; } static void lan78xx_get_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct lan78xx_net *dev = netdev_priv(netdev); lan78xx_update_stats(dev); mutex_lock(&dev->stats.access_lock); memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat)); mutex_unlock(&dev->stats.access_lock); } static void lan78xx_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct lan78xx_net *dev = netdev_priv(netdev); int ret; u32 buf; struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); if (usb_autopm_get_interface(dev->intf) < 0) return; ret = lan78xx_read_reg(dev, USB_CFG0, &buf); if (unlikely(ret < 0)) { wol->supported = 0; wol->wolopts = 0; } else { if (buf & USB_CFG_RMT_WKP_) { wol->supported = WAKE_ALL; wol->wolopts = pdata->wol; } else { wol->supported = 0; wol->wolopts = 0; } } usb_autopm_put_interface(dev->intf); } static int lan78xx_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct lan78xx_net *dev = netdev_priv(netdev); struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); int ret; ret = usb_autopm_get_interface(dev->intf); if (ret < 0) return ret; if (wol->wolopts & ~WAKE_ALL) return -EINVAL; pdata->wol = wol->wolopts; device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts); phy_ethtool_set_wol(netdev->phydev, 
wol); usb_autopm_put_interface(dev->intf); return ret; } static int lan78xx_get_eee(struct net_device *net, struct ethtool_keee *edata) { struct lan78xx_net *dev = netdev_priv(net); struct phy_device *phydev = net->phydev; int ret; u32 buf; ret = usb_autopm_get_interface(dev->intf); if (ret < 0) return ret; ret = phy_ethtool_get_eee(phydev, edata); if (ret < 0) goto exit; ret = lan78xx_read_reg(dev, MAC_CR, &buf); if (buf & MAC_CR_EEE_EN_) { edata->eee_enabled = true; edata->tx_lpi_enabled = true; /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */ ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf); edata->tx_lpi_timer = buf; } else { edata->eee_enabled = false; edata->eee_active = false; edata->tx_lpi_enabled = false; edata->tx_lpi_timer = 0; } ret = 0; exit: usb_autopm_put_interface(dev->intf); return ret; } static int lan78xx_set_eee(struct net_device *net, struct ethtool_keee *edata) { struct lan78xx_net *dev = netdev_priv(net); int ret; u32 buf; ret = usb_autopm_get_interface(dev->intf); if (ret < 0) return ret; if (edata->eee_enabled) { ret = lan78xx_read_reg(dev, MAC_CR, &buf); buf |= MAC_CR_EEE_EN_; ret = lan78xx_write_reg(dev, MAC_CR, buf); phy_ethtool_set_eee(net->phydev, edata); buf = (u32)edata->tx_lpi_timer; ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf); } else { ret = lan78xx_read_reg(dev, MAC_CR, &buf); buf &= ~MAC_CR_EEE_EN_; ret = lan78xx_write_reg(dev, MAC_CR, buf); } usb_autopm_put_interface(dev->intf); return 0; } static u32 lan78xx_get_link(struct net_device *net) { u32 link; mutex_lock(&net->phydev->lock); phy_read_status(net->phydev); link = net->phydev->link; mutex_unlock(&net->phydev->lock); return link; } static void lan78xx_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info) { struct lan78xx_net *dev = netdev_priv(net); strscpy(info->driver, DRIVER_NAME, sizeof(info->driver)); usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info)); } static u32 lan78xx_get_msglevel(struct net_device *net) { struct lan78xx_net *dev = netdev_priv(net); return dev->msg_enable; } static void lan78xx_set_msglevel(struct net_device *net, u32 level) { struct lan78xx_net *dev = netdev_priv(net); dev->msg_enable = level; } static int lan78xx_get_link_ksettings(struct net_device *net, struct ethtool_link_ksettings *cmd) { struct lan78xx_net *dev = netdev_priv(net); struct phy_device *phydev = net->phydev; int ret; ret = usb_autopm_get_interface(dev->intf); if (ret < 0) return ret; phy_ethtool_ksettings_get(phydev, cmd); usb_autopm_put_interface(dev->intf); return ret; } static int lan78xx_set_link_ksettings(struct net_device *net, const struct ethtool_link_ksettings *cmd) { struct lan78xx_net *dev = netdev_priv(net); struct phy_device *phydev = net->phydev; int ret = 0; int temp; ret = usb_autopm_get_interface(dev->intf); if (ret < 0) return ret; /* change speed & duplex */ ret = phy_ethtool_ksettings_set(phydev, cmd); if (!cmd->base.autoneg) { /* force link down */ temp = phy_read(phydev, MII_BMCR); phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK); mdelay(1); phy_write(phydev, MII_BMCR, temp); } usb_autopm_put_interface(dev->intf); return ret; } static void lan78xx_get_pause(struct net_device *net, struct ethtool_pauseparam *pause) { struct lan78xx_net *dev = netdev_priv(net); struct phy_device *phydev = net->phydev; struct ethtool_link_ksettings ecmd; phy_ethtool_ksettings_get(phydev, &ecmd); pause->autoneg = dev->fc_autoneg; if (dev->fc_request_control & FLOW_CTRL_TX) pause->tx_pause = 1; if (dev->fc_request_control & FLOW_CTRL_RX) 
pause->rx_pause = 1; } static int lan78xx_set_pause(struct net_device *net, struct ethtool_pauseparam *pause) { struct lan78xx_net *dev = netdev_priv(net); struct phy_device *phydev = net->phydev; struct ethtool_link_ksettings ecmd; int ret; phy_ethtool_ksettings_get(phydev, &ecmd); if (pause->autoneg && !ecmd.base.autoneg) { ret = -EINVAL; goto exit; } dev->fc_request_control = 0; if (pause->rx_pause) dev->fc_request_control |= FLOW_CTRL_RX; if (pause->tx_pause) dev->fc_request_control |= FLOW_CTRL_TX; if (ecmd.base.autoneg) { __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, }; u32 mii_adv; linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, ecmd.link_modes.advertising); linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, ecmd.link_modes.advertising); mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control); mii_adv_to_linkmode_adv_t(fc, mii_adv); linkmode_or(ecmd.link_modes.advertising, fc, ecmd.link_modes.advertising); phy_ethtool_ksettings_set(phydev, &ecmd); } dev->fc_autoneg = pause->autoneg; ret = 0; exit: return ret; } static int lan78xx_get_regs_len(struct net_device *netdev) { if (!netdev->phydev) return (sizeof(lan78xx_regs)); else return (sizeof(lan78xx_regs) + PHY_REG_SIZE); } static void lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf) { u32 *data = buf; int i, j; struct lan78xx_net *dev = netdev_priv(netdev); /* Read Device/MAC registers */ for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++) lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]); if (!netdev->phydev) return; /* Read PHY registers */ for (j = 0; j < 32; i++, j++) data[i] = phy_read(netdev->phydev, j); } static const struct ethtool_ops lan78xx_ethtool_ops = { .get_link = lan78xx_get_link, .nway_reset = phy_ethtool_nway_reset, .get_drvinfo = lan78xx_get_drvinfo, .get_msglevel = lan78xx_get_msglevel, .set_msglevel = lan78xx_set_msglevel, .get_eeprom_len = lan78xx_ethtool_get_eeprom_len, .get_eeprom = lan78xx_ethtool_get_eeprom, .set_eeprom = lan78xx_ethtool_set_eeprom, .get_ethtool_stats = lan78xx_get_stats, .get_sset_count = lan78xx_get_sset_count, .get_strings = lan78xx_get_strings, .get_wol = lan78xx_get_wol, .set_wol = lan78xx_set_wol, .get_ts_info = ethtool_op_get_ts_info, .get_eee = lan78xx_get_eee, .set_eee = lan78xx_set_eee, .get_pauseparam = lan78xx_get_pause, .set_pauseparam = lan78xx_set_pause, .get_link_ksettings = lan78xx_get_link_ksettings, .set_link_ksettings = lan78xx_set_link_ksettings, .get_regs_len = lan78xx_get_regs_len, .get_regs = lan78xx_get_regs, }; static void lan78xx_init_mac_address(struct lan78xx_net *dev) { u32 addr_lo, addr_hi; u8 addr[6]; lan78xx_read_reg(dev, RX_ADDRL, &addr_lo); lan78xx_read_reg(dev, RX_ADDRH, &addr_hi); addr[0] = addr_lo & 0xFF; addr[1] = (addr_lo >> 8) & 0xFF; addr[2] = (addr_lo >> 16) & 0xFF; addr[3] = (addr_lo >> 24) & 0xFF; addr[4] = addr_hi & 0xFF; addr[5] = (addr_hi >> 8) & 0xFF; if (!is_valid_ether_addr(addr)) { if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) { /* valid address present in Device Tree */ netif_dbg(dev, ifup, dev->net, "MAC address read from Device Tree"); } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, addr) == 0) || (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN, addr) == 0)) && is_valid_ether_addr(addr)) { /* eeprom values are valid so use them */ netif_dbg(dev, ifup, dev->net, "MAC address read from EEPROM"); } else { /* generate random MAC */ eth_random_addr(addr); netif_dbg(dev, ifup, dev->net, "MAC address set to random addr"); } addr_lo = addr[0] | (addr[1] << 8) | (addr[2] << 16) 
| (addr[3] << 24); addr_hi = addr[4] | (addr[5] << 8); lan78xx_write_reg(dev, RX_ADDRL, addr_lo); lan78xx_write_reg(dev, RX_ADDRH, addr_hi); } lan78xx_write_reg(dev, MAF_LO(0), addr_lo); lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_); eth_hw_addr_set(dev->net, addr); } /* MDIO read and write wrappers for phylib */ static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx) { struct lan78xx_net *dev = bus->priv; u32 val, addr; int ret; ret = usb_autopm_get_interface(dev->intf); if (ret < 0) return ret; mutex_lock(&dev->phy_mutex); /* confirm MII not busy */ ret = lan78xx_phy_wait_not_busy(dev); if (ret < 0) goto done; /* set the address, index & direction (read from PHY) */ addr = mii_access(phy_id, idx, MII_READ); ret = lan78xx_write_reg(dev, MII_ACC, addr); ret = lan78xx_phy_wait_not_busy(dev); if (ret < 0) goto done; ret = lan78xx_read_reg(dev, MII_DATA, &val); ret = (int)(val & 0xFFFF); done: mutex_unlock(&dev->phy_mutex); usb_autopm_put_interface(dev->intf); return ret; } static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx, u16 regval) { struct lan78xx_net *dev = bus->priv; u32 val, addr; int ret; ret = usb_autopm_get_interface(dev->intf); if (ret < 0) return ret; mutex_lock(&dev->phy_mutex); /* confirm MII not busy */ ret = lan78xx_phy_wait_not_busy(dev); if (ret < 0) goto done; val = (u32)regval; ret = lan78xx_write_reg(dev, MII_DATA, val); /* set the address, index & direction (write to PHY) */ addr = mii_access(phy_id, idx, MII_WRITE); ret = lan78xx_write_reg(dev, MII_ACC, addr); ret = lan78xx_phy_wait_not_busy(dev); if (ret < 0) goto done; done: mutex_unlock(&dev->phy_mutex); usb_autopm_put_interface(dev->intf); return 0; } static int lan78xx_mdio_init(struct lan78xx_net *dev) { struct device_node *node; int ret; dev->mdiobus = mdiobus_alloc(); if (!dev->mdiobus) { netdev_err(dev->net, "can't allocate MDIO bus\n"); return -ENOMEM; } dev->mdiobus->priv = (void *)dev; dev->mdiobus->read = lan78xx_mdiobus_read; dev->mdiobus->write = lan78xx_mdiobus_write; dev->mdiobus->name = "lan78xx-mdiobus"; dev->mdiobus->parent = &dev->udev->dev; snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d", dev->udev->bus->busnum, dev->udev->devnum); switch (dev->chipid) { case ID_REV_CHIP_ID_7800_: case ID_REV_CHIP_ID_7850_: /* set to internal PHY id */ dev->mdiobus->phy_mask = ~(1 << 1); break; case ID_REV_CHIP_ID_7801_: /* scan thru PHYAD[2..0] */ dev->mdiobus->phy_mask = ~(0xFF); break; } node = of_get_child_by_name(dev->udev->dev.of_node, "mdio"); ret = of_mdiobus_register(dev->mdiobus, node); of_node_put(node); if (ret) { netdev_err(dev->net, "can't register MDIO bus\n"); goto exit1; } netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id); return 0; exit1: mdiobus_free(dev->mdiobus); return ret; } static void lan78xx_remove_mdio(struct lan78xx_net *dev) { mdiobus_unregister(dev->mdiobus); mdiobus_free(dev->mdiobus); } static void lan78xx_link_status_change(struct net_device *net) { struct phy_device *phydev = net->phydev; phy_print_status(phydev); } static int irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) { struct irq_domain_data *data = d->host_data; irq_set_chip_data(irq, data); irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler); irq_set_noprobe(irq); return 0; } static void irq_unmap(struct irq_domain *d, unsigned int irq) { irq_set_chip_and_handler(irq, NULL, NULL); irq_set_chip_data(irq, NULL); } static const struct irq_domain_ops chip_domain_ops = { .map = irq_map, .unmap = irq_unmap, 
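	/* Only .map/.unmap are needed; this domain exists solely to provide
	 * the single PHY interrupt mapped in lan78xx_setup_irq_domain() below.
	 */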
}; static void lan78xx_irq_mask(struct irq_data *irqd) { struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd); data->irqenable &= ~BIT(irqd_to_hwirq(irqd)); } static void lan78xx_irq_unmask(struct irq_data *irqd) { struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd); data->irqenable |= BIT(irqd_to_hwirq(irqd)); } static void lan78xx_irq_bus_lock(struct irq_data *irqd) { struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd); mutex_lock(&data->irq_lock); } static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd) { struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd); struct lan78xx_net *dev = container_of(data, struct lan78xx_net, domain_data); u32 buf; /* call register access here because irq_bus_lock & irq_bus_sync_unlock * are only two callbacks executed in non-atomic contex. */ lan78xx_read_reg(dev, INT_EP_CTL, &buf); if (buf != data->irqenable) lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable); mutex_unlock(&data->irq_lock); } static struct irq_chip lan78xx_irqchip = { .name = "lan78xx-irqs", .irq_mask = lan78xx_irq_mask, .irq_unmask = lan78xx_irq_unmask, .irq_bus_lock = lan78xx_irq_bus_lock, .irq_bus_sync_unlock = lan78xx_irq_bus_sync_unlock, }; static int lan78xx_setup_irq_domain(struct lan78xx_net *dev) { struct device_node *of_node; struct irq_domain *irqdomain; unsigned int irqmap = 0; u32 buf; int ret = 0; of_node = dev->udev->dev.parent->of_node; mutex_init(&dev->domain_data.irq_lock); lan78xx_read_reg(dev, INT_EP_CTL, &buf); dev->domain_data.irqenable = buf; dev->domain_data.irqchip = &lan78xx_irqchip; dev->domain_data.irq_handler = handle_simple_irq; irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0, &chip_domain_ops, &dev->domain_data); if (irqdomain) { /* create mapping for PHY interrupt */ irqmap = irq_create_mapping(irqdomain, INT_EP_PHY); if (!irqmap) { irq_domain_remove(irqdomain); irqdomain = NULL; ret = -EINVAL; } } else { ret = -EINVAL; } dev->domain_data.irqdomain = irqdomain; dev->domain_data.phyirq = irqmap; return ret; } static void lan78xx_remove_irq_domain(struct lan78xx_net *dev) { if (dev->domain_data.phyirq > 0) { irq_dispose_mapping(dev->domain_data.phyirq); if (dev->domain_data.irqdomain) irq_domain_remove(dev->domain_data.irqdomain); } dev->domain_data.phyirq = 0; dev->domain_data.irqdomain = NULL; } static int lan8835_fixup(struct phy_device *phydev) { int buf; struct lan78xx_net *dev = netdev_priv(phydev->attached_dev); /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */ buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010); buf &= ~0x1800; buf |= 0x0800; phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf); /* RGMII MAC TXC Delay Enable */ lan78xx_write_reg(dev, MAC_RGMII_ID, MAC_RGMII_ID_TXC_DELAY_EN_); /* RGMII TX DLL Tune Adjust */ lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00); dev->interface = PHY_INTERFACE_MODE_RGMII_TXID; return 1; } static int ksz9031rnx_fixup(struct phy_device *phydev) { struct lan78xx_net *dev = netdev_priv(phydev->attached_dev); /* Micrel9301RNX PHY configuration */ /* RGMII Control Signal Pad Skew */ phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077); /* RGMII RX Data Pad Skew */ phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777); /* RGMII RX Clock Pad Skew */ phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF); dev->interface = PHY_INTERFACE_MODE_RGMII_RXID; return 1; } static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev) { u32 buf; int ret; struct fixed_phy_status fphy_status = { .link = 1, .speed = SPEED_1000, .duplex = DUPLEX_FULL, }; struct phy_device *phydev; phydev = 
phy_find_first(dev->mdiobus); if (!phydev) { netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n"); phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); if (IS_ERR(phydev)) { netdev_err(dev->net, "No PHY/fixed_PHY found\n"); return NULL; } netdev_dbg(dev->net, "Registered FIXED PHY\n"); dev->interface = PHY_INTERFACE_MODE_RGMII; ret = lan78xx_write_reg(dev, MAC_RGMII_ID, MAC_RGMII_ID_TXC_DELAY_EN_); ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00); ret = lan78xx_read_reg(dev, HW_CFG, &buf); buf |= HW_CFG_CLK125_EN_; buf |= HW_CFG_REFCLK25_EN_; ret = lan78xx_write_reg(dev, HW_CFG, buf); } else { if (!phydev->drv) { netdev_err(dev->net, "no PHY driver found\n"); return NULL; } dev->interface = PHY_INTERFACE_MODE_RGMII; /* external PHY fixup for KSZ9031RNX */ ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0, ksz9031rnx_fixup); if (ret < 0) { netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n"); return NULL; } /* external PHY fixup for LAN8835 */ ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0, lan8835_fixup); if (ret < 0) { netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n"); return NULL; } /* add more external PHY fixup here if needed */ phydev->is_internal = false; } return phydev; } static int lan78xx_phy_init(struct lan78xx_net *dev) { __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, }; int ret; u32 mii_adv; struct phy_device *phydev; switch (dev->chipid) { case ID_REV_CHIP_ID_7801_: phydev = lan7801_phy_init(dev); if (!phydev) { netdev_err(dev->net, "lan7801: PHY Init Failed"); return -EIO; } break; case ID_REV_CHIP_ID_7800_: case ID_REV_CHIP_ID_7850_: phydev = phy_find_first(dev->mdiobus); if (!phydev) { netdev_err(dev->net, "no PHY found\n"); return -EIO; } phydev->is_internal = true; dev->interface = PHY_INTERFACE_MODE_GMII; break; default: netdev_err(dev->net, "Unknown CHIP ID found\n"); return -EIO; } /* if phyirq is not set, use polling mode in phylib */ if (dev->domain_data.phyirq > 0) phydev->irq = dev->domain_data.phyirq; else phydev->irq = PHY_POLL; netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq); /* set to AUTOMDIX */ phydev->mdix = ETH_TP_MDI_AUTO; ret = phy_connect_direct(dev->net, phydev, lan78xx_link_status_change, dev->interface); if (ret) { netdev_err(dev->net, "can't attach PHY to %s\n", dev->mdiobus->id); if (dev->chipid == ID_REV_CHIP_ID_7801_) { if (phy_is_pseudo_fixed_link(phydev)) { fixed_phy_unregister(phydev); } else { phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0); phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0); } } return -EIO; } /* MAC doesn't support 1000T Half */ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); /* support both flow controls */ dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX); linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising); linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising); mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control); mii_adv_to_linkmode_adv_t(fc, mii_adv); linkmode_or(phydev->advertising, fc, phydev->advertising); if (phydev->mdio.dev.of_node) { u32 reg; int len; len = of_property_count_elems_of_size(phydev->mdio.dev.of_node, "microchip,led-modes", sizeof(u32)); if (len >= 0) { /* Ensure the appropriate LEDs are enabled */ lan78xx_read_reg(dev, HW_CFG, &reg); reg &= ~(HW_CFG_LED0_EN_ | HW_CFG_LED1_EN_ | HW_CFG_LED2_EN_ | HW_CFG_LED3_EN_); reg |= (len > 0) * HW_CFG_LED0_EN_ | (len > 1) * HW_CFG_LED1_EN_ | (len > 2) * HW_CFG_LED2_EN_ | (len > 3) * 
HW_CFG_LED3_EN_; lan78xx_write_reg(dev, HW_CFG, reg); } } genphy_config_aneg(phydev); dev->fc_autoneg = phydev->autoneg; return 0; } static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size) { u32 buf; bool rxenabled; lan78xx_read_reg(dev, MAC_RX, &buf); rxenabled = ((buf & MAC_RX_RXEN_) != 0); if (rxenabled) { buf &= ~MAC_RX_RXEN_; lan78xx_write_reg(dev, MAC_RX, buf); } /* add 4 to size for FCS */ buf &= ~MAC_RX_MAX_SIZE_MASK_; buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_); lan78xx_write_reg(dev, MAC_RX, buf); if (rxenabled) { buf |= MAC_RX_RXEN_; lan78xx_write_reg(dev, MAC_RX, buf); } return 0; } static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q) { struct sk_buff *skb; unsigned long flags; int count = 0; spin_lock_irqsave(&q->lock, flags); while (!skb_queue_empty(q)) { struct skb_data *entry; struct urb *urb; int ret; skb_queue_walk(q, skb) { entry = (struct skb_data *)skb->cb; if (entry->state != unlink_start) goto found; } break; found: entry->state = unlink_start; urb = entry->urb; /* Get reference count of the URB to avoid it to be * freed during usb_unlink_urb, which may trigger * use-after-free problem inside usb_unlink_urb since * usb_unlink_urb is always racing with .complete * handler(include defer_bh). */ usb_get_urb(urb); spin_unlock_irqrestore(&q->lock, flags); /* during some PM-driven resume scenarios, * these (async) unlinks complete immediately */ ret = usb_unlink_urb(urb); if (ret != -EINPROGRESS && ret != 0) netdev_dbg(dev->net, "unlink urb err, %d\n", ret); else count++; usb_put_urb(urb); spin_lock_irqsave(&q->lock, flags); } spin_unlock_irqrestore(&q->lock, flags); return count; } static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu) { struct lan78xx_net *dev = netdev_priv(netdev); int max_frame_len = RX_MAX_FRAME_LEN(new_mtu); int ret; /* no second zero-length packet read wanted after mtu-sized packets */ if ((max_frame_len % dev->maxpacket) == 0) return -EDOM; ret = usb_autopm_get_interface(dev->intf); if (ret < 0) return ret; ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len); if (!ret) netdev->mtu = new_mtu; usb_autopm_put_interface(dev->intf); return ret; } static int lan78xx_set_mac_addr(struct net_device *netdev, void *p) { struct lan78xx_net *dev = netdev_priv(netdev); struct sockaddr *addr = p; u32 addr_lo, addr_hi; if (netif_running(netdev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; eth_hw_addr_set(netdev, addr->sa_data); addr_lo = netdev->dev_addr[0] | netdev->dev_addr[1] << 8 | netdev->dev_addr[2] << 16 | netdev->dev_addr[3] << 24; addr_hi = netdev->dev_addr[4] | netdev->dev_addr[5] << 8; lan78xx_write_reg(dev, RX_ADDRL, addr_lo); lan78xx_write_reg(dev, RX_ADDRH, addr_hi); /* Added to support MAC address changes */ lan78xx_write_reg(dev, MAF_LO(0), addr_lo); lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_); return 0; } /* Enable or disable Rx checksum offload engine */ static int lan78xx_set_features(struct net_device *netdev, netdev_features_t features) { struct lan78xx_net *dev = netdev_priv(netdev); struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); unsigned long flags; spin_lock_irqsave(&pdata->rfe_ctl_lock, flags); if (features & NETIF_F_RXCSUM) { pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_; pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_; } else { pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_); pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_); } if (features 
& NETIF_F_HW_VLAN_CTAG_RX) pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_; else pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_; if (features & NETIF_F_HW_VLAN_CTAG_FILTER) pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_; else pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_; spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags); lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); return 0; } static void lan78xx_deferred_vlan_write(struct work_struct *param) { struct lan78xx_priv *pdata = container_of(param, struct lan78xx_priv, set_vlan); struct lan78xx_net *dev = pdata->dev; lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0, DP_SEL_VHF_VLAN_LEN, pdata->vlan_table); } static int lan78xx_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct lan78xx_net *dev = netdev_priv(netdev); struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); u16 vid_bit_index; u16 vid_dword_index; vid_dword_index = (vid >> 5) & 0x7F; vid_bit_index = vid & 0x1F; pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index); /* defer register writes to a sleepable context */ schedule_work(&pdata->set_vlan); return 0; } static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct lan78xx_net *dev = netdev_priv(netdev); struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); u16 vid_bit_index; u16 vid_dword_index; vid_dword_index = (vid >> 5) & 0x7F; vid_bit_index = vid & 0x1F; pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index); /* defer register writes to a sleepable context */ schedule_work(&pdata->set_vlan); return 0; } static void lan78xx_init_ltm(struct lan78xx_net *dev) { int ret; u32 buf; u32 regs[6] = { 0 }; ret = lan78xx_read_reg(dev, USB_CFG1, &buf); if (buf & USB_CFG1_LTM_ENABLE_) { u8 temp[2]; /* Get values from EEPROM first */ if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) { if (temp[0] == 24) { ret = lan78xx_read_raw_eeprom(dev, temp[1] * 2, 24, (u8 *)regs); if (ret < 0) return; } } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) { if (temp[0] == 24) { ret = lan78xx_read_raw_otp(dev, temp[1] * 2, 24, (u8 *)regs); if (ret < 0) return; } } } lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]); lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]); lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]); lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]); lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]); lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]); } static int lan78xx_urb_config_init(struct lan78xx_net *dev) { int result = 0; switch (dev->udev->speed) { case USB_SPEED_SUPER: dev->rx_urb_size = RX_SS_URB_SIZE; dev->tx_urb_size = TX_SS_URB_SIZE; dev->n_rx_urbs = RX_SS_URB_NUM; dev->n_tx_urbs = TX_SS_URB_NUM; dev->bulk_in_delay = SS_BULK_IN_DELAY; dev->burst_cap = SS_BURST_CAP_SIZE / SS_USB_PKT_SIZE; break; case USB_SPEED_HIGH: dev->rx_urb_size = RX_HS_URB_SIZE; dev->tx_urb_size = TX_HS_URB_SIZE; dev->n_rx_urbs = RX_HS_URB_NUM; dev->n_tx_urbs = TX_HS_URB_NUM; dev->bulk_in_delay = HS_BULK_IN_DELAY; dev->burst_cap = HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE; break; case USB_SPEED_FULL: dev->rx_urb_size = RX_FS_URB_SIZE; dev->tx_urb_size = TX_FS_URB_SIZE; dev->n_rx_urbs = RX_FS_URB_NUM; dev->n_tx_urbs = TX_FS_URB_NUM; dev->bulk_in_delay = FS_BULK_IN_DELAY; dev->burst_cap = FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE; break; default: netdev_warn(dev->net, "USB bus speed not supported\n"); result = -EIO; break; } return result; } static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable) { return lan78xx_update_reg(dev, reg, hw_enable, hw_enable); } static int 
lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled, u32 hw_disabled) { unsigned long timeout; bool stopped = true; int ret; u32 buf; /* Stop the h/w block (if not already stopped) */ ret = lan78xx_read_reg(dev, reg, &buf); if (ret < 0) return ret; if (buf & hw_enabled) { buf &= ~hw_enabled; ret = lan78xx_write_reg(dev, reg, buf); if (ret < 0) return ret; stopped = false; timeout = jiffies + HW_DISABLE_TIMEOUT; do { ret = lan78xx_read_reg(dev, reg, &buf); if (ret < 0) return ret; if (buf & hw_disabled) stopped = true; else msleep(HW_DISABLE_DELAY_MS); } while (!stopped && !time_after(jiffies, timeout)); } ret = stopped ? 0 : -ETIME; return ret; } static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush) { return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush); } static int lan78xx_start_tx_path(struct lan78xx_net *dev) { int ret; netif_dbg(dev, drv, dev->net, "start tx path"); /* Start the MAC transmitter */ ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_); if (ret < 0) return ret; /* Start the Tx FIFO */ ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_); if (ret < 0) return ret; return 0; } static int lan78xx_stop_tx_path(struct lan78xx_net *dev) { int ret; netif_dbg(dev, drv, dev->net, "stop tx path"); /* Stop the Tx FIFO */ ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_); if (ret < 0) return ret; /* Stop the MAC transmitter */ ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_); if (ret < 0) return ret; return 0; } /* The caller must ensure the Tx path is stopped before calling * lan78xx_flush_tx_fifo(). */ static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev) { return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_); } static int lan78xx_start_rx_path(struct lan78xx_net *dev) { int ret; netif_dbg(dev, drv, dev->net, "start rx path"); /* Start the Rx FIFO */ ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_); if (ret < 0) return ret; /* Start the MAC receiver*/ ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_); if (ret < 0) return ret; return 0; } static int lan78xx_stop_rx_path(struct lan78xx_net *dev) { int ret; netif_dbg(dev, drv, dev->net, "stop rx path"); /* Stop the MAC receiver */ ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_); if (ret < 0) return ret; /* Stop the Rx FIFO */ ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_); if (ret < 0) return ret; return 0; } /* The caller must ensure the Rx path is stopped before calling * lan78xx_flush_rx_fifo(). 
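 * Like lan78xx_flush_tx_fifo() above, this helper only sets the FIFO reset
 * bit through lan78xx_update_reg(); it relies on the caller having stopped
 * the Rx data path first.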
*/ static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev) { return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_); } static int lan78xx_reset(struct lan78xx_net *dev) { struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); unsigned long timeout; int ret; u32 buf; u8 sig; ret = lan78xx_read_reg(dev, HW_CFG, &buf); if (ret < 0) return ret; buf |= HW_CFG_LRST_; ret = lan78xx_write_reg(dev, HW_CFG, buf); if (ret < 0) return ret; timeout = jiffies + HZ; do { mdelay(1); ret = lan78xx_read_reg(dev, HW_CFG, &buf); if (ret < 0) return ret; if (time_after(jiffies, timeout)) { netdev_warn(dev->net, "timeout on completion of LiteReset"); ret = -ETIMEDOUT; return ret; } } while (buf & HW_CFG_LRST_); lan78xx_init_mac_address(dev); /* save DEVID for later usage */ ret = lan78xx_read_reg(dev, ID_REV, &buf); if (ret < 0) return ret; dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16; dev->chiprev = buf & ID_REV_CHIP_REV_MASK_; /* Respond to the IN token with a NAK */ ret = lan78xx_read_reg(dev, USB_CFG0, &buf); if (ret < 0) return ret; buf |= USB_CFG_BIR_; ret = lan78xx_write_reg(dev, USB_CFG0, buf); if (ret < 0) return ret; /* Init LTM */ lan78xx_init_ltm(dev); ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap); if (ret < 0) return ret; ret = lan78xx_write_reg(dev, BULK_IN_DLY, dev->bulk_in_delay); if (ret < 0) return ret; ret = lan78xx_read_reg(dev, HW_CFG, &buf); if (ret < 0) return ret; buf |= HW_CFG_MEF_; ret = lan78xx_write_reg(dev, HW_CFG, buf); if (ret < 0) return ret; ret = lan78xx_read_reg(dev, USB_CFG0, &buf); if (ret < 0) return ret; buf |= USB_CFG_BCE_; ret = lan78xx_write_reg(dev, USB_CFG0, buf); if (ret < 0) return ret; /* set FIFO sizes */ buf = (MAX_RX_FIFO_SIZE - 512) / 512; ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf); if (ret < 0) return ret; buf = (MAX_TX_FIFO_SIZE - 512) / 512; ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf); if (ret < 0) return ret; ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_); if (ret < 0) return ret; ret = lan78xx_write_reg(dev, FLOW, 0); if (ret < 0) return ret; ret = lan78xx_write_reg(dev, FCT_FLOW, 0); if (ret < 0) return ret; /* Don't need rfe_ctl_lock during initialisation */ ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl); if (ret < 0) return ret; pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_; ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl); if (ret < 0) return ret; /* Enable or disable checksum offload engines */ ret = lan78xx_set_features(dev->net, dev->net->features); if (ret < 0) return ret; lan78xx_set_multicast(dev->net); /* reset PHY */ ret = lan78xx_read_reg(dev, PMT_CTL, &buf); if (ret < 0) return ret; buf |= PMT_CTL_PHY_RST_; ret = lan78xx_write_reg(dev, PMT_CTL, buf); if (ret < 0) return ret; timeout = jiffies + HZ; do { mdelay(1); ret = lan78xx_read_reg(dev, PMT_CTL, &buf); if (ret < 0) return ret; if (time_after(jiffies, timeout)) { netdev_warn(dev->net, "timeout waiting for PHY Reset"); ret = -ETIMEDOUT; return ret; } } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_)); ret = lan78xx_read_reg(dev, MAC_CR, &buf); if (ret < 0) return ret; /* LAN7801 only has RGMII mode */ if (dev->chipid == ID_REV_CHIP_ID_7801_) buf &= ~MAC_CR_GMII_EN_; if (dev->chipid == ID_REV_CHIP_ID_7800_ || dev->chipid == ID_REV_CHIP_ID_7850_) { ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig); if (!ret && sig != EEPROM_INDICATOR) { /* Implies there is no external eeprom. Set mac speed */ netdev_info(dev->net, "No External EEPROM. 
Setting MAC Speed\n"); buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_; } } ret = lan78xx_write_reg(dev, MAC_CR, buf); if (ret < 0) return ret; ret = lan78xx_set_rx_max_frame_length(dev, RX_MAX_FRAME_LEN(dev->net->mtu)); return ret; } static void lan78xx_init_stats(struct lan78xx_net *dev) { u32 *p; int i; /* initialize for stats update * some counters are 20bits and some are 32bits */ p = (u32 *)&dev->stats.rollover_max; for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++) p[i] = 0xFFFFF; dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF; dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF; dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF; dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF; dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF; dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF; dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF; dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF; dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF; dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF; set_bit(EVENT_STAT_UPDATE, &dev->flags); } static int lan78xx_open(struct net_device *net) { struct lan78xx_net *dev = netdev_priv(net); int ret; netif_dbg(dev, ifup, dev->net, "open device"); ret = usb_autopm_get_interface(dev->intf); if (ret < 0) return ret; mutex_lock(&dev->dev_mutex); phy_start(net->phydev); netif_dbg(dev, ifup, dev->net, "phy initialised successfully"); /* for Link Check */ if (dev->urb_intr) { ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL); if (ret < 0) { netif_err(dev, ifup, dev->net, "intr submit %d\n", ret); goto done; } } ret = lan78xx_flush_rx_fifo(dev); if (ret < 0) goto done; ret = lan78xx_flush_tx_fifo(dev); if (ret < 0) goto done; ret = lan78xx_start_tx_path(dev); if (ret < 0) goto done; ret = lan78xx_start_rx_path(dev); if (ret < 0) goto done; lan78xx_init_stats(dev); set_bit(EVENT_DEV_OPEN, &dev->flags); netif_start_queue(net); dev->link_on = false; napi_enable(&dev->napi); lan78xx_defer_kevent(dev, EVENT_LINK_RESET); done: mutex_unlock(&dev->dev_mutex); if (ret < 0) usb_autopm_put_interface(dev->intf); return ret; } static void lan78xx_terminate_urbs(struct lan78xx_net *dev) { DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup); DECLARE_WAITQUEUE(wait, current); int temp; /* ensure there are no more active urbs */ add_wait_queue(&unlink_wakeup, &wait); set_current_state(TASK_UNINTERRUPTIBLE); dev->wait = &unlink_wakeup; temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq); /* maybe wait for deletions to finish. 
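 * unlink_urbs() only starts asynchronous unlinks; the loop below sleeps in
 * UNLINK_TIMEOUT_MS slices until both rxq and txq have drained.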
*/ while (!skb_queue_empty(&dev->rxq) || !skb_queue_empty(&dev->txq)) { schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS)); set_current_state(TASK_UNINTERRUPTIBLE); netif_dbg(dev, ifdown, dev->net, "waited for %d urb completions", temp); } set_current_state(TASK_RUNNING); dev->wait = NULL; remove_wait_queue(&unlink_wakeup, &wait); /* empty Rx done, Rx overflow and Tx pend queues */ while (!skb_queue_empty(&dev->rxq_done)) { struct sk_buff *skb = skb_dequeue(&dev->rxq_done); lan78xx_release_rx_buf(dev, skb); } skb_queue_purge(&dev->rxq_overflow); skb_queue_purge(&dev->txq_pend); } static int lan78xx_stop(struct net_device *net) { struct lan78xx_net *dev = netdev_priv(net); netif_dbg(dev, ifup, dev->net, "stop device"); mutex_lock(&dev->dev_mutex); if (timer_pending(&dev->stat_monitor)) del_timer_sync(&dev->stat_monitor); clear_bit(EVENT_DEV_OPEN, &dev->flags); netif_stop_queue(net); napi_disable(&dev->napi); lan78xx_terminate_urbs(dev); netif_info(dev, ifdown, dev->net, "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n", net->stats.rx_packets, net->stats.tx_packets, net->stats.rx_errors, net->stats.tx_errors); /* ignore errors that occur stopping the Tx and Rx data paths */ lan78xx_stop_tx_path(dev); lan78xx_stop_rx_path(dev); if (net->phydev) phy_stop(net->phydev); usb_kill_urb(dev->urb_intr); /* deferred work (task, timer, softirq) must also stop. * can't flush_scheduled_work() until we drop rtnl (later), * else workers could deadlock; so make workers a NOP. */ clear_bit(EVENT_TX_HALT, &dev->flags); clear_bit(EVENT_RX_HALT, &dev->flags); clear_bit(EVENT_LINK_RESET, &dev->flags); clear_bit(EVENT_STAT_UPDATE, &dev->flags); cancel_delayed_work_sync(&dev->wq); usb_autopm_put_interface(dev->intf); mutex_unlock(&dev->dev_mutex); return 0; } static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb, struct sk_buff_head *list, enum skb_state state) { unsigned long flags; enum skb_state old_state; struct skb_data *entry = (struct skb_data *)skb->cb; spin_lock_irqsave(&list->lock, flags); old_state = entry->state; entry->state = state; __skb_unlink(skb, list); spin_unlock(&list->lock); spin_lock(&dev->rxq_done.lock); __skb_queue_tail(&dev->rxq_done, skb); if (skb_queue_len(&dev->rxq_done) == 1) napi_schedule(&dev->napi); spin_unlock_irqrestore(&dev->rxq_done.lock, flags); return old_state; } static void tx_complete(struct urb *urb) { struct sk_buff *skb = (struct sk_buff *)urb->context; struct skb_data *entry = (struct skb_data *)skb->cb; struct lan78xx_net *dev = entry->dev; if (urb->status == 0) { dev->net->stats.tx_packets += entry->num_of_packet; dev->net->stats.tx_bytes += entry->length; } else { dev->net->stats.tx_errors += entry->num_of_packet; switch (urb->status) { case -EPIPE: lan78xx_defer_kevent(dev, EVENT_TX_HALT); break; /* software-driven interface shutdown */ case -ECONNRESET: case -ESHUTDOWN: netif_dbg(dev, tx_err, dev->net, "tx err interface gone %d\n", entry->urb->status); break; case -EPROTO: case -ETIME: case -EILSEQ: netif_stop_queue(dev->net); netif_dbg(dev, tx_err, dev->net, "tx err queue stopped %d\n", entry->urb->status); break; default: netif_dbg(dev, tx_err, dev->net, "unknown tx err %d\n", entry->urb->status); break; } } usb_autopm_put_interface_async(dev->intf); skb_unlink(skb, &dev->txq); lan78xx_release_tx_buf(dev, skb); /* Re-schedule NAPI if Tx data pending but no URBs in progress. 
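 * The NAPI poll routine is what turns txq_pend entries into fresh Tx URBs,
 * so waking it here keeps transmission going now that this URB's buffer has
 * been returned to the free pool.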
*/ if (skb_queue_empty(&dev->txq) && !skb_queue_empty(&dev->txq_pend)) napi_schedule(&dev->napi); } static void lan78xx_queue_skb(struct sk_buff_head *list, struct sk_buff *newsk, enum skb_state state) { struct skb_data *entry = (struct skb_data *)newsk->cb; __skb_queue_tail(list, newsk); entry->state = state; } static unsigned int lan78xx_tx_urb_space(struct lan78xx_net *dev) { return skb_queue_len(&dev->txq_free) * dev->tx_urb_size; } static unsigned int lan78xx_tx_pend_data_len(struct lan78xx_net *dev) { return dev->tx_pend_data_len; } static void lan78xx_tx_pend_skb_add(struct lan78xx_net *dev, struct sk_buff *skb, unsigned int *tx_pend_data_len) { unsigned long flags; spin_lock_irqsave(&dev->txq_pend.lock, flags); __skb_queue_tail(&dev->txq_pend, skb); dev->tx_pend_data_len += skb->len; *tx_pend_data_len = dev->tx_pend_data_len; spin_unlock_irqrestore(&dev->txq_pend.lock, flags); } static void lan78xx_tx_pend_skb_head_add(struct lan78xx_net *dev, struct sk_buff *skb, unsigned int *tx_pend_data_len) { unsigned long flags; spin_lock_irqsave(&dev->txq_pend.lock, flags); __skb_queue_head(&dev->txq_pend, skb); dev->tx_pend_data_len += skb->len; *tx_pend_data_len = dev->tx_pend_data_len; spin_unlock_irqrestore(&dev->txq_pend.lock, flags); } static void lan78xx_tx_pend_skb_get(struct lan78xx_net *dev, struct sk_buff **skb, unsigned int *tx_pend_data_len) { unsigned long flags; spin_lock_irqsave(&dev->txq_pend.lock, flags); *skb = __skb_dequeue(&dev->txq_pend); if (*skb) dev->tx_pend_data_len -= (*skb)->len; *tx_pend_data_len = dev->tx_pend_data_len; spin_unlock_irqrestore(&dev->txq_pend.lock, flags); } static netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net) { struct lan78xx_net *dev = netdev_priv(net); unsigned int tx_pend_data_len; if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) schedule_delayed_work(&dev->wq, 0); skb_tx_timestamp(skb); lan78xx_tx_pend_skb_add(dev, skb, &tx_pend_data_len); /* Set up a Tx URB if none is in progress */ if (skb_queue_empty(&dev->txq)) napi_schedule(&dev->napi); /* Stop stack Tx queue if we have enough data to fill * all the free Tx URBs. 
*/ if (tx_pend_data_len > lan78xx_tx_urb_space(dev)) { netif_stop_queue(net); netif_dbg(dev, hw, dev->net, "tx data len: %u, urb space %u", tx_pend_data_len, lan78xx_tx_urb_space(dev)); /* Kick off transmission of pending data */ if (!skb_queue_empty(&dev->txq_free)) napi_schedule(&dev->napi); } return NETDEV_TX_OK; } static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) { struct lan78xx_priv *pdata = NULL; int ret; int i; dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL); pdata = (struct lan78xx_priv *)(dev->data[0]); if (!pdata) { netdev_warn(dev->net, "Unable to allocate lan78xx_priv"); return -ENOMEM; } pdata->dev = dev; spin_lock_init(&pdata->rfe_ctl_lock); mutex_init(&pdata->dataport_mutex); INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write); for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++) pdata->vlan_table[i] = 0; INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write); dev->net->features = 0; if (DEFAULT_TX_CSUM_ENABLE) dev->net->features |= NETIF_F_HW_CSUM; if (DEFAULT_RX_CSUM_ENABLE) dev->net->features |= NETIF_F_RXCSUM; if (DEFAULT_TSO_CSUM_ENABLE) dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG; if (DEFAULT_VLAN_RX_OFFLOAD) dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX; if (DEFAULT_VLAN_FILTER_ENABLE) dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER; dev->net->hw_features = dev->net->features; ret = lan78xx_setup_irq_domain(dev); if (ret < 0) { netdev_warn(dev->net, "lan78xx_setup_irq_domain() failed : %d", ret); goto out1; } /* Init all registers */ ret = lan78xx_reset(dev); if (ret) { netdev_warn(dev->net, "Registers INIT FAILED...."); goto out2; } ret = lan78xx_mdio_init(dev); if (ret) { netdev_warn(dev->net, "MDIO INIT FAILED....."); goto out2; } dev->net->flags |= IFF_MULTICAST; pdata->wol = WAKE_MAGIC; return ret; out2: lan78xx_remove_irq_domain(dev); out1: netdev_warn(dev->net, "Bind routine FAILED"); cancel_work_sync(&pdata->set_multicast); cancel_work_sync(&pdata->set_vlan); kfree(pdata); return ret; } static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf) { struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); lan78xx_remove_irq_domain(dev); lan78xx_remove_mdio(dev); if (pdata) { cancel_work_sync(&pdata->set_multicast); cancel_work_sync(&pdata->set_vlan); netif_dbg(dev, ifdown, dev->net, "free pdata"); kfree(pdata); pdata = NULL; dev->data[0] = 0; } } static void lan78xx_rx_csum_offload(struct lan78xx_net *dev, struct sk_buff *skb, u32 rx_cmd_a, u32 rx_cmd_b) { /* HW Checksum offload appears to be flawed if used when not stripping * VLAN headers. Drop back to S/W checksums under these conditions. 
*/ if (!(dev->net->features & NETIF_F_RXCSUM) || unlikely(rx_cmd_a & RX_CMD_A_ICSM_) || ((rx_cmd_a & RX_CMD_A_FVTG_) && !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) { skb->ip_summed = CHECKSUM_NONE; } else { skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_)); skb->ip_summed = CHECKSUM_COMPLETE; } } static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev, struct sk_buff *skb, u32 rx_cmd_a, u32 rx_cmd_b) { if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) && (rx_cmd_a & RX_CMD_A_FVTG_)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), (rx_cmd_b & 0xffff)); } static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb) { dev->net->stats.rx_packets++; dev->net->stats.rx_bytes += skb->len; skb->protocol = eth_type_trans(skb, dev->net); netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n", skb->len + sizeof(struct ethhdr), skb->protocol); memset(skb->cb, 0, sizeof(struct skb_data)); if (skb_defer_rx_timestamp(skb)) return; napi_gro_receive(&dev->napi, skb); } static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb, int budget, int *work_done) { if (skb->len < RX_SKB_MIN_LEN) return 0; /* Extract frames from the URB buffer and pass each one to * the stack in a new NAPI SKB. */ while (skb->len > 0) { u32 rx_cmd_a, rx_cmd_b, align_count, size; u16 rx_cmd_c; unsigned char *packet; rx_cmd_a = get_unaligned_le32(skb->data); skb_pull(skb, sizeof(rx_cmd_a)); rx_cmd_b = get_unaligned_le32(skb->data); skb_pull(skb, sizeof(rx_cmd_b)); rx_cmd_c = get_unaligned_le16(skb->data); skb_pull(skb, sizeof(rx_cmd_c)); packet = skb->data; /* get the packet length */ size = (rx_cmd_a & RX_CMD_A_LEN_MASK_); align_count = (4 - ((size + RXW_PADDING) % 4)) % 4; if (unlikely(size > skb->len)) { netif_dbg(dev, rx_err, dev->net, "size err rx_cmd_a=0x%08x\n", rx_cmd_a); return 0; } if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) { netif_dbg(dev, rx_err, dev->net, "Error rx_cmd_a=0x%08x", rx_cmd_a); } else { u32 frame_len; struct sk_buff *skb2; if (unlikely(size < ETH_FCS_LEN)) { netif_dbg(dev, rx_err, dev->net, "size err rx_cmd_a=0x%08x\n", rx_cmd_a); return 0; } frame_len = size - ETH_FCS_LEN; skb2 = napi_alloc_skb(&dev->napi, frame_len); if (!skb2) return 0; memcpy(skb2->data, packet, frame_len); skb_put(skb2, frame_len); lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b); lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b); /* Processing of the URB buffer must complete once * it has started. If the NAPI work budget is exhausted * while frames remain they are added to the overflow * queue for delivery in the next NAPI polling cycle. 
*/ if (*work_done < budget) { lan78xx_skb_return(dev, skb2); ++(*work_done); } else { skb_queue_tail(&dev->rxq_overflow, skb2); } } skb_pull(skb, size); /* skip padding bytes before the next frame starts */ if (skb->len) skb_pull(skb, align_count); } return 1; } static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb, int budget, int *work_done) { if (!lan78xx_rx(dev, skb, budget, work_done)) { netif_dbg(dev, rx_err, dev->net, "drop\n"); dev->net->stats.rx_errors++; } } static void rx_complete(struct urb *urb) { struct sk_buff *skb = (struct sk_buff *)urb->context; struct skb_data *entry = (struct skb_data *)skb->cb; struct lan78xx_net *dev = entry->dev; int urb_status = urb->status; enum skb_state state; netif_dbg(dev, rx_status, dev->net, "rx done: status %d", urb->status); skb_put(skb, urb->actual_length); state = rx_done; if (urb != entry->urb) netif_warn(dev, rx_err, dev->net, "URB pointer mismatch"); switch (urb_status) { case 0: if (skb->len < RX_SKB_MIN_LEN) { state = rx_cleanup; dev->net->stats.rx_errors++; dev->net->stats.rx_length_errors++; netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len); } usb_mark_last_busy(dev->udev); break; case -EPIPE: dev->net->stats.rx_errors++; lan78xx_defer_kevent(dev, EVENT_RX_HALT); fallthrough; case -ECONNRESET: /* async unlink */ case -ESHUTDOWN: /* hardware gone */ netif_dbg(dev, ifdown, dev->net, "rx shutdown, code %d\n", urb_status); state = rx_cleanup; break; case -EPROTO: case -ETIME: case -EILSEQ: dev->net->stats.rx_errors++; state = rx_cleanup; break; /* data overrun ... flush fifo? */ case -EOVERFLOW: dev->net->stats.rx_over_errors++; fallthrough; default: state = rx_cleanup; dev->net->stats.rx_errors++; netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status); break; } state = defer_bh(dev, skb, &dev->rxq, state); } static int rx_submit(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags) { struct skb_data *entry = (struct skb_data *)skb->cb; size_t size = dev->rx_urb_size; struct urb *urb = entry->urb; unsigned long lockflags; int ret = 0; usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in, skb->data, size, rx_complete, skb); spin_lock_irqsave(&dev->rxq.lock, lockflags); if (netif_device_present(dev->net) && netif_running(dev->net) && !test_bit(EVENT_RX_HALT, &dev->flags) && !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) { ret = usb_submit_urb(urb, flags); switch (ret) { case 0: lan78xx_queue_skb(&dev->rxq, skb, rx_start); break; case -EPIPE: lan78xx_defer_kevent(dev, EVENT_RX_HALT); break; case -ENODEV: case -ENOENT: netif_dbg(dev, ifdown, dev->net, "device gone\n"); netif_device_detach(dev->net); break; case -EHOSTUNREACH: ret = -ENOLINK; napi_schedule(&dev->napi); break; default: netif_dbg(dev, rx_err, dev->net, "rx submit, %d\n", ret); napi_schedule(&dev->napi); break; } } else { netif_dbg(dev, ifdown, dev->net, "rx: stopped\n"); ret = -ENOLINK; } spin_unlock_irqrestore(&dev->rxq.lock, lockflags); if (ret) lan78xx_release_rx_buf(dev, skb); return ret; } static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev) { struct sk_buff *rx_buf; /* Ensure the maximum number of Rx URBs is submitted */ while ((rx_buf = lan78xx_get_rx_buf(dev)) != NULL) { if (rx_submit(dev, rx_buf, GFP_ATOMIC) != 0) break; } } static void lan78xx_rx_urb_resubmit(struct lan78xx_net *dev, struct sk_buff *rx_buf) { /* reset SKB data pointers */ rx_buf->data = rx_buf->head; skb_reset_tail_pointer(rx_buf); rx_buf->len = 0; rx_buf->data_len = 0; rx_submit(dev, rx_buf, GFP_ATOMIC); } static void lan78xx_fill_tx_cmd_words(struct 
sk_buff *skb, u8 *buffer) { u32 tx_cmd_a; u32 tx_cmd_b; tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_; if (skb->ip_summed == CHECKSUM_PARTIAL) tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_; tx_cmd_b = 0; if (skb_is_gso(skb)) { u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_); tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_; tx_cmd_a |= TX_CMD_A_LSO_; } if (skb_vlan_tag_present(skb)) { tx_cmd_a |= TX_CMD_A_IVTG_; tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_; } put_unaligned_le32(tx_cmd_a, buffer); put_unaligned_le32(tx_cmd_b, buffer + 4); } static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev, struct sk_buff *tx_buf) { struct skb_data *entry = (struct skb_data *)tx_buf->cb; int remain = dev->tx_urb_size; u8 *tx_data = tx_buf->data; u32 urb_len = 0; entry->num_of_packet = 0; entry->length = 0; /* Work through the pending SKBs and copy the data of each SKB into * the URB buffer if there room for all the SKB data. * * There must be at least DST+SRC+TYPE in the SKB (with padding enabled) */ while (remain >= TX_SKB_MIN_LEN) { unsigned int pending_bytes; unsigned int align_bytes; struct sk_buff *skb; unsigned int len; lan78xx_tx_pend_skb_get(dev, &skb, &pending_bytes); if (!skb) break; align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) % TX_ALIGNMENT; len = align_bytes + TX_CMD_LEN + skb->len; if (len > remain) { lan78xx_tx_pend_skb_head_add(dev, skb, &pending_bytes); break; } tx_data += align_bytes; lan78xx_fill_tx_cmd_words(skb, tx_data); tx_data += TX_CMD_LEN; len = skb->len; if (skb_copy_bits(skb, 0, tx_data, len) < 0) { struct net_device_stats *stats = &dev->net->stats; stats->tx_dropped++; dev_kfree_skb_any(skb); tx_data -= TX_CMD_LEN; continue; } tx_data += len; entry->length += len; entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1; dev_kfree_skb_any(skb); urb_len = (u32)(tx_data - (u8 *)tx_buf->data); remain = dev->tx_urb_size - urb_len; } skb_put(tx_buf, urb_len); return entry; } static void lan78xx_tx_bh(struct lan78xx_net *dev) { int ret; /* Start the stack Tx queue if it was stopped */ netif_tx_lock(dev->net); if (netif_queue_stopped(dev->net)) { if (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev)) netif_wake_queue(dev->net); } netif_tx_unlock(dev->net); /* Go through the Tx pending queue and set up URBs to transfer * the data to the device. Stop if no more pending data or URBs, * or if an error occurs when a URB is submitted. 
*/ do { struct skb_data *entry; struct sk_buff *tx_buf; unsigned long flags; if (skb_queue_empty(&dev->txq_pend)) break; tx_buf = lan78xx_get_tx_buf(dev); if (!tx_buf) break; entry = lan78xx_tx_buf_fill(dev, tx_buf); spin_lock_irqsave(&dev->txq.lock, flags); ret = usb_autopm_get_interface_async(dev->intf); if (ret < 0) { spin_unlock_irqrestore(&dev->txq.lock, flags); goto out; } usb_fill_bulk_urb(entry->urb, dev->udev, dev->pipe_out, tx_buf->data, tx_buf->len, tx_complete, tx_buf); if (tx_buf->len % dev->maxpacket == 0) { /* send USB_ZERO_PACKET */ entry->urb->transfer_flags |= URB_ZERO_PACKET; } #ifdef CONFIG_PM /* if device is asleep stop outgoing packet processing */ if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) { usb_anchor_urb(entry->urb, &dev->deferred); netif_stop_queue(dev->net); spin_unlock_irqrestore(&dev->txq.lock, flags); netdev_dbg(dev->net, "Delaying transmission for resumption\n"); return; } #endif ret = usb_submit_urb(entry->urb, GFP_ATOMIC); switch (ret) { case 0: netif_trans_update(dev->net); lan78xx_queue_skb(&dev->txq, tx_buf, tx_start); break; case -EPIPE: netif_stop_queue(dev->net); lan78xx_defer_kevent(dev, EVENT_TX_HALT); usb_autopm_put_interface_async(dev->intf); break; case -ENODEV: case -ENOENT: netif_dbg(dev, tx_err, dev->net, "tx submit urb err %d (disconnected?)", ret); netif_device_detach(dev->net); break; default: usb_autopm_put_interface_async(dev->intf); netif_dbg(dev, tx_err, dev->net, "tx submit urb err %d\n", ret); break; } spin_unlock_irqrestore(&dev->txq.lock, flags); if (ret) { netdev_warn(dev->net, "failed to tx urb %d\n", ret); out: dev->net->stats.tx_dropped += entry->num_of_packet; lan78xx_release_tx_buf(dev, tx_buf); } } while (ret == 0); } static int lan78xx_bh(struct lan78xx_net *dev, int budget) { struct sk_buff_head done; struct sk_buff *rx_buf; struct skb_data *entry; unsigned long flags; int work_done = 0; /* Pass frames received in the last NAPI cycle before * working on newly completed URBs. */ while (!skb_queue_empty(&dev->rxq_overflow)) { lan78xx_skb_return(dev, skb_dequeue(&dev->rxq_overflow)); ++work_done; } /* Take a snapshot of the done queue and move items to a * temporary queue. Rx URB completions will continue to add * to the done queue. */ __skb_queue_head_init(&done); spin_lock_irqsave(&dev->rxq_done.lock, flags); skb_queue_splice_init(&dev->rxq_done, &done); spin_unlock_irqrestore(&dev->rxq_done.lock, flags); /* Extract receive frames from completed URBs and * pass them to the stack. Re-submit each completed URB. */ while ((work_done < budget) && (rx_buf = __skb_dequeue(&done))) { entry = (struct skb_data *)(rx_buf->cb); switch (entry->state) { case rx_done: rx_process(dev, rx_buf, budget, &work_done); break; case rx_cleanup: break; default: netdev_dbg(dev->net, "rx buf state %d\n", entry->state); break; } lan78xx_rx_urb_resubmit(dev, rx_buf); } /* If budget was consumed before processing all the URBs put them * back on the front of the done queue. They will be first to be * processed in the next NAPI cycle. 
*/ spin_lock_irqsave(&dev->rxq_done.lock, flags); skb_queue_splice(&done, &dev->rxq_done); spin_unlock_irqrestore(&dev->rxq_done.lock, flags); if (netif_device_present(dev->net) && netif_running(dev->net)) { /* reset update timer delta */ if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) { dev->delta = 1; mod_timer(&dev->stat_monitor, jiffies + STAT_UPDATE_TIMER); } /* Submit all free Rx URBs */ if (!test_bit(EVENT_RX_HALT, &dev->flags)) lan78xx_rx_urb_submit_all(dev); /* Submit new Tx URBs */ lan78xx_tx_bh(dev); } return work_done; } static int lan78xx_poll(struct napi_struct *napi, int budget) { struct lan78xx_net *dev = container_of(napi, struct lan78xx_net, napi); int result = budget; int work_done; /* Don't do any work if the device is suspended */ if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) { napi_complete_done(napi, 0); return 0; } /* Process completed URBs and submit new URBs */ work_done = lan78xx_bh(dev, budget); if (work_done < budget) { napi_complete_done(napi, work_done); /* Start a new polling cycle if data was received or * data is waiting to be transmitted. */ if (!skb_queue_empty(&dev->rxq_done)) { napi_schedule(napi); } else if (netif_carrier_ok(dev->net)) { if (skb_queue_empty(&dev->txq) && !skb_queue_empty(&dev->txq_pend)) { napi_schedule(napi); } else { netif_tx_lock(dev->net); if (netif_queue_stopped(dev->net)) { netif_wake_queue(dev->net); napi_schedule(napi); } netif_tx_unlock(dev->net); } } result = work_done; } return result; } static void lan78xx_delayedwork(struct work_struct *work) { int status; struct lan78xx_net *dev; dev = container_of(work, struct lan78xx_net, wq.work); if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags)) return; if (usb_autopm_get_interface(dev->intf) < 0) return; if (test_bit(EVENT_TX_HALT, &dev->flags)) { unlink_urbs(dev, &dev->txq); status = usb_clear_halt(dev->udev, dev->pipe_out); if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) { if (netif_msg_tx_err(dev)) netdev_err(dev->net, "can't clear tx halt, status %d\n", status); } else { clear_bit(EVENT_TX_HALT, &dev->flags); if (status != -ESHUTDOWN) netif_wake_queue(dev->net); } } if (test_bit(EVENT_RX_HALT, &dev->flags)) { unlink_urbs(dev, &dev->rxq); status = usb_clear_halt(dev->udev, dev->pipe_in); if (status < 0 && status != -EPIPE && status != -ESHUTDOWN) { if (netif_msg_rx_err(dev)) netdev_err(dev->net, "can't clear rx halt, status %d\n", status); } else { clear_bit(EVENT_RX_HALT, &dev->flags); napi_schedule(&dev->napi); } } if (test_bit(EVENT_LINK_RESET, &dev->flags)) { int ret = 0; clear_bit(EVENT_LINK_RESET, &dev->flags); if (lan78xx_link_reset(dev) < 0) { netdev_info(dev->net, "link reset failed (%d)\n", ret); } } if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) { lan78xx_update_stats(dev); clear_bit(EVENT_STAT_UPDATE, &dev->flags); mod_timer(&dev->stat_monitor, jiffies + (STAT_UPDATE_TIMER * dev->delta)); dev->delta = min((dev->delta * 2), 50); } usb_autopm_put_interface(dev->intf); } static void intr_complete(struct urb *urb) { struct lan78xx_net *dev = urb->context; int status = urb->status; switch (status) { /* success */ case 0: lan78xx_status(dev, urb); break; /* software-driven interface shutdown */ case -ENOENT: /* urb killed */ case -ENODEV: /* hardware gone */ case -ESHUTDOWN: /* hardware gone */ netif_dbg(dev, ifdown, dev->net, "intr shutdown, code %d\n", status); return; /* NOTE: not throttling like RX/TX, since this endpoint * already polls infrequently */ default: netdev_dbg(dev->net, "intr status %d\n", status); break; } if 
(!netif_device_present(dev->net) || !netif_running(dev->net)) { netdev_warn(dev->net, "not submitting new status URB"); return; } memset(urb->transfer_buffer, 0, urb->transfer_buffer_length); status = usb_submit_urb(urb, GFP_ATOMIC); switch (status) { case 0: break; case -ENODEV: case -ENOENT: netif_dbg(dev, timer, dev->net, "intr resubmit %d (disconnect?)", status); netif_device_detach(dev->net); break; default: netif_err(dev, timer, dev->net, "intr resubmit --> %d\n", status); break; } } static void lan78xx_disconnect(struct usb_interface *intf) { struct lan78xx_net *dev; struct usb_device *udev; struct net_device *net; struct phy_device *phydev; dev = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); if (!dev) return; netif_napi_del(&dev->napi); udev = interface_to_usbdev(intf); net = dev->net; unregister_netdev(net); timer_shutdown_sync(&dev->stat_monitor); set_bit(EVENT_DEV_DISCONNECT, &dev->flags); cancel_delayed_work_sync(&dev->wq); phydev = net->phydev; phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0); phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0); phy_disconnect(net->phydev); if (phy_is_pseudo_fixed_link(phydev)) fixed_phy_unregister(phydev); usb_scuttle_anchored_urbs(&dev->deferred); lan78xx_unbind(dev, intf); lan78xx_free_tx_resources(dev); lan78xx_free_rx_resources(dev); usb_kill_urb(dev->urb_intr); usb_free_urb(dev->urb_intr); free_netdev(net); usb_put_dev(udev); } static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue) { struct lan78xx_net *dev = netdev_priv(net); unlink_urbs(dev, &dev->txq); napi_schedule(&dev->napi); } static netdev_features_t lan78xx_features_check(struct sk_buff *skb, struct net_device *netdev, netdev_features_t features) { struct lan78xx_net *dev = netdev_priv(netdev); if (skb->len > LAN78XX_TSO_SIZE(dev)) features &= ~NETIF_F_GSO_MASK; features = vlan_features_check(skb, features); features = vxlan_features_check(skb, features); return features; } static const struct net_device_ops lan78xx_netdev_ops = { .ndo_open = lan78xx_open, .ndo_stop = lan78xx_stop, .ndo_start_xmit = lan78xx_start_xmit, .ndo_tx_timeout = lan78xx_tx_timeout, .ndo_change_mtu = lan78xx_change_mtu, .ndo_set_mac_address = lan78xx_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_set_rx_mode = lan78xx_set_multicast, .ndo_set_features = lan78xx_set_features, .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid, .ndo_features_check = lan78xx_features_check, }; static void lan78xx_stat_monitor(struct timer_list *t) { struct lan78xx_net *dev = from_timer(dev, t, stat_monitor); lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE); } static int lan78xx_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr; struct lan78xx_net *dev; struct net_device *netdev; struct usb_device *udev; int ret; unsigned int maxp; unsigned int period; u8 *buf = NULL; udev = interface_to_usbdev(intf); udev = usb_get_dev(udev); netdev = alloc_etherdev(sizeof(struct lan78xx_net)); if (!netdev) { dev_err(&intf->dev, "Error: OOM\n"); ret = -ENOMEM; goto out1; } /* netdev_printk() needs this */ SET_NETDEV_DEV(netdev, &intf->dev); dev = netdev_priv(netdev); dev->udev = udev; dev->intf = intf; dev->net = netdev; dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK); skb_queue_head_init(&dev->rxq); skb_queue_head_init(&dev->txq); skb_queue_head_init(&dev->rxq_done); 
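/* Descriptive note on the SKB queue roles, inferred from this driver's own
 * code paths: rxq and txq hold buffers whose URBs are currently in flight;
 * rxq_done holds completed Rx buffers awaiting NAPI processing (see
 * defer_bh() and lan78xx_bh()); txq_pend buffers SKBs from ndo_start_xmit
 * until a free Tx URB can batch them; rxq_overflow keeps frames decoded
 * after the NAPI budget was exhausted so they are delivered first in the
 * next polling cycle.
 */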
skb_queue_head_init(&dev->txq_pend); skb_queue_head_init(&dev->rxq_overflow); mutex_init(&dev->phy_mutex); mutex_init(&dev->dev_mutex); ret = lan78xx_urb_config_init(dev); if (ret < 0) goto out2; ret = lan78xx_alloc_tx_resources(dev); if (ret < 0) goto out2; ret = lan78xx_alloc_rx_resources(dev); if (ret < 0) goto out3; /* MTU range: 68 - 9000 */ netdev->max_mtu = MAX_SINGLE_PACKET_SIZE; netif_set_tso_max_size(netdev, LAN78XX_TSO_SIZE(dev)); netif_napi_add(netdev, &dev->napi, lan78xx_poll); INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork); init_usb_anchor(&dev->deferred); netdev->netdev_ops = &lan78xx_netdev_ops; netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES; netdev->ethtool_ops = &lan78xx_ethtool_ops; dev->delta = 1; timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0); mutex_init(&dev->stats.access_lock); if (intf->cur_altsetting->desc.bNumEndpoints < 3) { ret = -ENODEV; goto out4; } dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE); ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in); if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) { ret = -ENODEV; goto out4; } dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE); ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out); if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) { ret = -ENODEV; goto out4; } ep_intr = &intf->cur_altsetting->endpoint[2]; if (!usb_endpoint_is_int_in(&ep_intr->desc)) { ret = -ENODEV; goto out4; } dev->pipe_intr = usb_rcvintpipe(dev->udev, usb_endpoint_num(&ep_intr->desc)); ret = lan78xx_bind(dev, intf); if (ret < 0) goto out4; period = ep_intr->desc.bInterval; maxp = usb_maxpacket(dev->udev, dev->pipe_intr); buf = kmalloc(maxp, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto out5; } dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL); if (!dev->urb_intr) { ret = -ENOMEM; goto out6; } else { usb_fill_int_urb(dev->urb_intr, dev->udev, dev->pipe_intr, buf, maxp, intr_complete, dev, period); dev->urb_intr->transfer_flags |= URB_FREE_BUFFER; } dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out); /* Reject broken descriptors. */ if (dev->maxpacket == 0) { ret = -ENODEV; goto out6; } /* driver requires remote-wakeup capability during autosuspend. */ intf->needs_remote_wakeup = 1; ret = lan78xx_phy_init(dev); if (ret < 0) goto out7; ret = register_netdev(netdev); if (ret != 0) { netif_err(dev, probe, netdev, "couldn't register the device\n"); goto out8; } usb_set_intfdata(intf, dev); ret = device_set_wakeup_enable(&udev->dev, true); /* Default delay of 2sec has more overhead than advantage. * Set to 10sec as default. 
*/ pm_runtime_set_autosuspend_delay(&udev->dev, DEFAULT_AUTOSUSPEND_DELAY); return 0; out8: phy_disconnect(netdev->phydev); out7: usb_free_urb(dev->urb_intr); out6: kfree(buf); out5: lan78xx_unbind(dev, intf); out4: netif_napi_del(&dev->napi); lan78xx_free_rx_resources(dev); out3: lan78xx_free_tx_resources(dev); out2: free_netdev(netdev); out1: usb_put_dev(udev); return ret; } static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len) { const u16 crc16poly = 0x8005; int i; u16 bit, crc, msb; u8 data; crc = 0xFFFF; for (i = 0; i < len; i++) { data = *buf++; for (bit = 0; bit < 8; bit++) { msb = crc >> 15; crc <<= 1; if (msb ^ (u16)(data & 1)) { crc ^= crc16poly; crc |= (u16)0x0001U; } data >>= 1; } } return crc; } static int lan78xx_set_auto_suspend(struct lan78xx_net *dev) { u32 buf; int ret; ret = lan78xx_stop_tx_path(dev); if (ret < 0) return ret; ret = lan78xx_stop_rx_path(dev); if (ret < 0) return ret; /* auto suspend (selective suspend) */ ret = lan78xx_write_reg(dev, WUCSR, 0); if (ret < 0) return ret; ret = lan78xx_write_reg(dev, WUCSR2, 0); if (ret < 0) return ret; ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL); if (ret < 0) return ret; /* set goodframe wakeup */ ret = lan78xx_read_reg(dev, WUCSR, &buf); if (ret < 0) return ret; buf |= WUCSR_RFE_WAKE_EN_; buf |= WUCSR_STORE_WAKE_; ret = lan78xx_write_reg(dev, WUCSR, buf); if (ret < 0) return ret; ret = lan78xx_read_reg(dev, PMT_CTL, &buf); if (ret < 0) return ret; buf &= ~PMT_CTL_RES_CLR_WKP_EN_; buf |= PMT_CTL_RES_CLR_WKP_STS_; buf |= PMT_CTL_PHY_WAKE_EN_; buf |= PMT_CTL_WOL_EN_; buf &= ~PMT_CTL_SUS_MODE_MASK_; buf |= PMT_CTL_SUS_MODE_3_; ret = lan78xx_write_reg(dev, PMT_CTL, buf); if (ret < 0) return ret; ret = lan78xx_read_reg(dev, PMT_CTL, &buf); if (ret < 0) return ret; buf |= PMT_CTL_WUPS_MASK_; ret = lan78xx_write_reg(dev, PMT_CTL, buf); if (ret < 0) return ret; ret = lan78xx_start_rx_path(dev); return ret; } static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol) { const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E }; const u8 ipv6_multicast[3] = { 0x33, 0x33 }; const u8 arp_type[2] = { 0x08, 0x06 }; u32 temp_pmt_ctl; int mask_index; u32 temp_wucsr; u32 buf; u16 crc; int ret; ret = lan78xx_stop_tx_path(dev); if (ret < 0) return ret; ret = lan78xx_stop_rx_path(dev); if (ret < 0) return ret; ret = lan78xx_write_reg(dev, WUCSR, 0); if (ret < 0) return ret; ret = lan78xx_write_reg(dev, WUCSR2, 0); if (ret < 0) return ret; ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL); if (ret < 0) return ret; temp_wucsr = 0; temp_pmt_ctl = 0; ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl); if (ret < 0) return ret; temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_; temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_; for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) { ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0); if (ret < 0) return ret; } mask_index = 0; if (wol & WAKE_PHY) { temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_; temp_pmt_ctl |= PMT_CTL_WOL_EN_; temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; } if (wol & WAKE_MAGIC) { temp_wucsr |= WUCSR_MPEN_; temp_pmt_ctl |= PMT_CTL_WOL_EN_; temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_; } if (wol & WAKE_BCAST) { temp_wucsr |= WUCSR_BCST_EN_; temp_pmt_ctl |= PMT_CTL_WOL_EN_; temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; } if (wol & WAKE_MCAST) { temp_wucsr |= WUCSR_WAKE_EN_; /* set WUF_CFG & WUF_MASK for IPv4 Multicast */ crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3); ret = 
lan78xx_write_reg(dev, WUF_CFG(mask_index), WUF_CFGX_EN_ | WUF_CFGX_TYPE_MCAST_ | (0 << WUF_CFGX_OFFSET_SHIFT_) | (crc & WUF_CFGX_CRC16_MASK_)); if (ret < 0) return ret; ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7); if (ret < 0) return ret; ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); if (ret < 0) return ret; ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); if (ret < 0) return ret; ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0); if (ret < 0) return ret; mask_index++; /* for IPv6 Multicast */ crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2); ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), WUF_CFGX_EN_ | WUF_CFGX_TYPE_MCAST_ | (0 << WUF_CFGX_OFFSET_SHIFT_) | (crc & WUF_CFGX_CRC16_MASK_)); if (ret < 0) return ret; ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3); if (ret < 0) return ret; ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); if (ret < 0) return ret; ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); if (ret < 0) return ret; ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0); if (ret < 0) return ret; mask_index++; temp_pmt_ctl |= PMT_CTL_WOL_EN_; temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; } if (wol & WAKE_UCAST) { temp_wucsr |= WUCSR_PFDA_EN_; temp_pmt_ctl |= PMT_CTL_WOL_EN_; temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; } if (wol & WAKE_ARP) { temp_wucsr |= WUCSR_WAKE_EN_; /* set WUF_CFG & WUF_MASK * for packettype (offset 12,13) = ARP (0x0806) */ crc = lan78xx_wakeframe_crc16(arp_type, 2); ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), WUF_CFGX_EN_ | WUF_CFGX_TYPE_ALL_ | (0 << WUF_CFGX_OFFSET_SHIFT_) | (crc & WUF_CFGX_CRC16_MASK_)); if (ret < 0) return ret; ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000); if (ret < 0) return ret; ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0); if (ret < 0) return ret; ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0); if (ret < 0) return ret; ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0); if (ret < 0) return ret; mask_index++; temp_pmt_ctl |= PMT_CTL_WOL_EN_; temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; } ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr); if (ret < 0) return ret; /* when multiple WOL bits are set */ if (hweight_long((unsigned long)wol) > 1) { temp_pmt_ctl |= PMT_CTL_WOL_EN_; temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_; temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_; } ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl); if (ret < 0) return ret; /* clear WUPS */ ret = lan78xx_read_reg(dev, PMT_CTL, &buf); if (ret < 0) return ret; buf |= PMT_CTL_WUPS_MASK_; ret = lan78xx_write_reg(dev, PMT_CTL, buf); if (ret < 0) return ret; ret = lan78xx_start_rx_path(dev); return ret; } static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message) { struct lan78xx_net *dev = usb_get_intfdata(intf); bool dev_open; int ret; mutex_lock(&dev->dev_mutex); netif_dbg(dev, ifdown, dev->net, "suspending: pm event %#x", message.event); dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags); if (dev_open) { spin_lock_irq(&dev->txq.lock); /* don't autosuspend while transmitting */ if ((skb_queue_len(&dev->txq) || skb_queue_len(&dev->txq_pend)) && PMSG_IS_AUTO(message)) { spin_unlock_irq(&dev->txq.lock); ret = -EBUSY; goto out; } else { set_bit(EVENT_DEV_ASLEEP, &dev->flags); spin_unlock_irq(&dev->txq.lock); } /* stop RX */ ret = lan78xx_stop_rx_path(dev); if (ret < 0) goto out; ret = lan78xx_flush_rx_fifo(dev); if (ret < 0) goto out; /* stop Tx */ ret = 
lan78xx_stop_tx_path(dev); if (ret < 0) goto out; /* empty out the Rx and Tx queues */ netif_device_detach(dev->net); lan78xx_terminate_urbs(dev); usb_kill_urb(dev->urb_intr); /* reattach */ netif_device_attach(dev->net); del_timer(&dev->stat_monitor); if (PMSG_IS_AUTO(message)) { ret = lan78xx_set_auto_suspend(dev); if (ret < 0) goto out; } else { struct lan78xx_priv *pdata; pdata = (struct lan78xx_priv *)(dev->data[0]); netif_carrier_off(dev->net); ret = lan78xx_set_suspend(dev, pdata->wol); if (ret < 0) goto out; } } else { /* Interface is down; don't allow WOL and PHY * events to wake up the host */ u32 buf; set_bit(EVENT_DEV_ASLEEP, &dev->flags); ret = lan78xx_write_reg(dev, WUCSR, 0); if (ret < 0) goto out; ret = lan78xx_write_reg(dev, WUCSR2, 0); if (ret < 0) goto out; ret = lan78xx_read_reg(dev, PMT_CTL, &buf); if (ret < 0) goto out; buf &= ~PMT_CTL_RES_CLR_WKP_EN_; buf |= PMT_CTL_RES_CLR_WKP_STS_; buf &= ~PMT_CTL_SUS_MODE_MASK_; buf |= PMT_CTL_SUS_MODE_3_; ret = lan78xx_write_reg(dev, PMT_CTL, buf); if (ret < 0) goto out; ret = lan78xx_read_reg(dev, PMT_CTL, &buf); if (ret < 0) goto out; buf |= PMT_CTL_WUPS_MASK_; ret = lan78xx_write_reg(dev, PMT_CTL, buf); if (ret < 0) goto out; } ret = 0; out: mutex_unlock(&dev->dev_mutex); return ret; } static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev) { bool pipe_halted = false; struct urb *urb; while ((urb = usb_get_from_anchor(&dev->deferred))) { struct sk_buff *skb = urb->context; int ret; if (!netif_device_present(dev->net) || !netif_carrier_ok(dev->net) || pipe_halted) { lan78xx_release_tx_buf(dev, skb); continue; } ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret == 0) { netif_trans_update(dev->net); lan78xx_queue_skb(&dev->txq, skb, tx_start); } else { if (ret == -EPIPE) { netif_stop_queue(dev->net); pipe_halted = true; } else if (ret == -ENODEV) { netif_device_detach(dev->net); } lan78xx_release_tx_buf(dev, skb); } } return pipe_halted; } static int lan78xx_resume(struct usb_interface *intf) { struct lan78xx_net *dev = usb_get_intfdata(intf); bool dev_open; int ret; mutex_lock(&dev->dev_mutex); netif_dbg(dev, ifup, dev->net, "resuming device"); dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags); if (dev_open) { bool pipe_halted = false; ret = lan78xx_flush_tx_fifo(dev); if (ret < 0) goto out; if (dev->urb_intr) { int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL); if (ret < 0) { if (ret == -ENODEV) netif_device_detach(dev->net); netdev_warn(dev->net, "Failed to submit intr URB"); } } spin_lock_irq(&dev->txq.lock); if (netif_device_present(dev->net)) { pipe_halted = lan78xx_submit_deferred_urbs(dev); if (pipe_halted) lan78xx_defer_kevent(dev, EVENT_TX_HALT); } clear_bit(EVENT_DEV_ASLEEP, &dev->flags); spin_unlock_irq(&dev->txq.lock); if (!pipe_halted && netif_device_present(dev->net) && (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))) netif_start_queue(dev->net); ret = lan78xx_start_tx_path(dev); if (ret < 0) goto out; napi_schedule(&dev->napi); if (!timer_pending(&dev->stat_monitor)) { dev->delta = 1; mod_timer(&dev->stat_monitor, jiffies + STAT_UPDATE_TIMER); } } else { clear_bit(EVENT_DEV_ASLEEP, &dev->flags); } ret = lan78xx_write_reg(dev, WUCSR2, 0); if (ret < 0) goto out; ret = lan78xx_write_reg(dev, WUCSR, 0); if (ret < 0) goto out; ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL); if (ret < 0) goto out; ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ | WUCSR2_ARP_RCD_ | WUCSR2_IPV6_TCPSYN_RCD_ | WUCSR2_IPV4_TCPSYN_RCD_); if (ret < 0) goto out; ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ 
| WUCSR_EEE_RX_WAKE_ | WUCSR_PFDA_FR_ | WUCSR_RFE_WAKE_FR_ | WUCSR_WUFR_ | WUCSR_MPR_ | WUCSR_BCST_FR_); if (ret < 0) goto out; ret = 0; out: mutex_unlock(&dev->dev_mutex); return ret; } static int lan78xx_reset_resume(struct usb_interface *intf) { struct lan78xx_net *dev = usb_get_intfdata(intf); int ret; netif_dbg(dev, ifup, dev->net, "(reset) resuming device"); ret = lan78xx_reset(dev); if (ret < 0) return ret; phy_start(dev->net->phydev); ret = lan78xx_resume(intf); return ret; } static const struct usb_device_id products[] = { { /* LAN7800 USB Gigabit Ethernet Device */ USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID), }, { /* LAN7850 USB Gigabit Ethernet Device */ USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID), }, { /* LAN7801 USB Gigabit Ethernet Device */ USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID), }, { /* ATM2-AF USB Gigabit Ethernet Device */ USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID), }, {}, }; MODULE_DEVICE_TABLE(usb, products); static struct usb_driver lan78xx_driver = { .name = DRIVER_NAME, .id_table = products, .probe = lan78xx_probe, .disconnect = lan78xx_disconnect, .suspend = lan78xx_suspend, .resume = lan78xx_resume, .reset_resume = lan78xx_reset_resume, .supports_autosuspend = 1, .disable_hub_initiated_lpm = 1, }; module_usb_driver(lan78xx_driver); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
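/*
 * Illustrative sketch (not part of the driver): lan78xx_set_suspend() above
 * programs each wake-up filter with a CRC-16 of the match pattern plus a byte
 * mask selecting which frame offsets are hashed. The standalone userspace
 * program below mirrors the driver's bit-serial CRC loop for the WAKE_ARP
 * filter, where mask 0x3000 selects frame bytes 12 and 13 (the EtherType).
 * The hardware register layout is not modelled.
 */
#include <stdint.h>
#include <stdio.h>

/* Same algorithm as lan78xx_wakeframe_crc16(): poly 0x8005, init 0xFFFF,
 * data consumed LSB-first, with the driver's OR-in of bit 0 on reduction.
 */
static uint16_t wakeframe_crc16(const uint8_t *buf, int len)
{
	const uint16_t poly = 0x8005;
	uint16_t crc = 0xFFFF;

	for (int i = 0; i < len; i++) {
		uint8_t data = buf[i];

		for (int bit = 0; bit < 8; bit++) {
			uint16_t msb = crc >> 15;

			crc <<= 1;
			if (msb ^ (data & 1)) {
				crc ^= poly;
				crc |= 0x0001;
			}
			data >>= 1;
		}
	}
	return crc;
}

int main(void)
{
	/* ARP EtherType 0x0806 at frame offsets 12..13, as in the WAKE_ARP case */
	const uint8_t arp_type[2] = { 0x08, 0x06 };

	printf("WUF CRC16 = 0x%04x, byte mask = 0x%08x (offsets 12-13)\n",
	       wakeframe_crc16(arp_type, 2), 0x3000u);
	return 0;
}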
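/*
 * Illustrative sketch (not from the kernel tree): the dma-fence implementation
 * that follows documents dma_fence_add_callback(), which initialises @cb
 * itself and returns -ENOENT if the fence is already signalled. A typical
 * consumer embeds the dma_fence_cb in its own structure and recovers it with
 * container_of() in the callback; the callback may run in IRQ context, so it
 * must not block. The struct my_waiter type and my_fence_cb()/my_wait_on_fence()
 * helpers are hypothetical names used only for this example.
 */
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/dma-fence.h>

struct my_waiter {
	struct dma_fence_cb cb;
	struct completion done;
};

/* May be invoked from atomic/IRQ context; only signal the completion. */
static void my_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct my_waiter *w = container_of(cb, struct my_waiter, cb);

	complete(&w->done);
}

static int my_wait_on_fence(struct dma_fence *fence, struct my_waiter *w)
{
	int ret;

	init_completion(&w->done);
	ret = dma_fence_add_callback(fence, &w->cb, my_fence_cb);
	if (ret == -ENOENT)	/* already signalled: nothing to wait for */
		return 0;
	if (ret)
		return ret;
	wait_for_completion(&w->done);
	return 0;
}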
// SPDX-License-Identifier: GPL-2.0-only /* * Fence mechanism for dma-buf and to allow for asynchronous dma access * * Copyright (C) 2012 Canonical Ltd * Copyright (C) 2012 Texas Instruments * * Authors: * Rob Clark <robdclark@gmail.com> * Maarten Lankhorst <maarten.lankhorst@canonical.com> */ #include <linux/slab.h> #include <linux/export.h> #include <linux/atomic.h> #include <linux/dma-fence.h> #include <linux/sched/signal.h> #include <linux/seq_file.h> #define CREATE_TRACE_POINTS #include <trace/events/dma_fence.h> EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit); EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal); EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled); static DEFINE_SPINLOCK(dma_fence_stub_lock); static struct dma_fence dma_fence_stub; /* * fence context counter: each execution context should have its own * fence context, this allows checking if fences belong to the same * context or not. One device can have multiple separate contexts, * and they're used if some engine can run independently of another. */ static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1); /** * DOC: DMA fences overview * * DMA fences, represented by &struct dma_fence, are the kernel internal * synchronization primitive for DMA operations like GPU rendering, video * encoding/decoding, or displaying buffers on a screen. * * A fence is initialized using dma_fence_init() and completed using * dma_fence_signal(). Fences are associated with a context, allocated through * dma_fence_context_alloc(), and all fences on the same context are * fully ordered. * * Since the purpose of fences is to facilitate cross-device and * cross-application synchronization, there are multiple ways to use one: * * - Individual fences can be exposed as a &sync_file, accessed as a file * descriptor from userspace, created by calling sync_file_create(). This is * called explicit fencing, since userspace passes around explicit * synchronization points. * * - Some subsystems also have their own explicit fencing primitives, like * &drm_syncobj. Compared to &sync_file, a &drm_syncobj allows the underlying * fence to be updated. * * - Then there's also implicit fencing, where the synchronization points are * implicitly passed around as part of shared &dma_buf instances. Such * implicit fences are stored in &struct dma_resv through the * &dma_buf.resv pointer. */ /** * DOC: fence cross-driver contract * * Since &dma_fence provides a cross-driver contract, all drivers must follow the * same rules: * * * Fences must complete in a reasonable time. Fences which represent kernels * and shaders submitted by userspace, which could run forever, must be backed * up by timeout and gpu hang recovery code. Minimally that code must prevent * further command submission and force complete all in-flight fences, e.g. * when the driver or hardware do not support gpu reset, or if the gpu reset * failed for some reason.
Ideally the driver supports gpu recovery which only * affects the offending userspace context, and no other userspace * submissions. * * * Drivers may have different ideas of what completion within a reasonable * time means. Some hang recovery code uses a fixed timeout, others a mix * between observing forward progress and increasingly strict timeouts. * Drivers should not try to second guess timeout handling of fences from * other drivers. * * * To ensure there's no deadlocks of dma_fence_wait() against other locks * drivers should annotate all code required to reach dma_fence_signal(), * which completes the fences, with dma_fence_begin_signalling() and * dma_fence_end_signalling(). * * * Drivers are allowed to call dma_fence_wait() while holding dma_resv_lock(). * This means any code required for fence completion cannot acquire a * &dma_resv lock. Note that this also pulls in the entire established * locking hierarchy around dma_resv_lock() and dma_resv_unlock(). * * * Drivers are allowed to call dma_fence_wait() from their &shrinker * callbacks. This means any code required for fence completion cannot * allocate memory with GFP_KERNEL. * * * Drivers are allowed to call dma_fence_wait() from their &mmu_notifier * respectively &mmu_interval_notifier callbacks. This means any code required * for fence completion cannot allocate memory with GFP_NOFS or GFP_NOIO. * Only GFP_ATOMIC is permissible, which might fail. * * Note that only GPU drivers have a reasonable excuse for both requiring * &mmu_interval_notifier and &shrinker callbacks at the same time as having to * track asynchronous compute work using &dma_fence. No driver outside of * drivers/gpu should ever call dma_fence_wait() in such contexts. */ static const char *dma_fence_stub_get_name(struct dma_fence *fence) { return "stub"; } static const struct dma_fence_ops dma_fence_stub_ops = { .get_driver_name = dma_fence_stub_get_name, .get_timeline_name = dma_fence_stub_get_name, }; /** * dma_fence_get_stub - return a signaled fence * * Return a stub fence which is already signaled. The fence's * timestamp corresponds to the first time after boot this * function is called. */ struct dma_fence *dma_fence_get_stub(void) { spin_lock(&dma_fence_stub_lock); if (!dma_fence_stub.ops) { dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops, &dma_fence_stub_lock, 0, 0); set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &dma_fence_stub.flags); dma_fence_signal_locked(&dma_fence_stub); } spin_unlock(&dma_fence_stub_lock); return dma_fence_get(&dma_fence_stub); } EXPORT_SYMBOL(dma_fence_get_stub); /** * dma_fence_allocate_private_stub - return a private, signaled fence * @timestamp: timestamp when the fence was signaled * * Return a newly allocated and signaled stub fence. */ struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp) { struct dma_fence *fence; fence = kzalloc(sizeof(*fence), GFP_KERNEL); if (fence == NULL) return NULL; dma_fence_init(fence, &dma_fence_stub_ops, &dma_fence_stub_lock, 0, 0); set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags); dma_fence_signal_timestamp(fence, timestamp); return fence; } EXPORT_SYMBOL(dma_fence_allocate_private_stub); /** * dma_fence_context_alloc - allocate an array of fence contexts * @num: amount of contexts to allocate * * This function will return the first index of the number of fence contexts * allocated. The fence context is used for setting &dma_fence.context to a * unique number by passing the context to dma_fence_init(). 
*/ u64 dma_fence_context_alloc(unsigned num) { WARN_ON(!num); return atomic64_fetch_add(num, &dma_fence_context_counter); } EXPORT_SYMBOL(dma_fence_context_alloc); /** * DOC: fence signalling annotation * * Proving correctness of all the kernel code around &dma_fence through code * review and testing is tricky for a few reasons: * * * It is a cross-driver contract, and therefore all drivers must follow the * same rules for lock nesting order, calling contexts for various functions * and anything else significant for in-kernel interfaces. But it is also * impossible to test all drivers in a single machine, hence brute-force N vs. * N testing of all combinations is impossible. Even just limiting to the * possible combinations is infeasible. * * * There is an enormous amount of driver code involved. For render drivers * there's the tail of command submission, after fences are published, * scheduler code, interrupt and workers to process job completion, * and timeout, gpu reset and gpu hang recovery code. Plus for integration * with core mm with have &mmu_notifier, respectively &mmu_interval_notifier, * and &shrinker. For modesetting drivers there's the commit tail functions * between when fences for an atomic modeset are published, and when the * corresponding vblank completes, including any interrupt processing and * related workers. Auditing all that code, across all drivers, is not * feasible. * * * Due to how many other subsystems are involved and the locking hierarchies * this pulls in there is extremely thin wiggle-room for driver-specific * differences. &dma_fence interacts with almost all of the core memory * handling through page fault handlers via &dma_resv, dma_resv_lock() and * dma_resv_unlock(). On the other side it also interacts through all * allocation sites through &mmu_notifier and &shrinker. * * Furthermore lockdep does not handle cross-release dependencies, which means * any deadlocks between dma_fence_wait() and dma_fence_signal() can't be caught * at runtime with some quick testing. The simplest example is one thread * waiting on a &dma_fence while holding a lock:: * * lock(A); * dma_fence_wait(B); * unlock(A); * * while the other thread is stuck trying to acquire the same lock, which * prevents it from signalling the fence the previous thread is stuck waiting * on:: * * lock(A); * unlock(A); * dma_fence_signal(B); * * By manually annotating all code relevant to signalling a &dma_fence we can * teach lockdep about these dependencies, which also helps with the validation * headache since now lockdep can check all the rules for us:: * * cookie = dma_fence_begin_signalling(); * lock(A); * unlock(A); * dma_fence_signal(B); * dma_fence_end_signalling(cookie); * * For using dma_fence_begin_signalling() and dma_fence_end_signalling() to * annotate critical sections the following rules need to be observed: * * * All code necessary to complete a &dma_fence must be annotated, from the * point where a fence is accessible to other threads, to the point where * dma_fence_signal() is called. Un-annotated code can contain deadlock issues, * and due to the very strict rules and many corner cases it is infeasible to * catch these just with review or normal stress testing. * * * &struct dma_resv deserves a special note, since the readers are only * protected by rcu. This means the signalling critical section starts as soon * as the new fences are installed, even before dma_resv_unlock() is called. 
* * * The only exception are fast paths and opportunistic signalling code, which * calls dma_fence_signal() purely as an optimization, but is not required to * guarantee completion of a &dma_fence. The usual example is a wait IOCTL * which calls dma_fence_signal(), while the mandatory completion path goes * through a hardware interrupt and possible job completion worker. * * * To aid composability of code, the annotations can be freely nested, as long * as the overall locking hierarchy is consistent. The annotations also work * both in interrupt and process context. Due to implementation details this * requires that callers pass an opaque cookie from * dma_fence_begin_signalling() to dma_fence_end_signalling(). * * * Validation against the cross driver contract is implemented by priming * lockdep with the relevant hierarchy at boot-up. This means even just * testing with a single device is enough to validate a driver, at least as * far as deadlocks with dma_fence_wait() against dma_fence_signal() are * concerned. */ #ifdef CONFIG_LOCKDEP static struct lockdep_map dma_fence_lockdep_map = { .name = "dma_fence_map" }; /** * dma_fence_begin_signalling - begin a critical DMA fence signalling section * * Drivers should use this to annotate the beginning of any code section * required to eventually complete &dma_fence by calling dma_fence_signal(). * * The end of these critical sections are annotated with * dma_fence_end_signalling(). * * Returns: * * Opaque cookie needed by the implementation, which needs to be passed to * dma_fence_end_signalling(). */ bool dma_fence_begin_signalling(void) { /* explicitly nesting ... */ if (lock_is_held_type(&dma_fence_lockdep_map, 1)) return true; /* rely on might_sleep check for soft/hardirq locks */ if (in_atomic()) return true; /* ... and non-recursive readlock */ lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _RET_IP_); return false; } EXPORT_SYMBOL(dma_fence_begin_signalling); /** * dma_fence_end_signalling - end a critical DMA fence signalling section * @cookie: opaque cookie from dma_fence_begin_signalling() * * Closes a critical section annotation opened by dma_fence_begin_signalling(). */ void dma_fence_end_signalling(bool cookie) { if (cookie) return; lock_release(&dma_fence_lockdep_map, _RET_IP_); } EXPORT_SYMBOL(dma_fence_end_signalling); void __dma_fence_might_wait(void) { bool tmp; tmp = lock_is_held_type(&dma_fence_lockdep_map, 1); if (tmp) lock_release(&dma_fence_lockdep_map, _THIS_IP_); lock_map_acquire(&dma_fence_lockdep_map); lock_map_release(&dma_fence_lockdep_map); if (tmp) lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _THIS_IP_); } #endif /** * dma_fence_signal_timestamp_locked - signal completion of a fence * @fence: the fence to signal * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain * * Signal completion for software callbacks on a fence, this will unblock * dma_fence_wait() calls and run all the callbacks added with * dma_fence_add_callback(). Can be called multiple times, but since a fence * can only go from the unsignaled to the signaled state and not back, it will * only be effective the first time. Set the timestamp provided as the fence * signal timestamp. * * Unlike dma_fence_signal_timestamp(), this function must be called with * &dma_fence.lock held. * * Returns 0 on success and a negative error value when @fence has been * signalled already. 
*/ int dma_fence_signal_timestamp_locked(struct dma_fence *fence, ktime_t timestamp) { struct dma_fence_cb *cur, *tmp; struct list_head cb_list; lockdep_assert_held(fence->lock); if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))) return -EINVAL; /* Stash the cb_list before replacing it with the timestamp */ list_replace(&fence->cb_list, &cb_list); fence->timestamp = timestamp; set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags); trace_dma_fence_signaled(fence); list_for_each_entry_safe(cur, tmp, &cb_list, node) { INIT_LIST_HEAD(&cur->node); cur->func(fence, cur); } return 0; } EXPORT_SYMBOL(dma_fence_signal_timestamp_locked); /** * dma_fence_signal_timestamp - signal completion of a fence * @fence: the fence to signal * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain * * Signal completion for software callbacks on a fence, this will unblock * dma_fence_wait() calls and run all the callbacks added with * dma_fence_add_callback(). Can be called multiple times, but since a fence * can only go from the unsignaled to the signaled state and not back, it will * only be effective the first time. Set the timestamp provided as the fence * signal timestamp. * * Returns 0 on success and a negative error value when @fence has been * signalled already. */ int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp) { unsigned long flags; int ret; if (!fence) return -EINVAL; spin_lock_irqsave(fence->lock, flags); ret = dma_fence_signal_timestamp_locked(fence, timestamp); spin_unlock_irqrestore(fence->lock, flags); return ret; } EXPORT_SYMBOL(dma_fence_signal_timestamp); /** * dma_fence_signal_locked - signal completion of a fence * @fence: the fence to signal * * Signal completion for software callbacks on a fence, this will unblock * dma_fence_wait() calls and run all the callbacks added with * dma_fence_add_callback(). Can be called multiple times, but since a fence * can only go from the unsignaled to the signaled state and not back, it will * only be effective the first time. * * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock * held. * * Returns 0 on success and a negative error value when @fence has been * signalled already. */ int dma_fence_signal_locked(struct dma_fence *fence) { return dma_fence_signal_timestamp_locked(fence, ktime_get()); } EXPORT_SYMBOL(dma_fence_signal_locked); /** * dma_fence_signal - signal completion of a fence * @fence: the fence to signal * * Signal completion for software callbacks on a fence, this will unblock * dma_fence_wait() calls and run all the callbacks added with * dma_fence_add_callback(). Can be called multiple times, but since a fence * can only go from the unsignaled to the signaled state and not back, it will * only be effective the first time. * * Returns 0 on success and a negative error value when @fence has been * signalled already. 
 */
int dma_fence_signal(struct dma_fence *fence)
{
	unsigned long flags;
	int ret;
	bool tmp;

	if (!fence)
		return -EINVAL;

	tmp = dma_fence_begin_signalling();

	spin_lock_irqsave(fence->lock, flags);
	ret = dma_fence_signal_timestamp_locked(fence, ktime_get());
	spin_unlock_irqrestore(fence->lock, flags);

	dma_fence_end_signalling(tmp);

	return ret;
}
EXPORT_SYMBOL(dma_fence_signal);

/**
 * dma_fence_wait_timeout - sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. Other error values may be
 * returned on custom implementations.
 *
 * Performs a synchronous wait on this fence. It is assumed the caller
 * directly or indirectly (buf-mgr between reservation and committing)
 * holds a reference to the fence, otherwise the fence might be
 * freed before return, resulting in undefined behavior.
 *
 * See also dma_fence_wait() and dma_fence_wait_any_timeout().
 */
signed long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
{
	signed long ret;

	if (WARN_ON(timeout < 0))
		return -EINVAL;

	might_sleep();

	__dma_fence_might_wait();

	dma_fence_enable_sw_signaling(fence);

	trace_dma_fence_wait_start(fence);
	if (fence->ops->wait)
		ret = fence->ops->wait(fence, intr, timeout);
	else
		ret = dma_fence_default_wait(fence, intr, timeout);
	trace_dma_fence_wait_end(fence);
	return ret;
}
EXPORT_SYMBOL(dma_fence_wait_timeout);

/**
 * dma_fence_release - default release function for fences
 * @kref: &dma_fence.refcount
 *
 * This is the default release function for &dma_fence. Drivers shouldn't call
 * this directly, but instead call dma_fence_put().
 */
void dma_fence_release(struct kref *kref)
{
	struct dma_fence *fence =
		container_of(kref, struct dma_fence, refcount);

	trace_dma_fence_destroy(fence);

	if (WARN(!list_empty(&fence->cb_list) &&
		 !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags),
		 "Fence %s:%s:%llx:%llx released with pending signals!\n",
		 fence->ops->get_driver_name(fence),
		 fence->ops->get_timeline_name(fence),
		 fence->context, fence->seqno)) {
		unsigned long flags;

		/*
		 * Failed to signal before release, likely a refcounting issue.
		 *
		 * This should never happen, but if it does make sure that we
		 * don't leave chains dangling. We set the error flag first
		 * so that the callbacks know this signal is due to an error.
		 */
		spin_lock_irqsave(fence->lock, flags);
		fence->error = -EDEADLK;
		dma_fence_signal_locked(fence);
		spin_unlock_irqrestore(fence->lock, flags);
	}

	if (fence->ops->release)
		fence->ops->release(fence);
	else
		dma_fence_free(fence);
}
EXPORT_SYMBOL(dma_fence_release);

/**
 * dma_fence_free - default release function for &dma_fence.
 * @fence: fence to release
 *
 * This is the default implementation for &dma_fence_ops.release. It calls
 * kfree_rcu() on @fence.
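 *
 * A minimal sketch of a driver-specific &dma_fence_ops.release that falls
 * back to this helper (my_fence and my_fence_cleanup are hypothetical
 * names)::
 *
 *	static void my_fence_release(struct dma_fence *fence)
 *	{
 *		struct my_fence *f = container_of(fence, struct my_fence, base);
 *
 *		my_fence_cleanup(f);
 *		dma_fence_free(fence);
 *	}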
*/ void dma_fence_free(struct dma_fence *fence) { kfree_rcu(fence, rcu); } EXPORT_SYMBOL(dma_fence_free); static bool __dma_fence_enable_signaling(struct dma_fence *fence) { bool was_set; lockdep_assert_held(fence->lock); was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags); if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) return false; if (!was_set && fence->ops->enable_signaling) { trace_dma_fence_enable_signal(fence); if (!fence->ops->enable_signaling(fence)) { dma_fence_signal_locked(fence); return false; } } return true; } /** * dma_fence_enable_sw_signaling - enable signaling on fence * @fence: the fence to enable * * This will request for sw signaling to be enabled, to make the fence * complete as soon as possible. This calls &dma_fence_ops.enable_signaling * internally. */ void dma_fence_enable_sw_signaling(struct dma_fence *fence) { unsigned long flags; spin_lock_irqsave(fence->lock, flags); __dma_fence_enable_signaling(fence); spin_unlock_irqrestore(fence->lock, flags); } EXPORT_SYMBOL(dma_fence_enable_sw_signaling); /** * dma_fence_add_callback - add a callback to be called when the fence * is signaled * @fence: the fence to wait on * @cb: the callback to register * @func: the function to call * * Add a software callback to the fence. The caller should keep a reference to * the fence. * * @cb will be initialized by dma_fence_add_callback(), no initialization * by the caller is required. Any number of callbacks can be registered * to a fence, but a callback can only be registered to one fence at a time. * * If fence is already signaled, this function will return -ENOENT (and * *not* call the callback). * * Note that the callback can be called from an atomic context or irq context. * * Returns 0 in case of success, -ENOENT if the fence is already signaled * and -EINVAL in case of error. */ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb, dma_fence_func_t func) { unsigned long flags; int ret = 0; if (WARN_ON(!fence || !func)) return -EINVAL; if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { INIT_LIST_HEAD(&cb->node); return -ENOENT; } spin_lock_irqsave(fence->lock, flags); if (__dma_fence_enable_signaling(fence)) { cb->func = func; list_add_tail(&cb->node, &fence->cb_list); } else { INIT_LIST_HEAD(&cb->node); ret = -ENOENT; } spin_unlock_irqrestore(fence->lock, flags); return ret; } EXPORT_SYMBOL(dma_fence_add_callback); /** * dma_fence_get_status - returns the status upon completion * @fence: the dma_fence to query * * This wraps dma_fence_get_status_locked() to return the error status * condition on a signaled fence. See dma_fence_get_status_locked() for more * details. * * Returns 0 if the fence has not yet been signaled, 1 if the fence has * been signaled without an error condition, or a negative error code * if the fence has been completed in err. */ int dma_fence_get_status(struct dma_fence *fence) { unsigned long flags; int status; spin_lock_irqsave(fence->lock, flags); status = dma_fence_get_status_locked(fence); spin_unlock_irqrestore(fence->lock, flags); return status; } EXPORT_SYMBOL(dma_fence_get_status); /** * dma_fence_remove_callback - remove a callback from the signaling list * @fence: the fence to wait on * @cb: the callback to remove * * Remove a previously queued callback from the fence. This function returns * true if the callback is successfully removed, or false if the fence has * already been signaled. 
* * *WARNING*: * Cancelling a callback should only be done if you really know what you're * doing, since deadlocks and race conditions could occur all too easily. For * this reason, it should only ever be done on hardware lockup recovery, * with a reference held to the fence. * * Behaviour is undefined if @cb has not been added to @fence using * dma_fence_add_callback() beforehand. */ bool dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb) { unsigned long flags; bool ret; spin_lock_irqsave(fence->lock, flags); ret = !list_empty(&cb->node); if (ret) list_del_init(&cb->node); spin_unlock_irqrestore(fence->lock, flags); return ret; } EXPORT_SYMBOL(dma_fence_remove_callback); struct default_wait_cb { struct dma_fence_cb base; struct task_struct *task; }; static void dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb) { struct default_wait_cb *wait = container_of(cb, struct default_wait_cb, base); wake_up_state(wait->task, TASK_NORMAL); } /** * dma_fence_default_wait - default sleep until the fence gets signaled * or until timeout elapses * @fence: the fence to wait on * @intr: if true, do an interruptible wait * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT * * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the * remaining timeout in jiffies on success. If timeout is zero the value one is * returned if the fence is already signaled for consistency with other * functions taking a jiffies timeout. */ signed long dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout) { struct default_wait_cb cb; unsigned long flags; signed long ret = timeout ? timeout : 1; spin_lock_irqsave(fence->lock, flags); if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) goto out; if (intr && signal_pending(current)) { ret = -ERESTARTSYS; goto out; } if (!timeout) { ret = 0; goto out; } cb.base.func = dma_fence_default_wait_cb; cb.task = current; list_add(&cb.base.node, &fence->cb_list); while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) { if (intr) __set_current_state(TASK_INTERRUPTIBLE); else __set_current_state(TASK_UNINTERRUPTIBLE); spin_unlock_irqrestore(fence->lock, flags); ret = schedule_timeout(ret); spin_lock_irqsave(fence->lock, flags); if (ret > 0 && intr && signal_pending(current)) ret = -ERESTARTSYS; } if (!list_empty(&cb.base.node)) list_del(&cb.base.node); __set_current_state(TASK_RUNNING); out: spin_unlock_irqrestore(fence->lock, flags); return ret; } EXPORT_SYMBOL(dma_fence_default_wait); static bool dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count, uint32_t *idx) { int i; for (i = 0; i < count; ++i) { struct dma_fence *fence = fences[i]; if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { if (idx) *idx = i; return true; } } return false; } /** * dma_fence_wait_any_timeout - sleep until any fence gets signaled * or until timeout elapses * @fences: array of fences to wait on * @count: number of fences to wait on * @intr: if true, do an interruptible wait * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT * @idx: used to store the first signaled fence index, meaningful only on * positive return * * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies * on success. * * Synchronous waits for the first fence in the array to be signaled. 
The * caller needs to hold a reference to all fences in the array, otherwise a * fence might be freed before return, resulting in undefined behavior. * * See also dma_fence_wait() and dma_fence_wait_timeout(). */ signed long dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count, bool intr, signed long timeout, uint32_t *idx) { struct default_wait_cb *cb; signed long ret = timeout; unsigned i; if (WARN_ON(!fences || !count || timeout < 0)) return -EINVAL; if (timeout == 0) { for (i = 0; i < count; ++i) if (dma_fence_is_signaled(fences[i])) { if (idx) *idx = i; return 1; } return 0; } cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL); if (cb == NULL) { ret = -ENOMEM; goto err_free_cb; } for (i = 0; i < count; ++i) { struct dma_fence *fence = fences[i]; cb[i].task = current; if (dma_fence_add_callback(fence, &cb[i].base, dma_fence_default_wait_cb)) { /* This fence is already signaled */ if (idx) *idx = i; goto fence_rm_cb; } } while (ret > 0) { if (intr) set_current_state(TASK_INTERRUPTIBLE); else set_current_state(TASK_UNINTERRUPTIBLE); if (dma_fence_test_signaled_any(fences, count, idx)) break; ret = schedule_timeout(ret); if (ret > 0 && intr && signal_pending(current)) ret = -ERESTARTSYS; } __set_current_state(TASK_RUNNING); fence_rm_cb: while (i-- > 0) dma_fence_remove_callback(fences[i], &cb[i].base); err_free_cb: kfree(cb); return ret; } EXPORT_SYMBOL(dma_fence_wait_any_timeout); /** * DOC: deadline hints * * In an ideal world, it would be possible to pipeline a workload sufficiently * that a utilization based device frequency governor could arrive at a minimum * frequency that meets the requirements of the use-case, in order to minimize * power consumption. But in the real world there are many workloads which * defy this ideal. For example, but not limited to: * * * Workloads that ping-pong between device and CPU, with alternating periods * of CPU waiting for device, and device waiting on CPU. This can result in * devfreq and cpufreq seeing idle time in their respective domains and in * result reduce frequency. * * * Workloads that interact with a periodic time based deadline, such as double * buffered GPU rendering vs vblank sync'd page flipping. In this scenario, * missing a vblank deadline results in an *increase* in idle time on the GPU * (since it has to wait an additional vblank period), sending a signal to * the GPU's devfreq to reduce frequency, when in fact the opposite is what is * needed. * * To this end, deadline hint(s) can be set on a &dma_fence via &dma_fence_set_deadline * (or indirectly via userspace facing ioctls like &sync_set_deadline). * The deadline hint provides a way for the waiting driver, or userspace, to * convey an appropriate sense of urgency to the signaling driver. * * A deadline hint is given in absolute ktime (CLOCK_MONOTONIC for userspace * facing APIs). The time could either be some point in the future (such as * the vblank based deadline for page-flipping, or the start of a compositor's * composition cycle), or the current time to indicate an immediate deadline * hint (Ie. forward progress cannot be made until this fence is signaled). * * Multiple deadlines may be set on a given fence, even in parallel. See the * documentation for &dma_fence_ops.set_deadline. * * The deadline hint is just that, a hint. The driver that created the fence * may react by increasing frequency, making different scheduling choices, etc. * Or doing nothing at all. 
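 *
 * A minimal usage sketch (illustrative; in_fence, vblank_time and
 * compositor_budget are hypothetical values owned by the caller)::
 *
 *	ktime_t deadline = ktime_sub(vblank_time, compositor_budget);
 *
 *	dma_fence_set_deadline(in_fence, deadline);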
 */

/**
 * dma_fence_set_deadline - set desired fence-wait deadline hint
 * @fence: the fence that is to be waited on
 * @deadline: the time by which the waiter hopes for the fence to be
 * signaled
 *
 * Give the fence signaler a hint about an upcoming deadline, such as
 * vblank, by which point the waiter would prefer the fence to be
 * signaled. This is intended to give feedback to the fence signaler
 * to aid in power management decisions, such as boosting GPU frequency
 * if a periodic vblank deadline is approaching but the fence is not
 * yet signaled.
 */
void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
{
	if (fence->ops->set_deadline && !dma_fence_is_signaled(fence))
		fence->ops->set_deadline(fence, deadline);
}
EXPORT_SYMBOL(dma_fence_set_deadline);

/**
 * dma_fence_describe - Dump fence description into seq_file
 * @fence: the fence to describe
 * @seq: the seq_file to put the textual description into
 *
 * Dump a textual description of the fence and its state into the seq_file.
 */
void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq)
{
	seq_printf(seq, "%s %s seq %llu %ssignalled\n",
		   fence->ops->get_driver_name(fence),
		   fence->ops->get_timeline_name(fence), fence->seqno,
		   dma_fence_is_signaled(fence) ? "" : "un");
}
EXPORT_SYMBOL(dma_fence_describe);

/**
 * dma_fence_init - Initialize a custom fence.
 * @fence: the fence to initialize
 * @ops: the dma_fence_ops for operations on this fence
 * @lock: the irqsafe spinlock to use for locking this fence
 * @context: the execution context this fence is run on
 * @seqno: a linear increasing sequence number for this context
 *
 * Initializes an allocated fence. The caller doesn't have to keep its
 * refcount after committing with this fence, but it will need to hold a
 * refcount again if &dma_fence_ops.enable_signaling gets called.
 *
 * context and seqno are used for easy comparison between fences, allowing
 * callers to check which fence is later by simply using dma_fence_later().
 */
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
	       spinlock_t *lock, u64 context, u64 seqno)
{
	BUG_ON(!lock);
	BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);

	kref_init(&fence->refcount);
	fence->ops = ops;
	INIT_LIST_HEAD(&fence->cb_list);
	fence->lock = lock;
	fence->context = context;
	fence->seqno = seqno;
	fence->flags = 0UL;
	fence->error = 0;

	trace_dma_fence_init(fence);
}
EXPORT_SYMBOL(dma_fence_init);
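/*
 * Illustrative driver-side sketch of how the dma_fence API above fits
 * together. The names my_fence_ops, my_job, my_job_submit, my_job_complete
 * and my_job_wait are hypothetical; driver state handling and error paths
 * are omitted.
 */
static DEFINE_SPINLOCK(my_fence_lock);

static const char *my_fence_get_driver_name(struct dma_fence *fence)
{
	return "my_driver";
}

static const char *my_fence_get_timeline_name(struct dma_fence *fence)
{
	return "my_timeline";
}

static const struct dma_fence_ops my_fence_ops = {
	.get_driver_name = my_fence_get_driver_name,
	.get_timeline_name = my_fence_get_timeline_name,
	/* no .release: dma_fence_release() falls back to dma_fence_free() */
};

struct my_job {
	struct dma_fence fence;
};

static void my_job_submit(struct my_job *job, u64 context, u64 seqno)
{
	/* Publish a fence for this job; the refcount starts at one. */
	dma_fence_init(&job->fence, &my_fence_ops, &my_fence_lock,
		       context, seqno);
}

static void my_job_complete(struct my_job *job)
{
	/*
	 * Annotate the completion path so lockdep can cross-check it against
	 * dma_fence_wait(); dma_fence_signal() also annotates internally.
	 */
	bool cookie = dma_fence_begin_signalling();

	dma_fence_signal(&job->fence);
	dma_fence_end_signalling(cookie);
}

static int my_job_wait(struct my_job *job)
{
	/* Interruptible wait of up to one second; 0 means it timed out. */
	signed long ret = dma_fence_wait_timeout(&job->fence, true,
						 msecs_to_jiffies(1000));

	if (ret < 0)
		return ret;
	return ret ? 0 : -ETIMEDOUT;
}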
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CALIPSO - Common Architecture Label IPv6 Security Option
 *
 * This is an implementation of the CALIPSO protocol as specified in
 * RFC 5570.
* * Authors: Paul Moore <paul.moore@hp.com> * Huw Davies <huw@codeweavers.com> */ /* (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008 * (c) Copyright Huw Davies <huw@codeweavers.com>, 2015 */ #include <linux/init.h> #include <linux/types.h> #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/jhash.h> #include <linux/audit.h> #include <linux/slab.h> #include <net/ip.h> #include <net/icmp.h> #include <net/tcp.h> #include <net/netlabel.h> #include <net/calipso.h> #include <linux/atomic.h> #include <linux/bug.h> #include <asm/unaligned.h> #include <linux/crc-ccitt.h> /* Maximium size of the calipso option including * the two-byte TLV header. */ #define CALIPSO_OPT_LEN_MAX (2 + 252) /* Size of the minimum calipso option including * the two-byte TLV header. */ #define CALIPSO_HDR_LEN (2 + 8) /* Maximium size of the calipso option including * the two-byte TLV header and upto 3 bytes of * leading pad and 7 bytes of trailing pad. */ #define CALIPSO_OPT_LEN_MAX_WITH_PAD (3 + CALIPSO_OPT_LEN_MAX + 7) /* Maximium size of u32 aligned buffer required to hold calipso * option. Max of 3 initial pad bytes starting from buffer + 3. * i.e. the worst case is when the previous tlv finishes on 4n + 3. */ #define CALIPSO_MAX_BUFFER (6 + CALIPSO_OPT_LEN_MAX) /* List of available DOI definitions */ static DEFINE_SPINLOCK(calipso_doi_list_lock); static LIST_HEAD(calipso_doi_list); /* Label mapping cache */ int calipso_cache_enabled = 1; int calipso_cache_bucketsize = 10; #define CALIPSO_CACHE_BUCKETBITS 7 #define CALIPSO_CACHE_BUCKETS BIT(CALIPSO_CACHE_BUCKETBITS) #define CALIPSO_CACHE_REORDERLIMIT 10 struct calipso_map_cache_bkt { spinlock_t lock; u32 size; struct list_head list; }; struct calipso_map_cache_entry { u32 hash; unsigned char *key; size_t key_len; struct netlbl_lsm_cache *lsm_data; u32 activity; struct list_head list; }; static struct calipso_map_cache_bkt *calipso_cache; static void calipso_cache_invalidate(void); static void calipso_doi_putdef(struct calipso_doi *doi_def); /* Label Mapping Cache Functions */ /** * calipso_cache_entry_free - Frees a cache entry * @entry: the entry to free * * Description: * This function frees the memory associated with a cache entry including the * LSM cache data if there are no longer any users, i.e. reference count == 0. * */ static void calipso_cache_entry_free(struct calipso_map_cache_entry *entry) { if (entry->lsm_data) netlbl_secattr_cache_free(entry->lsm_data); kfree(entry->key); kfree(entry); } /** * calipso_map_cache_hash - Hashing function for the CALIPSO cache * @key: the hash key * @key_len: the length of the key in bytes * * Description: * The CALIPSO tag hashing function. Returns a 32-bit hash value. * */ static u32 calipso_map_cache_hash(const unsigned char *key, u32 key_len) { return jhash(key, key_len, 0); } /** * calipso_cache_init - Initialize the CALIPSO cache * * Description: * Initializes the CALIPSO label mapping cache, this function should be called * before any of the other functions defined in this file. Returns zero on * success, negative values on error. 
* */ static int __init calipso_cache_init(void) { u32 iter; calipso_cache = kcalloc(CALIPSO_CACHE_BUCKETS, sizeof(struct calipso_map_cache_bkt), GFP_KERNEL); if (!calipso_cache) return -ENOMEM; for (iter = 0; iter < CALIPSO_CACHE_BUCKETS; iter++) { spin_lock_init(&calipso_cache[iter].lock); calipso_cache[iter].size = 0; INIT_LIST_HEAD(&calipso_cache[iter].list); } return 0; } /** * calipso_cache_invalidate - Invalidates the current CALIPSO cache * * Description: * Invalidates and frees any entries in the CALIPSO cache. Returns zero on * success and negative values on failure. * */ static void calipso_cache_invalidate(void) { struct calipso_map_cache_entry *entry, *tmp_entry; u32 iter; for (iter = 0; iter < CALIPSO_CACHE_BUCKETS; iter++) { spin_lock_bh(&calipso_cache[iter].lock); list_for_each_entry_safe(entry, tmp_entry, &calipso_cache[iter].list, list) { list_del(&entry->list); calipso_cache_entry_free(entry); } calipso_cache[iter].size = 0; spin_unlock_bh(&calipso_cache[iter].lock); } } /** * calipso_cache_check - Check the CALIPSO cache for a label mapping * @key: the buffer to check * @key_len: buffer length in bytes * @secattr: the security attribute struct to use * * Description: * This function checks the cache to see if a label mapping already exists for * the given key. If there is a match then the cache is adjusted and the * @secattr struct is populated with the correct LSM security attributes. The * cache is adjusted in the following manner if the entry is not already the * first in the cache bucket: * * 1. The cache entry's activity counter is incremented * 2. The previous (higher ranking) entry's activity counter is decremented * 3. If the difference between the two activity counters is geater than * CALIPSO_CACHE_REORDERLIMIT the two entries are swapped * * Returns zero on success, -ENOENT for a cache miss, and other negative values * on error. * */ static int calipso_cache_check(const unsigned char *key, u32 key_len, struct netlbl_lsm_secattr *secattr) { u32 bkt; struct calipso_map_cache_entry *entry; struct calipso_map_cache_entry *prev_entry = NULL; u32 hash; if (!calipso_cache_enabled) return -ENOENT; hash = calipso_map_cache_hash(key, key_len); bkt = hash & (CALIPSO_CACHE_BUCKETS - 1); spin_lock_bh(&calipso_cache[bkt].lock); list_for_each_entry(entry, &calipso_cache[bkt].list, list) { if (entry->hash == hash && entry->key_len == key_len && memcmp(entry->key, key, key_len) == 0) { entry->activity += 1; refcount_inc(&entry->lsm_data->refcount); secattr->cache = entry->lsm_data; secattr->flags |= NETLBL_SECATTR_CACHE; secattr->type = NETLBL_NLTYPE_CALIPSO; if (!prev_entry) { spin_unlock_bh(&calipso_cache[bkt].lock); return 0; } if (prev_entry->activity > 0) prev_entry->activity -= 1; if (entry->activity > prev_entry->activity && entry->activity - prev_entry->activity > CALIPSO_CACHE_REORDERLIMIT) { __list_del(entry->list.prev, entry->list.next); __list_add(&entry->list, prev_entry->list.prev, &prev_entry->list); } spin_unlock_bh(&calipso_cache[bkt].lock); return 0; } prev_entry = entry; } spin_unlock_bh(&calipso_cache[bkt].lock); return -ENOENT; } /** * calipso_cache_add - Add an entry to the CALIPSO cache * @calipso_ptr: the CALIPSO option * @secattr: the packet's security attributes * * Description: * Add a new entry into the CALIPSO label mapping cache. Add the new entry to * head of the cache bucket's list, if the cache bucket is out of room remove * the last entry in the list first. It is important to note that there is * currently no checking for duplicate keys. 
Returns zero on success, * negative values on failure. The key stored starts at calipso_ptr + 2, * i.e. the type and length bytes are not stored, this corresponds to * calipso_ptr[1] bytes of data. * */ static int calipso_cache_add(const unsigned char *calipso_ptr, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; u32 bkt; struct calipso_map_cache_entry *entry = NULL; struct calipso_map_cache_entry *old_entry = NULL; u32 calipso_ptr_len; if (!calipso_cache_enabled || calipso_cache_bucketsize <= 0) return 0; calipso_ptr_len = calipso_ptr[1]; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) return -ENOMEM; entry->key = kmemdup(calipso_ptr + 2, calipso_ptr_len, GFP_ATOMIC); if (!entry->key) { ret_val = -ENOMEM; goto cache_add_failure; } entry->key_len = calipso_ptr_len; entry->hash = calipso_map_cache_hash(calipso_ptr, calipso_ptr_len); refcount_inc(&secattr->cache->refcount); entry->lsm_data = secattr->cache; bkt = entry->hash & (CALIPSO_CACHE_BUCKETS - 1); spin_lock_bh(&calipso_cache[bkt].lock); if (calipso_cache[bkt].size < calipso_cache_bucketsize) { list_add(&entry->list, &calipso_cache[bkt].list); calipso_cache[bkt].size += 1; } else { old_entry = list_entry(calipso_cache[bkt].list.prev, struct calipso_map_cache_entry, list); list_del(&old_entry->list); list_add(&entry->list, &calipso_cache[bkt].list); calipso_cache_entry_free(old_entry); } spin_unlock_bh(&calipso_cache[bkt].lock); return 0; cache_add_failure: if (entry) calipso_cache_entry_free(entry); return ret_val; } /* DOI List Functions */ /** * calipso_doi_search - Searches for a DOI definition * @doi: the DOI to search for * * Description: * Search the DOI definition list for a DOI definition with a DOI value that * matches @doi. The caller is responsible for calling rcu_read_[un]lock(). * Returns a pointer to the DOI definition on success and NULL on failure. */ static struct calipso_doi *calipso_doi_search(u32 doi) { struct calipso_doi *iter; list_for_each_entry_rcu(iter, &calipso_doi_list, list) if (iter->doi == doi && refcount_read(&iter->refcount)) return iter; return NULL; } /** * calipso_doi_add - Add a new DOI to the CALIPSO protocol engine * @doi_def: the DOI structure * @audit_info: NetLabel audit information * * Description: * The caller defines a new DOI for use by the CALIPSO engine and calls this * function to add it to the list of acceptable domains. The caller must * ensure that the mapping table specified in @doi_def->map meets all of the * requirements of the mapping type (see calipso.h for details). Returns * zero on success and non-zero on failure. * */ static int calipso_doi_add(struct calipso_doi *doi_def, struct netlbl_audit *audit_info) { int ret_val = -EINVAL; u32 doi; u32 doi_type; struct audit_buffer *audit_buf; doi = doi_def->doi; doi_type = doi_def->type; if (doi_def->doi == CALIPSO_DOI_UNKNOWN) goto doi_add_return; refcount_set(&doi_def->refcount, 1); spin_lock(&calipso_doi_list_lock); if (calipso_doi_search(doi_def->doi)) { spin_unlock(&calipso_doi_list_lock); ret_val = -EEXIST; goto doi_add_return; } list_add_tail_rcu(&doi_def->list, &calipso_doi_list); spin_unlock(&calipso_doi_list_lock); ret_val = 0; doi_add_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CALIPSO_ADD, audit_info); if (audit_buf) { const char *type_str; switch (doi_type) { case CALIPSO_MAP_PASS: type_str = "pass"; break; default: type_str = "(unknown)"; } audit_log_format(audit_buf, " calipso_doi=%u calipso_type=%s res=%u", doi, type_str, ret_val == 0 ? 
1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * calipso_doi_free - Frees a DOI definition * @doi_def: the DOI definition * * Description: * This function frees all of the memory associated with a DOI definition. * */ static void calipso_doi_free(struct calipso_doi *doi_def) { kfree(doi_def); } /** * calipso_doi_free_rcu - Frees a DOI definition via the RCU pointer * @entry: the entry's RCU field * * Description: * This function is designed to be used as a callback to the call_rcu() * function so that the memory allocated to the DOI definition can be released * safely. * */ static void calipso_doi_free_rcu(struct rcu_head *entry) { struct calipso_doi *doi_def; doi_def = container_of(entry, struct calipso_doi, rcu); calipso_doi_free(doi_def); } /** * calipso_doi_remove - Remove an existing DOI from the CALIPSO protocol engine * @doi: the DOI value * @audit_info: NetLabel audit information * * Description: * Removes a DOI definition from the CALIPSO engine. The NetLabel routines will * be called to release their own LSM domain mappings as well as our own * domain list. Returns zero on success and negative values on failure. * */ static int calipso_doi_remove(u32 doi, struct netlbl_audit *audit_info) { int ret_val; struct calipso_doi *doi_def; struct audit_buffer *audit_buf; spin_lock(&calipso_doi_list_lock); doi_def = calipso_doi_search(doi); if (!doi_def) { spin_unlock(&calipso_doi_list_lock); ret_val = -ENOENT; goto doi_remove_return; } list_del_rcu(&doi_def->list); spin_unlock(&calipso_doi_list_lock); calipso_doi_putdef(doi_def); ret_val = 0; doi_remove_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CALIPSO_DEL, audit_info); if (audit_buf) { audit_log_format(audit_buf, " calipso_doi=%u res=%u", doi, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * calipso_doi_getdef - Returns a reference to a valid DOI definition * @doi: the DOI value * * Description: * Searches for a valid DOI definition and if one is found it is returned to * the caller. Otherwise NULL is returned. The caller must ensure that * calipso_doi_putdef() is called when the caller is done. * */ static struct calipso_doi *calipso_doi_getdef(u32 doi) { struct calipso_doi *doi_def; rcu_read_lock(); doi_def = calipso_doi_search(doi); if (!doi_def) goto doi_getdef_return; if (!refcount_inc_not_zero(&doi_def->refcount)) doi_def = NULL; doi_getdef_return: rcu_read_unlock(); return doi_def; } /** * calipso_doi_putdef - Releases a reference for the given DOI definition * @doi_def: the DOI definition * * Description: * Releases a DOI definition reference obtained from calipso_doi_getdef(). * */ static void calipso_doi_putdef(struct calipso_doi *doi_def) { if (!doi_def) return; if (!refcount_dec_and_test(&doi_def->refcount)) return; calipso_cache_invalidate(); call_rcu(&doi_def->rcu, calipso_doi_free_rcu); } /** * calipso_doi_walk - Iterate through the DOI definitions * @skip_cnt: skip past this number of DOI definitions, updated * @callback: callback for each DOI definition * @cb_arg: argument for the callback function * * Description: * Iterate over the DOI definition list, skipping the first @skip_cnt entries. * For each entry call @callback, if @callback returns a negative value stop * 'walking' through the list and return. Updates the value in @skip_cnt upon * return. Returns zero on success, negative values on failure. 
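 *
 * A minimal callback sketch (illustrative; print_doi is a hypothetical
 * name)::
 *
 *	static int print_doi(struct calipso_doi *doi_def, void *arg)
 *	{
 *		pr_info("CALIPSO DOI %u\n", doi_def->doi);
 *		return 0;
 *	}
 *
 *	u32 skip = 0;
 *
 *	calipso_doi_walk(&skip, print_doi, NULL);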
* */ static int calipso_doi_walk(u32 *skip_cnt, int (*callback)(struct calipso_doi *doi_def, void *arg), void *cb_arg) { int ret_val = -ENOENT; u32 doi_cnt = 0; struct calipso_doi *iter_doi; rcu_read_lock(); list_for_each_entry_rcu(iter_doi, &calipso_doi_list, list) if (refcount_read(&iter_doi->refcount) > 0) { if (doi_cnt++ < *skip_cnt) continue; ret_val = callback(iter_doi, cb_arg); if (ret_val < 0) { doi_cnt--; goto doi_walk_return; } } doi_walk_return: rcu_read_unlock(); *skip_cnt = doi_cnt; return ret_val; } /** * calipso_validate - Validate a CALIPSO option * @skb: the packet * @option: the start of the option * * Description: * This routine is called to validate a CALIPSO option. * If the option is valid then %true is returned, otherwise * %false is returned. * * The caller should have already checked that the length of the * option (including the TLV header) is >= 10 and that the catmap * length is consistent with the option length. * * We leave checks on the level and categories to the socket layer. */ bool calipso_validate(const struct sk_buff *skb, const unsigned char *option) { struct calipso_doi *doi_def; bool ret_val; u16 crc, len = option[1] + 2; static const u8 zero[2]; /* The original CRC runs over the option including the TLV header * with the CRC-16 field (at offset 8) zeroed out. */ crc = crc_ccitt(0xffff, option, 8); crc = crc_ccitt(crc, zero, sizeof(zero)); if (len > 10) crc = crc_ccitt(crc, option + 10, len - 10); crc = ~crc; if (option[8] != (crc & 0xff) || option[9] != ((crc >> 8) & 0xff)) return false; rcu_read_lock(); doi_def = calipso_doi_search(get_unaligned_be32(option + 2)); ret_val = !!doi_def; rcu_read_unlock(); return ret_val; } /** * calipso_map_cat_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category bitmap in network/CALIPSO format * @net_cat_len: the length of the CALIPSO bitmap in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CALIPSO bitmap using the given DOI definition. Returns the minimum * size in bytes of the network bitmap on success, negative values otherwise. * */ static int calipso_map_cat_hton(const struct calipso_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int spot = -1; u32 net_spot_max = 0; u32 net_clen_bits = net_cat_len * 8; for (;;) { spot = netlbl_catmap_walk(secattr->attr.mls.cat, spot + 1); if (spot < 0) break; if (spot >= net_clen_bits) return -ENOSPC; netlbl_bitmap_setbit(net_cat, spot, 1); if (spot > net_spot_max) net_spot_max = spot; } return (net_spot_max / 32 + 1) * 4; } /** * calipso_map_cat_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category bitmap in network/CALIPSO format * @net_cat_len: the length of the CALIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CALIPSO bitmap to the correct local * MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. 
* */ static int calipso_map_cat_ntoh(const struct calipso_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; int spot = -1; u32 net_clen_bits = net_cat_len * 8; for (;;) { spot = netlbl_bitmap_walk(net_cat, net_clen_bits, spot + 1, 1); if (spot < 0) return 0; ret_val = netlbl_catmap_setbit(&secattr->attr.mls.cat, spot, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return -EINVAL; } /** * calipso_pad_write - Writes pad bytes in TLV format * @buf: the buffer * @offset: offset from start of buffer to write padding * @count: number of pad bytes to write * * Description: * Write @count bytes of TLV padding into @buffer starting at offset @offset. * @count should be less than 8 - see RFC 4942. * */ static int calipso_pad_write(unsigned char *buf, unsigned int offset, unsigned int count) { if (WARN_ON_ONCE(count >= 8)) return -EINVAL; switch (count) { case 0: break; case 1: buf[offset] = IPV6_TLV_PAD1; break; default: buf[offset] = IPV6_TLV_PADN; buf[offset + 1] = count - 2; if (count > 2) memset(buf + offset + 2, 0, count - 2); break; } return 0; } /** * calipso_genopt - Generate a CALIPSO option * @buf: the option buffer * @start: offset from which to write * @buf_len: the size of opt_buf * @doi_def: the CALIPSO DOI to use * @secattr: the security attributes * * Description: * Generate a CALIPSO option using the DOI definition and security attributes * passed to the function. This also generates upto three bytes of leading * padding that ensures that the option is 4n + 2 aligned. It returns the * number of bytes written (including any initial padding). */ static int calipso_genopt(unsigned char *buf, u32 start, u32 buf_len, const struct calipso_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; u32 len, pad; u16 crc; static const unsigned char padding[4] = {2, 1, 0, 3}; unsigned char *calipso; /* CALIPSO has 4n + 2 alignment */ pad = padding[start & 3]; if (buf_len <= start + pad + CALIPSO_HDR_LEN) return -ENOSPC; if ((secattr->flags & NETLBL_SECATTR_MLS_LVL) == 0) return -EPERM; len = CALIPSO_HDR_LEN; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = calipso_map_cat_hton(doi_def, secattr, buf + start + pad + len, buf_len - start - pad - len); if (ret_val < 0) return ret_val; len += ret_val; } calipso_pad_write(buf, start, pad); calipso = buf + start + pad; calipso[0] = IPV6_TLV_CALIPSO; calipso[1] = len - 2; *(__be32 *)(calipso + 2) = htonl(doi_def->doi); calipso[6] = (len - CALIPSO_HDR_LEN) / 4; calipso[7] = secattr->attr.mls.lvl; crc = ~crc_ccitt(0xffff, calipso, len); calipso[8] = crc & 0xff; calipso[9] = (crc >> 8) & 0xff; return pad + len; } /* Hop-by-hop hdr helper functions */ /** * calipso_opt_update - Replaces socket's hop options with a new set * @sk: the socket * @hop: new hop options * * Description: * Replaces @sk's hop options with @hop. @hop may be NULL to leave * the socket with no hop options. 
* */ static int calipso_opt_update(struct sock *sk, struct ipv6_opt_hdr *hop) { struct ipv6_txoptions *old = txopt_get(inet6_sk(sk)), *txopts; txopts = ipv6_renew_options(sk, old, IPV6_HOPOPTS, hop); txopt_put(old); if (IS_ERR(txopts)) return PTR_ERR(txopts); txopts = ipv6_update_options(sk, txopts); if (txopts) { atomic_sub(txopts->tot_len, &sk->sk_omem_alloc); txopt_put(txopts); } return 0; } /** * calipso_tlv_len - Returns the length of the TLV * @opt: the option header * @offset: offset of the TLV within the header * * Description: * Returns the length of the TLV option at offset @offset within * the option header @opt. Checks that the entire TLV fits inside * the option header, returns a negative value if this is not the case. */ static int calipso_tlv_len(struct ipv6_opt_hdr *opt, unsigned int offset) { unsigned char *tlv = (unsigned char *)opt; unsigned int opt_len = ipv6_optlen(opt), tlv_len; if (offset < sizeof(*opt) || offset >= opt_len) return -EINVAL; if (tlv[offset] == IPV6_TLV_PAD1) return 1; if (offset + 1 >= opt_len) return -EINVAL; tlv_len = tlv[offset + 1] + 2; if (offset + tlv_len > opt_len) return -EINVAL; return tlv_len; } /** * calipso_opt_find - Finds the CALIPSO option in an IPv6 hop options header * @hop: the hop options header * @start: on return holds the offset of any leading padding * @end: on return holds the offset of the first non-pad TLV after CALIPSO * * Description: * Finds the space occupied by a CALIPSO option (including any leading and * trailing padding). * * If a CALIPSO option exists set @start and @end to the * offsets within @hop of the start of padding before the first * CALIPSO option and the end of padding after the first CALIPSO * option. In this case the function returns 0. * * In the absence of a CALIPSO option, @start and @end will be * set to the start and end of any trailing padding in the header. * This is useful when appending a new option, as the caller may want * to overwrite some of this padding. In this case the function will * return -ENOENT. */ static int calipso_opt_find(struct ipv6_opt_hdr *hop, unsigned int *start, unsigned int *end) { int ret_val = -ENOENT, tlv_len; unsigned int opt_len, offset, offset_s = 0, offset_e = 0; unsigned char *opt = (unsigned char *)hop; opt_len = ipv6_optlen(hop); offset = sizeof(*hop); while (offset < opt_len) { tlv_len = calipso_tlv_len(hop, offset); if (tlv_len < 0) return tlv_len; switch (opt[offset]) { case IPV6_TLV_PAD1: case IPV6_TLV_PADN: if (offset_e) offset_e = offset; break; case IPV6_TLV_CALIPSO: ret_val = 0; offset_e = offset; break; default: if (offset_e == 0) offset_s = offset; else goto out; } offset += tlv_len; } out: if (offset_s) *start = offset_s + calipso_tlv_len(hop, offset_s); else *start = sizeof(*hop); if (offset_e) *end = offset_e + calipso_tlv_len(hop, offset_e); else *end = opt_len; return ret_val; } /** * calipso_opt_insert - Inserts a CALIPSO option into an IPv6 hop opt hdr * @hop: the original hop options header * @doi_def: the CALIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Creates a new hop options header based on @hop with a * CALIPSO option added to it. If @hop already contains a CALIPSO * option this is overwritten, otherwise the new option is appended * after any existing options. If @hop is NULL then the new header * will contain just the CALIPSO option and any needed padding. 
* */ static struct ipv6_opt_hdr * calipso_opt_insert(struct ipv6_opt_hdr *hop, const struct calipso_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { unsigned int start, end, buf_len, pad, hop_len; struct ipv6_opt_hdr *new; int ret_val; if (hop) { hop_len = ipv6_optlen(hop); ret_val = calipso_opt_find(hop, &start, &end); if (ret_val && ret_val != -ENOENT) return ERR_PTR(ret_val); } else { hop_len = 0; start = sizeof(*hop); end = 0; } buf_len = hop_len + start - end + CALIPSO_OPT_LEN_MAX_WITH_PAD; new = kzalloc(buf_len, GFP_ATOMIC); if (!new) return ERR_PTR(-ENOMEM); if (start > sizeof(*hop)) memcpy(new, hop, start); ret_val = calipso_genopt((unsigned char *)new, start, buf_len, doi_def, secattr); if (ret_val < 0) { kfree(new); return ERR_PTR(ret_val); } buf_len = start + ret_val; /* At this point buf_len aligns to 4n, so (buf_len & 4) pads to 8n */ pad = ((buf_len & 4) + (end & 7)) & 7; calipso_pad_write((unsigned char *)new, buf_len, pad); buf_len += pad; if (end != hop_len) { memcpy((char *)new + buf_len, (char *)hop + end, hop_len - end); buf_len += hop_len - end; } new->nexthdr = 0; new->hdrlen = buf_len / 8 - 1; return new; } /** * calipso_opt_del - Removes the CALIPSO option from an option header * @hop: the original header * @new: the new header * * Description: * Creates a new header based on @hop without any CALIPSO option. If @hop * doesn't contain a CALIPSO option it returns -ENOENT. If @hop contains * no other non-padding options, it returns zero with @new set to NULL. * Otherwise it returns zero, creates a new header without the CALIPSO * option (and removing as much padding as possible) and returns with * @new set to that header. * */ static int calipso_opt_del(struct ipv6_opt_hdr *hop, struct ipv6_opt_hdr **new) { int ret_val; unsigned int start, end, delta, pad, hop_len; ret_val = calipso_opt_find(hop, &start, &end); if (ret_val) return ret_val; hop_len = ipv6_optlen(hop); if (start == sizeof(*hop) && end == hop_len) { /* There's no other option in the header so return NULL */ *new = NULL; return 0; } delta = (end - start) & ~7; *new = kzalloc(hop_len - delta, GFP_ATOMIC); if (!*new) return -ENOMEM; memcpy(*new, hop, start); (*new)->hdrlen -= delta / 8; pad = (end - start) & 7; calipso_pad_write((unsigned char *)*new, start, pad); if (end != hop_len) memcpy((char *)*new + start + pad, (char *)hop + end, hop_len - end); return 0; } /** * calipso_opt_getattr - Get the security attributes from a memory block * @calipso: the CALIPSO option * @secattr: the security attributes * * Description: * Inspect @calipso and return the security attributes in @secattr. * Returns zero on success and negative values on failure. 
* */ static int calipso_opt_getattr(const unsigned char *calipso, struct netlbl_lsm_secattr *secattr) { int ret_val = -ENOMSG; u32 doi, len = calipso[1], cat_len = calipso[6] * 4; struct calipso_doi *doi_def; if (cat_len + 8 > len) return -EINVAL; if (calipso_cache_check(calipso + 2, calipso[1], secattr) == 0) return 0; doi = get_unaligned_be32(calipso + 2); rcu_read_lock(); doi_def = calipso_doi_search(doi); if (!doi_def) goto getattr_return; secattr->attr.mls.lvl = calipso[7]; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (cat_len) { ret_val = calipso_map_cat_ntoh(doi_def, calipso + 10, cat_len, secattr); if (ret_val != 0) { netlbl_catmap_free(secattr->attr.mls.cat); goto getattr_return; } if (secattr->attr.mls.cat) secattr->flags |= NETLBL_SECATTR_MLS_CAT; } secattr->type = NETLBL_NLTYPE_CALIPSO; getattr_return: rcu_read_unlock(); return ret_val; } /* sock functions. */ /** * calipso_sock_getattr - Get the security attributes from a sock * @sk: the sock * @secattr: the security attributes * * Description: * Query @sk to see if there is a CALIPSO option attached to the sock and if * there is return the CALIPSO security attributes in @secattr. This function * requires that @sk be locked, or privately held, but it does not do any * locking itself. Returns zero on success and negative values on failure. * */ static int calipso_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) { struct ipv6_opt_hdr *hop; int opt_len, len, ret_val = -ENOMSG, offset; unsigned char *opt; struct ipv6_txoptions *txopts = txopt_get(inet6_sk(sk)); if (!txopts || !txopts->hopopt) goto done; hop = txopts->hopopt; opt = (unsigned char *)hop; opt_len = ipv6_optlen(hop); offset = sizeof(*hop); while (offset < opt_len) { len = calipso_tlv_len(hop, offset); if (len < 0) { ret_val = len; goto done; } switch (opt[offset]) { case IPV6_TLV_CALIPSO: if (len < CALIPSO_HDR_LEN) ret_val = -EINVAL; else ret_val = calipso_opt_getattr(&opt[offset], secattr); goto done; default: offset += len; break; } } done: txopt_put(txopts); return ret_val; } /** * calipso_sock_setattr - Add a CALIPSO option to a socket * @sk: the socket * @doi_def: the CALIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CALIPSO option on the given socket using the DOI definition and * security attributes passed to the function. This function requires * exclusive access to @sk, which means it either needs to be in the * process of being created or locked. Returns zero on success and negative * values on failure. * */ static int calipso_sock_setattr(struct sock *sk, const struct calipso_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; struct ipv6_opt_hdr *old, *new; struct ipv6_txoptions *txopts = txopt_get(inet6_sk(sk)); old = NULL; if (txopts) old = txopts->hopopt; new = calipso_opt_insert(old, doi_def, secattr); txopt_put(txopts); if (IS_ERR(new)) return PTR_ERR(new); ret_val = calipso_opt_update(sk, new); kfree(new); return ret_val; } /** * calipso_sock_delattr - Delete the CALIPSO option from a socket * @sk: the socket * * Description: * Removes the CALIPSO option from a socket, if present. * */ static void calipso_sock_delattr(struct sock *sk) { struct ipv6_opt_hdr *new_hop; struct ipv6_txoptions *txopts = txopt_get(inet6_sk(sk)); if (!txopts || !txopts->hopopt) goto done; if (calipso_opt_del(txopts->hopopt, &new_hop)) goto done; calipso_opt_update(sk, new_hop); kfree(new_hop); done: txopt_put(txopts); } /* request sock functions. 
*/ /** * calipso_req_setattr - Add a CALIPSO option to a connection request socket * @req: the connection request socket * @doi_def: the CALIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CALIPSO option on the given socket using the DOI definition and * security attributes passed to the function. Returns zero on success and * negative values on failure. * */ static int calipso_req_setattr(struct request_sock *req, const struct calipso_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { struct ipv6_txoptions *txopts; struct inet_request_sock *req_inet = inet_rsk(req); struct ipv6_opt_hdr *old, *new; struct sock *sk = sk_to_full_sk(req_to_sk(req)); if (req_inet->ipv6_opt && req_inet->ipv6_opt->hopopt) old = req_inet->ipv6_opt->hopopt; else old = NULL; new = calipso_opt_insert(old, doi_def, secattr); if (IS_ERR(new)) return PTR_ERR(new); txopts = ipv6_renew_options(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, new); kfree(new); if (IS_ERR(txopts)) return PTR_ERR(txopts); txopts = xchg(&req_inet->ipv6_opt, txopts); if (txopts) { atomic_sub(txopts->tot_len, &sk->sk_omem_alloc); txopt_put(txopts); } return 0; } /** * calipso_req_delattr - Delete the CALIPSO option from a request socket * @req: the request socket * * Description: * Removes the CALIPSO option from a request socket, if present. * */ static void calipso_req_delattr(struct request_sock *req) { struct inet_request_sock *req_inet = inet_rsk(req); struct ipv6_opt_hdr *new; struct ipv6_txoptions *txopts; struct sock *sk = sk_to_full_sk(req_to_sk(req)); if (!req_inet->ipv6_opt || !req_inet->ipv6_opt->hopopt) return; if (calipso_opt_del(req_inet->ipv6_opt->hopopt, &new)) return; /* Nothing to do */ txopts = ipv6_renew_options(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, new); if (!IS_ERR(txopts)) { txopts = xchg(&req_inet->ipv6_opt, txopts); if (txopts) { atomic_sub(txopts->tot_len, &sk->sk_omem_alloc); txopt_put(txopts); } } kfree(new); } /* skbuff functions. */ /** * calipso_skbuff_optptr - Find the CALIPSO option in the packet * @skb: the packet * * Description: * Parse the packet's IP header looking for a CALIPSO option. Returns a pointer * to the start of the CALIPSO option on success, NULL if one if not found. * */ static unsigned char *calipso_skbuff_optptr(const struct sk_buff *skb) { const struct ipv6hdr *ip6_hdr = ipv6_hdr(skb); int offset; if (ip6_hdr->nexthdr != NEXTHDR_HOP) return NULL; offset = ipv6_find_tlv(skb, sizeof(*ip6_hdr), IPV6_TLV_CALIPSO); if (offset >= 0) return (unsigned char *)ip6_hdr + offset; return NULL; } /** * calipso_skbuff_setattr - Set the CALIPSO option on a packet * @skb: the packet * @doi_def: the CALIPSO DOI to use * @secattr: the security attributes * * Description: * Set the CALIPSO option on the given packet based on the security attributes. * Returns a pointer to the IP header on success and NULL on failure. 
* */ static int calipso_skbuff_setattr(struct sk_buff *skb, const struct calipso_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; struct ipv6hdr *ip6_hdr; struct ipv6_opt_hdr *hop; unsigned char buf[CALIPSO_MAX_BUFFER]; int len_delta, new_end, pad, payload; unsigned int start, end; ip6_hdr = ipv6_hdr(skb); if (ip6_hdr->nexthdr == NEXTHDR_HOP) { hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1); ret_val = calipso_opt_find(hop, &start, &end); if (ret_val && ret_val != -ENOENT) return ret_val; } else { start = 0; end = 0; } memset(buf, 0, sizeof(buf)); ret_val = calipso_genopt(buf, start & 3, sizeof(buf), doi_def, secattr); if (ret_val < 0) return ret_val; new_end = start + ret_val; /* At this point new_end aligns to 4n, so (new_end & 4) pads to 8n */ pad = ((new_end & 4) + (end & 7)) & 7; len_delta = new_end - (int)end + pad; ret_val = skb_cow(skb, skb_headroom(skb) + len_delta); if (ret_val < 0) return ret_val; ip6_hdr = ipv6_hdr(skb); /* Reset as skb_cow() may have moved it */ if (len_delta) { if (len_delta > 0) skb_push(skb, len_delta); else skb_pull(skb, -len_delta); memmove((char *)ip6_hdr - len_delta, ip6_hdr, sizeof(*ip6_hdr) + start); skb_reset_network_header(skb); ip6_hdr = ipv6_hdr(skb); payload = ntohs(ip6_hdr->payload_len); ip6_hdr->payload_len = htons(payload + len_delta); } hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1); if (start == 0) { struct ipv6_opt_hdr *new_hop = (struct ipv6_opt_hdr *)buf; new_hop->nexthdr = ip6_hdr->nexthdr; new_hop->hdrlen = len_delta / 8 - 1; ip6_hdr->nexthdr = NEXTHDR_HOP; } else { hop->hdrlen += len_delta / 8; } memcpy((char *)hop + start, buf + (start & 3), new_end - start); calipso_pad_write((unsigned char *)hop, new_end, pad); return 0; } /** * calipso_skbuff_delattr - Delete any CALIPSO options from a packet * @skb: the packet * * Description: * Removes any and all CALIPSO options from the given packet. Returns zero on * success, negative values on failure. * */ static int calipso_skbuff_delattr(struct sk_buff *skb) { int ret_val; struct ipv6hdr *ip6_hdr; struct ipv6_opt_hdr *old_hop; u32 old_hop_len, start = 0, end = 0, delta, size, pad; if (!calipso_skbuff_optptr(skb)) return 0; /* since we are changing the packet we should make a copy */ ret_val = skb_cow(skb, skb_headroom(skb)); if (ret_val < 0) return ret_val; ip6_hdr = ipv6_hdr(skb); old_hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1); old_hop_len = ipv6_optlen(old_hop); ret_val = calipso_opt_find(old_hop, &start, &end); if (ret_val) return ret_val; if (start == sizeof(*old_hop) && end == old_hop_len) { /* There's no other option in the header so we delete * the whole thing. 
*/ delta = old_hop_len; size = sizeof(*ip6_hdr); ip6_hdr->nexthdr = old_hop->nexthdr; } else { delta = (end - start) & ~7; if (delta) old_hop->hdrlen -= delta / 8; pad = (end - start) & 7; size = sizeof(*ip6_hdr) + start + pad; calipso_pad_write((unsigned char *)old_hop, start, pad); } if (delta) { skb_pull(skb, delta); memmove((char *)ip6_hdr + delta, ip6_hdr, size); skb_reset_network_header(skb); } return 0; } static const struct netlbl_calipso_ops ops = { .doi_add = calipso_doi_add, .doi_free = calipso_doi_free, .doi_remove = calipso_doi_remove, .doi_getdef = calipso_doi_getdef, .doi_putdef = calipso_doi_putdef, .doi_walk = calipso_doi_walk, .sock_getattr = calipso_sock_getattr, .sock_setattr = calipso_sock_setattr, .sock_delattr = calipso_sock_delattr, .req_setattr = calipso_req_setattr, .req_delattr = calipso_req_delattr, .opt_getattr = calipso_opt_getattr, .skbuff_optptr = calipso_skbuff_optptr, .skbuff_setattr = calipso_skbuff_setattr, .skbuff_delattr = calipso_skbuff_delattr, .cache_invalidate = calipso_cache_invalidate, .cache_add = calipso_cache_add }; /** * calipso_init - Initialize the CALIPSO module * * Description: * Initialize the CALIPSO module and prepare it for use. Returns zero on * success and negative values on failure. * */ int __init calipso_init(void) { int ret_val; ret_val = calipso_cache_init(); if (!ret_val) netlbl_calipso_ops_register(&ops); return ret_val; } void calipso_exit(void) { netlbl_calipso_ops_register(NULL); calipso_cache_invalidate(); kfree(calipso_cache); }
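/*
 * Editorial sketch, not part of calipso.c: the padding arithmetic used by
 * calipso_skbuff_setattr() above.  calipso_genopt() leaves new_end 4-byte
 * aligned, and the pad is chosen so that the change in hop-by-hop header
 * length (len_delta = new_end - end + pad) is always a multiple of 8, which
 * keeps the rebuilt extension header a whole number of 8-byte units as IPv6
 * requires.  Plain user-space C with made-up sample offsets, shown only to
 * make the arithmetic concrete.
 */
#include <stdio.h>

static void example_calipso_pad(unsigned int new_end, unsigned int end)
{
	/* mirrors: pad = ((new_end & 4) + (end & 7)) & 7; */
	unsigned int pad = ((new_end & 4) + (end & 7)) & 7;
	int len_delta = (int)new_end - (int)end + (int)pad;

	printf("new_end=%2u end=%2u -> pad=%u len_delta=%d\n",
	       new_end, end, pad, len_delta);
}

int main(void)
{
	example_calipso_pad(12, 8);	/* pad=4, len_delta=8  */
	example_calipso_pad(12, 10);	/* pad=6, len_delta=8  */
	example_calipso_pad(20, 34);	/* pad=6, len_delta=-8 */
	return 0;
}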
/* SPDX-License-Identifier: LGPL-2.1 */
/*
 *
 *
Copyright (C) International Business Machines Corp., 2002,2008 * Author(s): Steve French (sfrench@us.ibm.com) * Jeremy Allison (jra@samba.org) * */ #ifndef _CIFS_GLOB_H #define _CIFS_GLOB_H #include <linux/in.h> #include <linux/in6.h> #include <linux/inet.h> #include <linux/slab.h> #include <linux/scatterlist.h> #include <linux/mm.h> #include <linux/mempool.h> #include <linux/workqueue.h> #include <linux/utsname.h> #include <linux/sched/mm.h> #include <linux/netfs.h> #include "cifs_fs_sb.h" #include "cifsacl.h" #include <crypto/internal/hash.h> #include <uapi/linux/cifs/cifs_mount.h> #include "../common/smb2pdu.h" #include "smb2pdu.h" #include <linux/filelock.h> #define SMB_PATH_MAX 260 #define CIFS_PORT 445 #define RFC1001_PORT 139 /* * The sizes of various internal tables and strings */ #define MAX_UID_INFO 16 #define MAX_SES_INFO 2 #define MAX_TCON_INFO 4 #define MAX_TREE_SIZE (2 + CIFS_NI_MAXHOST + 1 + CIFS_MAX_SHARE_LEN + 1) #define CIFS_MIN_RCV_POOL 4 #define MAX_REOPEN_ATT 5 /* these many maximum attempts to reopen a file */ /* * default attribute cache timeout (jiffies) */ #define CIFS_DEF_ACTIMEO (1 * HZ) /* * max sleep time before retry to server */ #define CIFS_MAX_SLEEP 2000 /* * max attribute cache timeout (jiffies) - 2^30 */ #define CIFS_MAX_ACTIMEO (1 << 30) /* * Max persistent and resilient handle timeout (milliseconds). * Windows durable max was 960000 (16 minutes) */ #define SMB3_MAX_HANDLE_TIMEOUT 960000 /* * MAX_REQ is the maximum number of requests that WE will send * on one socket concurrently. */ #define CIFS_MAX_REQ 32767 #define RFC1001_NAME_LEN 15 #define RFC1001_NAME_LEN_WITH_NULL (RFC1001_NAME_LEN + 1) /* maximum length of ip addr as a string (including ipv6 and sctp) */ #define SERVER_NAME_LENGTH 80 #define SERVER_NAME_LEN_WITH_NULL (SERVER_NAME_LENGTH + 1) /* echo interval in seconds */ #define SMB_ECHO_INTERVAL_MIN 1 #define SMB_ECHO_INTERVAL_MAX 600 #define SMB_ECHO_INTERVAL_DEFAULT 60 /* smb multichannel query server interfaces interval in seconds */ #define SMB_INTERFACE_POLL_INTERVAL 600 /* maximum number of PDUs in one compound */ #define MAX_COMPOUND 7 /* * Default number of credits to keep available for SMB3. * This value is chosen somewhat arbitrarily. The Windows client * defaults to 128 credits, the Windows server allows clients up to * 512 credits (or 8K for later versions), and the NetApp server * does not limit clients at all. Choose a high enough default value * such that the client shouldn't limit performance, but allow mount * to override (until you approach 64K, where we limit credits to 65000 * to reduce possibility of seeing more server credit overflow bugs. */ #define SMB2_MAX_CREDITS_AVAILABLE 32000 #include "cifspdu.h" #ifndef XATTR_DOS_ATTRIB #define XATTR_DOS_ATTRIB "user.DOSATTRIB" #endif #define CIFS_MAX_WORKSTATION_LEN (__NEW_UTS_LEN + 1) /* reasonable max for client */ #define CIFS_DFS_ROOT_SES(ses) ((ses)->dfs_root_ses ?: (ses)) /* * CIFS vfs client Status information (based on what we know.) 
*/ /* associated with each connection */ enum statusEnum { CifsNew = 0, CifsGood, CifsExiting, CifsNeedReconnect, CifsNeedNegotiate, CifsInNegotiate, }; /* associated with each smb session */ enum ses_status_enum { SES_NEW = 0, SES_GOOD, SES_EXITING, SES_NEED_RECON, SES_IN_SETUP }; /* associated with each tree connection to the server */ enum tid_status_enum { TID_NEW = 0, TID_GOOD, TID_EXITING, TID_NEED_RECON, TID_NEED_TCON, TID_IN_TCON, TID_NEED_FILES_INVALIDATE, /* currently unused */ TID_IN_FILES_INVALIDATE }; enum securityEnum { Unspecified = 0, /* not specified */ NTLMv2, /* Legacy NTLM auth with NTLMv2 hash */ RawNTLMSSP, /* NTLMSSP without SPNEGO, NTLMv2 hash */ Kerberos, /* Kerberos via SPNEGO */ }; enum cifs_reparse_type { CIFS_REPARSE_TYPE_NFS, CIFS_REPARSE_TYPE_WSL, CIFS_REPARSE_TYPE_DEFAULT = CIFS_REPARSE_TYPE_NFS, }; static inline const char *cifs_reparse_type_str(enum cifs_reparse_type type) { switch (type) { case CIFS_REPARSE_TYPE_NFS: return "nfs"; case CIFS_REPARSE_TYPE_WSL: return "wsl"; default: return "unknown"; } } struct session_key { unsigned int len; char *response; }; /* crypto hashing related structure/fields, not specific to a sec mech */ struct cifs_secmech { struct shash_desc *hmacmd5; /* hmacmd5 hash function, for NTLMv2/CR1 hashes */ struct shash_desc *md5; /* md5 hash function, for CIFS/SMB1 signatures */ struct shash_desc *hmacsha256; /* hmac-sha256 hash function, for SMB2 signatures */ struct shash_desc *sha512; /* sha512 hash function, for SMB3.1.1 preauth hash */ struct shash_desc *aes_cmac; /* block-cipher based MAC function, for SMB3 signatures */ struct crypto_aead *enc; /* smb3 encryption AEAD TFM (AES-CCM and AES-GCM) */ struct crypto_aead *dec; /* smb3 decryption AEAD TFM (AES-CCM and AES-GCM) */ }; /* per smb session structure/fields */ struct ntlmssp_auth { bool sesskey_per_smbsess; /* whether session key is per smb session */ __u32 client_flags; /* sent by client in type 1 ntlmsssp exchange */ __u32 server_flags; /* sent by server in type 2 ntlmssp exchange */ unsigned char ciphertext[CIFS_CPHTXT_SIZE]; /* sent to server */ char cryptkey[CIFS_CRYPTO_KEY_SIZE]; /* used by ntlmssp */ }; struct cifs_cred { int uid; int gid; int mode; int cecount; struct cifs_sid osid; struct cifs_sid gsid; struct cifs_ntace *ntaces; struct cifs_ace *aces; }; struct cifs_open_info_data { bool adjust_tz; union { bool reparse_point; bool symlink; }; struct { /* ioctl response buffer */ struct { int buftype; struct kvec iov; } io; __u32 tag; union { struct reparse_data_buffer *buf; struct reparse_posix_data *posix; }; } reparse; struct { __u8 eas[SMB2_WSL_MAX_QUERY_EA_RESP_SIZE]; unsigned int eas_len; } wsl; char *symlink_target; struct cifs_sid posix_owner; struct cifs_sid posix_group; union { struct smb2_file_all_info fi; struct smb311_posix_qinfo posix_fi; }; }; /* ***************************************************************** * Except the CIFS PDUs themselves all the * globally interesting structs should go here ***************************************************************** */ /* * A smb_rqst represents a complete request to be issued to a server. It's * formed by a kvec array, followed by an array of pages. Page data is assumed * to start at the beginning of the first page. 
*/ struct smb_rqst { struct kvec *rq_iov; /* array of kvecs */ unsigned int rq_nvec; /* number of kvecs in array */ size_t rq_iter_size; /* Amount of data in ->rq_iter */ struct iov_iter rq_iter; /* Data iterator */ struct xarray rq_buffer; /* Page buffer for encryption */ }; struct mid_q_entry; struct TCP_Server_Info; struct cifsFileInfo; struct cifs_ses; struct cifs_tcon; struct dfs_info3_param; struct cifs_fattr; struct smb3_fs_context; struct cifs_fid; struct cifs_readdata; struct cifs_writedata; struct cifs_io_parms; struct cifs_search_info; struct cifsInodeInfo; struct cifs_open_parms; struct cifs_credits; struct smb_version_operations { int (*send_cancel)(struct TCP_Server_Info *, struct smb_rqst *, struct mid_q_entry *); bool (*compare_fids)(struct cifsFileInfo *, struct cifsFileInfo *); /* setup request: allocate mid, sign message */ struct mid_q_entry *(*setup_request)(struct cifs_ses *, struct TCP_Server_Info *, struct smb_rqst *); /* setup async request: allocate mid, sign message */ struct mid_q_entry *(*setup_async_request)(struct TCP_Server_Info *, struct smb_rqst *); /* check response: verify signature, map error */ int (*check_receive)(struct mid_q_entry *, struct TCP_Server_Info *, bool); void (*add_credits)(struct TCP_Server_Info *server, const struct cifs_credits *credits, const int optype); void (*set_credits)(struct TCP_Server_Info *, const int); int * (*get_credits_field)(struct TCP_Server_Info *, const int); unsigned int (*get_credits)(struct mid_q_entry *); __u64 (*get_next_mid)(struct TCP_Server_Info *); void (*revert_current_mid)(struct TCP_Server_Info *server, const unsigned int val); /* data offset from read response message */ unsigned int (*read_data_offset)(char *); /* * Data length from read response message * When in_remaining is true, the returned data length is in * message field DataRemaining for out-of-band data read (e.g through * Memory Registration RDMA write in SMBD). * Otherwise, the returned data length is in message field DataLength. 
*/ unsigned int (*read_data_length)(char *, bool in_remaining); /* map smb to linux error */ int (*map_error)(char *, bool); /* find mid corresponding to the response message */ struct mid_q_entry * (*find_mid)(struct TCP_Server_Info *, char *); void (*dump_detail)(void *buf, struct TCP_Server_Info *ptcp_info); void (*clear_stats)(struct cifs_tcon *); void (*print_stats)(struct seq_file *m, struct cifs_tcon *); void (*dump_share_caps)(struct seq_file *, struct cifs_tcon *); /* verify the message */ int (*check_message)(char *, unsigned int, struct TCP_Server_Info *); bool (*is_oplock_break)(char *, struct TCP_Server_Info *); int (*handle_cancelled_mid)(struct mid_q_entry *, struct TCP_Server_Info *); void (*downgrade_oplock)(struct TCP_Server_Info *server, struct cifsInodeInfo *cinode, __u32 oplock, unsigned int epoch, bool *purge_cache); /* process transaction2 response */ bool (*check_trans2)(struct mid_q_entry *, struct TCP_Server_Info *, char *, int); /* check if we need to negotiate */ bool (*need_neg)(struct TCP_Server_Info *); /* negotiate to the server */ int (*negotiate)(const unsigned int xid, struct cifs_ses *ses, struct TCP_Server_Info *server); /* set negotiated write size */ unsigned int (*negotiate_wsize)(struct cifs_tcon *tcon, struct smb3_fs_context *ctx); /* set negotiated read size */ unsigned int (*negotiate_rsize)(struct cifs_tcon *tcon, struct smb3_fs_context *ctx); /* setup smb sessionn */ int (*sess_setup)(const unsigned int, struct cifs_ses *, struct TCP_Server_Info *server, const struct nls_table *); /* close smb session */ int (*logoff)(const unsigned int, struct cifs_ses *); /* connect to a server share */ int (*tree_connect)(const unsigned int, struct cifs_ses *, const char *, struct cifs_tcon *, const struct nls_table *); /* close tree connecion */ int (*tree_disconnect)(const unsigned int, struct cifs_tcon *); /* get DFS referrals */ int (*get_dfs_refer)(const unsigned int, struct cifs_ses *, const char *, struct dfs_info3_param **, unsigned int *, const struct nls_table *, int); /* informational QFS call */ void (*qfs_tcon)(const unsigned int, struct cifs_tcon *, struct cifs_sb_info *); /* query for server interfaces */ int (*query_server_interfaces)(const unsigned int, struct cifs_tcon *, bool); /* check if a path is accessible or not */ int (*is_path_accessible)(const unsigned int, struct cifs_tcon *, struct cifs_sb_info *, const char *); /* query path data from the server */ int (*query_path_info)(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const char *full_path, struct cifs_open_info_data *data); /* query file data from the server */ int (*query_file_info)(const unsigned int xid, struct cifs_tcon *tcon, struct cifsFileInfo *cfile, struct cifs_open_info_data *data); /* query reparse point to determine which type of special file */ int (*query_reparse_point)(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const char *full_path, u32 *tag, struct kvec *rsp, int *rsp_buftype); /* get server index number */ int (*get_srv_inum)(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const char *full_path, u64 *uniqueid, struct cifs_open_info_data *data); /* set size by path */ int (*set_path_size)(const unsigned int, struct cifs_tcon *, const char *, __u64, struct cifs_sb_info *, bool, struct dentry *); /* set size by file handle */ int (*set_file_size)(const unsigned int, struct cifs_tcon *, struct cifsFileInfo *, __u64, bool); /* set attributes */ int (*set_file_info)(struct 
inode *, const char *, FILE_BASIC_INFO *, const unsigned int); int (*set_compression)(const unsigned int, struct cifs_tcon *, struct cifsFileInfo *); /* check if we can send an echo or nor */ bool (*can_echo)(struct TCP_Server_Info *); /* send echo request */ int (*echo)(struct TCP_Server_Info *); /* create directory */ int (*posix_mkdir)(const unsigned int xid, struct inode *inode, umode_t mode, struct cifs_tcon *tcon, const char *full_path, struct cifs_sb_info *cifs_sb); int (*mkdir)(const unsigned int xid, struct inode *inode, umode_t mode, struct cifs_tcon *tcon, const char *name, struct cifs_sb_info *sb); /* set info on created directory */ void (*mkdir_setinfo)(struct inode *, const char *, struct cifs_sb_info *, struct cifs_tcon *, const unsigned int); /* remove directory */ int (*rmdir)(const unsigned int, struct cifs_tcon *, const char *, struct cifs_sb_info *); /* unlink file */ int (*unlink)(const unsigned int, struct cifs_tcon *, const char *, struct cifs_sb_info *, struct dentry *); /* open, rename and delete file */ int (*rename_pending_delete)(const char *, struct dentry *, const unsigned int); /* send rename request */ int (*rename)(const unsigned int xid, struct cifs_tcon *tcon, struct dentry *source_dentry, const char *from_name, const char *to_name, struct cifs_sb_info *cifs_sb); /* send create hardlink request */ int (*create_hardlink)(const unsigned int xid, struct cifs_tcon *tcon, struct dentry *source_dentry, const char *from_name, const char *to_name, struct cifs_sb_info *cifs_sb); /* query symlink target */ int (*query_symlink)(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const char *full_path, char **target_path); /* open a file for non-posix mounts */ int (*open)(const unsigned int xid, struct cifs_open_parms *oparms, __u32 *oplock, void *buf); /* set fid protocol-specific info */ void (*set_fid)(struct cifsFileInfo *, struct cifs_fid *, __u32); /* close a file */ int (*close)(const unsigned int, struct cifs_tcon *, struct cifs_fid *); /* close a file, returning file attributes and timestamps */ int (*close_getattr)(const unsigned int xid, struct cifs_tcon *tcon, struct cifsFileInfo *pfile_info); /* send a flush request to the server */ int (*flush)(const unsigned int, struct cifs_tcon *, struct cifs_fid *); /* async read from the server */ int (*async_readv)(struct cifs_readdata *); /* async write to the server */ int (*async_writev)(struct cifs_writedata *, void (*release)(struct kref *)); /* sync read from the server */ int (*sync_read)(const unsigned int, struct cifs_fid *, struct cifs_io_parms *, unsigned int *, char **, int *); /* sync write to the server */ int (*sync_write)(const unsigned int, struct cifs_fid *, struct cifs_io_parms *, unsigned int *, struct kvec *, unsigned long); /* open dir, start readdir */ int (*query_dir_first)(const unsigned int, struct cifs_tcon *, const char *, struct cifs_sb_info *, struct cifs_fid *, __u16, struct cifs_search_info *); /* continue readdir */ int (*query_dir_next)(const unsigned int, struct cifs_tcon *, struct cifs_fid *, __u16, struct cifs_search_info *srch_inf); /* close dir */ int (*close_dir)(const unsigned int, struct cifs_tcon *, struct cifs_fid *); /* calculate a size of SMB message */ unsigned int (*calc_smb_size)(void *buf); /* check for STATUS_PENDING and process the response if yes */ bool (*is_status_pending)(char *buf, struct TCP_Server_Info *server); /* check for STATUS_NETWORK_SESSION_EXPIRED */ bool (*is_session_expired)(char *); /* send oplock break response */ 
int (*oplock_response)(struct cifs_tcon *tcon, __u64 persistent_fid, __u64 volatile_fid, __u16 net_fid, struct cifsInodeInfo *cifs_inode); /* query remote filesystem */ int (*queryfs)(const unsigned int, struct cifs_tcon *, struct cifs_sb_info *, struct kstatfs *); /* send mandatory brlock to the server */ int (*mand_lock)(const unsigned int, struct cifsFileInfo *, __u64, __u64, __u32, int, int, bool); /* unlock range of mandatory locks */ int (*mand_unlock_range)(struct cifsFileInfo *, struct file_lock *, const unsigned int); /* push brlocks from the cache to the server */ int (*push_mand_locks)(struct cifsFileInfo *); /* get lease key of the inode */ void (*get_lease_key)(struct inode *, struct cifs_fid *); /* set lease key of the inode */ void (*set_lease_key)(struct inode *, struct cifs_fid *); /* generate new lease key */ void (*new_lease_key)(struct cifs_fid *); int (*generate_signingkey)(struct cifs_ses *ses, struct TCP_Server_Info *server); int (*calc_signature)(struct smb_rqst *, struct TCP_Server_Info *, bool allocate_crypto); int (*set_integrity)(const unsigned int, struct cifs_tcon *tcon, struct cifsFileInfo *src_file); int (*enum_snapshots)(const unsigned int xid, struct cifs_tcon *tcon, struct cifsFileInfo *src_file, void __user *); int (*notify)(const unsigned int xid, struct file *pfile, void __user *pbuf, bool return_changes); int (*query_mf_symlink)(unsigned int, struct cifs_tcon *, struct cifs_sb_info *, const unsigned char *, char *, unsigned int *); int (*create_mf_symlink)(unsigned int, struct cifs_tcon *, struct cifs_sb_info *, const unsigned char *, char *, unsigned int *); /* if we can do cache read operations */ bool (*is_read_op)(__u32); /* set oplock level for the inode */ void (*set_oplock_level)(struct cifsInodeInfo *, __u32, unsigned int, bool *); /* create lease context buffer for CREATE request */ char * (*create_lease_buf)(u8 *lease_key, u8 oplock); /* parse lease context buffer and return oplock/epoch info */ __u8 (*parse_lease_buf)(void *buf, unsigned int *epoch, char *lkey); ssize_t (*copychunk_range)(const unsigned int, struct cifsFileInfo *src_file, struct cifsFileInfo *target_file, u64 src_off, u64 len, u64 dest_off); int (*duplicate_extents)(const unsigned int, struct cifsFileInfo *src, struct cifsFileInfo *target_file, u64 src_off, u64 len, u64 dest_off); int (*validate_negotiate)(const unsigned int, struct cifs_tcon *); ssize_t (*query_all_EAs)(const unsigned int, struct cifs_tcon *, const unsigned char *, const unsigned char *, char *, size_t, struct cifs_sb_info *); int (*set_EA)(const unsigned int, struct cifs_tcon *, const char *, const char *, const void *, const __u16, const struct nls_table *, struct cifs_sb_info *); struct cifs_ntsd * (*get_acl)(struct cifs_sb_info *, struct inode *, const char *, u32 *, u32); struct cifs_ntsd * (*get_acl_by_fid)(struct cifs_sb_info *, const struct cifs_fid *, u32 *, u32); int (*set_acl)(struct cifs_ntsd *, __u32, struct inode *, const char *, int); /* writepages retry size */ unsigned int (*wp_retry_size)(struct inode *); /* get mtu credits */ int (*wait_mtu_credits)(struct TCP_Server_Info *, unsigned int, unsigned int *, struct cifs_credits *); /* adjust previously taken mtu credits to request size */ int (*adjust_credits)(struct TCP_Server_Info *server, struct cifs_credits *credits, const unsigned int payload_size); /* check if we need to issue closedir */ bool (*dir_needs_close)(struct cifsFileInfo *); long (*fallocate)(struct file *, struct cifs_tcon *, int, loff_t, loff_t); /* init transform request 
- used for encryption for now */ int (*init_transform_rq)(struct TCP_Server_Info *, int num_rqst, struct smb_rqst *, struct smb_rqst *); int (*is_transform_hdr)(void *buf); int (*receive_transform)(struct TCP_Server_Info *, struct mid_q_entry **, char **, int *); enum securityEnum (*select_sectype)(struct TCP_Server_Info *, enum securityEnum); int (*next_header)(struct TCP_Server_Info *server, char *buf, unsigned int *noff); /* ioctl passthrough for query_info */ int (*ioctl_query_info)(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, __le16 *path, int is_dir, unsigned long p); /* make unix special files (block, char, fifo, socket) */ int (*make_node)(unsigned int xid, struct inode *inode, struct dentry *dentry, struct cifs_tcon *tcon, const char *full_path, umode_t mode, dev_t device_number); /* version specific fiemap implementation */ int (*fiemap)(struct cifs_tcon *tcon, struct cifsFileInfo *, struct fiemap_extent_info *, u64, u64); /* version specific llseek implementation */ loff_t (*llseek)(struct file *, struct cifs_tcon *, loff_t, int); /* Check for STATUS_IO_TIMEOUT */ bool (*is_status_io_timeout)(char *buf); /* Check for STATUS_NETWORK_NAME_DELETED */ bool (*is_network_name_deleted)(char *buf, struct TCP_Server_Info *srv); int (*parse_reparse_point)(struct cifs_sb_info *cifs_sb, struct kvec *rsp_iov, struct cifs_open_info_data *data); int (*create_reparse_symlink)(const unsigned int xid, struct inode *inode, struct dentry *dentry, struct cifs_tcon *tcon, const char *full_path, const char *symname); }; struct smb_version_values { char *version_string; __u16 protocol_id; __u32 req_capabilities; __u32 large_lock_type; __u32 exclusive_lock_type; __u32 shared_lock_type; __u32 unlock_lock_type; size_t header_preamble_size; size_t header_size; size_t max_header_size; size_t read_rsp_size; __le16 lock_cmd; unsigned int cap_unix; unsigned int cap_nt_find; unsigned int cap_large_files; __u16 signing_enabled; __u16 signing_required; size_t create_lease_size; }; #define HEADER_SIZE(server) (server->vals->header_size) #define MAX_HEADER_SIZE(server) (server->vals->max_header_size) #define HEADER_PREAMBLE_SIZE(server) (server->vals->header_preamble_size) #define MID_HEADER_SIZE(server) (HEADER_SIZE(server) - 1 - HEADER_PREAMBLE_SIZE(server)) /** * CIFS superblock mount flags (mnt_cifs_flags) to consider when * trying to reuse existing superblock for a new mount */ #define CIFS_MOUNT_MASK (CIFS_MOUNT_NO_PERM | CIFS_MOUNT_SET_UID | \ CIFS_MOUNT_SERVER_INUM | CIFS_MOUNT_DIRECT_IO | \ CIFS_MOUNT_NO_XATTR | CIFS_MOUNT_MAP_SPECIAL_CHR | \ CIFS_MOUNT_MAP_SFM_CHR | \ CIFS_MOUNT_UNX_EMUL | CIFS_MOUNT_NO_BRL | \ CIFS_MOUNT_CIFS_ACL | CIFS_MOUNT_OVERR_UID | \ CIFS_MOUNT_OVERR_GID | CIFS_MOUNT_DYNPERM | \ CIFS_MOUNT_NOPOSIXBRL | CIFS_MOUNT_NOSSYNC | \ CIFS_MOUNT_FSCACHE | CIFS_MOUNT_MF_SYMLINKS | \ CIFS_MOUNT_MULTIUSER | CIFS_MOUNT_STRICT_IO | \ CIFS_MOUNT_CIFS_BACKUPUID | CIFS_MOUNT_CIFS_BACKUPGID | \ CIFS_MOUNT_UID_FROM_ACL | CIFS_MOUNT_NO_HANDLE_CACHE | \ CIFS_MOUNT_NO_DFS | CIFS_MOUNT_MODE_FROM_SID | \ CIFS_MOUNT_RO_CACHE | CIFS_MOUNT_RW_CACHE) /** * Generic VFS superblock mount flags (s_flags) to consider when * trying to reuse existing superblock for a new mount */ #define CIFS_MS_MASK (SB_RDONLY | SB_MANDLOCK | SB_NOEXEC | SB_NOSUID | \ SB_NODEV | SB_SYNCHRONOUS) struct cifs_mnt_data { struct cifs_sb_info *cifs_sb; struct smb3_fs_context *ctx; int flags; }; static inline unsigned int get_rfc1002_length(void *buf) { return be32_to_cpu(*((__be32 *)buf)) & 0xffffff; } 
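/*
 * Editorial sketch, not part of cifsglob.h: a user-space rendering of the
 * arithmetic in get_rfc1002_length() above and inc_rfc1001_len() below.  The
 * 4-byte transport prefix in front of every SMB PDU keeps the RFC 1001/1002
 * message type in byte 0 and the PDU length in the remaining 24 bits, in
 * network byte order, hence the 0xffffff mask.  The buffer contents are made
 * up, purely to make the byte layout concrete.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	/* type 0x00 (session message), 24-bit length 0x000102 = 258 bytes */
	unsigned char hdr[4] = { 0x00, 0x00, 0x01, 0x02 };
	uint32_t be;

	memcpy(&be, hdr, sizeof(be));
	printf("pdu length = %u\n", ntohl(be) & 0xffffff);	/* prints 258 */

	/* same idea as inc_rfc1001_len(hdr, 16): bump the big-endian length */
	be = htonl(ntohl(be) + 16);
	memcpy(hdr, &be, sizeof(be));
	printf("after +16  = %u\n", ntohl(be) & 0xffffff);	/* prints 274 */
	return 0;
}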
static inline void inc_rfc1001_len(void *buf, int count) { be32_add_cpu((__be32 *)buf, count); } struct TCP_Server_Info { struct list_head tcp_ses_list; struct list_head smb_ses_list; spinlock_t srv_lock; /* protect anything here that is not protected */ __u64 conn_id; /* connection identifier (useful for debugging) */ int srv_count; /* reference counter */ /* 15 character server name + 0x20 16th byte indicating type = srv */ char server_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL]; struct smb_version_operations *ops; struct smb_version_values *vals; /* updates to tcpStatus protected by cifs_tcp_ses_lock */ enum statusEnum tcpStatus; /* what we think the status is */ char *hostname; /* hostname portion of UNC string */ struct socket *ssocket; struct sockaddr_storage dstaddr; struct sockaddr_storage srcaddr; /* locally bind to this IP */ #ifdef CONFIG_NET_NS struct net *net; #endif wait_queue_head_t response_q; wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/ spinlock_t mid_lock; /* protect mid queue and it's entries */ struct list_head pending_mid_q; bool noblocksnd; /* use blocking sendmsg */ bool noautotune; /* do not autotune send buf sizes */ bool nosharesock; bool tcp_nodelay; bool terminate; unsigned int credits; /* send no more requests at once */ unsigned int max_credits; /* can override large 32000 default at mnt */ unsigned int in_flight; /* number of requests on the wire to server */ unsigned int max_in_flight; /* max number of requests that were on wire */ spinlock_t req_lock; /* protect the two values above */ struct mutex _srv_mutex; unsigned int nofs_flag; struct task_struct *tsk; char server_GUID[16]; __u16 sec_mode; bool sign; /* is signing enabled on this connection? */ bool ignore_signature:1; /* skip validation of signatures in SMB2/3 rsp */ bool session_estab; /* mark when very first sess is established */ int echo_credits; /* echo reserved slots */ int oplock_credits; /* oplock break reserved slots */ bool echoes:1; /* enable echoes */ __u8 client_guid[SMB2_CLIENT_GUID_SIZE]; /* Client GUID */ u16 dialect; /* dialect index that server chose */ bool oplocks:1; /* enable oplocks */ unsigned int maxReq; /* Clients should submit no more */ /* than maxReq distinct unanswered SMBs to the server when using */ /* multiplexed reads or writes (for SMB1/CIFS only, not SMB2/SMB3) */ unsigned int maxBuf; /* maxBuf specifies the maximum */ /* message size the server can send or receive for non-raw SMBs */ /* maxBuf is returned by SMB NegotiateProtocol so maxBuf is only 0 */ /* when socket is setup (and during reconnect) before NegProt sent */ unsigned int max_rw; /* maxRw specifies the maximum */ /* message size the server can send or receive for */ /* SMB_COM_WRITE_RAW or SMB_COM_READ_RAW. 
*/ unsigned int capabilities; /* selective disabling of caps by smb sess */ int timeAdj; /* Adjust for difference in server time zone in sec */ __u64 CurrentMid; /* multiplex id - rotating counter, protected by GlobalMid_Lock */ char cryptkey[CIFS_CRYPTO_KEY_SIZE]; /* used by ntlm, ntlmv2 etc */ /* 16th byte of RFC1001 workstation name is always null */ char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL]; __u32 sequence_number; /* for signing, protected by srv_mutex */ __u32 reconnect_instance; /* incremented on each reconnect */ struct session_key session_key; unsigned long lstrp; /* when we got last response from this server */ struct cifs_secmech secmech; /* crypto sec mech functs, descriptors */ #define CIFS_NEGFLAVOR_UNENCAP 1 /* wct == 17, but no ext_sec */ #define CIFS_NEGFLAVOR_EXTENDED 2 /* wct == 17, ext_sec bit set */ char negflavor; /* NEGOTIATE response flavor */ /* extended security flavors that server supports */ bool sec_ntlmssp; /* supports NTLMSSP */ bool sec_kerberosu2u; /* supports U2U Kerberos */ bool sec_kerberos; /* supports plain Kerberos */ bool sec_mskerberos; /* supports legacy MS Kerberos */ bool large_buf; /* is current buffer large? */ /* use SMBD connection instead of socket */ bool rdma; /* point to the SMBD connection if RDMA is used instead of socket */ struct smbd_connection *smbd_conn; struct delayed_work echo; /* echo ping workqueue job */ char *smallbuf; /* pointer to current "small" buffer */ char *bigbuf; /* pointer to current "big" buffer */ /* Total size of this PDU. Only valid from cifs_demultiplex_thread */ unsigned int pdu_size; unsigned int total_read; /* total amount of data read in this pass */ atomic_t in_send; /* requests trying to send */ atomic_t num_waiters; /* blocked waiting to get in sendrecv */ #ifdef CONFIG_CIFS_STATS2 atomic_t num_cmds[NUMBER_OF_SMB2_COMMANDS]; /* total requests by cmd */ atomic_t smb2slowcmd[NUMBER_OF_SMB2_COMMANDS]; /* count resps > 1 sec */ __u64 time_per_cmd[NUMBER_OF_SMB2_COMMANDS]; /* total time per cmd */ __u32 slowest_cmd[NUMBER_OF_SMB2_COMMANDS]; __u32 fastest_cmd[NUMBER_OF_SMB2_COMMANDS]; #endif /* STATS2 */ unsigned int max_read; unsigned int max_write; unsigned int min_offload; unsigned int retrans; struct { bool requested; /* "compress" mount option set*/ bool enabled; /* actually negotiated with server */ __le16 alg; /* preferred alg negotiated with server */ } compression; __u16 signing_algorithm; __le16 cipher_type; /* save initital negprot hash */ __u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE]; bool signing_negotiated; /* true if valid signing context rcvd from server */ bool posix_ext_supported; struct delayed_work reconnect; /* reconnect workqueue job */ struct mutex reconnect_mutex; /* prevent simultaneous reconnects */ unsigned long echo_interval; /* * Number of targets available for reconnect. The more targets * the more tasks have to wait to let the demultiplex thread * reconnect. */ int nr_targets; bool noblockcnt; /* use non-blocking connect() */ /* * If this is a session channel, * primary_server holds the ref-counted * pointer to primary channel connection for the session. 
*/ #define SERVER_IS_CHAN(server) (!!(server)->primary_server) struct TCP_Server_Info *primary_server; __u16 channel_sequence_num; /* incremented on primary channel on each chan reconnect */ #ifdef CONFIG_CIFS_SWN_UPCALL bool use_swn_dstaddr; struct sockaddr_storage swn_dstaddr; #endif struct mutex refpath_lock; /* protects leaf_fullpath */ /* * leaf_fullpath: Canonical DFS referral path related to this * connection. * It is used in DFS cache refresher, reconnect and may * change due to nested DFS links. * * Protected by @refpath_lock and @srv_lock. The @refpath_lock is * mostly used for not requiring a copy of @leaf_fullpath when getting * cached or new DFS referrals (which might also sleep during I/O). * While @srv_lock is held for making string and NULL comparions against * both fields as in mount(2) and cache refresh. * * format: \\HOST\SHARE[\OPTIONAL PATH] */ char *leaf_fullpath; }; static inline bool is_smb1(struct TCP_Server_Info *server) { return HEADER_PREAMBLE_SIZE(server) != 0; } static inline void cifs_server_lock(struct TCP_Server_Info *server) { unsigned int nofs_flag = memalloc_nofs_save(); mutex_lock(&server->_srv_mutex); server->nofs_flag = nofs_flag; } static inline void cifs_server_unlock(struct TCP_Server_Info *server) { unsigned int nofs_flag = server->nofs_flag; mutex_unlock(&server->_srv_mutex); memalloc_nofs_restore(nofs_flag); } struct cifs_credits { unsigned int value; unsigned int instance; }; static inline unsigned int in_flight(struct TCP_Server_Info *server) { unsigned int num; spin_lock(&server->req_lock); num = server->in_flight; spin_unlock(&server->req_lock); return num; } static inline bool has_credits(struct TCP_Server_Info *server, int *credits, int num_credits) { int num; spin_lock(&server->req_lock); num = *credits; spin_unlock(&server->req_lock); return num >= num_credits; } static inline void add_credits(struct TCP_Server_Info *server, const struct cifs_credits *credits, const int optype) { server->ops->add_credits(server, credits, optype); } static inline void add_credits_and_wake_if(struct TCP_Server_Info *server, const struct cifs_credits *credits, const int optype) { if (credits->value) { server->ops->add_credits(server, credits, optype); wake_up(&server->request_q); } } static inline void set_credits(struct TCP_Server_Info *server, const int val) { server->ops->set_credits(server, val); } static inline int adjust_credits(struct TCP_Server_Info *server, struct cifs_credits *credits, const unsigned int payload_size) { return server->ops->adjust_credits ? server->ops->adjust_credits(server, credits, payload_size) : 0; } static inline __le64 get_next_mid64(struct TCP_Server_Info *server) { return cpu_to_le64(server->ops->get_next_mid(server)); } static inline __le16 get_next_mid(struct TCP_Server_Info *server) { __u16 mid = server->ops->get_next_mid(server); /* * The value in the SMB header should be little endian for easy * on-the-wire decoding. */ return cpu_to_le16(mid); } static inline void revert_current_mid(struct TCP_Server_Info *server, const unsigned int val) { if (server->ops->revert_current_mid) server->ops->revert_current_mid(server, val); } static inline void revert_current_mid_from_hdr(struct TCP_Server_Info *server, const struct smb2_hdr *shdr) { unsigned int num = le16_to_cpu(shdr->CreditCharge); return revert_current_mid(server, num > 0 ? 
num : 1); } static inline __u16 get_mid(const struct smb_hdr *smb) { return le16_to_cpu(smb->Mid); } static inline bool compare_mid(__u16 mid, const struct smb_hdr *smb) { return mid == le16_to_cpu(smb->Mid); } /* * When the server supports very large reads and writes via POSIX extensions, * we can allow up to 2^24-1, minus the size of a READ/WRITE_AND_X header, not * including the RFC1001 length. * * Note that this might make for "interesting" allocation problems during * writeback however as we have to allocate an array of pointers for the * pages. A 16M write means ~32kb page array with PAGE_SIZE == 4096. * * For reads, there is a similar problem as we need to allocate an array * of kvecs to handle the receive, though that should only need to be done * once. */ #define CIFS_MAX_WSIZE ((1<<24) - 1 - sizeof(WRITE_REQ) + 4) #define CIFS_MAX_RSIZE ((1<<24) - sizeof(READ_RSP) + 4) /* * When the server doesn't allow large posix writes, only allow a rsize/wsize * of 2^17-1 minus the size of the call header. That allows for a read or * write up to the maximum size described by RFC1002. */ #define CIFS_MAX_RFC1002_WSIZE ((1<<17) - 1 - sizeof(WRITE_REQ) + 4) #define CIFS_MAX_RFC1002_RSIZE ((1<<17) - 1 - sizeof(READ_RSP) + 4) #define CIFS_DEFAULT_IOSIZE (1024 * 1024) /* * Windows only supports a max of 60kb reads and 65535 byte writes. Default to * those values when posix extensions aren't in force. In actuality here, we * use 65536 to allow for a write that is a multiple of 4k. Most servers seem * to be ok with the extra byte even though Windows doesn't send writes that * are that large. * * Citation: * * https://blogs.msdn.com/b/openspecification/archive/2009/04/10/smb-maximum-transmit-buffer-size-and-performance-tuning.aspx */ #define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024) #define CIFS_DEFAULT_NON_POSIX_WSIZE (65536) /* * Macros to allow the TCP_Server_Info->net field and related code to drop out * when CONFIG_NET_NS isn't set. */ #ifdef CONFIG_NET_NS static inline struct net *cifs_net_ns(struct TCP_Server_Info *srv) { return srv->net; } static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net) { srv->net = net; } #else static inline struct net *cifs_net_ns(struct TCP_Server_Info *srv) { return &init_net; } static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net) { } #endif struct cifs_server_iface { struct list_head iface_head; struct kref refcount; size_t speed; size_t weight_fulfilled; unsigned int num_channels; unsigned int rdma_capable : 1; unsigned int rss_capable : 1; unsigned int is_active : 1; /* unset if non existent */ struct sockaddr_storage sockaddr; }; /* release iface when last ref is dropped */ static inline void release_iface(struct kref *ref) { struct cifs_server_iface *iface = container_of(ref, struct cifs_server_iface, refcount); kfree(iface); } struct cifs_chan { unsigned int in_reconnect : 1; /* if session setup in progress for this channel */ struct TCP_Server_Info *server; struct cifs_server_iface *iface; /* interface in use */ __u8 signkey[SMB3_SIGN_KEY_SIZE]; }; #define CIFS_SES_FLAG_SCALE_CHANNELS (0x1) /* * Session structure. 
One of these for each uid session with a particular host */ struct cifs_ses { struct list_head smb_ses_list; struct list_head rlist; /* reconnect list */ struct list_head tcon_list; struct cifs_tcon *tcon_ipc; spinlock_t ses_lock; /* protect anything here that is not protected */ struct mutex session_mutex; struct TCP_Server_Info *server; /* pointer to server info */ int ses_count; /* reference counter */ enum ses_status_enum ses_status; /* updates protected by cifs_tcp_ses_lock */ unsigned int overrideSecFlg; /* if non-zero override global sec flags */ char *serverOS; /* name of operating system underlying server */ char *serverNOS; /* name of network operating system of server */ char *serverDomain; /* security realm of server */ __u64 Suid; /* remote smb uid */ kuid_t linux_uid; /* overriding owner of files on the mount */ kuid_t cred_uid; /* owner of credentials */ unsigned int capabilities; char ip_addr[INET6_ADDRSTRLEN + 1]; /* Max ipv6 (or v4) addr string len */ char *user_name; /* must not be null except during init of sess and after mount option parsing we fill it */ char *domainName; char *password; char *password2; /* When key rotation used, new password may be set before it expires */ char workstation_name[CIFS_MAX_WORKSTATION_LEN]; struct session_key auth_key; struct ntlmssp_auth *ntlmssp; /* ciphertext, flags, server challenge */ enum securityEnum sectype; /* what security flavor was specified? */ bool sign; /* is signing required? */ bool domainAuto:1; bool expired_pwd; /* track if access denied or expired pwd so can know if need to update */ unsigned int flags; __u16 session_flags; __u8 smb3signingkey[SMB3_SIGN_KEY_SIZE]; __u8 smb3encryptionkey[SMB3_ENC_DEC_KEY_SIZE]; __u8 smb3decryptionkey[SMB3_ENC_DEC_KEY_SIZE]; __u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE]; /* * Network interfaces available on the server this session is * connected to. * * Other channels can be opened by connecting and binding this * session to interfaces from this list. * * iface_lock should be taken when accessing any of these fields */ spinlock_t iface_lock; /* ========= begin: protected by iface_lock ======== */ struct list_head iface_list; size_t iface_count; unsigned long iface_last_update; /* jiffies */ /* ========= end: protected by iface_lock ======== */ spinlock_t chan_lock; /* ========= begin: protected by chan_lock ======== */ #define CIFS_MAX_CHANNELS 16 #define CIFS_INVAL_CHAN_INDEX (-1) #define CIFS_ALL_CHANNELS_SET(ses) \ ((1UL << (ses)->chan_count) - 1) #define CIFS_ALL_CHANS_GOOD(ses) \ (!(ses)->chans_need_reconnect) #define CIFS_ALL_CHANS_NEED_RECONNECT(ses) \ ((ses)->chans_need_reconnect == CIFS_ALL_CHANNELS_SET(ses)) #define CIFS_SET_ALL_CHANS_NEED_RECONNECT(ses) \ ((ses)->chans_need_reconnect = CIFS_ALL_CHANNELS_SET(ses)) #define CIFS_CHAN_NEEDS_RECONNECT(ses, index) \ test_bit((index), &(ses)->chans_need_reconnect) #define CIFS_CHAN_IN_RECONNECT(ses, index) \ ((ses)->chans[(index)].in_reconnect) struct cifs_chan chans[CIFS_MAX_CHANNELS]; size_t chan_count; size_t chan_max; atomic_t chan_seq; /* round robin state */ /* * chans_need_reconnect is a bitmap indicating which of the channels * under this smb session needs to be reconnected. * If not multichannel session, only one bit will be used. * * We will ask for sess and tcon reconnection only if all the * channels are marked for needing reconnection. This will * enable the sessions on top to continue to live till any * of the channels below are active. 
*/ unsigned long chans_need_reconnect; /* ========= end: protected by chan_lock ======== */ struct cifs_ses *dfs_root_ses; struct nls_table *local_nls; }; static inline bool cap_unix(struct cifs_ses *ses) { return ses->server->vals->cap_unix & ses->capabilities; } /* * common struct for holding inode info when searching for or updating an * inode with new info */ #define CIFS_FATTR_JUNCTION 0x1 #define CIFS_FATTR_DELETE_PENDING 0x2 #define CIFS_FATTR_NEED_REVAL 0x4 #define CIFS_FATTR_INO_COLLISION 0x8 #define CIFS_FATTR_UNKNOWN_NLINK 0x10 #define CIFS_FATTR_FAKE_ROOT_INO 0x20 struct cifs_fattr { u32 cf_flags; u32 cf_cifsattrs; u64 cf_uniqueid; u64 cf_eof; u64 cf_bytes; u64 cf_createtime; kuid_t cf_uid; kgid_t cf_gid; umode_t cf_mode; dev_t cf_rdev; unsigned int cf_nlink; unsigned int cf_dtype; struct timespec64 cf_atime; struct timespec64 cf_mtime; struct timespec64 cf_ctime; u32 cf_cifstag; char *cf_symlink_target; }; /* * there is one of these for each connection to a resource on a particular * session */ struct cifs_tcon { struct list_head tcon_list; int debug_id; /* Debugging for tracing */ int tc_count; struct list_head rlist; /* reconnect list */ spinlock_t tc_lock; /* protect anything here that is not protected */ atomic_t num_local_opens; /* num of all opens including disconnected */ atomic_t num_remote_opens; /* num of all network opens on server */ struct list_head openFileList; spinlock_t open_file_lock; /* protects list above */ struct cifs_ses *ses; /* pointer to session associated with */ char tree_name[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */ char *nativeFileSystem; char *password; /* for share-level security */ __u32 tid; /* The 4 byte tree id */ __u16 Flags; /* optional support bits */ enum tid_status_enum status; atomic_t num_smbs_sent; union { struct { atomic_t num_writes; atomic_t num_reads; atomic_t num_flushes; atomic_t num_oplock_brks; atomic_t num_opens; atomic_t num_closes; atomic_t num_deletes; atomic_t num_mkdirs; atomic_t num_posixopens; atomic_t num_posixmkdirs; atomic_t num_rmdirs; atomic_t num_renames; atomic_t num_t2renames; atomic_t num_ffirst; atomic_t num_fnext; atomic_t num_fclose; atomic_t num_hardlinks; atomic_t num_symlinks; atomic_t num_locks; atomic_t num_acl_get; atomic_t num_acl_set; } cifs_stats; struct { atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS]; atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS]; } smb2_stats; } stats; __u64 bytes_read; __u64 bytes_written; spinlock_t stat_lock; /* protects the two fields above */ time64_t stats_from_time; FILE_SYSTEM_DEVICE_INFO fsDevInfo; FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if fs name truncated */ FILE_SYSTEM_UNIX_INFO fsUnixInfo; bool ipc:1; /* set if connection to IPC$ share (always also pipe) */ bool pipe:1; /* set if connection to pipe share */ bool print:1; /* set if connection to printer share */ bool retry:1; bool nocase:1; bool nohandlecache:1; /* if strange server resource prob can turn off */ bool nodelete:1; bool seal:1; /* transport encryption for this mounted share */ bool unix_ext:1; /* if false disable Linux extensions to CIFS protocol for this mount even if server would support */ bool posix_extensions; /* if true SMB3.11 posix extensions enabled */ bool local_lease:1; /* check leases (only) on local system not remote */ bool broken_posix_open; /* e.g. 
Samba server versions < 3.3.2, 3.2.9 */ bool broken_sparse_sup; /* if server or share does not support sparse */ bool need_reconnect:1; /* connection reset, tid now invalid */ bool need_reopen_files:1; /* need to reopen tcon file handles */ bool use_resilient:1; /* use resilient instead of durable handles */ bool use_persistent:1; /* use persistent instead of durable handles */ bool no_lease:1; /* Do not request leases on files or directories */ bool use_witness:1; /* use witness protocol */ __le32 capabilities; __u32 share_flags; __u32 maximal_access; __u32 vol_serial_number; __le64 vol_create_time; __u64 snapshot_time; /* for timewarp tokens - timestamp of snapshot */ __u32 handle_timeout; /* persistent and durable handle timeout in ms */ __u32 ss_flags; /* sector size flags */ __u32 perf_sector_size; /* best sector size for perf */ __u32 max_chunks; __u32 max_bytes_chunk; __u32 max_bytes_copy; __u32 max_cached_dirs; #ifdef CONFIG_CIFS_FSCACHE u64 resource_id; /* server resource id */ bool fscache_acquired; /* T if we've tried acquiring a cookie */ struct fscache_volume *fscache; /* cookie for share */ struct mutex fscache_lock; /* Prevent regetting a cookie */ #endif struct list_head pending_opens; /* list of incomplete opens */ struct cached_fids *cfids; /* BB add field for back pointer to sb struct(s)? */ #ifdef CONFIG_CIFS_DFS_UPCALL struct delayed_work dfs_cache_work; #endif struct delayed_work query_interfaces; /* query interfaces workqueue job */ char *origin_fullpath; /* canonical copy of smb3_fs_context::source */ }; /* * This is a refcounted and timestamped container for a tcon pointer. The * container holds a tcon reference. It is considered safe to free one of * these when the tl_count goes to 0. The tl_time is the time of the last * "get" on the container. */ struct tcon_link { struct rb_node tl_rbnode; kuid_t tl_uid; unsigned long tl_flags; #define TCON_LINK_MASTER 0 #define TCON_LINK_PENDING 1 #define TCON_LINK_IN_TREE 2 unsigned long tl_time; atomic_t tl_count; struct cifs_tcon *tl_tcon; }; extern struct tcon_link *cifs_sb_tlink(struct cifs_sb_info *cifs_sb); extern void smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst); static inline struct cifs_tcon * tlink_tcon(struct tcon_link *tlink) { return tlink->tl_tcon; } static inline struct tcon_link * cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb) { return cifs_sb->master_tlink; } extern void cifs_put_tlink(struct tcon_link *tlink); static inline struct tcon_link * cifs_get_tlink(struct tcon_link *tlink) { if (tlink && !IS_ERR(tlink)) atomic_inc(&tlink->tl_count); return tlink; } /* This function is always expected to succeed */ extern struct cifs_tcon *cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb); #define CIFS_OPLOCK_NO_CHANGE 0xfe struct cifs_pending_open { struct list_head olist; struct tcon_link *tlink; __u8 lease_key[16]; __u32 oplock; }; struct cifs_deferred_close { struct list_head dlist; struct tcon_link *tlink; __u16 netfid; __u64 persistent_fid; __u64 volatile_fid; }; /* * This info hangs off the cifsFileInfo structure, pointed to by llist. 
* This is used to track byte stream locks on the file */ struct cifsLockInfo { struct list_head llist; /* pointer to next cifsLockInfo */ struct list_head blist; /* pointer to locks blocked on this */ wait_queue_head_t block_q; __u64 offset; __u64 length; __u32 pid; __u16 type; __u16 flags; }; /* * One of these for each open instance of a file */ struct cifs_search_info { loff_t index_of_last_entry; __u16 entries_in_buffer; __u16 info_level; __u32 resume_key; char *ntwrk_buf_start; char *srch_entries_start; char *last_entry; const char *presume_name; unsigned int resume_name_len; bool endOfSearch:1; bool emptyDir:1; bool unicode:1; bool smallBuf:1; /* so we know which buf_release function to call */ }; #define ACL_NO_MODE ((umode_t)(-1)) struct cifs_open_parms { struct cifs_tcon *tcon; struct cifs_sb_info *cifs_sb; int disposition; int desired_access; int create_options; const char *path; struct cifs_fid *fid; umode_t mode; bool reconnect:1; bool replay:1; /* indicates that this open is for a replay */ struct kvec *ea_cctx; }; struct cifs_fid { __u16 netfid; __u64 persistent_fid; /* persist file id for smb2 */ __u64 volatile_fid; /* volatile file id for smb2 */ __u8 lease_key[SMB2_LEASE_KEY_SIZE]; /* lease key for smb2 */ __u8 create_guid[16]; __u32 access; struct cifs_pending_open *pending_open; unsigned int epoch; #ifdef CONFIG_CIFS_DEBUG2 __u64 mid; #endif /* CIFS_DEBUG2 */ bool purge_cache; }; struct cifs_fid_locks { struct list_head llist; struct cifsFileInfo *cfile; /* fid that owns locks */ struct list_head locks; /* locks held by fid above */ }; struct cifsFileInfo { /* following two lists are protected by tcon->open_file_lock */ struct list_head tlist; /* pointer to next fid owned by tcon */ struct list_head flist; /* next fid (file instance) for this inode */ /* lock list below protected by cifsi->lock_sem */ struct cifs_fid_locks *llist; /* brlocks held by this fid */ kuid_t uid; /* allows finding which FileInfo structure */ __u32 pid; /* process id who opened file */ struct cifs_fid fid; /* file id from remote */ struct list_head rlist; /* reconnect list */ /* BB add lock scope info here if needed */ /* lock scope id (0 if none) */ struct dentry *dentry; struct tcon_link *tlink; unsigned int f_flags; bool invalidHandle:1; /* file closed via session abend */ bool swapfile:1; bool oplock_break_cancelled:1; bool status_file_deleted:1; /* file has been deleted */ bool offload:1; /* offload final part of _put to a wq */ unsigned int oplock_epoch; /* epoch from the lease break */ __u32 oplock_level; /* oplock/lease level from the lease break */ int count; spinlock_t file_info_lock; /* protects four flag/count fields above */ struct mutex fh_mutex; /* prevents reopen race after dead ses*/ struct cifs_search_info srch_inf; struct work_struct oplock_break; /* work for oplock breaks */ struct work_struct put; /* work for the final part of _put */ struct work_struct serverclose; /* work for serverclose */ struct delayed_work deferred; bool deferred_close_scheduled; /* Flag to indicate close is scheduled */ char *symlink_target; }; struct cifs_io_parms { __u16 netfid; __u64 persistent_fid; /* persist file id for smb2 */ __u64 volatile_fid; /* volatile file id for smb2 */ __u32 pid; __u64 offset; unsigned int length; struct cifs_tcon *tcon; struct TCP_Server_Info *server; }; struct cifs_aio_ctx { struct kref refcount; struct list_head list; struct mutex aio_mutex; struct completion done; struct iov_iter iter; struct kiocb *iocb; struct cifsFileInfo *cfile; struct bio_vec *bv; loff_t pos; 
unsigned int nr_pinned_pages; ssize_t rc; unsigned int len; unsigned int total_len; unsigned int bv_need_unpin; /* If ->bv[] needs unpinning */ bool should_dirty; /* * Indicates if this aio_ctx is for direct_io, * If yes, iter is a copy of the user passed iov_iter */ bool direct_io; }; /* asynchronous read support */ struct cifs_readdata { struct kref refcount; struct list_head list; struct completion done; struct cifsFileInfo *cfile; struct address_space *mapping; struct cifs_aio_ctx *ctx; __u64 offset; ssize_t got_bytes; unsigned int bytes; pid_t pid; int result; struct work_struct work; struct iov_iter iter; struct kvec iov[2]; struct TCP_Server_Info *server; #ifdef CONFIG_CIFS_SMB_DIRECT struct smbd_mr *mr; #endif struct cifs_credits credits; }; /* asynchronous write support */ struct cifs_writedata { struct kref refcount; struct list_head list; struct completion done; enum writeback_sync_modes sync_mode; struct work_struct work; struct cifsFileInfo *cfile; struct cifs_aio_ctx *ctx; struct iov_iter iter; struct bio_vec *bv; __u64 offset; pid_t pid; unsigned int bytes; int result; struct TCP_Server_Info *server; #ifdef CONFIG_CIFS_SMB_DIRECT struct smbd_mr *mr; #endif struct cifs_credits credits; bool replay; }; /* * Take a reference on the file private data. Must be called with * cfile->file_info_lock held. */ static inline void cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file) { ++cifs_file->count; } struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file); void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_hdlr, bool offload); void cifsFileInfo_put(struct cifsFileInfo *cifs_file); #define CIFS_CACHE_READ_FLG 1 #define CIFS_CACHE_HANDLE_FLG 2 #define CIFS_CACHE_RH_FLG (CIFS_CACHE_READ_FLG | CIFS_CACHE_HANDLE_FLG) #define CIFS_CACHE_WRITE_FLG 4 #define CIFS_CACHE_RW_FLG (CIFS_CACHE_READ_FLG | CIFS_CACHE_WRITE_FLG) #define CIFS_CACHE_RHW_FLG (CIFS_CACHE_RW_FLG | CIFS_CACHE_HANDLE_FLG) #define CIFS_CACHE_READ(cinode) ((cinode->oplock & CIFS_CACHE_READ_FLG) || (CIFS_SB(cinode->netfs.inode.i_sb)->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)) #define CIFS_CACHE_HANDLE(cinode) (cinode->oplock & CIFS_CACHE_HANDLE_FLG) #define CIFS_CACHE_WRITE(cinode) ((cinode->oplock & CIFS_CACHE_WRITE_FLG) || (CIFS_SB(cinode->netfs.inode.i_sb)->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)) /* * One of these for each file inode */ struct cifsInodeInfo { struct netfs_inode netfs; /* Netfslib context and vfs inode */ bool can_cache_brlcks; struct list_head llist; /* locks held by this inode */ /* * NOTE: Some code paths call down_read(lock_sem) twice, so * we must always use cifs_down_write() instead of down_write() * for this semaphore to avoid deadlocks. */ struct rw_semaphore lock_sem; /* protect the fields above */ /* BB add in lists for dirty pages i.e. write caching info for oplock */ struct list_head openFileList; spinlock_t open_file_lock; /* protects openFileList */ __u32 cifsAttrs; /* e.g. 
DOS archive bit, sparse, compressed, system */ unsigned int oplock; /* oplock/lease level we have */ unsigned int epoch; /* used to track lease state changes */ #define CIFS_INODE_PENDING_OPLOCK_BREAK (0) /* oplock break in progress */ #define CIFS_INODE_PENDING_WRITERS (1) /* Writes in progress */ #define CIFS_INODE_FLAG_UNUSED (2) /* Unused flag */ #define CIFS_INO_DELETE_PENDING (3) /* delete pending on server */ #define CIFS_INO_INVALID_MAPPING (4) /* pagecache is invalid */ #define CIFS_INO_LOCK (5) /* lock bit for synchronization */ #define CIFS_INO_MODIFIED_ATTR (6) /* Indicate change in mtime/ctime */ #define CIFS_INO_CLOSE_ON_LOCK (7) /* Not to defer the close when lock is set */ unsigned long flags; spinlock_t writers_lock; unsigned int writers; /* Number of writers on this inode */ unsigned long time; /* jiffies of last update of inode */ u64 uniqueid; /* server inode number */ u64 createtime; /* creation time on server */ __u8 lease_key[SMB2_LEASE_KEY_SIZE]; /* lease key for this inode */ struct list_head deferred_closes; /* list of deferred closes */ spinlock_t deferred_lock; /* protection on deferred list */ bool lease_granted; /* Flag to indicate whether lease or oplock is granted. */ char *symlink_target; __u32 reparse_tag; }; static inline struct cifsInodeInfo * CIFS_I(struct inode *inode) { return container_of(inode, struct cifsInodeInfo, netfs.inode); } static inline struct cifs_sb_info * CIFS_SB(struct super_block *sb) { return sb->s_fs_info; } static inline struct cifs_sb_info * CIFS_FILE_SB(struct file *file) { return CIFS_SB(file_inode(file)->i_sb); } static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb) { if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) return '/'; else return '\\'; } static inline void convert_delimiter(char *path, char delim) { char old_delim, *pos; if (delim == '/') old_delim = '\\'; else old_delim = '/'; pos = path; while ((pos = strchr(pos, old_delim))) *pos = delim; } #define cifs_stats_inc atomic_inc static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon, unsigned int bytes) { if (bytes) { spin_lock(&tcon->stat_lock); tcon->bytes_written += bytes; spin_unlock(&tcon->stat_lock); } } static inline void cifs_stats_bytes_read(struct cifs_tcon *tcon, unsigned int bytes) { spin_lock(&tcon->stat_lock); tcon->bytes_read += bytes; spin_unlock(&tcon->stat_lock); } /* * This is the prototype for the mid receive function. This function is for * receiving the rest of the SMB frame, starting with the WordCount (which is * just after the MID in struct smb_hdr). Note: * * - This will be called by cifsd, with no locks held. * - The mid will still be on the pending_mid_q. * - mid->resp_buf will point to the current buffer. * * Returns zero on a successful receive, or an error. The receive state in * the TCP_Server_Info will also be updated. */ typedef int (mid_receive_t)(struct TCP_Server_Info *server, struct mid_q_entry *mid); /* * This is the prototype for the mid callback function. This is called once the * mid has been received off of the socket. When creating one, take special * care to avoid deadlocks. Things to bear in mind: * * - it will be called by cifsd, with no locks held * - the mid will be removed from any lists */ typedef void (mid_callback_t)(struct mid_q_entry *mid); /* * This is the prototype for the mid handle function. This is called once the mid * has been recognized after decryption of the message. 
*/ typedef int (mid_handle_t)(struct TCP_Server_Info *server, struct mid_q_entry *mid); /* one of these for every pending CIFS request to the server */ struct mid_q_entry { struct list_head qhead; /* mids waiting on reply from this server */ struct kref refcount; struct TCP_Server_Info *server; /* server corresponding to this mid */ __u64 mid; /* multiplex id */ __u16 credits; /* number of credits consumed by this mid */ __u16 credits_received; /* number of credits from the response */ __u32 pid; /* process id */ __u32 sequence_number; /* for CIFS signing */ unsigned long when_alloc; /* when mid was created */ #ifdef CONFIG_CIFS_STATS2 unsigned long when_sent; /* time when smb send finished */ unsigned long when_received; /* when demux complete (taken off wire) */ #endif mid_receive_t *receive; /* call receive callback */ mid_callback_t *callback; /* call completion callback */ mid_handle_t *handle; /* call handle mid callback */ void *callback_data; /* general purpose pointer for callback */ struct task_struct *creator; void *resp_buf; /* pointer to received SMB header */ unsigned int resp_buf_size; int mid_state; /* wish this were enum but can not pass to wait_event */ unsigned int mid_flags; __le16 command; /* smb command code */ unsigned int optype; /* operation type */ bool large_buf:1; /* if valid response, is pointer to large buf */ bool multiRsp:1; /* multiple trans2 responses for one request */ bool multiEnd:1; /* both received */ bool decrypted:1; /* decrypted entry */ }; struct close_cancelled_open { struct cifs_fid fid; struct cifs_tcon *tcon; struct work_struct work; __u64 mid; __u16 cmd; }; /* Make code in transport.c a little cleaner by moving update of optional stats into function below */ static inline void cifs_in_send_inc(struct TCP_Server_Info *server) { atomic_inc(&server->in_send); } static inline void cifs_in_send_dec(struct TCP_Server_Info *server) { atomic_dec(&server->in_send); } static inline void cifs_num_waiters_inc(struct TCP_Server_Info *server) { atomic_inc(&server->num_waiters); } static inline void cifs_num_waiters_dec(struct TCP_Server_Info *server) { atomic_dec(&server->num_waiters); } #ifdef CONFIG_CIFS_STATS2 static inline void cifs_save_when_sent(struct mid_q_entry *mid) { mid->when_sent = jiffies; } #else static inline void cifs_save_when_sent(struct mid_q_entry *mid) { } #endif /* for pending dnotify requests */ struct dir_notify_req { struct list_head lhead; __le16 Pid; __le16 PidHigh; __u16 Mid; __u16 Tid; __u16 Uid; __u16 netfid; __u32 filter; /* CompletionFilter (for multishot) */ int multishot; struct file *pfile; }; struct dfs_info3_param { int flags; /* DFSREF_REFERRAL_SERVER, DFSREF_STORAGE_SERVER*/ int path_consumed; int server_type; int ref_flag; char *path_name; char *node_name; int ttl; }; struct file_list { struct list_head list; struct cifsFileInfo *cfile; }; struct cifs_mount_ctx { struct cifs_sb_info *cifs_sb; struct smb3_fs_context *fs_ctx; unsigned int xid; struct TCP_Server_Info *server; struct cifs_ses *ses; struct cifs_tcon *tcon; }; static inline void __free_dfs_info_param(struct dfs_info3_param *param) { kfree(param->path_name); kfree(param->node_name); } static inline void free_dfs_info_param(struct dfs_info3_param *param) { if (param) __free_dfs_info_param(param); } static inline void zfree_dfs_info_param(struct dfs_info3_param *param) { if (param) { __free_dfs_info_param(param); memset(param, 0, sizeof(*param)); } } static inline void free_dfs_info_array(struct dfs_info3_param *param, int number_of_items) { int i; if 
((number_of_items == 0) || (param == NULL)) return; for (i = 0; i < number_of_items; i++) { kfree(param[i].path_name); kfree(param[i].node_name); } kfree(param); } static inline bool is_interrupt_error(int error) { switch (error) { case -EINTR: case -ERESTARTSYS: case -ERESTARTNOHAND: case -ERESTARTNOINTR: return true; } return false; } static inline bool is_retryable_error(int error) { if (is_interrupt_error(error) || error == -EAGAIN) return true; return false; } static inline bool is_replayable_error(int error) { if (error == -EAGAIN || error == -ECONNABORTED) return true; return false; } /* cifs_get_writable_file() flags */ #define FIND_WR_ANY 0 #define FIND_WR_FSUID_ONLY 1 #define FIND_WR_WITH_DELETE 2 #define MID_FREE 0 #define MID_REQUEST_ALLOCATED 1 #define MID_REQUEST_SUBMITTED 2 #define MID_RESPONSE_RECEIVED 4 #define MID_RETRY_NEEDED 8 /* session closed while this request was out */ #define MID_RESPONSE_MALFORMED 0x10 #define MID_SHUTDOWN 0x20 #define MID_RESPONSE_READY 0x40 /* ready for another process to handle the rsp */ /* Flags */ #define MID_WAIT_CANCELLED 1 /* Cancelled while waiting for response */ #define MID_DELETED 2 /* Mid has been dequeued/deleted */ /* Types of response buffer returned from SendReceive2 */ #define CIFS_NO_BUFFER 0 /* Response buffer not returned */ #define CIFS_SMALL_BUFFER 1 #define CIFS_LARGE_BUFFER 2 #define CIFS_IOVEC 4 /* array of response buffers */ /* Type of Request to SendReceive2 */ #define CIFS_BLOCKING_OP 1 /* operation can block */ #define CIFS_NON_BLOCKING 2 /* do not block waiting for credits */ #define CIFS_TIMEOUT_MASK 0x003 /* only one of above set in req */ #define CIFS_LOG_ERROR 0x010 /* log NT STATUS if non-zero */ #define CIFS_LARGE_BUF_OP 0x020 /* large request buffer */ #define CIFS_NO_RSP_BUF 0x040 /* no response buffer required */ /* Type of request operation */ #define CIFS_ECHO_OP 0x080 /* echo request */ #define CIFS_OBREAK_OP 0x0100 /* oplock break request */ #define CIFS_NEG_OP 0x0200 /* negotiate request */ #define CIFS_CP_CREATE_CLOSE_OP 0x0400 /* compound create+close request */ /* Lower bitmask values are reserved by others below. 
*/ #define CIFS_SESS_OP 0x2000 /* session setup request */ #define CIFS_OP_MASK 0x2780 /* mask request type */ #define CIFS_HAS_CREDITS 0x0400 /* already has credits */ #define CIFS_TRANSFORM_REQ 0x0800 /* transform request before sending */ #define CIFS_NO_SRV_RSP 0x1000 /* there is no server response */ /* Security Flags: indicate type of session setup needed */ #define CIFSSEC_MAY_SIGN 0x00001 #define CIFSSEC_MAY_NTLMV2 0x00004 #define CIFSSEC_MAY_KRB5 0x00008 #define CIFSSEC_MAY_SEAL 0x00040 /* not supported yet */ #define CIFSSEC_MAY_NTLMSSP 0x00080 /* raw ntlmssp with ntlmv2 */ #define CIFSSEC_MUST_SIGN 0x01001 /* note that only one of the following can be set so the result of setting MUST flags more than once will be to require use of the stronger protocol */ #define CIFSSEC_MUST_NTLMV2 0x04004 #define CIFSSEC_MUST_KRB5 0x08008 #ifdef CONFIG_CIFS_UPCALL #define CIFSSEC_MASK 0x8F08F /* flags supported if no weak allowed */ #else #define CIFSSEC_MASK 0x87087 /* flags supported if no weak allowed */ #endif /* UPCALL */ #define CIFSSEC_MUST_SEAL 0x40040 /* not supported yet */ #define CIFSSEC_MUST_NTLMSSP 0x80080 /* raw ntlmssp with ntlmv2 */ #define CIFSSEC_DEF (CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_NTLMSSP) #define CIFSSEC_MAX (CIFSSEC_MUST_NTLMV2) #define CIFSSEC_AUTH_MASK (CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_KRB5 | CIFSSEC_MAY_NTLMSSP) /* ***************************************************************** * All constants go here ***************************************************************** */ #define UID_HASH (16) /* * Note that ONE module should define _DECLARE_GLOBALS_HERE to cause the * following to be declared. */ /**************************************************************************** * Here are all the locks (spinlock, mutex, semaphore) in cifs.ko, arranged according * to the locking order. i.e. if two locks are to be held together, the lock that * appears higher in this list needs to be taken before the other. * * If you hold a lock that is lower in this list, and you need to take a higher lock * (or if you think that one of the functions that you're calling may need to), first * drop the lock you hold, pick up the higher lock, then the lower one. This will * ensure that locks are picked up only in one direction in the below table * (top to bottom). * * Also, if you expect a function to be called with a lock held, explicitly document * this in the comments on top of your function definition. * * And also, try to keep the critical sections (lock hold time) to be as minimal as * possible. Blocking / calling other functions with a lock held always increase * the risk of a possible deadlock. * * Following this rule will avoid unnecessary deadlocks, which can get really hard to * debug. Also, any new lock that you introduce, please add to this list in the correct * order. * * Please populate this list whenever you introduce new locks in your changes. Or in * case I've missed some existing locks. Please ensure that it's added in the list * based on the locking order expected. 
* * ===================================================================================== * Lock Protects Initialization fn * ===================================================================================== * vol_list_lock * vol_info->ctx_lock vol_info->ctx * cifs_sb_info->tlink_tree_lock cifs_sb_info->tlink_tree cifs_setup_cifs_sb * TCP_Server_Info-> TCP_Server_Info cifs_get_tcp_session * reconnect_mutex * TCP_Server_Info->srv_mutex TCP_Server_Info cifs_get_tcp_session * cifs_ses->session_mutex cifs_ses sesInfoAlloc * cifs_tcon * cifs_tcon->open_file_lock cifs_tcon->openFileList tconInfoAlloc * cifs_tcon->pending_opens * cifs_tcon->stat_lock cifs_tcon->bytes_read tconInfoAlloc * cifs_tcon->bytes_written * cifs_tcp_ses_lock cifs_tcp_ses_list sesInfoAlloc * GlobalMid_Lock GlobalMaxActiveXid init_cifs * GlobalCurrentXid * GlobalTotalActiveXid * TCP_Server_Info->srv_lock (anything in struct not protected by another lock and can change) * TCP_Server_Info->mid_lock TCP_Server_Info->pending_mid_q cifs_get_tcp_session * ->CurrentMid * (any changes in mid_q_entry fields) * TCP_Server_Info->req_lock TCP_Server_Info->in_flight cifs_get_tcp_session * ->credits * ->echo_credits * ->oplock_credits * ->reconnect_instance * cifs_ses->ses_lock (anything that is not protected by another lock and can change) * cifs_ses->iface_lock cifs_ses->iface_list sesInfoAlloc * ->iface_count * ->iface_last_update * cifs_ses->chan_lock cifs_ses->chans * ->chans_need_reconnect * ->chans_in_reconnect * cifs_tcon->tc_lock (anything that is not protected by another lock and can change) * cifsInodeInfo->open_file_lock cifsInodeInfo->openFileList cifs_alloc_inode * cifsInodeInfo->writers_lock cifsInodeInfo->writers cifsInodeInfo_alloc * cifsInodeInfo->lock_sem cifsInodeInfo->llist cifs_init_once * ->can_cache_brlcks * cifsInodeInfo->deferred_lock cifsInodeInfo->deferred_closes cifsInodeInfo_alloc * cached_fid->fid_mutex cifs_tcon->crfid tcon_info_alloc * cifsFileInfo->fh_mutex cifsFileInfo cifs_new_fileinfo * cifsFileInfo->file_info_lock cifsFileInfo->count cifs_new_fileinfo * ->invalidHandle initiate_cifs_search * ->oplock_break_cancelled * cifs_aio_ctx->aio_mutex cifs_aio_ctx cifs_aio_ctx_alloc ****************************************************************************/ #ifdef DECLARE_GLOBALS_HERE #define GLOBAL_EXTERN #else #define GLOBAL_EXTERN extern #endif /* * the list of TCP_Server_Info structures, ie each of the sockets * connecting our client to a distinct server (ip address), is * chained together by cifs_tcp_ses_list. The list of all our SMB * sessions (and from that the tree connections) can be found * by iterating over cifs_tcp_ses_list */ extern struct list_head cifs_tcp_ses_list; /* * This lock protects the cifs_tcp_ses_list, the list of smb sessions per * tcp session, and the list of tcon's per smb session. It also protects * the reference counters for the server, smb session, and tcon. 
* generally the locks should be taken in order tcp_ses_lock before * tcon->open_file_lock and that before file->file_info_lock since the * structure order is cifs_socket-->cifs_ses-->cifs_tcon-->cifs_file */ extern spinlock_t cifs_tcp_ses_lock; /* * Global transaction id (XID) information */ extern unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */ extern unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */ extern unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */ extern spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */ /* * Global counters, updated atomically */ extern atomic_t sesInfoAllocCount; extern atomic_t tconInfoAllocCount; extern atomic_t tcpSesNextId; extern atomic_t tcpSesAllocCount; extern atomic_t tcpSesReconnectCount; extern atomic_t tconInfoReconnectCount; /* Various Debug counters */ extern atomic_t buf_alloc_count; /* current number allocated */ extern atomic_t small_buf_alloc_count; #ifdef CONFIG_CIFS_STATS2 extern atomic_t total_buf_alloc_count; /* total allocated over all time */ extern atomic_t total_small_buf_alloc_count; extern unsigned int slow_rsp_threshold; /* number of secs before logging */ #endif /* Misc globals */ extern bool enable_oplocks; /* enable or disable oplocks */ extern bool lookupCacheEnabled; extern unsigned int global_secflags; /* if on, session setup sent with more secure ntlmssp2 challenge/resp */ extern unsigned int sign_CIFS_PDUs; /* enable smb packet signing */ extern bool enable_gcm_256; /* allow optional negotiate of strongest signing (aes-gcm-256) */ extern bool require_gcm_256; /* require use of strongest signing (aes-gcm-256) */ extern bool enable_negotiate_signing; /* request use of faster (GMAC) signing if available */ extern bool linuxExtEnabled;/*enable Linux/Unix CIFS extensions*/ extern unsigned int CIFSMaxBufSize; /* max size not including hdr */ extern unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */ extern unsigned int cifs_min_small; /* min size of small buf pool */ extern unsigned int cifs_max_pending; /* MAX requests at once to server*/ extern unsigned int dir_cache_timeout; /* max time for directory lease caching of dir */ extern bool disable_legacy_dialects; /* forbid vers=1.0 and vers=2.0 mounts */ extern atomic_t mid_count; void cifs_oplock_break(struct work_struct *work); void cifs_queue_oplock_break(struct cifsFileInfo *cfile); void smb2_deferred_work_close(struct work_struct *work); extern const struct slow_work_ops cifs_oplock_break_ops; extern struct workqueue_struct *cifsiod_wq; extern struct workqueue_struct *decrypt_wq; extern struct workqueue_struct *fileinfo_put_wq; extern struct workqueue_struct *cifsoplockd_wq; extern struct workqueue_struct *deferredclose_wq; extern struct workqueue_struct *serverclose_wq; extern __u32 cifs_lock_secret; extern mempool_t *cifs_sm_req_poolp; extern mempool_t *cifs_req_poolp; extern mempool_t *cifs_mid_poolp; /* Operations for different SMB versions */ #define SMB1_VERSION_STRING "1.0" #define SMB20_VERSION_STRING "2.0" #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY extern struct smb_version_operations smb1_operations; extern struct smb_version_values smb1_values; extern struct smb_version_operations smb20_operations; extern struct smb_version_values smb20_values; #endif /* CIFS_ALLOW_INSECURE_LEGACY */ #define SMB21_VERSION_STRING "2.1" extern struct smb_version_operations smb21_operations; extern struct smb_version_values smb21_values; #define SMBDEFAULT_VERSION_STRING "default" extern struct 
smb_version_values smbdefault_values; #define SMB3ANY_VERSION_STRING "3" extern struct smb_version_values smb3any_values; #define SMB30_VERSION_STRING "3.0" extern struct smb_version_operations smb30_operations; extern struct smb_version_values smb30_values; #define SMB302_VERSION_STRING "3.02" #define ALT_SMB302_VERSION_STRING "3.0.2" /*extern struct smb_version_operations smb302_operations;*/ /* not needed yet */ extern struct smb_version_values smb302_values; #define SMB311_VERSION_STRING "3.1.1" #define ALT_SMB311_VERSION_STRING "3.11" extern struct smb_version_operations smb311_operations; extern struct smb_version_values smb311_values; static inline char *get_security_type_str(enum securityEnum sectype) { switch (sectype) { case RawNTLMSSP: return "RawNTLMSSP"; case Kerberos: return "Kerberos"; case NTLMv2: return "NTLMv2"; default: return "Unknown"; } } static inline bool is_smb1_server(struct TCP_Server_Info *server) { return strcmp(server->vals->version_string, SMB1_VERSION_STRING) == 0; } static inline bool is_tcon_dfs(struct cifs_tcon *tcon) { /* * For SMB1, see MS-CIFS 2.4.55 SMB_COM_TREE_CONNECT_ANDX (0x75) and MS-CIFS 3.3.4.4 DFS * Subsystem Notifies That a Share Is a DFS Share. * * For SMB2+, see MS-SMB2 2.2.10 SMB2 TREE_CONNECT Response and MS-SMB2 3.3.4.14 Server * Application Updates a Share. */ if (!tcon || !tcon->ses || !tcon->ses->server) return false; return is_smb1_server(tcon->ses->server) ? tcon->Flags & SMB_SHARE_IS_IN_DFS : tcon->share_flags & (SHI1005_FLAGS_DFS | SHI1005_FLAGS_DFS_ROOT); } static inline bool cifs_is_referral_server(struct cifs_tcon *tcon, const struct dfs_info3_param *ref) { /* * Check if all targets are capable of handling DFS referrals as per * MS-DFSC 2.2.4 RESP_GET_DFS_REFERRAL. */ return is_tcon_dfs(tcon) || (ref && (ref->flags & DFSREF_REFERRAL_SERVER)); } static inline u64 cifs_flock_len(const struct file_lock *fl) { return (u64)fl->fl_end - fl->fl_start + 1; } static inline size_t ntlmssp_workstation_name_size(const struct cifs_ses *ses) { if (WARN_ON_ONCE(!ses || !ses->server)) return 0; /* * Make workstation name no more than 15 chars when using insecure dialects as some legacy * servers do require it during NTLMSSP. */ if (ses->server->dialect <= SMB20_PROT_ID) return min_t(size_t, sizeof(ses->workstation_name), RFC1001_NAME_LEN_WITH_NULL); return sizeof(ses->workstation_name); } static inline void move_cifs_info_to_smb2(struct smb2_file_all_info *dst, const FILE_ALL_INFO *src) { memcpy(dst, src, (size_t)((u8 *)&src->AccessFlags - (u8 *)src)); dst->AccessFlags = src->AccessFlags; dst->CurrentByteOffset = src->CurrentByteOffset; dst->Mode = src->Mode; dst->AlignmentRequirement = src->AlignmentRequirement; dst->FileNameLength = src->FileNameLength; } static inline int cifs_get_num_sgs(const struct smb_rqst *rqst, int num_rqst, const u8 *sig) { unsigned int len, skip; unsigned int nents = 0; unsigned long addr; size_t data_size; int i, j; /* * The first rqst has a transform header where the first 20 bytes are * not part of the encrypted blob. */ skip = 20; /* Assumes the first rqst has a transform header as the first iov. * I.e. * rqst[0].rq_iov[0] is transform header * rqst[0].rq_iov[1+] data to be encrypted/decrypted * rqst[1+].rq_iov[0+] data to be encrypted/decrypted */ for (i = 0; i < num_rqst; i++) { data_size = iov_iter_count(&rqst[i].rq_iter); /* We really don't want a mixture of pinned and unpinned pages * in the sglist. It's hard to keep track of which is what. * Instead, we convert to a BVEC-type iterator higher up. 
*/ if (data_size && WARN_ON_ONCE(user_backed_iter(&rqst[i].rq_iter))) return -EIO; /* We also don't want to have any extra refs or pins to clean * up in the sglist. */ if (data_size && WARN_ON_ONCE(iov_iter_extract_will_pin(&rqst[i].rq_iter))) return -EIO; for (j = 0; j < rqst[i].rq_nvec; j++) { struct kvec *iov = &rqst[i].rq_iov[j]; addr = (unsigned long)iov->iov_base + skip; if (unlikely(is_vmalloc_addr((void *)addr))) { len = iov->iov_len - skip; nents += DIV_ROUND_UP(offset_in_page(addr) + len, PAGE_SIZE); } else { nents++; } skip = 0; } if (data_size) nents += iov_iter_npages(&rqst[i].rq_iter, INT_MAX); } nents += DIV_ROUND_UP(offset_in_page(sig) + SMB2_SIGNATURE_SIZE, PAGE_SIZE); return nents; } /* We can not use the normal sg_set_buf() as we will sometimes pass a * stack object as buf. */ static inline void cifs_sg_set_buf(struct sg_table *sgtable, const void *buf, unsigned int buflen) { unsigned long addr = (unsigned long)buf; unsigned int off = offset_in_page(addr); addr &= PAGE_MASK; if (unlikely(is_vmalloc_addr((void *)addr))) { do { unsigned int len = min_t(unsigned int, buflen, PAGE_SIZE - off); sg_set_page(&sgtable->sgl[sgtable->nents++], vmalloc_to_page((void *)addr), len, off); off = 0; addr += PAGE_SIZE; buflen -= len; } while (buflen); } else { sg_set_page(&sgtable->sgl[sgtable->nents++], virt_to_page((void *)addr), buflen, off); } } #define CIFS_OPARMS(_cifs_sb, _tcon, _path, _da, _cd, _co, _mode) \ ((struct cifs_open_parms) { \ .tcon = _tcon, \ .path = _path, \ .desired_access = (_da), \ .disposition = (_cd), \ .create_options = cifs_create_options(_cifs_sb, (_co)), \ .mode = (_mode), \ .cifs_sb = _cifs_sb, \ }) struct smb2_compound_vars { struct cifs_open_parms oparms; struct kvec rsp_iov[MAX_COMPOUND]; struct smb_rqst rqst[MAX_COMPOUND]; struct kvec open_iov[SMB2_CREATE_IOV_SIZE]; struct kvec qi_iov; struct kvec io_iov[SMB2_IOCTL_IOV_SIZE]; struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE]; struct kvec close_iov; struct smb2_file_rename_info rename_info; struct smb2_file_link_info link_info; struct kvec ea_iov; }; static inline bool cifs_ses_exiting(struct cifs_ses *ses) { bool ret; spin_lock(&ses->ses_lock); ret = ses->ses_status == SES_EXITING; spin_unlock(&ses->ses_lock); return ret; } #endif /* _CIFS_GLOB_H */
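/*
 * Illustrative sketch, not part of cifsglob.h: one way the lock ordering
 * documented in the table above is typically honoured when walking a tcon's
 * open file list.  cifs_count_open_files_example() is a hypothetical helper
 * added purely for illustration; it assumes the declarations above are in
 * scope.
 */
static inline int cifs_count_open_files_example(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *cfile;
	int n = 0;

	/* tcon->open_file_lock protects tcon->openFileList (see lock table) */
	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(cfile, &tcon->openFileList, tlist)
		n++;
	spin_unlock(&tcon->open_file_lock);

	/*
	 * Any per-file work that needs cfile->file_info_lock (for example
	 * cifsFileInfo_get_locked()) must take it *after* open_file_lock,
	 * matching the top-to-bottom order in the table above.
	 */
	return n;
}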
// SPDX-License-Identifier: GPL-2.0-only /* * Common code for control of lockd and nfsv4 grace periods. * * Transplanted from lockd code */ #include <linux/module.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <linux/fs.h> #include <linux/filelock.h> static unsigned int grace_net_id; static DEFINE_SPINLOCK(grace_lock); /** * locks_start_grace * @net: net namespace that this lock manager belongs to * @lm: who this grace period is for * * A grace period is a period during which locks should not be given * out. Currently grace periods are only enforced by the two lock * managers (lockd and nfsd), using the locks_in_grace() function to * check when they are in a grace period. * * This function is called to start a grace period. */ void locks_start_grace(struct net *net, struct lock_manager *lm) { struct list_head *grace_list = net_generic(net, grace_net_id); spin_lock(&grace_lock); if (list_empty(&lm->list)) list_add(&lm->list, grace_list); else WARN(1, "double list_add attempt detected in net %x %s\n", net->ns.inum, (net == &init_net) ? "(init_net)" : ""); spin_unlock(&grace_lock); } EXPORT_SYMBOL_GPL(locks_start_grace); /** * locks_end_grace * @lm: who this grace period is for * * Call this function to state that the given lock manager is ready to * resume regular locking. The grace period will not end until all lock * managers that called locks_start_grace() also call locks_end_grace(). * Note that callers count on it being safe to call this more than once, * and the second call should be a no-op. */ void locks_end_grace(struct lock_manager *lm) { spin_lock(&grace_lock); list_del_init(&lm->list); spin_unlock(&grace_lock); } EXPORT_SYMBOL_GPL(locks_end_grace); static bool __state_in_grace(struct net *net, bool open) { struct list_head *grace_list = net_generic(net, grace_net_id); struct lock_manager *lm; if (!open) return !list_empty(grace_list); spin_lock(&grace_lock); list_for_each_entry(lm, grace_list, list) { if (lm->block_opens) { spin_unlock(&grace_lock); return true; } } spin_unlock(&grace_lock); return false; } /** * locks_in_grace * @net: network namespace * * Lock managers call this function to determine when it is OK for them * to answer ordinary lock requests, and when they should accept only * lock reclaims. 
*/ bool locks_in_grace(struct net *net) { return __state_in_grace(net, false); } EXPORT_SYMBOL_GPL(locks_in_grace); bool opens_in_grace(struct net *net) { return __state_in_grace(net, true); } EXPORT_SYMBOL_GPL(opens_in_grace); static int __net_init grace_init_net(struct net *net) { struct list_head *grace_list = net_generic(net, grace_net_id); INIT_LIST_HEAD(grace_list); return 0; } static void __net_exit grace_exit_net(struct net *net) { struct list_head *grace_list = net_generic(net, grace_net_id); WARN_ONCE(!list_empty(grace_list), "net %x %s: grace_list is not empty\n", net->ns.inum, __func__); } static struct pernet_operations grace_net_ops = { .init = grace_init_net, .exit = grace_exit_net, .id = &grace_net_id, .size = sizeof(struct list_head), }; static int __init init_grace(void) { return register_pernet_subsys(&grace_net_ops); } static void __exit exit_grace(void) { unregister_pernet_subsys(&grace_net_ops); } MODULE_AUTHOR("Jeff Layton <jlayton@primarydata.com>"); MODULE_LICENSE("GPL"); module_init(init_grace) module_exit(exit_grace)
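/*
 * Illustrative sketch, not part of grace.c: how a lock manager such as lockd
 * or nfsd is expected to drive the grace-period API above.  The names
 * example_lm, example_start_recovery(), example_handle_lock() and
 * example_end_recovery() are hypothetical and exist only for this sketch;
 * note the lock_manager's list head must be initialised before the first
 * locks_start_grace() call.
 */
static struct lock_manager example_lm = {
	.list = LIST_HEAD_INIT(example_lm.list),
	.block_opens = false,
};

static void example_start_recovery(struct net *net)
{
	/* Start a grace period: only lock reclaims should be honoured. */
	locks_start_grace(net, &example_lm);
}

static int example_handle_lock(struct net *net, bool is_reclaim)
{
	/* Ordinary (non-reclaim) requests are refused while in grace. */
	if (!is_reclaim && locks_in_grace(net))
		return -EAGAIN;		/* caller retries after grace ends */
	return 0;
}

static void example_end_recovery(void)
{
	/* Safe to call more than once; the second call is a no-op. */
	locks_end_grace(&example_lm);
}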
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2017-2018 HUAWEI, Inc. * https://www.huawei.com/ * Copyright (C) 2022, Alibaba Cloud */ #include "internal.h" static int erofs_fill_dentries(struct inode *dir, struct dir_context *ctx, void *dentry_blk, struct erofs_dirent *de, unsigned int nameoff, unsigned int maxsize) { const struct erofs_dirent *end = dentry_blk + nameoff; while (de < end) { const char *de_name; unsigned int de_namelen; unsigned char d_type; d_type = fs_ftype_to_dtype(de->file_type); nameoff = le16_to_cpu(de->nameoff); de_name = (char *)dentry_blk + nameoff; /* the last dirent in the block? */ if (de + 1 >= end) de_namelen = strnlen(de_name, maxsize - nameoff); else de_namelen = le16_to_cpu(de[1].nameoff) - nameoff; /* a corrupted entry is found */ if (nameoff + de_namelen > maxsize || de_namelen > EROFS_NAME_LEN) { erofs_err(dir->i_sb, "bogus dirent @ nid %llu", EROFS_I(dir)->nid); DBG_BUGON(1); return -EFSCORRUPTED; } if (!dir_emit(ctx, de_name, de_namelen, le64_to_cpu(de->nid), d_type)) return 1; ++de; ctx->pos += sizeof(struct erofs_dirent); } return 0; } static int erofs_readdir(struct file *f, struct dir_context *ctx) { struct inode *dir = file_inode(f); struct erofs_buf buf = __EROFS_BUF_INITIALIZER; struct super_block *sb = dir->i_sb; unsigned long bsz = sb->s_blocksize; const size_t dirsize = i_size_read(dir); unsigned int i = erofs_blknr(sb, ctx->pos); unsigned int ofs = erofs_blkoff(sb, ctx->pos); int err = 0; bool initial = true; buf.inode = dir; while (ctx->pos < dirsize) { struct erofs_dirent *de; unsigned int nameoff, maxsize; de = erofs_bread(&buf, i, EROFS_KMAP); if (IS_ERR(de)) { erofs_err(sb, "fail to readdir of logical block %u of nid %llu", i, EROFS_I(dir)->nid); err = PTR_ERR(de); break; } nameoff = le16_to_cpu(de->nameoff); if (nameoff < sizeof(struct erofs_dirent) || nameoff >= bsz) { erofs_err(sb, "invalid de[0].nameoff %u @ nid %llu", nameoff, EROFS_I(dir)->nid); err = -EFSCORRUPTED; break; } maxsize = min_t(unsigned int, dirsize - ctx->pos + ofs, bsz); /* search dirents at the arbitrary position */ if (initial) { initial = false; ofs = roundup(ofs, sizeof(struct erofs_dirent)); ctx->pos = erofs_pos(sb, i) + ofs; if (ofs >= nameoff) goto skip_this; } err = erofs_fill_dentries(dir, ctx, de, (void *)de + ofs, nameoff, maxsize); if (err) break; skip_this: ctx->pos = erofs_pos(sb, i) + maxsize; ++i; ofs = 0; } erofs_put_metabuf(&buf); return err < 0 ? err : 0; } const struct file_operations erofs_dir_fops = { .llseek = generic_file_llseek, .read = generic_read_dir, .iterate_shared = erofs_readdir, };
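/*
 * Illustrative sketch, not part of dir.c: how the on-disk layout lets
 * erofs_fill_dentries() recover each name length without storing it.  The
 * fixed-size dirents are packed at the start of the block and the names
 * follow, so the length of entry i is de[i + 1].nameoff minus de[i].nameoff;
 * the last entry's name runs to the end of the block and is trimmed of NUL
 * padding with strnlen().  The helper below is hypothetical and simply
 * restates that computation for one entry.
 */
static inline unsigned int
erofs_dirent_namelen_example(const struct erofs_dirent *de,
			     const struct erofs_dirent *end,
			     unsigned int maxsize)
{
	unsigned int nameoff = le16_to_cpu(de->nameoff);

	if (de + 1 >= end)	/* last dirent: upper bound, caller strnlen()s */
		return maxsize - nameoff;
	return le16_to_cpu(de[1].nameoff) - nameoff;
}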
// SPDX-License-Identifier: GPL-2.0-or-later /* Client connection-specific management code. * * Copyright (C) 2016, 2020 Red Hat, Inc. All Rights Reserved. 
* Written by David Howells (dhowells@redhat.com) * * Client connections need to be cached for a little while after they've made a * call so as to handle retransmitted DATA packets in case the server didn't * receive the final ACK or terminating ABORT we sent it. * * There are flags of relevance to the cache: * * (1) DONT_REUSE - The connection should be discarded as soon as possible and * should not be reused. This is set when an exclusive connection is used * or a call ID counter overflows. * * The caching state may only be changed if the cache lock is held. * * There are two idle client connection expiry durations. If the total number * of connections is below the reap threshold, we use the normal duration; if * it's above, we use the fast duration. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/slab.h> #include <linux/idr.h> #include <linux/timer.h> #include <linux/sched/signal.h> #include "ar-internal.h" __read_mostly unsigned int rxrpc_reap_client_connections = 900; __read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ; __read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ; static void rxrpc_activate_bundle(struct rxrpc_bundle *bundle) { atomic_inc(&bundle->active); } /* * Release a connection ID for a client connection. */ static void rxrpc_put_client_connection_id(struct rxrpc_local *local, struct rxrpc_connection *conn) { idr_remove(&local->conn_ids, conn->proto.cid >> RXRPC_CIDSHIFT); } /* * Destroy the client connection ID tree. */ static void rxrpc_destroy_client_conn_ids(struct rxrpc_local *local) { struct rxrpc_connection *conn; int id; if (!idr_is_empty(&local->conn_ids)) { idr_for_each_entry(&local->conn_ids, conn, id) { pr_err("AF_RXRPC: Leaked client conn %p {%d}\n", conn, refcount_read(&conn->ref)); } BUG(); } idr_destroy(&local->conn_ids); } /* * Allocate a connection bundle. 
*/ static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_call *call, gfp_t gfp) { static atomic_t rxrpc_bundle_id; struct rxrpc_bundle *bundle; bundle = kzalloc(sizeof(*bundle), gfp); if (bundle) { bundle->local = call->local; bundle->peer = rxrpc_get_peer(call->peer, rxrpc_peer_get_bundle); bundle->key = key_get(call->key); bundle->security = call->security; bundle->exclusive = test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags); bundle->upgrade = test_bit(RXRPC_CALL_UPGRADE, &call->flags); bundle->service_id = call->dest_srx.srx_service; bundle->security_level = call->security_level; bundle->debug_id = atomic_inc_return(&rxrpc_bundle_id); refcount_set(&bundle->ref, 1); atomic_set(&bundle->active, 1); INIT_LIST_HEAD(&bundle->waiting_calls); trace_rxrpc_bundle(bundle->debug_id, 1, rxrpc_bundle_new); write_lock(&bundle->local->rxnet->conn_lock); list_add_tail(&bundle->proc_link, &bundle->local->rxnet->bundle_proc_list); write_unlock(&bundle->local->rxnet->conn_lock); } return bundle; } struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle, enum rxrpc_bundle_trace why) { int r; __refcount_inc(&bundle->ref, &r); trace_rxrpc_bundle(bundle->debug_id, r + 1, why); return bundle; } static void rxrpc_free_bundle(struct rxrpc_bundle *bundle) { trace_rxrpc_bundle(bundle->debug_id, refcount_read(&bundle->ref), rxrpc_bundle_free); write_lock(&bundle->local->rxnet->conn_lock); list_del(&bundle->proc_link); write_unlock(&bundle->local->rxnet->conn_lock); rxrpc_put_peer(bundle->peer, rxrpc_peer_put_bundle); key_put(bundle->key); kfree(bundle); } void rxrpc_put_bundle(struct rxrpc_bundle *bundle, enum rxrpc_bundle_trace why) { unsigned int id; bool dead; int r; if (bundle) { id = bundle->debug_id; dead = __refcount_dec_and_test(&bundle->ref, &r); trace_rxrpc_bundle(id, r - 1, why); if (dead) rxrpc_free_bundle(bundle); } } /* * Get rid of outstanding client connection preallocations when a local * endpoint is destroyed. */ void rxrpc_purge_client_connections(struct rxrpc_local *local) { rxrpc_destroy_client_conn_ids(local); } /* * Allocate a client connection. 
*/ static struct rxrpc_connection * rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle) { struct rxrpc_connection *conn; struct rxrpc_local *local = bundle->local; struct rxrpc_net *rxnet = local->rxnet; int id; _enter(""); conn = rxrpc_alloc_connection(rxnet, GFP_ATOMIC | __GFP_NOWARN); if (!conn) return ERR_PTR(-ENOMEM); id = idr_alloc_cyclic(&local->conn_ids, conn, 1, 0x40000000, GFP_ATOMIC | __GFP_NOWARN); if (id < 0) { kfree(conn); return ERR_PTR(id); } refcount_set(&conn->ref, 1); conn->proto.cid = id << RXRPC_CIDSHIFT; conn->proto.epoch = local->rxnet->epoch; conn->out_clientflag = RXRPC_CLIENT_INITIATED; conn->bundle = rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_conn); conn->local = rxrpc_get_local(bundle->local, rxrpc_local_get_client_conn); conn->peer = rxrpc_get_peer(bundle->peer, rxrpc_peer_get_client_conn); conn->key = key_get(bundle->key); conn->security = bundle->security; conn->exclusive = bundle->exclusive; conn->upgrade = bundle->upgrade; conn->orig_service_id = bundle->service_id; conn->security_level = bundle->security_level; conn->state = RXRPC_CONN_CLIENT_UNSECURED; conn->service_id = conn->orig_service_id; if (conn->security == &rxrpc_no_security) conn->state = RXRPC_CONN_CLIENT; atomic_inc(&rxnet->nr_conns); write_lock(&rxnet->conn_lock); list_add_tail(&conn->proc_link, &rxnet->conn_proc_list); write_unlock(&rxnet->conn_lock); rxrpc_see_connection(conn, rxrpc_conn_new_client); atomic_inc(&rxnet->nr_client_conns); trace_rxrpc_client(conn, -1, rxrpc_client_alloc); return conn; } /* * Determine if a connection may be reused. */ static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn) { struct rxrpc_net *rxnet; int id_cursor, id, distance, limit; if (!conn) goto dont_reuse; rxnet = conn->rxnet; if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags)) goto dont_reuse; if ((conn->state != RXRPC_CONN_CLIENT_UNSECURED && conn->state != RXRPC_CONN_CLIENT) || conn->proto.epoch != rxnet->epoch) goto mark_dont_reuse; /* The IDR tree gets very expensive on memory if the connection IDs are * widely scattered throughout the number space, so we shall want to * kill off connections that, say, have an ID more than about four * times the maximum number of client conns away from the current * allocation point to try and keep the IDs concentrated. */ id_cursor = idr_get_cursor(&conn->local->conn_ids); id = conn->proto.cid >> RXRPC_CIDSHIFT; distance = id - id_cursor; if (distance < 0) distance = -distance; limit = max_t(unsigned long, atomic_read(&rxnet->nr_conns) * 4, 1024); if (distance > limit) goto mark_dont_reuse; return true; mark_dont_reuse: set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags); dont_reuse: return false; } /* * Look up the conn bundle that matches the connection parameters, adding it if * it doesn't yet exist. */ int rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp) { struct rxrpc_bundle *bundle, *candidate; struct rxrpc_local *local = call->local; struct rb_node *p, **pp, *parent; long diff; bool upgrade = test_bit(RXRPC_CALL_UPGRADE, &call->flags); _enter("{%px,%x,%u,%u}", call->peer, key_serial(call->key), call->security_level, upgrade); if (test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags)) { call->bundle = rxrpc_alloc_bundle(call, gfp); return call->bundle ? 0 : -ENOMEM; } /* First, see if the bundle is already there. 
*/ _debug("search 1"); spin_lock(&local->client_bundles_lock); p = local->client_bundles.rb_node; while (p) { bundle = rb_entry(p, struct rxrpc_bundle, local_node); #define cmp(X, Y) ((long)(X) - (long)(Y)) diff = (cmp(bundle->peer, call->peer) ?: cmp(bundle->key, call->key) ?: cmp(bundle->security_level, call->security_level) ?: cmp(bundle->upgrade, upgrade)); #undef cmp if (diff < 0) p = p->rb_left; else if (diff > 0) p = p->rb_right; else goto found_bundle; } spin_unlock(&local->client_bundles_lock); _debug("not found"); /* It wasn't. We need to add one. */ candidate = rxrpc_alloc_bundle(call, gfp); if (!candidate) return -ENOMEM; _debug("search 2"); spin_lock(&local->client_bundles_lock); pp = &local->client_bundles.rb_node; parent = NULL; while (*pp) { parent = *pp; bundle = rb_entry(parent, struct rxrpc_bundle, local_node); #define cmp(X, Y) ((long)(X) - (long)(Y)) diff = (cmp(bundle->peer, call->peer) ?: cmp(bundle->key, call->key) ?: cmp(bundle->security_level, call->security_level) ?: cmp(bundle->upgrade, upgrade)); #undef cmp if (diff < 0) pp = &(*pp)->rb_left; else if (diff > 0) pp = &(*pp)->rb_right; else goto found_bundle_free; } _debug("new bundle"); rb_link_node(&candidate->local_node, parent, pp); rb_insert_color(&candidate->local_node, &local->client_bundles); call->bundle = rxrpc_get_bundle(candidate, rxrpc_bundle_get_client_call); spin_unlock(&local->client_bundles_lock); _leave(" = B=%u [new]", call->bundle->debug_id); return 0; found_bundle_free: rxrpc_free_bundle(candidate); found_bundle: call->bundle = rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_call); rxrpc_activate_bundle(bundle); spin_unlock(&local->client_bundles_lock); _leave(" = B=%u [found]", call->bundle->debug_id); return 0; } /* * Allocate a new connection and add it into a bundle. */ static bool rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle, unsigned int slot) { struct rxrpc_connection *conn, *old; unsigned int shift = slot * RXRPC_MAXCALLS; unsigned int i; old = bundle->conns[slot]; if (old) { bundle->conns[slot] = NULL; bundle->conn_ids[slot] = 0; trace_rxrpc_client(old, -1, rxrpc_client_replace); rxrpc_put_connection(old, rxrpc_conn_put_noreuse); } conn = rxrpc_alloc_client_connection(bundle); if (IS_ERR(conn)) { bundle->alloc_error = PTR_ERR(conn); return false; } rxrpc_activate_bundle(bundle); conn->bundle_shift = shift; bundle->conns[slot] = conn; bundle->conn_ids[slot] = conn->debug_id; for (i = 0; i < RXRPC_MAXCALLS; i++) set_bit(shift + i, &bundle->avail_chans); return true; } /* * Add a connection to a bundle if there are no usable connections or we have * connections waiting for extra capacity. */ static bool rxrpc_bundle_has_space(struct rxrpc_bundle *bundle) { int slot = -1, i, usable; _enter(""); bundle->alloc_error = 0; /* See if there are any usable connections. */ usable = 0; for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) { if (rxrpc_may_reuse_conn(bundle->conns[i])) usable++; else if (slot == -1) slot = i; } if (!usable && bundle->upgrade) bundle->try_upgrade = true; if (!usable) goto alloc_conn; if (!bundle->avail_chans && !bundle->try_upgrade && usable < ARRAY_SIZE(bundle->conns)) goto alloc_conn; _leave(""); return usable; alloc_conn: return slot >= 0 ? rxrpc_add_conn_to_bundle(bundle, slot) : false; } /* * Assign a channel to the call at the front of the queue and wake the call up. * We don't increment the callNumber counter until this number has been exposed * to the world. 
*/ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn, unsigned int channel) { struct rxrpc_channel *chan = &conn->channels[channel]; struct rxrpc_bundle *bundle = conn->bundle; struct rxrpc_call *call = list_entry(bundle->waiting_calls.next, struct rxrpc_call, wait_link); u32 call_id = chan->call_counter + 1; _enter("C=%x,%u", conn->debug_id, channel); list_del_init(&call->wait_link); trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate); /* Cancel the final ACK on the previous call if it hasn't been sent yet * as the DATA packet will implicitly ACK it. */ clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags); clear_bit(conn->bundle_shift + channel, &bundle->avail_chans); rxrpc_see_call(call, rxrpc_call_see_activate_client); call->conn = rxrpc_get_connection(conn, rxrpc_conn_get_activate_call); call->cid = conn->proto.cid | channel; call->call_id = call_id; call->dest_srx.srx_service = conn->service_id; call->cong_ssthresh = call->peer->cong_ssthresh; if (call->cong_cwnd >= call->cong_ssthresh) call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE; else call->cong_mode = RXRPC_CALL_SLOW_START; chan->call_id = call_id; chan->call_debug_id = call->debug_id; chan->call = call; rxrpc_see_call(call, rxrpc_call_see_connected); trace_rxrpc_connect_call(call); call->tx_last_sent = ktime_get_real(); rxrpc_start_call_timer(call); rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_SEND_REQUEST); wake_up(&call->waitq); } /* * Remove a connection from the idle list if it's on it. */ static void rxrpc_unidle_conn(struct rxrpc_connection *conn) { if (!list_empty(&conn->cache_link)) { list_del_init(&conn->cache_link); rxrpc_put_connection(conn, rxrpc_conn_put_unidle); } } /* * Assign channels and callNumbers to waiting calls. */ static void rxrpc_activate_channels(struct rxrpc_bundle *bundle) { struct rxrpc_connection *conn; unsigned long avail, mask; unsigned int channel, slot; trace_rxrpc_client(NULL, -1, rxrpc_client_activate_chans); if (bundle->try_upgrade) mask = 1; else mask = ULONG_MAX; while (!list_empty(&bundle->waiting_calls)) { avail = bundle->avail_chans & mask; if (!avail) break; channel = __ffs(avail); clear_bit(channel, &bundle->avail_chans); slot = channel / RXRPC_MAXCALLS; conn = bundle->conns[slot]; if (!conn) break; if (bundle->try_upgrade) set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags); rxrpc_unidle_conn(conn); channel &= (RXRPC_MAXCALLS - 1); conn->act_chans |= 1 << channel; rxrpc_activate_one_channel(conn, channel); } } /* * Connect waiting channels (called from the I/O thread). */ void rxrpc_connect_client_calls(struct rxrpc_local *local) { struct rxrpc_call *call; while ((call = list_first_entry_or_null(&local->new_client_calls, struct rxrpc_call, wait_link)) ) { struct rxrpc_bundle *bundle = call->bundle; spin_lock(&local->client_call_lock); list_move_tail(&call->wait_link, &bundle->waiting_calls); spin_unlock(&local->client_call_lock); if (rxrpc_bundle_has_space(bundle)) rxrpc_activate_channels(bundle); } } /* * Note that a call, and thus a connection, is about to be exposed to the * world. */ void rxrpc_expose_client_call(struct rxrpc_call *call) { unsigned int channel = call->cid & RXRPC_CHANNELMASK; struct rxrpc_connection *conn = call->conn; struct rxrpc_channel *chan = &conn->channels[channel]; if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) { /* Mark the call ID as being used. If the callNumber counter * exceeds ~2 billion, we kill the connection after its * outstanding calls have finished so that the counter doesn't * wrap. 
*/ chan->call_counter++; if (chan->call_counter >= INT_MAX) set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags); trace_rxrpc_client(conn, channel, rxrpc_client_exposed); spin_lock(&call->peer->lock); hlist_add_head(&call->error_link, &call->peer->error_targets); spin_unlock(&call->peer->lock); } } /* * Set the reap timer. */ static void rxrpc_set_client_reap_timer(struct rxrpc_local *local) { if (!local->kill_all_client_conns) { unsigned long now = jiffies; unsigned long reap_at = now + rxrpc_conn_idle_client_expiry; if (local->rxnet->live) timer_reduce(&local->client_conn_reap_timer, reap_at); } } /* * Disconnect a client call. */ void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call *call) { struct rxrpc_connection *conn; struct rxrpc_channel *chan = NULL; struct rxrpc_local *local = bundle->local; unsigned int channel; bool may_reuse; u32 cid; _enter("c=%x", call->debug_id); /* Calls that have never actually been assigned a channel can simply be * discarded. */ conn = call->conn; if (!conn) { _debug("call is waiting"); ASSERTCMP(call->call_id, ==, 0); ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags)); list_del_init(&call->wait_link); return; } cid = call->cid; channel = cid & RXRPC_CHANNELMASK; chan = &conn->channels[channel]; trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect); if (WARN_ON(chan->call != call)) return; may_reuse = rxrpc_may_reuse_conn(conn); /* If a client call was exposed to the world, we save the result for * retransmission. * * We use a barrier here so that the call number and abort code can be * read without needing to take a lock. * * TODO: Make the incoming packet handler check this and handle * terminal retransmission without requiring access to the call. */ if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) { _debug("exposed %u,%u", call->call_id, call->abort_code); __rxrpc_disconnect_call(conn, call); if (test_and_clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) { trace_rxrpc_client(conn, channel, rxrpc_client_to_active); bundle->try_upgrade = false; if (may_reuse) rxrpc_activate_channels(bundle); } } /* See if we can pass the channel directly to another call. */ if (may_reuse && !list_empty(&bundle->waiting_calls)) { trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass); rxrpc_activate_one_channel(conn, channel); return; } /* Schedule the final ACK to be transmitted in a short while so that it * can be skipped if we find a follow-on call. The first DATA packet * of the follow on call will implicitly ACK this call. */ if (call->completion == RXRPC_CALL_SUCCEEDED && test_bit(RXRPC_CALL_EXPOSED, &call->flags)) { unsigned long final_ack_at = jiffies + 2; chan->final_ack_at = final_ack_at; smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */ set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags); rxrpc_reduce_conn_timer(conn, final_ack_at); } /* Deactivate the channel. */ chan->call = NULL; set_bit(conn->bundle_shift + channel, &conn->bundle->avail_chans); conn->act_chans &= ~(1 << channel); /* If no channels remain active, then put the connection on the idle * list for a short while. Give it a ref to stop it going away if it * becomes unbundled. */ if (!conn->act_chans) { trace_rxrpc_client(conn, channel, rxrpc_client_to_idle); conn->idle_timestamp = jiffies; rxrpc_get_connection(conn, rxrpc_conn_get_idle); list_move_tail(&conn->cache_link, &local->idle_client_conns); rxrpc_set_client_reap_timer(local); } } /* * Remove a connection from a bundle. 
*/ static void rxrpc_unbundle_conn(struct rxrpc_connection *conn) { struct rxrpc_bundle *bundle = conn->bundle; unsigned int bindex; int i; _enter("C=%x", conn->debug_id); if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK) rxrpc_process_delayed_final_acks(conn, true); bindex = conn->bundle_shift / RXRPC_MAXCALLS; if (bundle->conns[bindex] == conn) { _debug("clear slot %u", bindex); bundle->conns[bindex] = NULL; bundle->conn_ids[bindex] = 0; for (i = 0; i < RXRPC_MAXCALLS; i++) clear_bit(conn->bundle_shift + i, &bundle->avail_chans); rxrpc_put_client_connection_id(bundle->local, conn); rxrpc_deactivate_bundle(bundle); rxrpc_put_connection(conn, rxrpc_conn_put_unbundle); } } /* * Drop the active count on a bundle. */ void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle) { struct rxrpc_local *local; bool need_put = false; if (!bundle) return; local = bundle->local; if (atomic_dec_and_lock(&bundle->active, &local->client_bundles_lock)) { if (!bundle->exclusive) { _debug("erase bundle"); rb_erase(&bundle->local_node, &local->client_bundles); need_put = true; } spin_unlock(&local->client_bundles_lock); if (need_put) rxrpc_put_bundle(bundle, rxrpc_bundle_put_discard); } } /* * Clean up a dead client connection. */ void rxrpc_kill_client_conn(struct rxrpc_connection *conn) { struct rxrpc_local *local = conn->local; struct rxrpc_net *rxnet = local->rxnet; _enter("C=%x", conn->debug_id); trace_rxrpc_client(conn, -1, rxrpc_client_cleanup); atomic_dec(&rxnet->nr_client_conns); rxrpc_put_client_connection_id(local, conn); } /* * Discard expired client connections from the idle list. Each conn in the * idle list has been exposed and holds an extra ref because of that. * * This may be called from conn setup or from a work item so cannot be * considered non-reentrant. */ void rxrpc_discard_expired_client_conns(struct rxrpc_local *local) { struct rxrpc_connection *conn; unsigned long expiry, conn_expires_at, now; unsigned int nr_conns; _enter(""); /* We keep an estimate of what the number of conns ought to be after * we've discarded some so that we don't overdo the discarding. */ nr_conns = atomic_read(&local->rxnet->nr_client_conns); next: conn = list_first_entry_or_null(&local->idle_client_conns, struct rxrpc_connection, cache_link); if (!conn) return; if (!local->kill_all_client_conns) { /* If the number of connections is over the reap limit, we * expedite discard by reducing the expiry timeout. We must, * however, have at least a short grace period to be able to do * final-ACK or ABORT retransmission. */ expiry = rxrpc_conn_idle_client_expiry; if (nr_conns > rxrpc_reap_client_connections) expiry = rxrpc_conn_idle_client_fast_expiry; if (conn->local->service_closed) expiry = rxrpc_closed_conn_expiry * HZ; conn_expires_at = conn->idle_timestamp + expiry; now = jiffies; if (time_after(conn_expires_at, now)) goto not_yet_expired; } atomic_dec(&conn->active); trace_rxrpc_client(conn, -1, rxrpc_client_discard); list_del_init(&conn->cache_link); rxrpc_unbundle_conn(conn); /* Drop the ->cache_link ref */ rxrpc_put_connection(conn, rxrpc_conn_put_discard_idle); nr_conns--; goto next; not_yet_expired: /* The connection at the front of the queue hasn't yet expired, so * schedule the work item for that point if we discarded something. * * We don't worry if the work item is already scheduled - it can look * after rescheduling itself at a later time. We could cancel it, but * then things get messier. 
*/ _debug("not yet"); if (!local->kill_all_client_conns) timer_reduce(&local->client_conn_reap_timer, conn_expires_at); _leave(""); } /* * Clean up the client connections on a local endpoint. */ void rxrpc_clean_up_local_conns(struct rxrpc_local *local) { struct rxrpc_connection *conn; _enter(""); local->kill_all_client_conns = true; del_timer_sync(&local->client_conn_reap_timer); while ((conn = list_first_entry_or_null(&local->idle_client_conns, struct rxrpc_connection, cache_link))) { list_del_init(&conn->cache_link); atomic_dec(&conn->active); trace_rxrpc_client(conn, -1, rxrpc_client_discard); rxrpc_unbundle_conn(conn); rxrpc_put_connection(conn, rxrpc_conn_put_local_dead); } _leave(" [culled]"); }
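In rxrpc_activate_channels() above, every connection in a bundle contributes RXRPC_MAXCALLS consecutive bits to bundle->avail_chans, so a single __ffs() on the mask both picks the next free channel and, via divide/modulo, identifies which connection it belongs to. The standalone sketch below (illustrative constants and helper names, not rxrpc code) shows that bookkeeping in isolation.

/* Illustrative, standalone sketch of the bundle channel bitmask used by
 * rxrpc_activate_channels(); constants and names are assumptions. */
#include <stdio.h>
#include <strings.h>	/* ffs() */

#define CALLS_PER_CONN		4	/* plays the role of RXRPC_MAXCALLS */
#define CONNS_PER_BUNDLE	4

static unsigned int avail_chans;	/* one bit per (connection, channel) */

static void mark_channel_free(int slot, int channel)
{
	avail_chans |= 1u << (slot * CALLS_PER_CONN + channel);
}

/* Pick the lowest free channel, as the kernel code does with __ffs(). */
static int pick_channel(int *slot, int *channel)
{
	int bit;

	if (!avail_chans)
		return -1;			/* nothing available */
	bit = ffs((int)avail_chans) - 1;	/* ffs() is 1-based */
	avail_chans &= ~(1u << bit);		/* claim the channel */
	*slot = bit / CALLS_PER_CONN;		/* which connection in the bundle */
	*channel = bit % CALLS_PER_CONN;	/* which channel on that connection */
	return 0;
}

int main(void)
{
	int slot, channel;

	mark_channel_free(0, 2);
	mark_channel_free(1, 0);
	while (pick_channel(&slot, &channel) == 0)
		printf("activate conn slot %d, channel %d\n", slot, channel);
	return 0;
}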
// SPDX-License-Identifier: GPL-2.0-or-later /* * IPVS: Source Hashing scheduling module * * Authors: Wensong Zhang <wensong@gnuchina.org> * * Changes: */ /* * The sh algorithm is to select server by the hash key of source IP * address. The pseudo code is as follows: * * n <- servernode[src_ip]; * if (n is dead) OR * (n is overloaded) or (n.weight <= 0) then * return NULL; * * return n; * * Note that servernode is a 256-bucket hash table that maps the hash * index derived from the packet source IP address to the current server * array. If the sh scheduler is used in a cache cluster, it is good to * combine it with the cache_bypass feature. When the statically assigned * server is dead or overloaded, the load balancer can bypass the cache * server and send requests to the original server directly. * * The weight destination attribute can be used to control the * distribution of connections to the destinations in servernode. The * greater the weight, the more connections the destination * will receive.
* */ #define KMSG_COMPONENT "IPVS" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/ip.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <net/ip_vs.h> #include <net/tcp.h> #include <linux/udp.h> #include <linux/sctp.h> /* * IPVS SH bucket */ struct ip_vs_sh_bucket { struct ip_vs_dest __rcu *dest; /* real server (cache) */ }; /* * for IPVS SH entry hash table */ #ifndef CONFIG_IP_VS_SH_TAB_BITS #define CONFIG_IP_VS_SH_TAB_BITS 8 #endif #define IP_VS_SH_TAB_BITS CONFIG_IP_VS_SH_TAB_BITS #define IP_VS_SH_TAB_SIZE (1 << IP_VS_SH_TAB_BITS) #define IP_VS_SH_TAB_MASK (IP_VS_SH_TAB_SIZE - 1) struct ip_vs_sh_state { struct rcu_head rcu_head; struct ip_vs_sh_bucket buckets[IP_VS_SH_TAB_SIZE]; }; /* Helper function to determine if server is unavailable */ static inline bool is_unavailable(struct ip_vs_dest *dest) { return atomic_read(&dest->weight) <= 0 || dest->flags & IP_VS_DEST_F_OVERLOAD; } /* * Returns hash value for IPVS SH entry */ static inline unsigned int ip_vs_sh_hashkey(int af, const union nf_inet_addr *addr, __be16 port, unsigned int offset) { __be32 addr_fold = addr->ip; #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) addr_fold = addr->ip6[0]^addr->ip6[1]^ addr->ip6[2]^addr->ip6[3]; #endif return (offset + hash_32(ntohs(port) + ntohl(addr_fold), IP_VS_SH_TAB_BITS)) & IP_VS_SH_TAB_MASK; } /* * Get ip_vs_dest associated with supplied parameters. */ static inline struct ip_vs_dest * ip_vs_sh_get(struct ip_vs_service *svc, struct ip_vs_sh_state *s, const union nf_inet_addr *addr, __be16 port) { unsigned int hash = ip_vs_sh_hashkey(svc->af, addr, port, 0); struct ip_vs_dest *dest = rcu_dereference(s->buckets[hash].dest); return (!dest || is_unavailable(dest)) ? NULL : dest; } /* As ip_vs_sh_get, but with fallback if selected server is unavailable * * The fallback strategy loops around the table starting from a "random" * point (in fact, it is chosen to be the original hash value to make the * algorithm deterministic) to find a new server. */ static inline struct ip_vs_dest * ip_vs_sh_get_fallback(struct ip_vs_service *svc, struct ip_vs_sh_state *s, const union nf_inet_addr *addr, __be16 port) { unsigned int offset, roffset; unsigned int hash, ihash; struct ip_vs_dest *dest; /* first try the dest it's supposed to go to */ ihash = ip_vs_sh_hashkey(svc->af, addr, port, 0); dest = rcu_dereference(s->buckets[ihash].dest); if (!dest) return NULL; if (!is_unavailable(dest)) return dest; IP_VS_DBG_BUF(6, "SH: selected unavailable server %s:%d, reselecting", IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port)); /* if the original dest is unavailable, loop around the table * starting from ihash to find a new dest */ for (offset = 0; offset < IP_VS_SH_TAB_SIZE; offset++) { roffset = (offset + ihash) % IP_VS_SH_TAB_SIZE; hash = ip_vs_sh_hashkey(svc->af, addr, port, roffset); dest = rcu_dereference(s->buckets[hash].dest); if (!dest) break; if (!is_unavailable(dest)) return dest; IP_VS_DBG_BUF(6, "SH: selected unavailable " "server %s:%d (offset %d), reselecting", IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port), roffset); } return NULL; } /* * Assign all the hash buckets of the specified table with the service. 
*/ static int ip_vs_sh_reassign(struct ip_vs_sh_state *s, struct ip_vs_service *svc) { int i; struct ip_vs_sh_bucket *b; struct list_head *p; struct ip_vs_dest *dest; int d_count; bool empty; b = &s->buckets[0]; p = &svc->destinations; empty = list_empty(p); d_count = 0; for (i=0; i<IP_VS_SH_TAB_SIZE; i++) { dest = rcu_dereference_protected(b->dest, 1); if (dest) ip_vs_dest_put(dest); if (empty) RCU_INIT_POINTER(b->dest, NULL); else { if (p == &svc->destinations) p = p->next; dest = list_entry(p, struct ip_vs_dest, n_list); ip_vs_dest_hold(dest); RCU_INIT_POINTER(b->dest, dest); IP_VS_DBG_BUF(6, "assigned i: %d dest: %s weight: %d\n", i, IP_VS_DBG_ADDR(dest->af, &dest->addr), atomic_read(&dest->weight)); /* Don't move to next dest until filling weight */ if (++d_count >= atomic_read(&dest->weight)) { p = p->next; d_count = 0; } } b++; } return 0; } /* * Flush all the hash buckets of the specified table. */ static void ip_vs_sh_flush(struct ip_vs_sh_state *s) { int i; struct ip_vs_sh_bucket *b; struct ip_vs_dest *dest; b = &s->buckets[0]; for (i=0; i<IP_VS_SH_TAB_SIZE; i++) { dest = rcu_dereference_protected(b->dest, 1); if (dest) { ip_vs_dest_put(dest); RCU_INIT_POINTER(b->dest, NULL); } b++; } } static int ip_vs_sh_init_svc(struct ip_vs_service *svc) { struct ip_vs_sh_state *s; /* allocate the SH table for this service */ s = kzalloc(sizeof(struct ip_vs_sh_state), GFP_KERNEL); if (s == NULL) return -ENOMEM; svc->sched_data = s; IP_VS_DBG(6, "SH hash table (memory=%zdbytes) allocated for " "current service\n", sizeof(struct ip_vs_sh_bucket)*IP_VS_SH_TAB_SIZE); /* assign the hash buckets with current dests */ ip_vs_sh_reassign(s, svc); return 0; } static void ip_vs_sh_done_svc(struct ip_vs_service *svc) { struct ip_vs_sh_state *s = svc->sched_data; /* got to clean up hash buckets here */ ip_vs_sh_flush(s); /* release the table itself */ kfree_rcu(s, rcu_head); IP_VS_DBG(6, "SH hash table (memory=%zdbytes) released\n", sizeof(struct ip_vs_sh_bucket)*IP_VS_SH_TAB_SIZE); } static int ip_vs_sh_dest_changed(struct ip_vs_service *svc, struct ip_vs_dest *dest) { struct ip_vs_sh_state *s = svc->sched_data; /* assign the hash buckets with the updated service */ ip_vs_sh_reassign(s, svc); return 0; } /* Helper function to get port number */ static inline __be16 ip_vs_sh_get_port(const struct sk_buff *skb, struct ip_vs_iphdr *iph) { __be16 _ports[2], *ports; /* At this point we know that we have a valid packet of some kind. * Because ICMP packets are only guaranteed to have the first 8 * bytes, let's just grab the ports. Fortunately they're in the * same position for all three of the protocols we care about. */ switch (iph->protocol) { case IPPROTO_TCP: case IPPROTO_UDP: case IPPROTO_SCTP: ports = skb_header_pointer(skb, iph->len, sizeof(_ports), &_ports); if (unlikely(!ports)) return 0; if (likely(!ip_vs_iph_inverse(iph))) return ports[0]; else return ports[1]; default: return 0; } } /* * Source Hashing scheduling */ static struct ip_vs_dest * ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb, struct ip_vs_iphdr *iph) { struct ip_vs_dest *dest; struct ip_vs_sh_state *s; __be16 port = 0; const union nf_inet_addr *hash_addr; hash_addr = ip_vs_iph_inverse(iph) ? 
&iph->daddr : &iph->saddr; IP_VS_DBG(6, "ip_vs_sh_schedule(): Scheduling...\n"); if (svc->flags & IP_VS_SVC_F_SCHED_SH_PORT) port = ip_vs_sh_get_port(skb, iph); s = (struct ip_vs_sh_state *) svc->sched_data; if (svc->flags & IP_VS_SVC_F_SCHED_SH_FALLBACK) dest = ip_vs_sh_get_fallback(svc, s, hash_addr, port); else dest = ip_vs_sh_get(svc, s, hash_addr, port); if (!dest) { ip_vs_scheduler_err(svc, "no destination available"); return NULL; } IP_VS_DBG_BUF(6, "SH: source IP address %s --> server %s:%d\n", IP_VS_DBG_ADDR(svc->af, hash_addr), IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port)); return dest; } /* * IPVS SH Scheduler structure */ static struct ip_vs_scheduler ip_vs_sh_scheduler = { .name = "sh", .refcnt = ATOMIC_INIT(0), .module = THIS_MODULE, .n_list = LIST_HEAD_INIT(ip_vs_sh_scheduler.n_list), .init_service = ip_vs_sh_init_svc, .done_service = ip_vs_sh_done_svc, .add_dest = ip_vs_sh_dest_changed, .del_dest = ip_vs_sh_dest_changed, .upd_dest = ip_vs_sh_dest_changed, .schedule = ip_vs_sh_schedule, }; static int __init ip_vs_sh_init(void) { return register_ip_vs_scheduler(&ip_vs_sh_scheduler); } static void __exit ip_vs_sh_cleanup(void) { unregister_ip_vs_scheduler(&ip_vs_sh_scheduler); synchronize_rcu(); } module_init(ip_vs_sh_init); module_exit(ip_vs_sh_cleanup); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("ipvs source hashing scheduler");
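ip_vs_sh_reassign() above gives each destination a run of consecutive buckets proportional to its weight, and ip_vs_sh_get_fallback() probes further buckets deterministically when the hashed destination is unavailable. The self-contained sketch below (assumed names and a simple multiplicative hash; no RCU and no struct ip_vs_dest plumbing) reproduces both steps.

/* Illustrative sketch of the source-hash table fill and fallback lookup.
 * Not IPVS code: plain names and integer weights only. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define TAB_BITS 8
#define TAB_SIZE (1u << TAB_BITS)
#define TAB_MASK (TAB_SIZE - 1)

struct dest { const char *name; int weight; bool overloaded; };

static struct dest *buckets[TAB_SIZE];

/* hash_32()-style fold of a source address into a bucket index. */
static unsigned int sh_hash(uint32_t saddr, unsigned int offset)
{
	return (offset + (saddr * 2654435761u >> (32 - TAB_BITS))) & TAB_MASK;
}

/* Fill the buckets round-robin, giving each dest 'weight' consecutive slots. */
static void sh_reassign(struct dest *dests, int n)
{
	int i, d = 0, d_count = 0;

	for (i = 0; i < TAB_SIZE; i++) {
		buckets[i] = &dests[d];
		if (++d_count >= dests[d].weight) {
			d = (d + 1) % n;
			d_count = 0;
		}
	}
}

static bool unavailable(const struct dest *dest)
{
	return dest->weight <= 0 || dest->overloaded;
}

/* Lookup with fallback: probe successive offsets until a usable dest. */
static struct dest *sh_get_fallback(uint32_t saddr)
{
	unsigned int ihash = sh_hash(saddr, 0), offset;

	for (offset = 0; offset < TAB_SIZE; offset++) {
		struct dest *dest = buckets[sh_hash(saddr, (offset + ihash) % TAB_SIZE)];

		if (dest && !unavailable(dest))
			return dest;
	}
	return NULL;
}

int main(void)
{
	struct dest dests[] = {
		{ "10.0.0.1", 2, false },
		{ "10.0.0.2", 1, true  },	/* overloaded: fallback skips it */
	};

	sh_reassign(dests, 2);
	printf("client 192.0.2.7 -> %s\n", sh_get_fallback(0xC0000207)->name);
	return 0;
}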
// SPDX-License-Identifier: GPL-2.0-or-later /* Driver for Philips webcam Functions that send various control messages to the webcam, including video modes. (C) 1999-2003 Nemosoft Unv. (C) 2004-2006 Luc Saillard (luc@saillard.org) (C) 2011 Hans de Goede <hdegoede@redhat.com> NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx driver and thus may have bugs that are not present in the original version. Please send bug reports and support requests to <luc@saillard.org>. The decompression routines have been implemented by reverse-engineering the Nemosoft binary pwcx module. Caveat emptor. */ /* Changes 2001/08/03 Alvarado Added methods for changing white balance and red/green gains */ /* Control functions for the cam; brightness, contrast, video mode, etc.
*/ #ifdef __KERNEL__ #include <linux/uaccess.h> #endif #include <asm/errno.h> #include "pwc.h" #include "pwc-kiara.h" #include "pwc-timon.h" #include "pwc-dec1.h" #include "pwc-dec23.h" /* Selectors for status controls used only in this file */ #define GET_STATUS_B00 0x0B00 #define SENSOR_TYPE_FORMATTER1 0x0C00 #define GET_STATUS_3000 0x3000 #define READ_RAW_Y_MEAN_FORMATTER 0x3100 #define SET_POWER_SAVE_MODE_FORMATTER 0x3200 #define MIRROR_IMAGE_FORMATTER 0x3300 #define LED_FORMATTER 0x3400 #define LOWLIGHT 0x3500 #define GET_STATUS_3600 0x3600 #define SENSOR_TYPE_FORMATTER2 0x3700 #define GET_STATUS_3800 0x3800 #define GET_STATUS_4000 0x4000 #define GET_STATUS_4100 0x4100 /* Get */ #define CTL_STATUS_4200 0x4200 /* [GS] 1 */ /* Formatters for the Video Endpoint controls [GS]ET_EP_STREAM_CTL */ #define VIDEO_OUTPUT_CONTROL_FORMATTER 0x0100 static const char *size2name[PSZ_MAX] = { "subQCIF", "QSIF", "QCIF", "SIF", "CIF", "VGA", }; /********/ /* Entries for the Nala (645/646) camera; the Nala doesn't have compression preferences, so you either get compressed or non-compressed streams. An alternate value of 0 means this mode is not available at all. */ #define PWC_FPS_MAX_NALA 8 struct Nala_table_entry { char alternate; /* USB alternate setting */ int compressed; /* Compressed yes/no */ unsigned char mode[3]; /* precomputed mode table */ }; static unsigned int Nala_fps_vector[PWC_FPS_MAX_NALA] = { 4, 5, 7, 10, 12, 15, 20, 24 }; static struct Nala_table_entry Nala_table[PSZ_MAX][PWC_FPS_MAX_NALA] = { #include "pwc-nala.h" }; /****************************************************************************/ static int recv_control_msg(struct pwc_device *pdev, u8 request, u16 value, int recv_count) { int rc; rc = usb_control_msg(pdev->udev, usb_rcvctrlpipe(pdev->udev, 0), request, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, pdev->vcinterface, pdev->ctrl_buf, recv_count, USB_CTRL_GET_TIMEOUT); if (rc < 0) PWC_ERROR("recv_control_msg error %d req %02x val %04x\n", rc, request, value); return rc; } static inline int send_video_command(struct pwc_device *pdev, int index, const unsigned char *buf, int buflen) { int rc; memcpy(pdev->ctrl_buf, buf, buflen); rc = usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0), SET_EP_STREAM_CTL, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, VIDEO_OUTPUT_CONTROL_FORMATTER, index, pdev->ctrl_buf, buflen, USB_CTRL_SET_TIMEOUT); if (rc >= 0) memcpy(pdev->cmd_buf, buf, buflen); else PWC_ERROR("send_video_command error %d\n", rc); return rc; } int send_control_msg(struct pwc_device *pdev, u8 request, u16 value, void *buf, int buflen) { return usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0), request, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, pdev->vcinterface, buf, buflen, USB_CTRL_SET_TIMEOUT); } static int set_video_mode_Nala(struct pwc_device *pdev, int size, int pixfmt, int frames, int *compression, int send_to_cam) { int fps, ret = 0; struct Nala_table_entry *pEntry; int frames2frames[31] = { /* closest match of framerate */ 0, 0, 0, 0, 4, /* 0-4 */ 5, 5, 7, 7, 10, /* 5-9 */ 10, 10, 12, 12, 15, /* 10-14 */ 15, 15, 15, 20, 20, /* 15-19 */ 20, 20, 20, 24, 24, /* 20-24 */ 24, 24, 24, 24, 24, /* 25-29 */ 24 /* 30 */ }; int frames2table[31] = { 0, 0, 0, 0, 0, /* 0-4 */ 1, 1, 1, 2, 2, /* 5-9 */ 3, 3, 4, 4, 4, /* 10-14 */ 5, 5, 5, 5, 5, /* 15-19 */ 6, 6, 6, 6, 7, /* 20-24 */ 7, 7, 7, 7, 7, /* 25-29 */ 7 /* 30 */ }; if (size < 0 || size > PSZ_CIF) return -EINVAL; if (frames < 4) frames = 4; else if (size > PSZ_QCIF && frames > 
15) frames = 15; else if (frames > 25) frames = 25; frames = frames2frames[frames]; fps = frames2table[frames]; pEntry = &Nala_table[size][fps]; if (pEntry->alternate == 0) return -EINVAL; if (send_to_cam) ret = send_video_command(pdev, pdev->vendpoint, pEntry->mode, 3); if (ret < 0) return ret; if (pEntry->compressed && pixfmt == V4L2_PIX_FMT_YUV420) pwc_dec1_init(pdev, pEntry->mode); /* Set various parameters */ pdev->pixfmt = pixfmt; pdev->vframes = frames; pdev->valternate = pEntry->alternate; pdev->width = pwc_image_sizes[size][0]; pdev->height = pwc_image_sizes[size][1]; pdev->frame_size = (pdev->width * pdev->height * 3) / 2; if (pEntry->compressed) { if (pdev->release < 5) { /* 4 fold compression */ pdev->vbandlength = 528; pdev->frame_size /= 4; } else { pdev->vbandlength = 704; pdev->frame_size /= 3; } } else pdev->vbandlength = 0; /* Let pwc-if.c:isoc_init know we don't support higher compression */ *compression = 3; return 0; } static int set_video_mode_Timon(struct pwc_device *pdev, int size, int pixfmt, int frames, int *compression, int send_to_cam) { const struct Timon_table_entry *pChoose; int fps, ret = 0; if (size >= PSZ_MAX || *compression < 0 || *compression > 3) return -EINVAL; if (frames < 5) frames = 5; else if (size == PSZ_VGA && frames > 15) frames = 15; else if (frames > 30) frames = 30; fps = (frames / 5) - 1; /* Find a supported framerate with progressively higher compression */ do { pChoose = &Timon_table[size][fps][*compression]; if (pChoose->alternate != 0) break; (*compression)++; } while (*compression <= 3); if (pChoose->alternate == 0) return -ENOENT; /* Not supported. */ if (send_to_cam) ret = send_video_command(pdev, pdev->vendpoint, pChoose->mode, 13); if (ret < 0) return ret; if (pChoose->bandlength > 0 && pixfmt == V4L2_PIX_FMT_YUV420) pwc_dec23_init(pdev, pChoose->mode); /* Set various parameters */ pdev->pixfmt = pixfmt; pdev->vframes = (fps + 1) * 5; pdev->valternate = pChoose->alternate; pdev->width = pwc_image_sizes[size][0]; pdev->height = pwc_image_sizes[size][1]; pdev->vbandlength = pChoose->bandlength; if (pChoose->bandlength > 0) pdev->frame_size = (pChoose->bandlength * pdev->height) / 4; else pdev->frame_size = (pdev->width * pdev->height * 12) / 8; return 0; } static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int pixfmt, int frames, int *compression, int send_to_cam) { const struct Kiara_table_entry *pChoose; int fps, ret = 0; if (size >= PSZ_MAX || *compression < 0 || *compression > 3) return -EINVAL; if (frames < 5) frames = 5; else if (size == PSZ_VGA && frames > 15) frames = 15; else if (frames > 30) frames = 30; fps = (frames / 5) - 1; /* Find a supported framerate with progressively higher compression */ do { pChoose = &Kiara_table[size][fps][*compression]; if (pChoose->alternate != 0) break; (*compression)++; } while (*compression <= 3); if (pChoose->alternate == 0) return -ENOENT; /* Not supported. 
*/ /* Firmware bug: video endpoint is 5, but commands are sent to endpoint 4 */ if (send_to_cam) ret = send_video_command(pdev, 4, pChoose->mode, 12); if (ret < 0) return ret; if (pChoose->bandlength > 0 && pixfmt == V4L2_PIX_FMT_YUV420) pwc_dec23_init(pdev, pChoose->mode); /* All set and go */ pdev->pixfmt = pixfmt; pdev->vframes = (fps + 1) * 5; pdev->valternate = pChoose->alternate; pdev->width = pwc_image_sizes[size][0]; pdev->height = pwc_image_sizes[size][1]; pdev->vbandlength = pChoose->bandlength; if (pdev->vbandlength > 0) pdev->frame_size = (pdev->vbandlength * pdev->height) / 4; else pdev->frame_size = (pdev->width * pdev->height * 12) / 8; PWC_TRACE("frame_size=%d, vframes=%d, vsize=%d, vbandlength=%d\n", pdev->frame_size, pdev->vframes, size, pdev->vbandlength); return 0; } int pwc_set_video_mode(struct pwc_device *pdev, int width, int height, int pixfmt, int frames, int *compression, int send_to_cam) { int ret, size; PWC_DEBUG_FLOW("set_video_mode(%dx%d @ %d, pixfmt %08x).\n", width, height, frames, pixfmt); size = pwc_get_size(pdev, width, height); PWC_TRACE("decode_size = %d.\n", size); if (DEVICE_USE_CODEC1(pdev->type)) { ret = set_video_mode_Nala(pdev, size, pixfmt, frames, compression, send_to_cam); } else if (DEVICE_USE_CODEC3(pdev->type)) { ret = set_video_mode_Kiara(pdev, size, pixfmt, frames, compression, send_to_cam); } else { ret = set_video_mode_Timon(pdev, size, pixfmt, frames, compression, send_to_cam); } if (ret < 0) { PWC_ERROR("Failed to set video mode %s@%d fps; return code = %d\n", size2name[size], frames, ret); return ret; } pdev->frame_total_size = pdev->frame_size + pdev->frame_header_size + pdev->frame_trailer_size; PWC_DEBUG_SIZE("Set resolution to %dx%d\n", pdev->width, pdev->height); return 0; } static unsigned int pwc_get_fps_Nala(struct pwc_device *pdev, unsigned int index, unsigned int size) { unsigned int i; for (i = 0; i < PWC_FPS_MAX_NALA; i++) { if (Nala_table[size][i].alternate) { if (index--==0) return Nala_fps_vector[i]; } } return 0; } static unsigned int pwc_get_fps_Kiara(struct pwc_device *pdev, unsigned int index, unsigned int size) { unsigned int i; for (i = 0; i < PWC_FPS_MAX_KIARA; i++) { if (Kiara_table[size][i][3].alternate) { if (index--==0) return Kiara_fps_vector[i]; } } return 0; } static unsigned int pwc_get_fps_Timon(struct pwc_device *pdev, unsigned int index, unsigned int size) { unsigned int i; for (i=0; i < PWC_FPS_MAX_TIMON; i++) { if (Timon_table[size][i][3].alternate) { if (index--==0) return Timon_fps_vector[i]; } } return 0; } unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned int size) { unsigned int ret; if (DEVICE_USE_CODEC1(pdev->type)) { ret = pwc_get_fps_Nala(pdev, index, size); } else if (DEVICE_USE_CODEC3(pdev->type)) { ret = pwc_get_fps_Kiara(pdev, index, size); } else { ret = pwc_get_fps_Timon(pdev, index, size); } return ret; } int pwc_get_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data) { int ret; ret = recv_control_msg(pdev, request, value, 1); if (ret < 0) return ret; *data = pdev->ctrl_buf[0]; return 0; } int pwc_set_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, u8 data) { int ret; pdev->ctrl_buf[0] = data; ret = send_control_msg(pdev, request, value, pdev->ctrl_buf, 1); if (ret < 0) return ret; return 0; } int pwc_get_s8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data) { int ret; ret = recv_control_msg(pdev, request, value, 1); if (ret < 0) return ret; *data = ((s8 *)pdev->ctrl_buf)[0]; return 0; } int pwc_get_u16_ctrl(struct 
pwc_device *pdev, u8 request, u16 value, int *data) { int ret; ret = recv_control_msg(pdev, request, value, 2); if (ret < 0) return ret; *data = (pdev->ctrl_buf[1] << 8) | pdev->ctrl_buf[0]; return 0; } int pwc_set_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, u16 data) { int ret; pdev->ctrl_buf[0] = data & 0xff; pdev->ctrl_buf[1] = data >> 8; ret = send_control_msg(pdev, request, value, pdev->ctrl_buf, 2); if (ret < 0) return ret; return 0; } int pwc_button_ctrl(struct pwc_device *pdev, u16 value) { int ret; ret = send_control_msg(pdev, SET_STATUS_CTL, value, NULL, 0); if (ret < 0) return ret; return 0; } /* POWER */ void pwc_camera_power(struct pwc_device *pdev, int power) { int r; if (!pdev->power_save) return; if (pdev->type < 675 || (pdev->type < 730 && pdev->release < 6)) return; /* Not supported by Nala or Timon < release 6 */ if (power) pdev->ctrl_buf[0] = 0x00; /* active */ else pdev->ctrl_buf[0] = 0xFF; /* power save */ r = send_control_msg(pdev, SET_STATUS_CTL, SET_POWER_SAVE_MODE_FORMATTER, pdev->ctrl_buf, 1); if (r < 0) PWC_ERROR("Failed to power %s camera (%d)\n", power ? "on" : "off", r); } int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value) { int r; if (pdev->type < 730) return 0; on_value /= 100; off_value /= 100; if (on_value < 0) on_value = 0; if (on_value > 0xff) on_value = 0xff; if (off_value < 0) off_value = 0; if (off_value > 0xff) off_value = 0xff; pdev->ctrl_buf[0] = on_value; pdev->ctrl_buf[1] = off_value; r = send_control_msg(pdev, SET_STATUS_CTL, LED_FORMATTER, pdev->ctrl_buf, 2); if (r < 0) PWC_ERROR("Failed to set LED on/off time (%d)\n", r); return r; } #ifdef CONFIG_USB_PWC_DEBUG int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor) { int ret, request; if (pdev->type < 675) request = SENSOR_TYPE_FORMATTER1; else if (pdev->type < 730) return -1; /* The Vesta series doesn't have this call */ else request = SENSOR_TYPE_FORMATTER2; ret = recv_control_msg(pdev, GET_STATUS_CTL, request, 1); if (ret < 0) return ret; if (pdev->type < 675) *sensor = pdev->ctrl_buf[0] | 0x100; else *sensor = pdev->ctrl_buf[0]; return 0; } #endif
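The frame-size arithmetic in the set_video_mode_*() helpers above follows one pattern: an uncompressed YUV 4:2:0 frame is width * height * 3 / 2 bytes, the Nala path divides that by 4 or 3 depending on the firmware release, and the Timon/Kiara path derives it from the band length as bandlength * height / 4. A minimal standalone recomputation of those values (illustrative numbers only, not driver code):

/* Illustrative recomputation of the pwc frame sizes set above. */
#include <stdio.h>

/* Uncompressed YUV 4:2:0: 12 bits per pixel. */
static int yuv420_frame_size(int width, int height)
{
	return width * height * 3 / 2;
}

int main(void)
{
	int width = 352, height = 288;			/* CIF */
	int raw = yuv420_frame_size(width, height);

	printf("raw CIF frame:          %d bytes\n", raw);
	printf("Nala, release < 5:      %d bytes\n", raw / 4);	/* 4-fold compression */
	printf("Nala, release >= 5:     %d bytes\n", raw / 3);	/* 3-fold compression */
	printf("Timon/Kiara, band 704:  %d bytes\n", 704 * height / 4);
	return 0;
}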
// SPDX-License-Identifier: GPL-2.0-only /* * File: sysctl.c * * Phonet /proc/sys/net/phonet interface implementation * * Copyright (C) 2008 Nokia Corporation. * * Author: Rémi Denis-Courmont */ #include <linux/seqlock.h> #include <linux/sysctl.h> #include <linux/errno.h> #include <linux/init.h> #include <net/sock.h> #include <linux/phonet.h> #include <net/phonet/phonet.h> #define DYNAMIC_PORT_MIN 0x40 #define DYNAMIC_PORT_MAX 0x7f static DEFINE_SEQLOCK(local_port_range_lock); static int local_port_range_min[2] = {0, 0}; static int local_port_range_max[2] = {1023, 1023}; static int local_port_range[2] = {DYNAMIC_PORT_MIN, DYNAMIC_PORT_MAX}; static struct ctl_table_header *phonet_table_hrd; static void set_local_port_range(int range[2]) { write_seqlock(&local_port_range_lock); local_port_range[0] = range[0]; local_port_range[1] = range[1]; write_sequnlock(&local_port_range_lock); } void phonet_get_local_port_range(int *min, int *max) { unsigned int seq; do { seq = read_seqbegin(&local_port_range_lock); if (min) *min = local_port_range[0]; if (max) *max = local_port_range[1]; } while (read_seqretry(&local_port_range_lock, seq)); } static int proc_local_port_range(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int ret; int range[2] = {local_port_range[0], local_port_range[1]}; struct ctl_table tmp = { .data = &range, .maxlen = sizeof(range), .mode = table->mode, .extra1 = &local_port_range_min, .extra2 = &local_port_range_max, }; ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); if (write && ret == 0) { if (range[1] < range[0]) ret = -EINVAL; else set_local_port_range(range); } return ret; } static struct ctl_table phonet_table[] = { { .procname = "local_port_range", .data = &local_port_range, .maxlen = sizeof(local_port_range), .mode = 0644, .proc_handler = proc_local_port_range, }, { } }; int __init phonet_sysctl_init(void) { phonet_table_hrd = register_net_sysctl(&init_net, "net/phonet", phonet_table); return phonet_table_hrd == NULL ? -ENOMEM : 0; } void phonet_sysctl_exit(void) { unregister_net_sysctl_table(phonet_table_hrd); }
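phonet_get_local_port_range() above uses the read side of a seqlock: it snapshots the sequence count, copies the range, and retries if a writer ran in between. The sketch below is a simplified userspace analogue of that retry protocol using C11 atomics; it is a single-threaded demonstration of the shape of the algorithm, not a replacement for the kernel's seqlock, which also provides writer serialization and the necessary memory barriers.

/* Simplified analogue of the seqlock pattern protecting local_port_range:
 * writers bump an even/odd sequence counter around the update, readers
 * retry if they saw an odd count or the count changed under them. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq;
static int port_range[2] = { 0x40, 0x7f };

static void set_port_range(int min, int max)
{
	atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* odd: write in progress */
	port_range[0] = min;
	port_range[1] = max;
	atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* even again */
}

static void get_port_range(int *min, int *max)
{
	unsigned int start;

	do {
		start = atomic_load_explicit(&seq, memory_order_acquire);
		*min = port_range[0];
		*max = port_range[1];
	} while ((start & 1) ||
		 start != atomic_load_explicit(&seq, memory_order_acquire));
}

int main(void)
{
	int min, max;

	set_port_range(0x40, 0x7f);
	get_port_range(&min, &max);
	printf("local_port_range: %#x-%#x\n", min, max);
	return 0;
}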
// SPDX-License-Identifier: GPL-2.0 // Generated by scripts/atomic/gen-atomic-instrumented.sh // DO NOT MODIFY THIS FILE DIRECTLY /* * This file provides atomic operations with explicit instrumentation (e.g. * KASAN, KCSAN), which should be used unless it is necessary to avoid * instrumentation. Where it is necessary to avoid instrumentation, the * raw_atomic*() operations should be used.
*/ #ifndef _LINUX_ATOMIC_INSTRUMENTED_H #define _LINUX_ATOMIC_INSTRUMENTED_H #include <linux/build_bug.h> #include <linux/compiler.h> #include <linux/instrumented.h> /** * atomic_read() - atomic load with relaxed ordering * @v: pointer to atomic_t * * Atomically loads the value of @v with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_read() there. * * Return: The value loaded from @v. */ static __always_inline int atomic_read(const atomic_t *v) { instrument_atomic_read(v, sizeof(*v)); return raw_atomic_read(v); } /** * atomic_read_acquire() - atomic load with acquire ordering * @v: pointer to atomic_t * * Atomically loads the value of @v with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic_read_acquire() there. * * Return: The value loaded from @v. */ static __always_inline int atomic_read_acquire(const atomic_t *v) { instrument_atomic_read(v, sizeof(*v)); return raw_atomic_read_acquire(v); } /** * atomic_set() - atomic set with relaxed ordering * @v: pointer to atomic_t * @i: int value to assign * * Atomically sets @v to @i with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_set() there. * * Return: Nothing. */ static __always_inline void atomic_set(atomic_t *v, int i) { instrument_atomic_write(v, sizeof(*v)); raw_atomic_set(v, i); } /** * atomic_set_release() - atomic set with release ordering * @v: pointer to atomic_t * @i: int value to assign * * Atomically sets @v to @i with release ordering. * * Unsafe to use in noinstr code; use raw_atomic_set_release() there. * * Return: Nothing. */ static __always_inline void atomic_set_release(atomic_t *v, int i) { kcsan_release(); instrument_atomic_write(v, sizeof(*v)); raw_atomic_set_release(v, i); } /** * atomic_add() - atomic add with relaxed ordering * @i: int value to add * @v: pointer to atomic_t * * Atomically updates @v to (@v + @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_add() there. * * Return: Nothing. */ static __always_inline void atomic_add(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); raw_atomic_add(i, v); } /** * atomic_add_return() - atomic add with full ordering * @i: int value to add * @v: pointer to atomic_t * * Atomically updates @v to (@v + @i) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_add_return() there. * * Return: The updated value of @v. */ static __always_inline int atomic_add_return(int i, atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_add_return(i, v); } /** * atomic_add_return_acquire() - atomic add with acquire ordering * @i: int value to add * @v: pointer to atomic_t * * Atomically updates @v to (@v + @i) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic_add_return_acquire() there. * * Return: The updated value of @v. */ static __always_inline int atomic_add_return_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_add_return_acquire(i, v); } /** * atomic_add_return_release() - atomic add with release ordering * @i: int value to add * @v: pointer to atomic_t * * Atomically updates @v to (@v + @i) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic_add_return_release() there. * * Return: The updated value of @v. 
*/ static __always_inline int atomic_add_return_release(int i, atomic_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_add_return_release(i, v); } /** * atomic_add_return_relaxed() - atomic add with relaxed ordering * @i: int value to add * @v: pointer to atomic_t * * Atomically updates @v to (@v + @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_add_return_relaxed() there. * * Return: The updated value of @v. */ static __always_inline int atomic_add_return_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_add_return_relaxed(i, v); } /** * atomic_fetch_add() - atomic add with full ordering * @i: int value to add * @v: pointer to atomic_t * * Atomically updates @v to (@v + @i) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_add() there. * * Return: The original value of @v. */ static __always_inline int atomic_fetch_add(int i, atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_add(i, v); } /** * atomic_fetch_add_acquire() - atomic add with acquire ordering * @i: int value to add * @v: pointer to atomic_t * * Atomically updates @v to (@v + @i) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_add_acquire() there. * * Return: The original value of @v. */ static __always_inline int atomic_fetch_add_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_add_acquire(i, v); } /** * atomic_fetch_add_release() - atomic add with release ordering * @i: int value to add * @v: pointer to atomic_t * * Atomically updates @v to (@v + @i) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_add_release() there. * * Return: The original value of @v. */ static __always_inline int atomic_fetch_add_release(int i, atomic_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_add_release(i, v); } /** * atomic_fetch_add_relaxed() - atomic add with relaxed ordering * @i: int value to add * @v: pointer to atomic_t * * Atomically updates @v to (@v + @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_add_relaxed() there. * * Return: The original value of @v. */ static __always_inline int atomic_fetch_add_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_add_relaxed(i, v); } /** * atomic_sub() - atomic subtract with relaxed ordering * @i: int value to subtract * @v: pointer to atomic_t * * Atomically updates @v to (@v - @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_sub() there. * * Return: Nothing. */ static __always_inline void atomic_sub(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); raw_atomic_sub(i, v); } /** * atomic_sub_return() - atomic subtract with full ordering * @i: int value to subtract * @v: pointer to atomic_t * * Atomically updates @v to (@v - @i) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_sub_return() there. * * Return: The updated value of @v. */ static __always_inline int atomic_sub_return(int i, atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_sub_return(i, v); } /** * atomic_sub_return_acquire() - atomic subtract with acquire ordering * @i: int value to subtract * @v: pointer to atomic_t * * Atomically updates @v to (@v - @i) with acquire ordering. 
* * Unsafe to use in noinstr code; use raw_atomic_sub_return_acquire() there. * * Return: The updated value of @v. */ static __always_inline int atomic_sub_return_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_sub_return_acquire(i, v); } /** * atomic_sub_return_release() - atomic subtract with release ordering * @i: int value to subtract * @v: pointer to atomic_t * * Atomically updates @v to (@v - @i) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic_sub_return_release() there. * * Return: The updated value of @v. */ static __always_inline int atomic_sub_return_release(int i, atomic_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_sub_return_release(i, v); } /** * atomic_sub_return_relaxed() - atomic subtract with relaxed ordering * @i: int value to subtract * @v: pointer to atomic_t * * Atomically updates @v to (@v - @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_sub_return_relaxed() there. * * Return: The updated value of @v. */ static __always_inline int atomic_sub_return_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_sub_return_relaxed(i, v); } /** * atomic_fetch_sub() - atomic subtract with full ordering * @i: int value to subtract * @v: pointer to atomic_t * * Atomically updates @v to (@v - @i) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_sub() there. * * Return: The original value of @v. */ static __always_inline int atomic_fetch_sub(int i, atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_sub(i, v); } /** * atomic_fetch_sub_acquire() - atomic subtract with acquire ordering * @i: int value to subtract * @v: pointer to atomic_t * * Atomically updates @v to (@v - @i) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_sub_acquire() there. * * Return: The original value of @v. */ static __always_inline int atomic_fetch_sub_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_sub_acquire(i, v); } /** * atomic_fetch_sub_release() - atomic subtract with release ordering * @i: int value to subtract * @v: pointer to atomic_t * * Atomically updates @v to (@v - @i) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_sub_release() there. * * Return: The original value of @v. */ static __always_inline int atomic_fetch_sub_release(int i, atomic_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_sub_release(i, v); } /** * atomic_fetch_sub_relaxed() - atomic subtract with relaxed ordering * @i: int value to subtract * @v: pointer to atomic_t * * Atomically updates @v to (@v - @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_sub_relaxed() there. * * Return: The original value of @v. */ static __always_inline int atomic_fetch_sub_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_sub_relaxed(i, v); } /** * atomic_inc() - atomic increment with relaxed ordering * @v: pointer to atomic_t * * Atomically updates @v to (@v + 1) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_inc() there. * * Return: Nothing. 
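* * Illustrative use (editorial sketch; stats->rx_dropped is a hypothetical atomic_t): a plain event counter that needs no ordering can simply do atomic_inc(&stats->rx_dropped);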
*/ static __always_inline void atomic_inc(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); raw_atomic_inc(v); } /** * atomic_inc_return() - atomic increment with full ordering * @v: pointer to atomic_t * * Atomically updates @v to (@v + 1) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_inc_return() there. * * Return: The updated value of @v. */ static __always_inline int atomic_inc_return(atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_inc_return(v); } /** * atomic_inc_return_acquire() - atomic increment with acquire ordering * @v: pointer to atomic_t * * Atomically updates @v to (@v + 1) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic_inc_return_acquire() there. * * Return: The updated value of @v. */ static __always_inline int atomic_inc_return_acquire(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_inc_return_acquire(v); } /** * atomic_inc_return_release() - atomic increment with release ordering * @v: pointer to atomic_t * * Atomically updates @v to (@v + 1) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic_inc_return_release() there. * * Return: The updated value of @v. */ static __always_inline int atomic_inc_return_release(atomic_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_inc_return_release(v); } /** * atomic_inc_return_relaxed() - atomic increment with relaxed ordering * @v: pointer to atomic_t * * Atomically updates @v to (@v + 1) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_inc_return_relaxed() there. * * Return: The updated value of @v. */ static __always_inline int atomic_inc_return_relaxed(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_inc_return_relaxed(v); } /** * atomic_fetch_inc() - atomic increment with full ordering * @v: pointer to atomic_t * * Atomically updates @v to (@v + 1) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_inc() there. * * Return: The original value of @v. */ static __always_inline int atomic_fetch_inc(atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_inc(v); } /** * atomic_fetch_inc_acquire() - atomic increment with acquire ordering * @v: pointer to atomic_t * * Atomically updates @v to (@v + 1) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_inc_acquire() there. * * Return: The original value of @v. */ static __always_inline int atomic_fetch_inc_acquire(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_inc_acquire(v); } /** * atomic_fetch_inc_release() - atomic increment with release ordering * @v: pointer to atomic_t * * Atomically updates @v to (@v + 1) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_inc_release() there. * * Return: The original value of @v. */ static __always_inline int atomic_fetch_inc_release(atomic_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_inc_release(v); } /** * atomic_fetch_inc_relaxed() - atomic increment with relaxed ordering * @v: pointer to atomic_t * * Atomically updates @v to (@v + 1) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_inc_relaxed() there. * * Return: The original value of @v. 
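* * Illustrative use (editorial sketch; next_id is a hypothetical atomic_t): allocating monotonically increasing IDs where only atomicity matters: id = atomic_fetch_inc_relaxed(&next_id); the call returns the pre-increment value, so concurrent callers get distinct IDs.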
*/ static __always_inline int atomic_fetch_inc_relaxed(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_inc_relaxed(v); } /** * atomic_dec() - atomic decrement with relaxed ordering * @v: pointer to atomic_t * * Atomically updates @v to (@v - 1) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_dec() there. * * Return: Nothing. */ static __always_inline void atomic_dec(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); raw_atomic_dec(v); } /** * atomic_dec_return() - atomic decrement with full ordering * @v: pointer to atomic_t * * Atomically updates @v to (@v - 1) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_dec_return() there. * * Return: The updated value of @v. */ static __always_inline int atomic_dec_return(atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_dec_return(v); } /** * atomic_dec_return_acquire() - atomic decrement with acquire ordering * @v: pointer to atomic_t * * Atomically updates @v to (@v - 1) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic_dec_return_acquire() there. * * Return: The updated value of @v. */ static __always_inline int atomic_dec_return_acquire(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_dec_return_acquire(v); } /** * atomic_dec_return_release() - atomic decrement with release ordering * @v: pointer to atomic_t * * Atomically updates @v to (@v - 1) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic_dec_return_release() there. * * Return: The updated value of @v. */ static __always_inline int atomic_dec_return_release(atomic_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_dec_return_release(v); } /** * atomic_dec_return_relaxed() - atomic decrement with relaxed ordering * @v: pointer to atomic_t * * Atomically updates @v to (@v - 1) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_dec_return_relaxed() there. * * Return: The updated value of @v. */ static __always_inline int atomic_dec_return_relaxed(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_dec_return_relaxed(v); } /** * atomic_fetch_dec() - atomic decrement with full ordering * @v: pointer to atomic_t * * Atomically updates @v to (@v - 1) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_dec() there. * * Return: The original value of @v. */ static __always_inline int atomic_fetch_dec(atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_dec(v); } /** * atomic_fetch_dec_acquire() - atomic decrement with acquire ordering * @v: pointer to atomic_t * * Atomically updates @v to (@v - 1) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_dec_acquire() there. * * Return: The original value of @v. */ static __always_inline int atomic_fetch_dec_acquire(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_dec_acquire(v); } /** * atomic_fetch_dec_release() - atomic decrement with release ordering * @v: pointer to atomic_t * * Atomically updates @v to (@v - 1) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_dec_release() there. * * Return: The original value of @v. 
*/ static __always_inline int atomic_fetch_dec_release(atomic_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_dec_release(v); } /** * atomic_fetch_dec_relaxed() - atomic decrement with relaxed ordering * @v: pointer to atomic_t * * Atomically updates @v to (@v - 1) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_dec_relaxed() there. * * Return: The original value of @v. */ static __always_inline int atomic_fetch_dec_relaxed(atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_dec_relaxed(v); } /** * atomic_and() - atomic bitwise AND with relaxed ordering * @i: int value * @v: pointer to atomic_t * * Atomically updates @v to (@v & @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_and() there. * * Return: Nothing. */ static __always_inline void atomic_and(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); raw_atomic_and(i, v); } /** * atomic_fetch_and() - atomic bitwise AND with full ordering * @i: int value * @v: pointer to atomic_t * * Atomically updates @v to (@v & @i) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_and() there. * * Return: The original value of @v. */ static __always_inline int atomic_fetch_and(int i, atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_and(i, v); } /** * atomic_fetch_and_acquire() - atomic bitwise AND with acquire ordering * @i: int value * @v: pointer to atomic_t * * Atomically updates @v to (@v & @i) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_and_acquire() there. * * Return: The original value of @v. */ static __always_inline int atomic_fetch_and_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_and_acquire(i, v); } /** * atomic_fetch_and_release() - atomic bitwise AND with release ordering * @i: int value * @v: pointer to atomic_t * * Atomically updates @v to (@v & @i) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_and_release() there. * * Return: The original value of @v. */ static __always_inline int atomic_fetch_and_release(int i, atomic_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_and_release(i, v); } /** * atomic_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering * @i: int value * @v: pointer to atomic_t * * Atomically updates @v to (@v & @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_and_relaxed() there. * * Return: The original value of @v. */ static __always_inline int atomic_fetch_and_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_and_relaxed(i, v); } /** * atomic_andnot() - atomic bitwise AND NOT with relaxed ordering * @i: int value * @v: pointer to atomic_t * * Atomically updates @v to (@v & ~@i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_andnot() there. * * Return: Nothing. */ static __always_inline void atomic_andnot(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); raw_atomic_andnot(i, v); } /** * atomic_fetch_andnot() - atomic bitwise AND NOT with full ordering * @i: int value * @v: pointer to atomic_t * * Atomically updates @v to (@v & ~@i) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_andnot() there. * * Return: The original value of @v. 
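* * Illustrative use (editorial sketch; MY_FLAG_PENDING and state are hypothetical): clearing a flag while learning whether it was set: if (atomic_fetch_andnot(MY_FLAG_PENDING, &state) & MY_FLAG_PENDING) the caller observed, and consumed, a pending event.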
*/ static __always_inline int atomic_fetch_andnot(int i, atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_andnot(i, v); } /** * atomic_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering * @i: int value * @v: pointer to atomic_t * * Atomically updates @v to (@v & ~@i) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_andnot_acquire() there. * * Return: The original value of @v. */ static __always_inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_andnot_acquire(i, v); } /** * atomic_fetch_andnot_release() - atomic bitwise AND NOT with release ordering * @i: int value * @v: pointer to atomic_t * * Atomically updates @v to (@v & ~@i) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_andnot_release() there. * * Return: The original value of @v. */ static __always_inline int atomic_fetch_andnot_release(int i, atomic_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_andnot_release(i, v); } /** * atomic_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering * @i: int value * @v: pointer to atomic_t * * Atomically updates @v to (@v & ~@i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_andnot_relaxed() there. * * Return: The original value of @v. */ static __always_inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_andnot_relaxed(i, v); } /** * atomic_or() - atomic bitwise OR with relaxed ordering * @i: int value * @v: pointer to atomic_t * * Atomically updates @v to (@v | @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_or() there. * * Return: Nothing. */ static __always_inline void atomic_or(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); raw_atomic_or(i, v); } /** * atomic_fetch_or() - atomic bitwise OR with full ordering * @i: int value * @v: pointer to atomic_t * * Atomically updates @v to (@v | @i) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_or() there. * * Return: The original value of @v. */ static __always_inline int atomic_fetch_or(int i, atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_or(i, v); } /** * atomic_fetch_or_acquire() - atomic bitwise OR with acquire ordering * @i: int value * @v: pointer to atomic_t * * Atomically updates @v to (@v | @i) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_or_acquire() there. * * Return: The original value of @v. */ static __always_inline int atomic_fetch_or_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_or_acquire(i, v); } /** * atomic_fetch_or_release() - atomic bitwise OR with release ordering * @i: int value * @v: pointer to atomic_t * * Atomically updates @v to (@v | @i) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_or_release() there. * * Return: The original value of @v. */ static __always_inline int atomic_fetch_or_release(int i, atomic_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_or_release(i, v); } /** * atomic_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering * @i: int value * @v: pointer to atomic_t * * Atomically updates @v to (@v | @i) with relaxed ordering. 
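* * Illustrative use (editorial sketch; MY_FLAG_ARMED and state are hypothetical): setting a flag while detecting the first setter: if (!(atomic_fetch_or_relaxed(MY_FLAG_ARMED, &state) & MY_FLAG_ARMED)) this caller was the one that armed it.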
* * Unsafe to use in noinstr code; use raw_atomic_fetch_or_relaxed() there. * * Return: The original value of @v. */ static __always_inline int atomic_fetch_or_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_or_relaxed(i, v); } /** * atomic_xor() - atomic bitwise XOR with relaxed ordering * @i: int value * @v: pointer to atomic_t * * Atomically updates @v to (@v ^ @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_xor() there. * * Return: Nothing. */ static __always_inline void atomic_xor(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); raw_atomic_xor(i, v); } /** * atomic_fetch_xor() - atomic bitwise XOR with full ordering * @i: int value * @v: pointer to atomic_t * * Atomically updates @v to (@v ^ @i) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_xor() there. * * Return: The original value of @v. */ static __always_inline int atomic_fetch_xor(int i, atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_xor(i, v); } /** * atomic_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering * @i: int value * @v: pointer to atomic_t * * Atomically updates @v to (@v ^ @i) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_xor_acquire() there. * * Return: The original value of @v. */ static __always_inline int atomic_fetch_xor_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_xor_acquire(i, v); } /** * atomic_fetch_xor_release() - atomic bitwise XOR with release ordering * @i: int value * @v: pointer to atomic_t * * Atomically updates @v to (@v ^ @i) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_xor_release() there. * * Return: The original value of @v. */ static __always_inline int atomic_fetch_xor_release(int i, atomic_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_xor_release(i, v); } /** * atomic_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering * @i: int value * @v: pointer to atomic_t * * Atomically updates @v to (@v ^ @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_fetch_xor_relaxed() there. * * Return: The original value of @v. */ static __always_inline int atomic_fetch_xor_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_xor_relaxed(i, v); } /** * atomic_xchg() - atomic exchange with full ordering * @v: pointer to atomic_t * @new: int value to assign * * Atomically updates @v to @new with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_xchg() there. * * Return: The original value of @v. */ static __always_inline int atomic_xchg(atomic_t *v, int new) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_xchg(v, new); } /** * atomic_xchg_acquire() - atomic exchange with acquire ordering * @v: pointer to atomic_t * @new: int value to assign * * Atomically updates @v to @new with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic_xchg_acquire() there. * * Return: The original value of @v. */ static __always_inline int atomic_xchg_acquire(atomic_t *v, int new) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_xchg_acquire(v, new); } /** * atomic_xchg_release() - atomic exchange with release ordering * @v: pointer to atomic_t * @new: int value to assign * * Atomically updates @v to @new with release ordering. 
* * Unsafe to use in noinstr code; use raw_atomic_xchg_release() there. * * Return: The original value of @v. */ static __always_inline int atomic_xchg_release(atomic_t *v, int new) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_xchg_release(v, new); } /** * atomic_xchg_relaxed() - atomic exchange with relaxed ordering * @v: pointer to atomic_t * @new: int value to assign * * Atomically updates @v to @new with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_xchg_relaxed() there. * * Return: The original value of @v. */ static __always_inline int atomic_xchg_relaxed(atomic_t *v, int new) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_xchg_relaxed(v, new); } /** * atomic_cmpxchg() - atomic compare and exchange with full ordering * @v: pointer to atomic_t * @old: int value to compare with * @new: int value to assign * * If (@v == @old), atomically updates @v to @new with full ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic_cmpxchg() there. * * Return: The original value of @v. */ static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_cmpxchg(v, old, new); } /** * atomic_cmpxchg_acquire() - atomic compare and exchange with acquire ordering * @v: pointer to atomic_t * @old: int value to compare with * @new: int value to assign * * If (@v == @old), atomically updates @v to @new with acquire ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic_cmpxchg_acquire() there. * * Return: The original value of @v. */ static __always_inline int atomic_cmpxchg_acquire(atomic_t *v, int old, int new) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_cmpxchg_acquire(v, old, new); } /** * atomic_cmpxchg_release() - atomic compare and exchange with release ordering * @v: pointer to atomic_t * @old: int value to compare with * @new: int value to assign * * If (@v == @old), atomically updates @v to @new with release ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic_cmpxchg_release() there. * * Return: The original value of @v. */ static __always_inline int atomic_cmpxchg_release(atomic_t *v, int old, int new) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_cmpxchg_release(v, old, new); } /** * atomic_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering * @v: pointer to atomic_t * @old: int value to compare with * @new: int value to assign * * If (@v == @old), atomically updates @v to @new with relaxed ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic_cmpxchg_relaxed() there. * * Return: The original value of @v. */ static __always_inline int atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_cmpxchg_relaxed(v, old, new); } /** * atomic_try_cmpxchg() - atomic compare and exchange with full ordering * @v: pointer to atomic_t * @old: pointer to int value to compare with * @new: int value to assign * * If (@v == @old), atomically updates @v to @new with full ordering. * Otherwise, @v is not modified, @old is updated to the current value of @v, * and relaxed ordering is provided. 
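* * Illustrative use (editorial sketch; counter is a hypothetical atomic_t): a saturating increment built from a retry loop, relying on @old being refreshed on failure: int old = atomic_read(&counter); do { if (old == INT_MAX) break; } while (!atomic_try_cmpxchg(&counter, &old, old + 1));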
* * Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg() there. * * Return: @true if the exchange occurred, @false otherwise. */ static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); instrument_atomic_read_write(old, sizeof(*old)); return raw_atomic_try_cmpxchg(v, old, new); } /** * atomic_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering * @v: pointer to atomic_t * @old: pointer to int value to compare with * @new: int value to assign * * If (@v == @old), atomically updates @v to @new with acquire ordering. * Otherwise, @v is not modified, @old is updated to the current value of @v, * and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_acquire() there. * * Return: @true if the exchange occurred, @false otherwise. */ static __always_inline bool atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) { instrument_atomic_read_write(v, sizeof(*v)); instrument_atomic_read_write(old, sizeof(*old)); return raw_atomic_try_cmpxchg_acquire(v, old, new); } /** * atomic_try_cmpxchg_release() - atomic compare and exchange with release ordering * @v: pointer to atomic_t * @old: pointer to int value to compare with * @new: int value to assign * * If (@v == @old), atomically updates @v to @new with release ordering. * Otherwise, @v is not modified, @old is updated to the current value of @v, * and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_release() there. * * Return: @true if the exchange occurred, @false otherwise. */ static __always_inline bool atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); instrument_atomic_read_write(old, sizeof(*old)); return raw_atomic_try_cmpxchg_release(v, old, new); } /** * atomic_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering * @v: pointer to atomic_t * @old: pointer to int value to compare with * @new: int value to assign * * If (@v == @old), atomically updates @v to @new with relaxed ordering. * Otherwise, @v is not modified, @old is updated to the current value of @v, * and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_relaxed() there. * * Return: @true if the exchange occurred, @false otherwise. */ static __always_inline bool atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) { instrument_atomic_read_write(v, sizeof(*v)); instrument_atomic_read_write(old, sizeof(*old)); return raw_atomic_try_cmpxchg_relaxed(v, old, new); } /** * atomic_sub_and_test() - atomic subtract and test if zero with full ordering * @i: int value to subtract * @v: pointer to atomic_t * * Atomically updates @v to (@v - @i) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_sub_and_test() there. * * Return: @true if the resulting value of @v is zero, @false otherwise. */ static __always_inline bool atomic_sub_and_test(int i, atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_sub_and_test(i, v); } /** * atomic_dec_and_test() - atomic decrement and test if zero with full ordering * @v: pointer to atomic_t * * Atomically updates @v to (@v - 1) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_dec_and_test() there. * * Return: @true if the resulting value of @v is zero, @false otherwise.
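* * Illustrative use (editorial sketch; obj and obj->refs are hypothetical, and real reference counts should normally use refcount_t): dropping a simple reference: if (atomic_dec_and_test(&obj->refs)) kfree(obj);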
*/ static __always_inline bool atomic_dec_and_test(atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_dec_and_test(v); } /** * atomic_inc_and_test() - atomic increment and test if zero with full ordering * @v: pointer to atomic_t * * Atomically updates @v to (@v + 1) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_inc_and_test() there. * * Return: @true if the resulting value of @v is zero, @false otherwise. */ static __always_inline bool atomic_inc_and_test(atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_inc_and_test(v); } /** * atomic_add_negative() - atomic add and test if negative with full ordering * @i: int value to add * @v: pointer to atomic_t * * Atomically updates @v to (@v + @i) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_add_negative() there. * * Return: @true if the resulting value of @v is negative, @false otherwise. */ static __always_inline bool atomic_add_negative(int i, atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_add_negative(i, v); } /** * atomic_add_negative_acquire() - atomic add and test if negative with acquire ordering * @i: int value to add * @v: pointer to atomic_t * * Atomically updates @v to (@v + @i) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic_add_negative_acquire() there. * * Return: @true if the resulting value of @v is negative, @false otherwise. */ static __always_inline bool atomic_add_negative_acquire(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_add_negative_acquire(i, v); } /** * atomic_add_negative_release() - atomic add and test if negative with release ordering * @i: int value to add * @v: pointer to atomic_t * * Atomically updates @v to (@v + @i) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic_add_negative_release() there. * * Return: @true if the resulting value of @v is negative, @false otherwise. */ static __always_inline bool atomic_add_negative_release(int i, atomic_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_add_negative_release(i, v); } /** * atomic_add_negative_relaxed() - atomic add and test if negative with relaxed ordering * @i: int value to add * @v: pointer to atomic_t * * Atomically updates @v to (@v + @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_add_negative_relaxed() there. * * Return: @true if the resulting value of @v is negative, @false otherwise. */ static __always_inline bool atomic_add_negative_relaxed(int i, atomic_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_add_negative_relaxed(i, v); } /** * atomic_fetch_add_unless() - atomic add unless value with full ordering * @v: pointer to atomic_t * @a: int value to add * @u: int value to compare with * * If (@v != @u), atomically updates @v to (@v + @a) with full ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic_fetch_add_unless() there. * * Return: The original value of @v. 
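* * Illustrative use (editorial sketch; obj->refs is hypothetical): taking a reference only if the object is still live: if (atomic_fetch_add_unless(&obj->refs, 1, 0) == 0) the object was already dead and must not be used.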
*/ static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_fetch_add_unless(v, a, u); } /** * atomic_add_unless() - atomic add unless value with full ordering * @v: pointer to atomic_t * @a: int value to add * @u: int value to compare with * * If (@v != @u), atomically updates @v to (@v + @a) with full ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic_add_unless() there. * * Return: @true if @v was updated, @false otherwise. */ static __always_inline bool atomic_add_unless(atomic_t *v, int a, int u) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_add_unless(v, a, u); } /** * atomic_inc_not_zero() - atomic increment unless zero with full ordering * @v: pointer to atomic_t * * If (@v != 0), atomically updates @v to (@v + 1) with full ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic_inc_not_zero() there. * * Return: @true if @v was updated, @false otherwise. */ static __always_inline bool atomic_inc_not_zero(atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_inc_not_zero(v); } /** * atomic_inc_unless_negative() - atomic increment unless negative with full ordering * @v: pointer to atomic_t * * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic_inc_unless_negative() there. * * Return: @true if @v was updated, @false otherwise. */ static __always_inline bool atomic_inc_unless_negative(atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_inc_unless_negative(v); } /** * atomic_dec_unless_positive() - atomic decrement unless positive with full ordering * @v: pointer to atomic_t * * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic_dec_unless_positive() there. * * Return: @true if @v was updated, @false otherwise. */ static __always_inline bool atomic_dec_unless_positive(atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_dec_unless_positive(v); } /** * atomic_dec_if_positive() - atomic decrement if positive with full ordering * @v: pointer to atomic_t * * If (@v > 0), atomically updates @v to (@v - 1) with full ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic_dec_if_positive() there. * * Return: The old value of (@v - 1), regardless of whether @v was updated. */ static __always_inline int atomic_dec_if_positive(atomic_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_dec_if_positive(v); } /** * atomic64_read() - atomic load with relaxed ordering * @v: pointer to atomic64_t * * Atomically loads the value of @v with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic64_read() there. * * Return: The value loaded from @v. */ static __always_inline s64 atomic64_read(const atomic64_t *v) { instrument_atomic_read(v, sizeof(*v)); return raw_atomic64_read(v); } /** * atomic64_read_acquire() - atomic load with acquire ordering * @v: pointer to atomic64_t * * Atomically loads the value of @v with acquire ordering. 
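* * Illustrative use (editorial sketch; rec and rec->ready are hypothetical, with rec->ready an atomic64_t): a writer fills *rec and then publishes it with atomic64_set_release(&rec->ready, 1); a reader that sees atomic64_read_acquire(&rec->ready) == 1 is guaranteed to also observe the writer's earlier stores to *rec.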
* * Unsafe to use in noinstr code; use raw_atomic64_read_acquire() there. * * Return: The value loaded from @v. */ static __always_inline s64 atomic64_read_acquire(const atomic64_t *v) { instrument_atomic_read(v, sizeof(*v)); return raw_atomic64_read_acquire(v); } /** * atomic64_set() - atomic set with relaxed ordering * @v: pointer to atomic64_t * @i: s64 value to assign * * Atomically sets @v to @i with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic64_set() there. * * Return: Nothing. */ static __always_inline void atomic64_set(atomic64_t *v, s64 i) { instrument_atomic_write(v, sizeof(*v)); raw_atomic64_set(v, i); } /** * atomic64_set_release() - atomic set with release ordering * @v: pointer to atomic64_t * @i: s64 value to assign * * Atomically sets @v to @i with release ordering. * * Unsafe to use in noinstr code; use raw_atomic64_set_release() there. * * Return: Nothing. */ static __always_inline void atomic64_set_release(atomic64_t *v, s64 i) { kcsan_release(); instrument_atomic_write(v, sizeof(*v)); raw_atomic64_set_release(v, i); } /** * atomic64_add() - atomic add with relaxed ordering * @i: s64 value to add * @v: pointer to atomic64_t * * Atomically updates @v to (@v + @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic64_add() there. * * Return: Nothing. */ static __always_inline void atomic64_add(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); raw_atomic64_add(i, v); } /** * atomic64_add_return() - atomic add with full ordering * @i: s64 value to add * @v: pointer to atomic64_t * * Atomically updates @v to (@v + @i) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic64_add_return() there. * * Return: The updated value of @v. */ static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_add_return(i, v); } /** * atomic64_add_return_acquire() - atomic add with acquire ordering * @i: s64 value to add * @v: pointer to atomic64_t * * Atomically updates @v to (@v + @i) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic64_add_return_acquire() there. * * Return: The updated value of @v. */ static __always_inline s64 atomic64_add_return_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_add_return_acquire(i, v); } /** * atomic64_add_return_release() - atomic add with release ordering * @i: s64 value to add * @v: pointer to atomic64_t * * Atomically updates @v to (@v + @i) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic64_add_return_release() there. * * Return: The updated value of @v. */ static __always_inline s64 atomic64_add_return_release(s64 i, atomic64_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_add_return_release(i, v); } /** * atomic64_add_return_relaxed() - atomic add with relaxed ordering * @i: s64 value to add * @v: pointer to atomic64_t * * Atomically updates @v to (@v + @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic64_add_return_relaxed() there. * * Return: The updated value of @v. */ static __always_inline s64 atomic64_add_return_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_add_return_relaxed(i, v); } /** * atomic64_fetch_add() - atomic add with full ordering * @i: s64 value to add * @v: pointer to atomic64_t * * Atomically updates @v to (@v + @i) with full ordering. 
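* * Illustrative use (editorial sketch; next_seq is a hypothetical atomic64_t): allocating 64-bit sequence numbers: s64 seq = atomic64_fetch_add(1, &next_seq); each caller receives the pre-increment value.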
* * Unsafe to use in noinstr code; use raw_atomic64_fetch_add() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_add(s64 i, atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_add(i, v); } /** * atomic64_fetch_add_acquire() - atomic add with acquire ordering * @i: s64 value to add * @v: pointer to atomic64_t * * Atomically updates @v to (@v + @i) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_add_acquire() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_add_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_add_acquire(i, v); } /** * atomic64_fetch_add_release() - atomic add with release ordering * @i: s64 value to add * @v: pointer to atomic64_t * * Atomically updates @v to (@v + @i) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_add_release() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_add_release(s64 i, atomic64_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_add_release(i, v); } /** * atomic64_fetch_add_relaxed() - atomic add with relaxed ordering * @i: s64 value to add * @v: pointer to atomic64_t * * Atomically updates @v to (@v + @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_add_relaxed() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_add_relaxed(i, v); } /** * atomic64_sub() - atomic subtract with relaxed ordering * @i: s64 value to subtract * @v: pointer to atomic64_t * * Atomically updates @v to (@v - @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic64_sub() there. * * Return: Nothing. */ static __always_inline void atomic64_sub(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); raw_atomic64_sub(i, v); } /** * atomic64_sub_return() - atomic subtract with full ordering * @i: s64 value to subtract * @v: pointer to atomic64_t * * Atomically updates @v to (@v - @i) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic64_sub_return() there. * * Return: The updated value of @v. */ static __always_inline s64 atomic64_sub_return(s64 i, atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_sub_return(i, v); } /** * atomic64_sub_return_acquire() - atomic subtract with acquire ordering * @i: s64 value to subtract * @v: pointer to atomic64_t * * Atomically updates @v to (@v - @i) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic64_sub_return_acquire() there. * * Return: The updated value of @v. */ static __always_inline s64 atomic64_sub_return_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_sub_return_acquire(i, v); } /** * atomic64_sub_return_release() - atomic subtract with release ordering * @i: s64 value to subtract * @v: pointer to atomic64_t * * Atomically updates @v to (@v - @i) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic64_sub_return_release() there. * * Return: The updated value of @v. 
*/ static __always_inline s64 atomic64_sub_return_release(s64 i, atomic64_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_sub_return_release(i, v); } /** * atomic64_sub_return_relaxed() - atomic subtract with relaxed ordering * @i: s64 value to subtract * @v: pointer to atomic64_t * * Atomically updates @v to (@v - @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic64_sub_return_relaxed() there. * * Return: The updated value of @v. */ static __always_inline s64 atomic64_sub_return_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_sub_return_relaxed(i, v); } /** * atomic64_fetch_sub() - atomic subtract with full ordering * @i: s64 value to subtract * @v: pointer to atomic64_t * * Atomically updates @v to (@v - @i) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_sub() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_sub(i, v); } /** * atomic64_fetch_sub_acquire() - atomic subtract with acquire ordering * @i: s64 value to subtract * @v: pointer to atomic64_t * * Atomically updates @v to (@v - @i) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_sub_acquire() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_sub_acquire(i, v); } /** * atomic64_fetch_sub_release() - atomic subtract with release ordering * @i: s64 value to subtract * @v: pointer to atomic64_t * * Atomically updates @v to (@v - @i) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_sub_release() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_sub_release(s64 i, atomic64_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_sub_release(i, v); } /** * atomic64_fetch_sub_relaxed() - atomic subtract with relaxed ordering * @i: s64 value to subtract * @v: pointer to atomic64_t * * Atomically updates @v to (@v - @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_sub_relaxed() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_sub_relaxed(i, v); } /** * atomic64_inc() - atomic increment with relaxed ordering * @v: pointer to atomic64_t * * Atomically updates @v to (@v + 1) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic64_inc() there. * * Return: Nothing. */ static __always_inline void atomic64_inc(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); raw_atomic64_inc(v); } /** * atomic64_inc_return() - atomic increment with full ordering * @v: pointer to atomic64_t * * Atomically updates @v to (@v + 1) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic64_inc_return() there. * * Return: The updated value of @v. */ static __always_inline s64 atomic64_inc_return(atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_inc_return(v); } /** * atomic64_inc_return_acquire() - atomic increment with acquire ordering * @v: pointer to atomic64_t * * Atomically updates @v to (@v + 1) with acquire ordering. 
* * Unsafe to use in noinstr code; use raw_atomic64_inc_return_acquire() there. * * Return: The updated value of @v. */ static __always_inline s64 atomic64_inc_return_acquire(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_inc_return_acquire(v); } /** * atomic64_inc_return_release() - atomic increment with release ordering * @v: pointer to atomic64_t * * Atomically updates @v to (@v + 1) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic64_inc_return_release() there. * * Return: The updated value of @v. */ static __always_inline s64 atomic64_inc_return_release(atomic64_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_inc_return_release(v); } /** * atomic64_inc_return_relaxed() - atomic increment with relaxed ordering * @v: pointer to atomic64_t * * Atomically updates @v to (@v + 1) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic64_inc_return_relaxed() there. * * Return: The updated value of @v. */ static __always_inline s64 atomic64_inc_return_relaxed(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_inc_return_relaxed(v); } /** * atomic64_fetch_inc() - atomic increment with full ordering * @v: pointer to atomic64_t * * Atomically updates @v to (@v + 1) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_inc() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_inc(atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_inc(v); } /** * atomic64_fetch_inc_acquire() - atomic increment with acquire ordering * @v: pointer to atomic64_t * * Atomically updates @v to (@v + 1) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_inc_acquire() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_inc_acquire(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_inc_acquire(v); } /** * atomic64_fetch_inc_release() - atomic increment with release ordering * @v: pointer to atomic64_t * * Atomically updates @v to (@v + 1) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_inc_release() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_inc_release(atomic64_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_inc_release(v); } /** * atomic64_fetch_inc_relaxed() - atomic increment with relaxed ordering * @v: pointer to atomic64_t * * Atomically updates @v to (@v + 1) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_inc_relaxed() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_inc_relaxed(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_inc_relaxed(v); } /** * atomic64_dec() - atomic decrement with relaxed ordering * @v: pointer to atomic64_t * * Atomically updates @v to (@v - 1) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic64_dec() there. * * Return: Nothing. */ static __always_inline void atomic64_dec(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); raw_atomic64_dec(v); } /** * atomic64_dec_return() - atomic decrement with full ordering * @v: pointer to atomic64_t * * Atomically updates @v to (@v - 1) with full ordering. 
* * Unsafe to use in noinstr code; use raw_atomic64_dec_return() there. * * Return: The updated value of @v. */ static __always_inline s64 atomic64_dec_return(atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_dec_return(v); } /** * atomic64_dec_return_acquire() - atomic decrement with acquire ordering * @v: pointer to atomic64_t * * Atomically updates @v to (@v - 1) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic64_dec_return_acquire() there. * * Return: The updated value of @v. */ static __always_inline s64 atomic64_dec_return_acquire(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_dec_return_acquire(v); } /** * atomic64_dec_return_release() - atomic decrement with release ordering * @v: pointer to atomic64_t * * Atomically updates @v to (@v - 1) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic64_dec_return_release() there. * * Return: The updated value of @v. */ static __always_inline s64 atomic64_dec_return_release(atomic64_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_dec_return_release(v); } /** * atomic64_dec_return_relaxed() - atomic decrement with relaxed ordering * @v: pointer to atomic64_t * * Atomically updates @v to (@v - 1) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic64_dec_return_relaxed() there. * * Return: The updated value of @v. */ static __always_inline s64 atomic64_dec_return_relaxed(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_dec_return_relaxed(v); } /** * atomic64_fetch_dec() - atomic decrement with full ordering * @v: pointer to atomic64_t * * Atomically updates @v to (@v - 1) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_dec() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_dec(atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_dec(v); } /** * atomic64_fetch_dec_acquire() - atomic decrement with acquire ordering * @v: pointer to atomic64_t * * Atomically updates @v to (@v - 1) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_dec_acquire() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_dec_acquire(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_dec_acquire(v); } /** * atomic64_fetch_dec_release() - atomic decrement with release ordering * @v: pointer to atomic64_t * * Atomically updates @v to (@v - 1) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_dec_release() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_dec_release(atomic64_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_dec_release(v); } /** * atomic64_fetch_dec_relaxed() - atomic decrement with relaxed ordering * @v: pointer to atomic64_t * * Atomically updates @v to (@v - 1) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_dec_relaxed() there. * * Return: The original value of @v. 
*/ static __always_inline s64 atomic64_fetch_dec_relaxed(atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_dec_relaxed(v); } /** * atomic64_and() - atomic bitwise AND with relaxed ordering * @i: s64 value * @v: pointer to atomic64_t * * Atomically updates @v to (@v & @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic64_and() there. * * Return: Nothing. */ static __always_inline void atomic64_and(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); raw_atomic64_and(i, v); } /** * atomic64_fetch_and() - atomic bitwise AND with full ordering * @i: s64 value * @v: pointer to atomic64_t * * Atomically updates @v to (@v & @i) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_and() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_and(s64 i, atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_and(i, v); } /** * atomic64_fetch_and_acquire() - atomic bitwise AND with acquire ordering * @i: s64 value * @v: pointer to atomic64_t * * Atomically updates @v to (@v & @i) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_and_acquire() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_and_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_and_acquire(i, v); } /** * atomic64_fetch_and_release() - atomic bitwise AND with release ordering * @i: s64 value * @v: pointer to atomic64_t * * Atomically updates @v to (@v & @i) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_and_release() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_and_release(s64 i, atomic64_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_and_release(i, v); } /** * atomic64_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering * @i: s64 value * @v: pointer to atomic64_t * * Atomically updates @v to (@v & @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_and_relaxed() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_and_relaxed(i, v); } /** * atomic64_andnot() - atomic bitwise AND NOT with relaxed ordering * @i: s64 value * @v: pointer to atomic64_t * * Atomically updates @v to (@v & ~@i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic64_andnot() there. * * Return: Nothing. */ static __always_inline void atomic64_andnot(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); raw_atomic64_andnot(i, v); } /** * atomic64_fetch_andnot() - atomic bitwise AND NOT with full ordering * @i: s64 value * @v: pointer to atomic64_t * * Atomically updates @v to (@v & ~@i) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_andnot() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_andnot(s64 i, atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_andnot(i, v); } /** * atomic64_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering * @i: s64 value * @v: pointer to atomic64_t * * Atomically updates @v to (@v & ~@i) with acquire ordering. 
* * Unsafe to use in noinstr code; use raw_atomic64_fetch_andnot_acquire() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_andnot_acquire(i, v); } /** * atomic64_fetch_andnot_release() - atomic bitwise AND NOT with release ordering * @i: s64 value * @v: pointer to atomic64_t * * Atomically updates @v to (@v & ~@i) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_andnot_release() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_andnot_release(s64 i, atomic64_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_andnot_release(i, v); } /** * atomic64_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering * @i: s64 value * @v: pointer to atomic64_t * * Atomically updates @v to (@v & ~@i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_andnot_relaxed() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_andnot_relaxed(i, v); } /** * atomic64_or() - atomic bitwise OR with relaxed ordering * @i: s64 value * @v: pointer to atomic64_t * * Atomically updates @v to (@v | @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic64_or() there. * * Return: Nothing. */ static __always_inline void atomic64_or(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); raw_atomic64_or(i, v); } /** * atomic64_fetch_or() - atomic bitwise OR with full ordering * @i: s64 value * @v: pointer to atomic64_t * * Atomically updates @v to (@v | @i) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_or() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_or(s64 i, atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_or(i, v); } /** * atomic64_fetch_or_acquire() - atomic bitwise OR with acquire ordering * @i: s64 value * @v: pointer to atomic64_t * * Atomically updates @v to (@v | @i) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_or_acquire() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_or_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_or_acquire(i, v); } /** * atomic64_fetch_or_release() - atomic bitwise OR with release ordering * @i: s64 value * @v: pointer to atomic64_t * * Atomically updates @v to (@v | @i) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_or_release() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_or_release(s64 i, atomic64_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_or_release(i, v); } /** * atomic64_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering * @i: s64 value * @v: pointer to atomic64_t * * Atomically updates @v to (@v | @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_or_relaxed() there. * * Return: The original value of @v. 
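 *
 * A minimal illustrative sketch (hypothetical @flags word and FLAG_DIRTY
 * bit): set a flag and learn whether it was already set, without implying
 * any ordering against surrounding memory accesses:
 *
 *	bool was_dirty = atomic64_fetch_or_relaxed(FLAG_DIRTY, &flags) & FLAG_DIRTY;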
*/ static __always_inline s64 atomic64_fetch_or_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_or_relaxed(i, v); } /** * atomic64_xor() - atomic bitwise XOR with relaxed ordering * @i: s64 value * @v: pointer to atomic64_t * * Atomically updates @v to (@v ^ @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic64_xor() there. * * Return: Nothing. */ static __always_inline void atomic64_xor(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); raw_atomic64_xor(i, v); } /** * atomic64_fetch_xor() - atomic bitwise XOR with full ordering * @i: s64 value * @v: pointer to atomic64_t * * Atomically updates @v to (@v ^ @i) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_xor() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_xor(i, v); } /** * atomic64_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering * @i: s64 value * @v: pointer to atomic64_t * * Atomically updates @v to (@v ^ @i) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_xor_acquire() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_xor_acquire(i, v); } /** * atomic64_fetch_xor_release() - atomic bitwise XOR with release ordering * @i: s64 value * @v: pointer to atomic64_t * * Atomically updates @v to (@v ^ @i) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_xor_release() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_xor_release(s64 i, atomic64_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_xor_release(i, v); } /** * atomic64_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering * @i: s64 value * @v: pointer to atomic64_t * * Atomically updates @v to (@v ^ @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_xor_relaxed() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_xor_relaxed(i, v); } /** * atomic64_xchg() - atomic exchange with full ordering * @v: pointer to atomic64_t * @new: s64 value to assign * * Atomically updates @v to @new with full ordering. * * Unsafe to use in noinstr code; use raw_atomic64_xchg() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_xchg(atomic64_t *v, s64 new) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_xchg(v, new); } /** * atomic64_xchg_acquire() - atomic exchange with acquire ordering * @v: pointer to atomic64_t * @new: s64 value to assign * * Atomically updates @v to @new with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic64_xchg_acquire() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_xchg_acquire(atomic64_t *v, s64 new) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_xchg_acquire(v, new); } /** * atomic64_xchg_release() - atomic exchange with release ordering * @v: pointer to atomic64_t * @new: s64 value to assign * * Atomically updates @v to @new with release ordering. 
* * Unsafe to use in noinstr code; use raw_atomic64_xchg_release() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_xchg_release(atomic64_t *v, s64 new) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_xchg_release(v, new); } /** * atomic64_xchg_relaxed() - atomic exchange with relaxed ordering * @v: pointer to atomic64_t * @new: s64 value to assign * * Atomically updates @v to @new with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic64_xchg_relaxed() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_xchg_relaxed(atomic64_t *v, s64 new) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_xchg_relaxed(v, new); } /** * atomic64_cmpxchg() - atomic compare and exchange with full ordering * @v: pointer to atomic64_t * @old: s64 value to compare with * @new: s64 value to assign * * If (@v == @old), atomically updates @v to @new with full ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic64_cmpxchg() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_cmpxchg(v, old, new); } /** * atomic64_cmpxchg_acquire() - atomic compare and exchange with acquire ordering * @v: pointer to atomic64_t * @old: s64 value to compare with * @new: s64 value to assign * * If (@v == @old), atomically updates @v to @new with acquire ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic64_cmpxchg_acquire() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_cmpxchg_acquire(v, old, new); } /** * atomic64_cmpxchg_release() - atomic compare and exchange with release ordering * @v: pointer to atomic64_t * @old: s64 value to compare with * @new: s64 value to assign * * If (@v == @old), atomically updates @v to @new with release ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic64_cmpxchg_release() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_cmpxchg_release(v, old, new); } /** * atomic64_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering * @v: pointer to atomic64_t * @old: s64 value to compare with * @new: s64 value to assign * * If (@v == @old), atomically updates @v to @new with relaxed ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic64_cmpxchg_relaxed() there. * * Return: The original value of @v. */ static __always_inline s64 atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_cmpxchg_relaxed(v, old, new); } /** * atomic64_try_cmpxchg() - atomic compare and exchange with full ordering * @v: pointer to atomic64_t * @old: pointer to s64 value to compare with * @new: s64 value to assign * * If (@v == @old), atomically updates @v to @new with full ordering. 
* Otherwise, @v is not modified, @old is updated to the current value of @v, * and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg() there. * * Return: @true if the exchange occurred, @false otherwise. */ static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); instrument_atomic_read_write(old, sizeof(*old)); return raw_atomic64_try_cmpxchg(v, old, new); } /** * atomic64_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering * @v: pointer to atomic64_t * @old: pointer to s64 value to compare with * @new: s64 value to assign * * If (@v == @old), atomically updates @v to @new with acquire ordering. * Otherwise, @v is not modified, @old is updated to the current value of @v, * and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_acquire() there. * * Return: @true if the exchange occurred, @false otherwise. */ static __always_inline bool atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) { instrument_atomic_read_write(v, sizeof(*v)); instrument_atomic_read_write(old, sizeof(*old)); return raw_atomic64_try_cmpxchg_acquire(v, old, new); } /** * atomic64_try_cmpxchg_release() - atomic compare and exchange with release ordering * @v: pointer to atomic64_t * @old: pointer to s64 value to compare with * @new: s64 value to assign * * If (@v == @old), atomically updates @v to @new with release ordering. * Otherwise, @v is not modified, @old is updated to the current value of @v, * and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_release() there. * * Return: @true if the exchange occurred, @false otherwise. */ static __always_inline bool atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); instrument_atomic_read_write(old, sizeof(*old)); return raw_atomic64_try_cmpxchg_release(v, old, new); } /** * atomic64_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering * @v: pointer to atomic64_t * @old: pointer to s64 value to compare with * @new: s64 value to assign * * If (@v == @old), atomically updates @v to @new with relaxed ordering. * Otherwise, @v is not modified, @old is updated to the current value of @v, * and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_relaxed() there. * * Return: @true if the exchange occurred, @false otherwise. */ static __always_inline bool atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) { instrument_atomic_read_write(v, sizeof(*v)); instrument_atomic_read_write(old, sizeof(*old)); return raw_atomic64_try_cmpxchg_relaxed(v, old, new); } /** * atomic64_sub_and_test() - atomic subtract and test if zero with full ordering * @i: s64 value to subtract * @v: pointer to atomic64_t * * Atomically updates @v to (@v - @i) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic64_sub_and_test() there. * * Return: @true if the resulting value of @v is zero, @false otherwise. */ static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_sub_and_test(i, v); } /** * atomic64_dec_and_test() - atomic decrement and test if zero with full ordering * @v: pointer to atomic64_t * * Atomically updates @v to (@v - 1) with full ordering.
* * Unsafe to use in noinstr code; use raw_atomic64_dec_and_test() there. * * Return: @true if the resulting value of @v is zero, @false otherwise. */ static __always_inline bool atomic64_dec_and_test(atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_dec_and_test(v); } /** * atomic64_inc_and_test() - atomic increment and test if zero with full ordering * @v: pointer to atomic64_t * * Atomically updates @v to (@v + 1) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic64_inc_and_test() there. * * Return: @true if the resulting value of @v is zero, @false otherwise. */ static __always_inline bool atomic64_inc_and_test(atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_inc_and_test(v); } /** * atomic64_add_negative() - atomic add and test if negative with full ordering * @i: s64 value to add * @v: pointer to atomic64_t * * Atomically updates @v to (@v + @i) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic64_add_negative() there. * * Return: @true if the resulting value of @v is negative, @false otherwise. */ static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_add_negative(i, v); } /** * atomic64_add_negative_acquire() - atomic add and test if negative with acquire ordering * @i: s64 value to add * @v: pointer to atomic64_t * * Atomically updates @v to (@v + @i) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic64_add_negative_acquire() there. * * Return: @true if the resulting value of @v is negative, @false otherwise. */ static __always_inline bool atomic64_add_negative_acquire(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_add_negative_acquire(i, v); } /** * atomic64_add_negative_release() - atomic add and test if negative with release ordering * @i: s64 value to add * @v: pointer to atomic64_t * * Atomically updates @v to (@v + @i) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic64_add_negative_release() there. * * Return: @true if the resulting value of @v is negative, @false otherwise. */ static __always_inline bool atomic64_add_negative_release(s64 i, atomic64_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_add_negative_release(i, v); } /** * atomic64_add_negative_relaxed() - atomic add and test if negative with relaxed ordering * @i: s64 value to add * @v: pointer to atomic64_t * * Atomically updates @v to (@v + @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic64_add_negative_relaxed() there. * * Return: @true if the resulting value of @v is negative, @false otherwise. */ static __always_inline bool atomic64_add_negative_relaxed(s64 i, atomic64_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_add_negative_relaxed(i, v); } /** * atomic64_fetch_add_unless() - atomic add unless value with full ordering * @v: pointer to atomic64_t * @a: s64 value to add * @u: s64 value to compare with * * If (@v != @u), atomically updates @v to (@v + @a) with full ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic64_fetch_add_unless() there. * * Return: The original value of @v. 
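 *
 * A minimal illustrative sketch (hypothetical @usage counter and @delta,
 * with S64_MAX used as a saturation sentinel): add to the counter unless it
 * is already pinned at the sentinel, and use the returned original value to
 * tell the two cases apart:
 *
 *	if (atomic64_fetch_add_unless(&usage, delta, S64_MAX) == S64_MAX)
 *		pr_debug("counter saturated, delta ignored\n");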
*/ static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_fetch_add_unless(v, a, u); } /** * atomic64_add_unless() - atomic add unless value with full ordering * @v: pointer to atomic64_t * @a: s64 value to add * @u: s64 value to compare with * * If (@v != @u), atomically updates @v to (@v + @a) with full ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic64_add_unless() there. * * Return: @true if @v was updated, @false otherwise. */ static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_add_unless(v, a, u); } /** * atomic64_inc_not_zero() - atomic increment unless zero with full ordering * @v: pointer to atomic64_t * * If (@v != 0), atomically updates @v to (@v + 1) with full ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic64_inc_not_zero() there. * * Return: @true if @v was updated, @false otherwise. */ static __always_inline bool atomic64_inc_not_zero(atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_inc_not_zero(v); } /** * atomic64_inc_unless_negative() - atomic increment unless negative with full ordering * @v: pointer to atomic64_t * * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic64_inc_unless_negative() there. * * Return: @true if @v was updated, @false otherwise. */ static __always_inline bool atomic64_inc_unless_negative(atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_inc_unless_negative(v); } /** * atomic64_dec_unless_positive() - atomic decrement unless positive with full ordering * @v: pointer to atomic64_t * * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic64_dec_unless_positive() there. * * Return: @true if @v was updated, @false otherwise. */ static __always_inline bool atomic64_dec_unless_positive(atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_dec_unless_positive(v); } /** * atomic64_dec_if_positive() - atomic decrement if positive with full ordering * @v: pointer to atomic64_t * * If (@v > 0), atomically updates @v to (@v - 1) with full ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic64_dec_if_positive() there. * * Return: The old value of (@v - 1), regardless of whether @v was updated. */ static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic64_dec_if_positive(v); } /** * atomic_long_read() - atomic load with relaxed ordering * @v: pointer to atomic_long_t * * Atomically loads the value of @v with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_read() there. * * Return: The value loaded from @v. 
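 *
 * A minimal illustrative sketch (hypothetical @nr_events statistics
 * counter): take a snapshot for reporting. A relaxed load is enough because
 * the value is only informational:
 *
 *	long snapshot = atomic_long_read(&nr_events);
 *	pr_info("events so far: %ld\n", snapshot);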
*/ static __always_inline long atomic_long_read(const atomic_long_t *v) { instrument_atomic_read(v, sizeof(*v)); return raw_atomic_long_read(v); } /** * atomic_long_read_acquire() - atomic load with acquire ordering * @v: pointer to atomic_long_t * * Atomically loads the value of @v with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_read_acquire() there. * * Return: The value loaded from @v. */ static __always_inline long atomic_long_read_acquire(const atomic_long_t *v) { instrument_atomic_read(v, sizeof(*v)); return raw_atomic_long_read_acquire(v); } /** * atomic_long_set() - atomic set with relaxed ordering * @v: pointer to atomic_long_t * @i: long value to assign * * Atomically sets @v to @i with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_set() there. * * Return: Nothing. */ static __always_inline void atomic_long_set(atomic_long_t *v, long i) { instrument_atomic_write(v, sizeof(*v)); raw_atomic_long_set(v, i); } /** * atomic_long_set_release() - atomic set with release ordering * @v: pointer to atomic_long_t * @i: long value to assign * * Atomically sets @v to @i with release ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_set_release() there. * * Return: Nothing. */ static __always_inline void atomic_long_set_release(atomic_long_t *v, long i) { kcsan_release(); instrument_atomic_write(v, sizeof(*v)); raw_atomic_long_set_release(v, i); } /** * atomic_long_add() - atomic add with relaxed ordering * @i: long value to add * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_add() there. * * Return: Nothing. */ static __always_inline void atomic_long_add(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); raw_atomic_long_add(i, v); } /** * atomic_long_add_return() - atomic add with full ordering * @i: long value to add * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + @i) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_add_return() there. * * Return: The updated value of @v. */ static __always_inline long atomic_long_add_return(long i, atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_add_return(i, v); } /** * atomic_long_add_return_acquire() - atomic add with acquire ordering * @i: long value to add * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + @i) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_add_return_acquire() there. * * Return: The updated value of @v. */ static __always_inline long atomic_long_add_return_acquire(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_add_return_acquire(i, v); } /** * atomic_long_add_return_release() - atomic add with release ordering * @i: long value to add * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + @i) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_add_return_release() there. * * Return: The updated value of @v. */ static __always_inline long atomic_long_add_return_release(long i, atomic_long_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_add_return_release(i, v); } /** * atomic_long_add_return_relaxed() - atomic add with relaxed ordering * @i: long value to add * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + @i) with relaxed ordering. 
* * Unsafe to use in noinstr code; use raw_atomic_long_add_return_relaxed() there. * * Return: The updated value of @v. */ static __always_inline long atomic_long_add_return_relaxed(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_add_return_relaxed(i, v); } /** * atomic_long_fetch_add() - atomic add with full ordering * @i: long value to add * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + @i) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_fetch_add(long i, atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_add(i, v); } /** * atomic_long_fetch_add_acquire() - atomic add with acquire ordering * @i: long value to add * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + @i) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add_acquire() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_fetch_add_acquire(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_add_acquire(i, v); } /** * atomic_long_fetch_add_release() - atomic add with release ordering * @i: long value to add * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + @i) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add_release() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_fetch_add_release(long i, atomic_long_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_add_release(i, v); } /** * atomic_long_fetch_add_relaxed() - atomic add with relaxed ordering * @i: long value to add * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add_relaxed() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_add_relaxed(i, v); } /** * atomic_long_sub() - atomic subtract with relaxed ordering * @i: long value to subtract * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_sub() there. * * Return: Nothing. */ static __always_inline void atomic_long_sub(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); raw_atomic_long_sub(i, v); } /** * atomic_long_sub_return() - atomic subtract with full ordering * @i: long value to subtract * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - @i) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_sub_return() there. * * Return: The updated value of @v. */ static __always_inline long atomic_long_sub_return(long i, atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_sub_return(i, v); } /** * atomic_long_sub_return_acquire() - atomic subtract with acquire ordering * @i: long value to subtract * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - @i) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_sub_return_acquire() there. * * Return: The updated value of @v. 
*/ static __always_inline long atomic_long_sub_return_acquire(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_sub_return_acquire(i, v); } /** * atomic_long_sub_return_release() - atomic subtract with release ordering * @i: long value to subtract * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - @i) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_sub_return_release() there. * * Return: The updated value of @v. */ static __always_inline long atomic_long_sub_return_release(long i, atomic_long_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_sub_return_release(i, v); } /** * atomic_long_sub_return_relaxed() - atomic subtract with relaxed ordering * @i: long value to subtract * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_sub_return_relaxed() there. * * Return: The updated value of @v. */ static __always_inline long atomic_long_sub_return_relaxed(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_sub_return_relaxed(i, v); } /** * atomic_long_fetch_sub() - atomic subtract with full ordering * @i: long value to subtract * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - @i) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_sub() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_fetch_sub(long i, atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_sub(i, v); } /** * atomic_long_fetch_sub_acquire() - atomic subtract with acquire ordering * @i: long value to subtract * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - @i) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_sub_acquire() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_sub_acquire(i, v); } /** * atomic_long_fetch_sub_release() - atomic subtract with release ordering * @i: long value to subtract * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - @i) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_sub_release() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_fetch_sub_release(long i, atomic_long_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_sub_release(i, v); } /** * atomic_long_fetch_sub_relaxed() - atomic subtract with relaxed ordering * @i: long value to subtract * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_sub_relaxed() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_sub_relaxed(i, v); } /** * atomic_long_inc() - atomic increment with relaxed ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + 1) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_inc() there. * * Return: Nothing. 
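 *
 * A minimal illustrative sketch (hypothetical @nr_events statistics
 * counter): bump an event counter where neither the updated value nor any
 * ordering guarantee is needed:
 *
 *	atomic_long_inc(&nr_events);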
*/ static __always_inline void atomic_long_inc(atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); raw_atomic_long_inc(v); } /** * atomic_long_inc_return() - atomic increment with full ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + 1) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_inc_return() there. * * Return: The updated value of @v. */ static __always_inline long atomic_long_inc_return(atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_inc_return(v); } /** * atomic_long_inc_return_acquire() - atomic increment with acquire ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + 1) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_inc_return_acquire() there. * * Return: The updated value of @v. */ static __always_inline long atomic_long_inc_return_acquire(atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_inc_return_acquire(v); } /** * atomic_long_inc_return_release() - atomic increment with release ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + 1) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_inc_return_release() there. * * Return: The updated value of @v. */ static __always_inline long atomic_long_inc_return_release(atomic_long_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_inc_return_release(v); } /** * atomic_long_inc_return_relaxed() - atomic increment with relaxed ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + 1) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_inc_return_relaxed() there. * * Return: The updated value of @v. */ static __always_inline long atomic_long_inc_return_relaxed(atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_inc_return_relaxed(v); } /** * atomic_long_fetch_inc() - atomic increment with full ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + 1) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_inc() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_fetch_inc(atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_inc(v); } /** * atomic_long_fetch_inc_acquire() - atomic increment with acquire ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + 1) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_inc_acquire() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_fetch_inc_acquire(atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_inc_acquire(v); } /** * atomic_long_fetch_inc_release() - atomic increment with release ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + 1) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_inc_release() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_fetch_inc_release(atomic_long_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_inc_release(v); } /** * atomic_long_fetch_inc_relaxed() - atomic increment with relaxed ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + 1) with relaxed ordering. 
* * Unsafe to use in noinstr code; use raw_atomic_long_fetch_inc_relaxed() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_fetch_inc_relaxed(atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_inc_relaxed(v); } /** * atomic_long_dec() - atomic decrement with relaxed ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - 1) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_dec() there. * * Return: Nothing. */ static __always_inline void atomic_long_dec(atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); raw_atomic_long_dec(v); } /** * atomic_long_dec_return() - atomic decrement with full ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - 1) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_dec_return() there. * * Return: The updated value of @v. */ static __always_inline long atomic_long_dec_return(atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_dec_return(v); } /** * atomic_long_dec_return_acquire() - atomic decrement with acquire ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - 1) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_dec_return_acquire() there. * * Return: The updated value of @v. */ static __always_inline long atomic_long_dec_return_acquire(atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_dec_return_acquire(v); } /** * atomic_long_dec_return_release() - atomic decrement with release ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - 1) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_dec_return_release() there. * * Return: The updated value of @v. */ static __always_inline long atomic_long_dec_return_release(atomic_long_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_dec_return_release(v); } /** * atomic_long_dec_return_relaxed() - atomic decrement with relaxed ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - 1) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_dec_return_relaxed() there. * * Return: The updated value of @v. */ static __always_inline long atomic_long_dec_return_relaxed(atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_dec_return_relaxed(v); } /** * atomic_long_fetch_dec() - atomic decrement with full ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - 1) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_dec() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_fetch_dec(atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_dec(v); } /** * atomic_long_fetch_dec_acquire() - atomic decrement with acquire ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - 1) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_dec_acquire() there. * * Return: The original value of @v. 
*/ static __always_inline long atomic_long_fetch_dec_acquire(atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_dec_acquire(v); } /** * atomic_long_fetch_dec_release() - atomic decrement with release ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - 1) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_dec_release() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_fetch_dec_release(atomic_long_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_dec_release(v); } /** * atomic_long_fetch_dec_relaxed() - atomic decrement with relaxed ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - 1) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_dec_relaxed() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_fetch_dec_relaxed(atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_dec_relaxed(v); } /** * atomic_long_and() - atomic bitwise AND with relaxed ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v & @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_and() there. * * Return: Nothing. */ static __always_inline void atomic_long_and(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); raw_atomic_long_and(i, v); } /** * atomic_long_fetch_and() - atomic bitwise AND with full ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v & @i) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_and() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_fetch_and(long i, atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_and(i, v); } /** * atomic_long_fetch_and_acquire() - atomic bitwise AND with acquire ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v & @i) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_and_acquire() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_fetch_and_acquire(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_and_acquire(i, v); } /** * atomic_long_fetch_and_release() - atomic bitwise AND with release ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v & @i) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_and_release() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_fetch_and_release(long i, atomic_long_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_and_release(i, v); } /** * atomic_long_fetch_and_relaxed() - atomic bitwise AND with relaxed ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v & @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_and_relaxed() there. * * Return: The original value of @v. 
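 *
 * A minimal illustrative sketch (hypothetical @flags word and FLAG_PENDING
 * bit): clear a flag and learn whether it had been set, with no ordering
 * implied:
 *
 *	bool was_pending = atomic_long_fetch_and_relaxed(~FLAG_PENDING, &flags) & FLAG_PENDING;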
*/ static __always_inline long atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_and_relaxed(i, v); } /** * atomic_long_andnot() - atomic bitwise AND NOT with relaxed ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v & ~@i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_andnot() there. * * Return: Nothing. */ static __always_inline void atomic_long_andnot(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); raw_atomic_long_andnot(i, v); } /** * atomic_long_fetch_andnot() - atomic bitwise AND NOT with full ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v & ~@i) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_andnot() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_fetch_andnot(long i, atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_andnot(i, v); } /** * atomic_long_fetch_andnot_acquire() - atomic bitwise AND NOT with acquire ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v & ~@i) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_andnot_acquire() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_andnot_acquire(i, v); } /** * atomic_long_fetch_andnot_release() - atomic bitwise AND NOT with release ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v & ~@i) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_andnot_release() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_fetch_andnot_release(long i, atomic_long_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_andnot_release(i, v); } /** * atomic_long_fetch_andnot_relaxed() - atomic bitwise AND NOT with relaxed ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v & ~@i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_andnot_relaxed() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_andnot_relaxed(i, v); } /** * atomic_long_or() - atomic bitwise OR with relaxed ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v | @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_or() there. * * Return: Nothing. */ static __always_inline void atomic_long_or(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); raw_atomic_long_or(i, v); } /** * atomic_long_fetch_or() - atomic bitwise OR with full ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v | @i) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_or() there. * * Return: The original value of @v. 
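 *
 * A minimal illustrative sketch (hypothetical @state word and STATE_CLOSED
 * bit): mark the state closed and use the returned old value to detect a
 * redundant close; the full ordering keeps surrounding accesses from being
 * reordered across the update:
 *
 *	if (atomic_long_fetch_or(STATE_CLOSED, &state) & STATE_CLOSED)
 *		pr_debug("already closed\n");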
*/ static __always_inline long atomic_long_fetch_or(long i, atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_or(i, v); } /** * atomic_long_fetch_or_acquire() - atomic bitwise OR with acquire ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v | @i) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_or_acquire() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_fetch_or_acquire(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_or_acquire(i, v); } /** * atomic_long_fetch_or_release() - atomic bitwise OR with release ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v | @i) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_or_release() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_fetch_or_release(long i, atomic_long_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_or_release(i, v); } /** * atomic_long_fetch_or_relaxed() - atomic bitwise OR with relaxed ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v | @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_or_relaxed() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_or_relaxed(i, v); } /** * atomic_long_xor() - atomic bitwise XOR with relaxed ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v ^ @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_xor() there. * * Return: Nothing. */ static __always_inline void atomic_long_xor(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); raw_atomic_long_xor(i, v); } /** * atomic_long_fetch_xor() - atomic bitwise XOR with full ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v ^ @i) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_xor() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_fetch_xor(long i, atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_xor(i, v); } /** * atomic_long_fetch_xor_acquire() - atomic bitwise XOR with acquire ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v ^ @i) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_xor_acquire() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_xor_acquire(i, v); } /** * atomic_long_fetch_xor_release() - atomic bitwise XOR with release ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v ^ @i) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_xor_release() there. * * Return: The original value of @v. 
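 *
 * A minimal illustrative sketch (hypothetical @parity bit and fill_buffer()
 * helper): flip a parity bit only after the data it guards has been written;
 * the release ordering keeps the data writes ordered before the flip, and
 * the old value tells which phase was previously published:
 *
 *	fill_buffer(buf);
 *	long old_phase = atomic_long_fetch_xor_release(1, &parity);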
*/ static __always_inline long atomic_long_fetch_xor_release(long i, atomic_long_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_xor_release(i, v); } /** * atomic_long_fetch_xor_relaxed() - atomic bitwise XOR with relaxed ordering * @i: long value * @v: pointer to atomic_long_t * * Atomically updates @v to (@v ^ @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_xor_relaxed() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_xor_relaxed(i, v); } /** * atomic_long_xchg() - atomic exchange with full ordering * @v: pointer to atomic_long_t * @new: long value to assign * * Atomically updates @v to @new with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_xchg() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_xchg(atomic_long_t *v, long new) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_xchg(v, new); } /** * atomic_long_xchg_acquire() - atomic exchange with acquire ordering * @v: pointer to atomic_long_t * @new: long value to assign * * Atomically updates @v to @new with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_xchg_acquire() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_xchg_acquire(atomic_long_t *v, long new) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_xchg_acquire(v, new); } /** * atomic_long_xchg_release() - atomic exchange with release ordering * @v: pointer to atomic_long_t * @new: long value to assign * * Atomically updates @v to @new with release ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_xchg_release() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_xchg_release(atomic_long_t *v, long new) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_xchg_release(v, new); } /** * atomic_long_xchg_relaxed() - atomic exchange with relaxed ordering * @v: pointer to atomic_long_t * @new: long value to assign * * Atomically updates @v to @new with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_xchg_relaxed() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_xchg_relaxed(atomic_long_t *v, long new) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_xchg_relaxed(v, new); } /** * atomic_long_cmpxchg() - atomic compare and exchange with full ordering * @v: pointer to atomic_long_t * @old: long value to compare with * @new: long value to assign * * If (@v == @old), atomically updates @v to @new with full ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_cmpxchg(atomic_long_t *v, long old, long new) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_cmpxchg(v, old, new); } /** * atomic_long_cmpxchg_acquire() - atomic compare and exchange with acquire ordering * @v: pointer to atomic_long_t * @old: long value to compare with * @new: long value to assign * * If (@v == @old), atomically updates @v to @new with acquire ordering. 
* Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg_acquire() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_cmpxchg_acquire(v, old, new); } /** * atomic_long_cmpxchg_release() - atomic compare and exchange with release ordering * @v: pointer to atomic_long_t * @old: long value to compare with * @new: long value to assign * * If (@v == @old), atomically updates @v to @new with release ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg_release() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_cmpxchg_release(v, old, new); } /** * atomic_long_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering * @v: pointer to atomic_long_t * @old: long value to compare with * @new: long value to assign * * If (@v == @old), atomically updates @v to @new with relaxed ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg_relaxed() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_cmpxchg_relaxed(v, old, new); } /** * atomic_long_try_cmpxchg() - atomic compare and exchange with full ordering * @v: pointer to atomic_long_t * @old: pointer to long value to compare with * @new: long value to assign * * If (@v == @old), atomically updates @v to @new with full ordering. * Otherwise, @v is not modified, @old is updated to the current value of @v, * and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg() there. * * Return: @true if the exchange occurred, @false otherwise. */ static __always_inline bool atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); instrument_atomic_read_write(old, sizeof(*old)); return raw_atomic_long_try_cmpxchg(v, old, new); } /** * atomic_long_try_cmpxchg_acquire() - atomic compare and exchange with acquire ordering * @v: pointer to atomic_long_t * @old: pointer to long value to compare with * @new: long value to assign * * If (@v == @old), atomically updates @v to @new with acquire ordering. * Otherwise, @v is not modified, @old is updated to the current value of @v, * and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_acquire() there. * * Return: @true if the exchange occurred, @false otherwise. */ static __always_inline bool atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) { instrument_atomic_read_write(v, sizeof(*v)); instrument_atomic_read_write(old, sizeof(*old)); return raw_atomic_long_try_cmpxchg_acquire(v, old, new); } /** * atomic_long_try_cmpxchg_release() - atomic compare and exchange with release ordering * @v: pointer to atomic_long_t * @old: pointer to long value to compare with * @new: long value to assign * * If (@v == @old), atomically updates @v to @new with release ordering.
* Otherwise, @v is not modified, @old is updated to the current value of @v, * and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_release() there. * * Return: @true if the exchange occurred, @false otherwise. */ static __always_inline bool atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); instrument_atomic_read_write(old, sizeof(*old)); return raw_atomic_long_try_cmpxchg_release(v, old, new); } /** * atomic_long_try_cmpxchg_relaxed() - atomic compare and exchange with relaxed ordering * @v: pointer to atomic_long_t * @old: pointer to long value to compare with * @new: long value to assign * * If (@v == @old), atomically updates @v to @new with relaxed ordering. * Otherwise, @v is not modified, @old is updated to the current value of @v, * and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_relaxed() there. * * Return: @true if the exchange occurred, @false otherwise. */ static __always_inline bool atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) { instrument_atomic_read_write(v, sizeof(*v)); instrument_atomic_read_write(old, sizeof(*old)); return raw_atomic_long_try_cmpxchg_relaxed(v, old, new); } /** * atomic_long_sub_and_test() - atomic subtract and test if zero with full ordering * @i: long value to subtract * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - @i) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_sub_and_test() there. * * Return: @true if the resulting value of @v is zero, @false otherwise. */ static __always_inline bool atomic_long_sub_and_test(long i, atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_sub_and_test(i, v); } /** * atomic_long_dec_and_test() - atomic decrement and test if zero with full ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v - 1) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_dec_and_test() there. * * Return: @true if the resulting value of @v is zero, @false otherwise. */ static __always_inline bool atomic_long_dec_and_test(atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_dec_and_test(v); } /** * atomic_long_inc_and_test() - atomic increment and test if zero with full ordering * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + 1) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_inc_and_test() there. * * Return: @true if the resulting value of @v is zero, @false otherwise. */ static __always_inline bool atomic_long_inc_and_test(atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_inc_and_test(v); } /** * atomic_long_add_negative() - atomic add and test if negative with full ordering * @i: long value to add * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + @i) with full ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_add_negative() there. * * Return: @true if the resulting value of @v is negative, @false otherwise.
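 *
 * A minimal illustrative sketch (hypothetical @balance counter and @cost
 * value): apply a signed adjustment and detect overdraft in one atomic
 * step:
 *
 *	if (atomic_long_add_negative(-cost, &balance))
 *		pr_warn("balance went negative\n");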
*/ static __always_inline bool atomic_long_add_negative(long i, atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_add_negative(i, v); } /** * atomic_long_add_negative_acquire() - atomic add and test if negative with acquire ordering * @i: long value to add * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + @i) with acquire ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_add_negative_acquire() there. * * Return: @true if the resulting value of @v is negative, @false otherwise. */ static __always_inline bool atomic_long_add_negative_acquire(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_add_negative_acquire(i, v); } /** * atomic_long_add_negative_release() - atomic add and test if negative with release ordering * @i: long value to add * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + @i) with release ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_add_negative_release() there. * * Return: @true if the resulting value of @v is negative, @false otherwise. */ static __always_inline bool atomic_long_add_negative_release(long i, atomic_long_t *v) { kcsan_release(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_add_negative_release(i, v); } /** * atomic_long_add_negative_relaxed() - atomic add and test if negative with relaxed ordering * @i: long value to add * @v: pointer to atomic_long_t * * Atomically updates @v to (@v + @i) with relaxed ordering. * * Unsafe to use in noinstr code; use raw_atomic_long_add_negative_relaxed() there. * * Return: @true if the resulting value of @v is negative, @false otherwise. */ static __always_inline bool atomic_long_add_negative_relaxed(long i, atomic_long_t *v) { instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_add_negative_relaxed(i, v); } /** * atomic_long_fetch_add_unless() - atomic add unless value with full ordering * @v: pointer to atomic_long_t * @a: long value to add * @u: long value to compare with * * If (@v != @u), atomically updates @v to (@v + @a) with full ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic_long_fetch_add_unless() there. * * Return: The original value of @v. */ static __always_inline long atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_fetch_add_unless(v, a, u); } /** * atomic_long_add_unless() - atomic add unless value with full ordering * @v: pointer to atomic_long_t * @a: long value to add * @u: long value to compare with * * If (@v != @u), atomically updates @v to (@v + @a) with full ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic_long_add_unless() there. * * Return: @true if @v was updated, @false otherwise. */ static __always_inline bool atomic_long_add_unless(atomic_long_t *v, long a, long u) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_add_unless(v, a, u); } /** * atomic_long_inc_not_zero() - atomic increment unless zero with full ordering * @v: pointer to atomic_long_t * * If (@v != 0), atomically updates @v to (@v + 1) with full ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic_long_inc_not_zero() there. * * Return: @true if @v was updated, @false otherwise. 
*/ static __always_inline bool atomic_long_inc_not_zero(atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_inc_not_zero(v); } /** * atomic_long_inc_unless_negative() - atomic increment unless negative with full ordering * @v: pointer to atomic_long_t * * If (@v >= 0), atomically updates @v to (@v + 1) with full ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic_long_inc_unless_negative() there. * * Return: @true if @v was updated, @false otherwise. */ static __always_inline bool atomic_long_inc_unless_negative(atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_inc_unless_negative(v); } /** * atomic_long_dec_unless_positive() - atomic decrement unless positive with full ordering * @v: pointer to atomic_long_t * * If (@v <= 0), atomically updates @v to (@v - 1) with full ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic_long_dec_unless_positive() there. * * Return: @true if @v was updated, @false otherwise. */ static __always_inline bool atomic_long_dec_unless_positive(atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_dec_unless_positive(v); } /** * atomic_long_dec_if_positive() - atomic decrement if positive with full ordering * @v: pointer to atomic_long_t * * If (@v > 0), atomically updates @v to (@v - 1) with full ordering. * Otherwise, @v is not modified and relaxed ordering is provided. * * Unsafe to use in noinstr code; use raw_atomic_long_dec_if_positive() there. * * Return: The old value of (@v - 1), regardless of whether @v was updated. */ static __always_inline long atomic_long_dec_if_positive(atomic_long_t *v) { kcsan_mb(); instrument_atomic_read_write(v, sizeof(*v)); return raw_atomic_long_dec_if_positive(v); } #define xchg(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ kcsan_mb(); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ raw_xchg(__ai_ptr, __VA_ARGS__); \ }) #define xchg_acquire(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ raw_xchg_acquire(__ai_ptr, __VA_ARGS__); \ }) #define xchg_release(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ kcsan_release(); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ raw_xchg_release(__ai_ptr, __VA_ARGS__); \ }) #define xchg_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ raw_xchg_relaxed(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ kcsan_mb(); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ raw_cmpxchg(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg_acquire(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ raw_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg_release(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ kcsan_release(); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ raw_cmpxchg_release(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ raw_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg64(ptr, ...) 
\ ({ \ typeof(ptr) __ai_ptr = (ptr); \ kcsan_mb(); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ raw_cmpxchg64(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg64_acquire(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ raw_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg64_release(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ kcsan_release(); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ raw_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg64_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ raw_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg128(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ kcsan_mb(); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ raw_cmpxchg128(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg128_acquire(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ raw_cmpxchg128_acquire(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg128_release(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ kcsan_release(); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ raw_cmpxchg128_release(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg128_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ raw_cmpxchg128_relaxed(__ai_ptr, __VA_ARGS__); \ }) #define try_cmpxchg(ptr, oldp, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ typeof(oldp) __ai_oldp = (oldp); \ kcsan_mb(); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ raw_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) #define try_cmpxchg_acquire(ptr, oldp, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ typeof(oldp) __ai_oldp = (oldp); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ raw_try_cmpxchg_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) #define try_cmpxchg_release(ptr, oldp, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ typeof(oldp) __ai_oldp = (oldp); \ kcsan_release(); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ raw_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) #define try_cmpxchg_relaxed(ptr, oldp, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ typeof(oldp) __ai_oldp = (oldp); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ raw_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) #define try_cmpxchg64(ptr, oldp, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ typeof(oldp) __ai_oldp = (oldp); \ kcsan_mb(); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ raw_try_cmpxchg64(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) #define try_cmpxchg64_acquire(ptr, oldp, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ typeof(oldp) __ai_oldp = (oldp); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ raw_try_cmpxchg64_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) #define try_cmpxchg64_release(ptr, oldp, ...) 
\ ({ \ typeof(ptr) __ai_ptr = (ptr); \ typeof(oldp) __ai_oldp = (oldp); \ kcsan_release(); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ raw_try_cmpxchg64_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) #define try_cmpxchg64_relaxed(ptr, oldp, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ typeof(oldp) __ai_oldp = (oldp); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ raw_try_cmpxchg64_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) #define try_cmpxchg128(ptr, oldp, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ typeof(oldp) __ai_oldp = (oldp); \ kcsan_mb(); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ raw_try_cmpxchg128(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) #define try_cmpxchg128_acquire(ptr, oldp, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ typeof(oldp) __ai_oldp = (oldp); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ raw_try_cmpxchg128_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) #define try_cmpxchg128_release(ptr, oldp, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ typeof(oldp) __ai_oldp = (oldp); \ kcsan_release(); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ raw_try_cmpxchg128_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) #define try_cmpxchg128_relaxed(ptr, oldp, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ typeof(oldp) __ai_oldp = (oldp); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ raw_try_cmpxchg128_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) #define cmpxchg_local(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ raw_cmpxchg_local(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg64_local(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ raw_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg128_local(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ raw_cmpxchg128_local(__ai_ptr, __VA_ARGS__); \ }) #define sync_cmpxchg(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ kcsan_mb(); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ raw_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \ }) #define try_cmpxchg_local(ptr, oldp, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ typeof(oldp) __ai_oldp = (oldp); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ raw_try_cmpxchg_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) #define try_cmpxchg64_local(ptr, oldp, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ typeof(oldp) __ai_oldp = (oldp); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ raw_try_cmpxchg64_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) #define try_cmpxchg128_local(ptr, oldp, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ typeof(oldp) __ai_oldp = (oldp); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \ raw_try_cmpxchg128_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \ }) #define sync_try_cmpxchg(ptr, ...) 
\ ({ \ typeof(ptr) __ai_ptr = (ptr); \ kcsan_mb(); \ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \ raw_sync_try_cmpxchg(__ai_ptr, __VA_ARGS__); \ }) #endif /* _LINUX_ATOMIC_INSTRUMENTED_H */ // ce5b65e0f1f8a276268b667194581d24bed219d4
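/*
 * Usage sketch (illustrative only, not part of the header above): the
 * instrumented wrappers just add KCSAN/KASAN annotations and then defer to
 * the raw_* primitives, so callers use them exactly like the plain atomic
 * API.  A minimal example built on atomic_long_try_cmpxchg() follows; the
 * function name stat_inc_saturating() is hypothetical.
 */
#include <linux/atomic.h>
#include <linux/limits.h>

/* Increment a statistics counter but never let it wrap past LONG_MAX. */
static void stat_inc_saturating(atomic_long_t *counter)
{
	long old = atomic_long_read(counter);

	/* On failure, try_cmpxchg() updates 'old' to the current value of the
	 * counter, so no explicit re-read is needed; full ordering is
	 * provided only when the exchange succeeds.
	 */
	do {
		if (old == LONG_MAX)
			return;		/* already saturated */
	} while (!atomic_long_try_cmpxchg(counter, &old, old + 1));
}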
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2008 Patrick McHardy <kaber@trash.net> * * Development of this code funded by Astaro AG (http://www.astaro.com/) */ #include <asm/unaligned.h> #include
<linux/kernel.h> #include <linux/netlink.h> #include <linux/netfilter.h> #include <linux/netfilter/nf_tables.h> #include <linux/dccp.h> #include <linux/sctp.h> #include <net/netfilter/nf_tables_core.h> #include <net/netfilter/nf_tables.h> #include <net/tcp.h> struct nft_exthdr { u8 type; u8 offset; u8 len; u8 op; u8 dreg; u8 sreg; u8 flags; }; static unsigned int optlen(const u8 *opt, unsigned int offset) { /* Beware zero-length options: make finite progress */ if (opt[offset] <= TCPOPT_NOP || opt[offset + 1] == 0) return 1; else return opt[offset + 1]; } static int nft_skb_copy_to_reg(const struct sk_buff *skb, int offset, u32 *dest, unsigned int len) { if (len % NFT_REG32_SIZE) dest[len / NFT_REG32_SIZE] = 0; return skb_copy_bits(skb, offset, dest, len); } static void nft_exthdr_ipv6_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { struct nft_exthdr *priv = nft_expr_priv(expr); u32 *dest = &regs->data[priv->dreg]; unsigned int offset = 0; int err; if (pkt->skb->protocol != htons(ETH_P_IPV6)) goto err; err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL); if (priv->flags & NFT_EXTHDR_F_PRESENT) { nft_reg_store8(dest, err >= 0); return; } else if (err < 0) { goto err; } offset += priv->offset; if (nft_skb_copy_to_reg(pkt->skb, offset, dest, priv->len) < 0) goto err; return; err: regs->verdict.code = NFT_BREAK; } /* find the offset to specified option. * * If target header is found, its offset is set in *offset and return option * number. Otherwise, return negative error. * * If the first fragment doesn't contain the End of Options it is considered * invalid. */ static int ipv4_find_option(struct net *net, struct sk_buff *skb, unsigned int *offset, int target) { unsigned char optbuf[sizeof(struct ip_options) + 40]; struct ip_options *opt = (struct ip_options *)optbuf; struct iphdr *iph, _iph; unsigned int start; bool found = false; __be32 info; int optlen; iph = skb_header_pointer(skb, 0, sizeof(_iph), &_iph); if (!iph) return -EBADMSG; start = sizeof(struct iphdr); optlen = iph->ihl * 4 - (int)sizeof(struct iphdr); if (optlen <= 0) return -ENOENT; memset(opt, 0, sizeof(struct ip_options)); /* Copy the options since __ip_options_compile() modifies * the options. */ if (skb_copy_bits(skb, start, opt->__data, optlen)) return -EBADMSG; opt->optlen = optlen; if (__ip_options_compile(net, opt, NULL, &info)) return -EBADMSG; switch (target) { case IPOPT_SSRR: case IPOPT_LSRR: if (!opt->srr) break; found = target == IPOPT_SSRR ? opt->is_strictroute : !opt->is_strictroute; if (found) *offset = opt->srr + start; break; case IPOPT_RR: if (!opt->rr) break; *offset = opt->rr + start; found = true; break; case IPOPT_RA: if (!opt->router_alert) break; *offset = opt->router_alert + start; found = true; break; default: return -EOPNOTSUPP; } return found ? 
target : -ENOENT; } static void nft_exthdr_ipv4_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { struct nft_exthdr *priv = nft_expr_priv(expr); u32 *dest = &regs->data[priv->dreg]; struct sk_buff *skb = pkt->skb; unsigned int offset; int err; if (skb->protocol != htons(ETH_P_IP)) goto err; err = ipv4_find_option(nft_net(pkt), skb, &offset, priv->type); if (priv->flags & NFT_EXTHDR_F_PRESENT) { nft_reg_store8(dest, err >= 0); return; } else if (err < 0) { goto err; } offset += priv->offset; if (nft_skb_copy_to_reg(pkt->skb, offset, dest, priv->len) < 0) goto err; return; err: regs->verdict.code = NFT_BREAK; } static void * nft_tcp_header_pointer(const struct nft_pktinfo *pkt, unsigned int len, void *buffer, unsigned int *tcphdr_len) { struct tcphdr *tcph; if (pkt->tprot != IPPROTO_TCP || pkt->fragoff) return NULL; tcph = skb_header_pointer(pkt->skb, nft_thoff(pkt), sizeof(*tcph), buffer); if (!tcph) return NULL; *tcphdr_len = __tcp_hdrlen(tcph); if (*tcphdr_len < sizeof(*tcph) || *tcphdr_len > len) return NULL; return skb_header_pointer(pkt->skb, nft_thoff(pkt), *tcphdr_len, buffer); } static void nft_exthdr_tcp_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { u8 buff[sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE]; struct nft_exthdr *priv = nft_expr_priv(expr); unsigned int i, optl, tcphdr_len, offset; u32 *dest = &regs->data[priv->dreg]; struct tcphdr *tcph; u8 *opt; tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len); if (!tcph) goto err; opt = (u8 *)tcph; for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) { optl = optlen(opt, i); if (priv->type != opt[i]) continue; if (i + optl > tcphdr_len || priv->len + priv->offset > optl) goto err; offset = i + priv->offset; if (priv->flags & NFT_EXTHDR_F_PRESENT) { nft_reg_store8(dest, 1); } else { if (priv->len % NFT_REG32_SIZE) dest[priv->len / NFT_REG32_SIZE] = 0; memcpy(dest, opt + offset, priv->len); } return; } err: if (priv->flags & NFT_EXTHDR_F_PRESENT) *dest = 0; else regs->verdict.code = NFT_BREAK; } static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { u8 buff[sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE]; struct nft_exthdr *priv = nft_expr_priv(expr); unsigned int i, optl, tcphdr_len, offset; struct tcphdr *tcph; u8 *opt; tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len); if (!tcph) goto err; if (skb_ensure_writable(pkt->skb, nft_thoff(pkt) + tcphdr_len)) goto err; tcph = (struct tcphdr *)(pkt->skb->data + nft_thoff(pkt)); opt = (u8 *)tcph; for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) { union { __be16 v16; __be32 v32; } old, new; optl = optlen(opt, i); if (priv->type != opt[i]) continue; if (i + optl > tcphdr_len || priv->len + priv->offset > optl) goto err; offset = i + priv->offset; switch (priv->len) { case 2: old.v16 = (__force __be16)get_unaligned((u16 *)(opt + offset)); new.v16 = (__force __be16)nft_reg_load16( &regs->data[priv->sreg]); switch (priv->type) { case TCPOPT_MSS: /* increase can cause connection to stall */ if (ntohs(old.v16) <= ntohs(new.v16)) return; break; } if (old.v16 == new.v16) return; put_unaligned(new.v16, (__be16*)(opt + offset)); inet_proto_csum_replace2(&tcph->check, pkt->skb, old.v16, new.v16, false); break; case 4: new.v32 = nft_reg_load_be32(&regs->data[priv->sreg]); old.v32 = (__force __be32)get_unaligned((u32 *)(opt + offset)); if (old.v32 == new.v32) return; put_unaligned(new.v32, (__be32*)(opt + offset)); 
inet_proto_csum_replace4(&tcph->check, pkt->skb, old.v32, new.v32, false); break; default: WARN_ON_ONCE(1); break; } return; } return; err: regs->verdict.code = NFT_BREAK; } static void nft_exthdr_tcp_strip_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { u8 buff[sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE]; struct nft_exthdr *priv = nft_expr_priv(expr); unsigned int i, tcphdr_len, optl; struct tcphdr *tcph; u8 *opt; tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len); if (!tcph) goto err; if (skb_ensure_writable(pkt->skb, nft_thoff(pkt) + tcphdr_len)) goto drop; tcph = (struct tcphdr *)(pkt->skb->data + nft_thoff(pkt)); opt = (u8 *)tcph; for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) { unsigned int j; optl = optlen(opt, i); if (priv->type != opt[i]) continue; if (i + optl > tcphdr_len) goto drop; for (j = 0; j < optl; ++j) { u16 n = TCPOPT_NOP; u16 o = opt[i+j]; if ((i + j) % 2 == 0) { o <<= 8; n <<= 8; } inet_proto_csum_replace2(&tcph->check, pkt->skb, htons(o), htons(n), false); } memset(opt + i, TCPOPT_NOP, optl); return; } /* option not found, continue. This allows to do multiple * option removals per rule. */ return; err: regs->verdict.code = NFT_BREAK; return; drop: /* can't remove, no choice but to drop */ regs->verdict.code = NF_DROP; } static void nft_exthdr_sctp_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { unsigned int offset = nft_thoff(pkt) + sizeof(struct sctphdr); struct nft_exthdr *priv = nft_expr_priv(expr); u32 *dest = &regs->data[priv->dreg]; const struct sctp_chunkhdr *sch; struct sctp_chunkhdr _sch; if (pkt->tprot != IPPROTO_SCTP) goto err; do { sch = skb_header_pointer(pkt->skb, offset, sizeof(_sch), &_sch); if (!sch || !sch->length) break; if (sch->type == priv->type) { if (priv->flags & NFT_EXTHDR_F_PRESENT) { nft_reg_store8(dest, true); return; } if (priv->offset + priv->len > ntohs(sch->length) || offset + ntohs(sch->length) > pkt->skb->len) break; if (nft_skb_copy_to_reg(pkt->skb, offset + priv->offset, dest, priv->len) < 0) break; return; } offset += SCTP_PAD4(ntohs(sch->length)); } while (offset < pkt->skb->len); err: if (priv->flags & NFT_EXTHDR_F_PRESENT) nft_reg_store8(dest, false); else regs->verdict.code = NFT_BREAK; } static void nft_exthdr_dccp_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { struct nft_exthdr *priv = nft_expr_priv(expr); unsigned int thoff, dataoff, optoff, optlen, i; u32 *dest = &regs->data[priv->dreg]; const struct dccp_hdr *dh; struct dccp_hdr _dh; if (pkt->tprot != IPPROTO_DCCP || pkt->fragoff) goto err; thoff = nft_thoff(pkt); dh = skb_header_pointer(pkt->skb, thoff, sizeof(_dh), &_dh); if (!dh) goto err; dataoff = dh->dccph_doff * sizeof(u32); optoff = __dccp_hdr_len(dh); if (dataoff <= optoff) goto err; optlen = dataoff - optoff; for (i = 0; i < optlen; ) { /* Options 0 (DCCPO_PADDING) - 31 (DCCPO_MAX_RESERVED) are 1B in * the length; the remaining options are at least 2B long. In * all cases, the first byte contains the option type. In * multi-byte options, the second byte contains the option * length, which must be at least two: 1 for the type plus 1 for * the length plus 0-253 for any following option data. We * aren't interested in the option data, only the type and the * length, so we don't need to read more than two bytes at a * time. 
*/ unsigned int buflen = optlen - i; u8 buf[2], *bufp; u8 type, len; if (buflen > sizeof(buf)) buflen = sizeof(buf); bufp = skb_header_pointer(pkt->skb, thoff + optoff + i, buflen, &buf); if (!bufp) goto err; type = bufp[0]; if (type == priv->type) { nft_reg_store8(dest, 1); return; } if (type <= DCCPO_MAX_RESERVED) { i++; continue; } if (buflen < 2) goto err; len = bufp[1]; if (len < 2) goto err; i += len; } err: *dest = 0; } static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = { [NFTA_EXTHDR_DREG] = { .type = NLA_U32 }, [NFTA_EXTHDR_TYPE] = { .type = NLA_U8 }, [NFTA_EXTHDR_OFFSET] = { .type = NLA_U32 }, [NFTA_EXTHDR_LEN] = NLA_POLICY_MAX(NLA_BE32, 255), [NFTA_EXTHDR_FLAGS] = { .type = NLA_U32 }, [NFTA_EXTHDR_OP] = NLA_POLICY_MAX(NLA_BE32, 255), [NFTA_EXTHDR_SREG] = { .type = NLA_U32 }, }; static int nft_exthdr_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_exthdr *priv = nft_expr_priv(expr); u32 offset, len, flags = 0, op = NFT_EXTHDR_OP_IPV6; int err; if (!tb[NFTA_EXTHDR_DREG] || !tb[NFTA_EXTHDR_TYPE] || !tb[NFTA_EXTHDR_OFFSET] || !tb[NFTA_EXTHDR_LEN]) return -EINVAL; err = nft_parse_u32_check(tb[NFTA_EXTHDR_OFFSET], U8_MAX, &offset); if (err < 0) return err; err = nft_parse_u32_check(tb[NFTA_EXTHDR_LEN], U8_MAX, &len); if (err < 0) return err; if (tb[NFTA_EXTHDR_FLAGS]) { err = nft_parse_u32_check(tb[NFTA_EXTHDR_FLAGS], U8_MAX, &flags); if (err < 0) return err; if (flags & ~NFT_EXTHDR_F_PRESENT) return -EINVAL; } if (tb[NFTA_EXTHDR_OP]) { err = nft_parse_u32_check(tb[NFTA_EXTHDR_OP], U8_MAX, &op); if (err < 0) return err; } priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]); priv->offset = offset; priv->len = len; priv->flags = flags; priv->op = op; return nft_parse_register_store(ctx, tb[NFTA_EXTHDR_DREG], &priv->dreg, NULL, NFT_DATA_VALUE, priv->len); } static int nft_exthdr_tcp_set_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_exthdr *priv = nft_expr_priv(expr); u32 offset, len, flags = 0, op = NFT_EXTHDR_OP_IPV6; int err; if (!tb[NFTA_EXTHDR_SREG] || !tb[NFTA_EXTHDR_TYPE] || !tb[NFTA_EXTHDR_OFFSET] || !tb[NFTA_EXTHDR_LEN]) return -EINVAL; if (tb[NFTA_EXTHDR_DREG] || tb[NFTA_EXTHDR_FLAGS]) return -EINVAL; err = nft_parse_u32_check(tb[NFTA_EXTHDR_OFFSET], U8_MAX, &offset); if (err < 0) return err; err = nft_parse_u32_check(tb[NFTA_EXTHDR_LEN], U8_MAX, &len); if (err < 0) return err; if (offset < 2) return -EOPNOTSUPP; switch (len) { case 2: break; case 4: break; default: return -EOPNOTSUPP; } err = nft_parse_u32_check(tb[NFTA_EXTHDR_OP], U8_MAX, &op); if (err < 0) return err; priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]); priv->offset = offset; priv->len = len; priv->flags = flags; priv->op = op; return nft_parse_register_load(tb[NFTA_EXTHDR_SREG], &priv->sreg, priv->len); } static int nft_exthdr_tcp_strip_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_exthdr *priv = nft_expr_priv(expr); if (tb[NFTA_EXTHDR_SREG] || tb[NFTA_EXTHDR_DREG] || tb[NFTA_EXTHDR_FLAGS] || tb[NFTA_EXTHDR_OFFSET] || tb[NFTA_EXTHDR_LEN]) return -EINVAL; if (!tb[NFTA_EXTHDR_TYPE]) return -EINVAL; priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]); priv->op = NFT_EXTHDR_OP_TCPOPT; return 0; } static int nft_exthdr_ipv4_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_exthdr *priv = nft_expr_priv(expr); int err = nft_exthdr_init(ctx, expr, tb); if (err < 0) return 
err; switch (priv->type) { case IPOPT_SSRR: case IPOPT_LSRR: case IPOPT_RR: case IPOPT_RA: break; default: return -EOPNOTSUPP; } return 0; } static int nft_exthdr_dccp_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_exthdr *priv = nft_expr_priv(expr); int err = nft_exthdr_init(ctx, expr, tb); if (err < 0) return err; if (!(priv->flags & NFT_EXTHDR_F_PRESENT)) return -EOPNOTSUPP; return 0; } static int nft_exthdr_dump_common(struct sk_buff *skb, const struct nft_exthdr *priv) { if (nla_put_u8(skb, NFTA_EXTHDR_TYPE, priv->type)) goto nla_put_failure; if (nla_put_be32(skb, NFTA_EXTHDR_OFFSET, htonl(priv->offset))) goto nla_put_failure; if (nla_put_be32(skb, NFTA_EXTHDR_LEN, htonl(priv->len))) goto nla_put_failure; if (nla_put_be32(skb, NFTA_EXTHDR_FLAGS, htonl(priv->flags))) goto nla_put_failure; if (nla_put_be32(skb, NFTA_EXTHDR_OP, htonl(priv->op))) goto nla_put_failure; return 0; nla_put_failure: return -1; } static int nft_exthdr_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset) { const struct nft_exthdr *priv = nft_expr_priv(expr); if (nft_dump_register(skb, NFTA_EXTHDR_DREG, priv->dreg)) return -1; return nft_exthdr_dump_common(skb, priv); } static int nft_exthdr_dump_set(struct sk_buff *skb, const struct nft_expr *expr, bool reset) { const struct nft_exthdr *priv = nft_expr_priv(expr); if (nft_dump_register(skb, NFTA_EXTHDR_SREG, priv->sreg)) return -1; return nft_exthdr_dump_common(skb, priv); } static int nft_exthdr_dump_strip(struct sk_buff *skb, const struct nft_expr *expr, bool reset) { const struct nft_exthdr *priv = nft_expr_priv(expr); return nft_exthdr_dump_common(skb, priv); } static bool nft_exthdr_reduce(struct nft_regs_track *track, const struct nft_expr *expr) { const struct nft_exthdr *priv = nft_expr_priv(expr); const struct nft_exthdr *exthdr; if (!nft_reg_track_cmp(track, expr, priv->dreg)) { nft_reg_track_update(track, expr, priv->dreg, priv->len); return false; } exthdr = nft_expr_priv(track->regs[priv->dreg].selector); if (priv->type != exthdr->type || priv->op != exthdr->op || priv->flags != exthdr->flags || priv->offset != exthdr->offset || priv->len != exthdr->len) { nft_reg_track_update(track, expr, priv->dreg, priv->len); return false; } if (!track->regs[priv->dreg].bitwise) return true; return nft_expr_reduce_bitwise(track, expr); } static const struct nft_expr_ops nft_exthdr_ipv6_ops = { .type = &nft_exthdr_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)), .eval = nft_exthdr_ipv6_eval, .init = nft_exthdr_init, .dump = nft_exthdr_dump, .reduce = nft_exthdr_reduce, }; static const struct nft_expr_ops nft_exthdr_ipv4_ops = { .type = &nft_exthdr_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)), .eval = nft_exthdr_ipv4_eval, .init = nft_exthdr_ipv4_init, .dump = nft_exthdr_dump, .reduce = nft_exthdr_reduce, }; static const struct nft_expr_ops nft_exthdr_tcp_ops = { .type = &nft_exthdr_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)), .eval = nft_exthdr_tcp_eval, .init = nft_exthdr_init, .dump = nft_exthdr_dump, .reduce = nft_exthdr_reduce, }; static const struct nft_expr_ops nft_exthdr_tcp_set_ops = { .type = &nft_exthdr_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)), .eval = nft_exthdr_tcp_set_eval, .init = nft_exthdr_tcp_set_init, .dump = nft_exthdr_dump_set, .reduce = NFT_REDUCE_READONLY, }; static const struct nft_expr_ops nft_exthdr_tcp_strip_ops = { .type = &nft_exthdr_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)), .eval = 
nft_exthdr_tcp_strip_eval, .init = nft_exthdr_tcp_strip_init, .dump = nft_exthdr_dump_strip, .reduce = NFT_REDUCE_READONLY, }; static const struct nft_expr_ops nft_exthdr_sctp_ops = { .type = &nft_exthdr_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)), .eval = nft_exthdr_sctp_eval, .init = nft_exthdr_init, .dump = nft_exthdr_dump, .reduce = nft_exthdr_reduce, }; static const struct nft_expr_ops nft_exthdr_dccp_ops = { .type = &nft_exthdr_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)), .eval = nft_exthdr_dccp_eval, .init = nft_exthdr_dccp_init, .dump = nft_exthdr_dump, .reduce = nft_exthdr_reduce, }; static const struct nft_expr_ops * nft_exthdr_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) { u32 op; if (!tb[NFTA_EXTHDR_OP]) return &nft_exthdr_ipv6_ops; if (tb[NFTA_EXTHDR_SREG] && tb[NFTA_EXTHDR_DREG]) return ERR_PTR(-EOPNOTSUPP); op = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OP])); switch (op) { case NFT_EXTHDR_OP_TCPOPT: if (tb[NFTA_EXTHDR_SREG]) return &nft_exthdr_tcp_set_ops; if (tb[NFTA_EXTHDR_DREG]) return &nft_exthdr_tcp_ops; return &nft_exthdr_tcp_strip_ops; case NFT_EXTHDR_OP_IPV6: if (tb[NFTA_EXTHDR_DREG]) return &nft_exthdr_ipv6_ops; break; case NFT_EXTHDR_OP_IPV4: if (ctx->family != NFPROTO_IPV6) { if (tb[NFTA_EXTHDR_DREG]) return &nft_exthdr_ipv4_ops; } break; case NFT_EXTHDR_OP_SCTP: if (tb[NFTA_EXTHDR_DREG]) return &nft_exthdr_sctp_ops; break; case NFT_EXTHDR_OP_DCCP: if (tb[NFTA_EXTHDR_DREG]) return &nft_exthdr_dccp_ops; break; } return ERR_PTR(-EOPNOTSUPP); } struct nft_expr_type nft_exthdr_type __read_mostly = { .name = "exthdr", .select_ops = nft_exthdr_select_ops, .policy = nft_exthdr_policy, .maxattr = NFTA_EXTHDR_MAX, .owner = THIS_MODULE, };
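/*
 * Usage sketch (illustrative only, not part of nft_exthdr.c): all of the TCP
 * option handlers above share one walk -- start right after the fixed TCP
 * header and advance by optlen(), which treats EOL/NOP and a corrupt zero
 * length byte as one-byte options so the loop always makes forward progress.
 * The stand-alone user-space program below mirrors that walk; the sample
 * option block and helper names are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define TCPOPT_EOL	0	/* end of option list */
#define TCPOPT_NOP	1	/* no-operation (padding) */

/* Same progression rule as the kernel's optlen() above. */
static unsigned int opt_len(const uint8_t *opt, unsigned int off)
{
	if (opt[off] <= TCPOPT_NOP || opt[off + 1] == 0)
		return 1;
	return opt[off + 1];
}

int main(void)
{
	/* MSS(1460), NOP, NOP, SACK-permitted: a typical SYN option block */
	static const uint8_t opts[] = { 2, 4, 0x05, 0xb4, 1, 1, 4, 2 };
	unsigned int optspace = sizeof(opts);
	unsigned int i, l = 0;

	/* Stop one byte early, as the kernel loop does, so reading
	 * opt[off + 1] inside opt_len() never runs past the option area.
	 */
	for (i = 0; i + 1 < optspace; i += l) {
		l = opt_len(opts, i);
		printf("kind %u, len %u\n", (unsigned int)opts[i], l);
		if (opts[i] == TCPOPT_EOL)
			break;
	}
	return 0;
}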
/* Connection tracking via netlink socket. Allows for user space * protocol helpers and general trouble making from userspace. * * (C) 2001 by Jay Schulist <jschlst@samba.org> * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org> * (C) 2003 by Patrick Mchardy <kaber@trash.net> * (C) 2005-2012 by Pablo Neira Ayuso <pablo@netfilter.org> * * Initial connection tracking via netlink development funded and * generally made possible by Network Robots, Inc. (www.networkrobots.com) * * Further development of this code funded by Astaro AG (http://www.astaro.com) * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference.
*/ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/rculist.h> #include <linux/rculist_nulls.h> #include <linux/types.h> #include <linux/timer.h> #include <linux/security.h> #include <linux/skbuff.h> #include <linux/errno.h> #include <linux/netlink.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/siphash.h> #include <linux/netfilter.h> #include <net/netlink.h> #include <net/sock.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack_expect.h> #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_seqadj.h> #include <net/netfilter/nf_conntrack_l4proto.h> #include <net/netfilter/nf_conntrack_tuple.h> #include <net/netfilter/nf_conntrack_acct.h> #include <net/netfilter/nf_conntrack_zones.h> #include <net/netfilter/nf_conntrack_timestamp.h> #include <net/netfilter/nf_conntrack_labels.h> #include <net/netfilter/nf_conntrack_synproxy.h> #if IS_ENABLED(CONFIG_NF_NAT) #include <net/netfilter/nf_nat.h> #include <net/netfilter/nf_nat_helper.h> #endif #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/nfnetlink_conntrack.h> #include "nf_internals.h" MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("List and change connection tracking table"); struct ctnetlink_list_dump_ctx { struct nf_conn *last; unsigned int cpu; bool done; }; static int ctnetlink_dump_tuples_proto(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple, const struct nf_conntrack_l4proto *l4proto) { int ret = 0; struct nlattr *nest_parms; nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO); if (!nest_parms) goto nla_put_failure; if (nla_put_u8(skb, CTA_PROTO_NUM, tuple->dst.protonum)) goto nla_put_failure; if (likely(l4proto->tuple_to_nlattr)) ret = l4proto->tuple_to_nlattr(skb, tuple); nla_nest_end(skb, nest_parms); return ret; nla_put_failure: return -1; } static int ipv4_tuple_to_nlattr(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple) { if (nla_put_in_addr(skb, CTA_IP_V4_SRC, tuple->src.u3.ip) || nla_put_in_addr(skb, CTA_IP_V4_DST, tuple->dst.u3.ip)) return -EMSGSIZE; return 0; } static int ipv6_tuple_to_nlattr(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple) { if (nla_put_in6_addr(skb, CTA_IP_V6_SRC, &tuple->src.u3.in6) || nla_put_in6_addr(skb, CTA_IP_V6_DST, &tuple->dst.u3.in6)) return -EMSGSIZE; return 0; } static int ctnetlink_dump_tuples_ip(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple) { int ret = 0; struct nlattr *nest_parms; nest_parms = nla_nest_start(skb, CTA_TUPLE_IP); if (!nest_parms) goto nla_put_failure; switch (tuple->src.l3num) { case NFPROTO_IPV4: ret = ipv4_tuple_to_nlattr(skb, tuple); break; case NFPROTO_IPV6: ret = ipv6_tuple_to_nlattr(skb, tuple); break; } nla_nest_end(skb, nest_parms); return ret; nla_put_failure: return -1; } static int ctnetlink_dump_tuples(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple) { const struct nf_conntrack_l4proto *l4proto; int ret; rcu_read_lock(); ret = ctnetlink_dump_tuples_ip(skb, tuple); if (ret >= 0) { l4proto = nf_ct_l4proto_find(tuple->dst.protonum); ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto); } rcu_read_unlock(); return ret; } static int ctnetlink_dump_zone_id(struct sk_buff *skb, int attrtype, const struct nf_conntrack_zone *zone, int dir) { if (zone->id == NF_CT_DEFAULT_ZONE_ID || zone->dir != dir) return 0; if (nla_put_be16(skb, attrtype, htons(zone->id))) goto nla_put_failure; return 0; nla_put_failure: 
return -1; } static int ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct) { if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status))) goto nla_put_failure; return 0; nla_put_failure: return -1; } static int ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct, bool skip_zero) { long timeout; if (nf_ct_is_confirmed(ct)) timeout = nf_ct_expires(ct) / HZ; else timeout = ct->timeout / HZ; if (skip_zero && timeout == 0) return 0; if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout))) goto nla_put_failure; return 0; nla_put_failure: return -1; } static int ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct, bool destroy) { const struct nf_conntrack_l4proto *l4proto; struct nlattr *nest_proto; int ret; l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct)); if (!l4proto->to_nlattr) return 0; nest_proto = nla_nest_start(skb, CTA_PROTOINFO); if (!nest_proto) goto nla_put_failure; ret = l4proto->to_nlattr(skb, nest_proto, ct, destroy); nla_nest_end(skb, nest_proto); return ret; nla_put_failure: return -1; } static int ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct nf_conn *ct) { struct nlattr *nest_helper; const struct nf_conn_help *help = nfct_help(ct); struct nf_conntrack_helper *helper; if (!help) return 0; rcu_read_lock(); helper = rcu_dereference(help->helper); if (!helper) goto out; nest_helper = nla_nest_start(skb, CTA_HELP); if (!nest_helper) goto nla_put_failure; if (nla_put_string(skb, CTA_HELP_NAME, helper->name)) goto nla_put_failure; if (helper->to_nlattr) helper->to_nlattr(skb, ct); nla_nest_end(skb, nest_helper); out: rcu_read_unlock(); return 0; nla_put_failure: rcu_read_unlock(); return -1; } static int dump_counters(struct sk_buff *skb, struct nf_conn_acct *acct, enum ip_conntrack_dir dir, int type) { enum ctattr_type attr = dir ? 
CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG; struct nf_conn_counter *counter = acct->counter; struct nlattr *nest_count; u64 pkts, bytes; if (type == IPCTNL_MSG_CT_GET_CTRZERO) { pkts = atomic64_xchg(&counter[dir].packets, 0); bytes = atomic64_xchg(&counter[dir].bytes, 0); } else { pkts = atomic64_read(&counter[dir].packets); bytes = atomic64_read(&counter[dir].bytes); } nest_count = nla_nest_start(skb, attr); if (!nest_count) goto nla_put_failure; if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts), CTA_COUNTERS_PAD) || nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes), CTA_COUNTERS_PAD)) goto nla_put_failure; nla_nest_end(skb, nest_count); return 0; nla_put_failure: return -1; } static int ctnetlink_dump_acct(struct sk_buff *skb, const struct nf_conn *ct, int type) { struct nf_conn_acct *acct = nf_conn_acct_find(ct); if (!acct) return 0; if (dump_counters(skb, acct, IP_CT_DIR_ORIGINAL, type) < 0) return -1; if (dump_counters(skb, acct, IP_CT_DIR_REPLY, type) < 0) return -1; return 0; } static int ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct) { struct nlattr *nest_count; const struct nf_conn_tstamp *tstamp; tstamp = nf_conn_tstamp_find(ct); if (!tstamp) return 0; nest_count = nla_nest_start(skb, CTA_TIMESTAMP); if (!nest_count) goto nla_put_failure; if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start), CTA_TIMESTAMP_PAD) || (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP, cpu_to_be64(tstamp->stop), CTA_TIMESTAMP_PAD))) goto nla_put_failure; nla_nest_end(skb, nest_count); return 0; nla_put_failure: return -1; } #ifdef CONFIG_NF_CONNTRACK_MARK static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct, bool dump) { u32 mark = READ_ONCE(ct->mark); if (!mark && !dump) return 0; if (nla_put_be32(skb, CTA_MARK, htonl(mark))) goto nla_put_failure; return 0; nla_put_failure: return -1; } #else #define ctnetlink_dump_mark(a, b, c) (0) #endif #ifdef CONFIG_NF_CONNTRACK_SECMARK static int ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct) { struct nlattr *nest_secctx; int len, ret; char *secctx; ret = security_secid_to_secctx(ct->secmark, &secctx, &len); if (ret) return 0; ret = -1; nest_secctx = nla_nest_start(skb, CTA_SECCTX); if (!nest_secctx) goto nla_put_failure; if (nla_put_string(skb, CTA_SECCTX_NAME, secctx)) goto nla_put_failure; nla_nest_end(skb, nest_secctx); ret = 0; nla_put_failure: security_release_secctx(secctx, len); return ret; } #else #define ctnetlink_dump_secctx(a, b) (0) #endif #ifdef CONFIG_NF_CONNTRACK_LABELS static inline int ctnetlink_label_size(const struct nf_conn *ct) { struct nf_conn_labels *labels = nf_ct_labels_find(ct); if (!labels) return 0; return nla_total_size(sizeof(labels->bits)); } static int ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct) { struct nf_conn_labels *labels = nf_ct_labels_find(ct); unsigned int i; if (!labels) return 0; i = 0; do { if (labels->bits[i] != 0) return nla_put(skb, CTA_LABELS, sizeof(labels->bits), labels->bits); i++; } while (i < ARRAY_SIZE(labels->bits)); return 0; } #else #define ctnetlink_dump_labels(a, b) (0) #define ctnetlink_label_size(a) (0) #endif #define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple) static int ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct) { struct nlattr *nest_parms; if (!(ct->status & IPS_EXPECTED)) return 0; nest_parms = nla_nest_start(skb, CTA_TUPLE_MASTER); if (!nest_parms) goto nla_put_failure; if (ctnetlink_dump_tuples(skb, 
master_tuple(ct)) < 0) goto nla_put_failure; nla_nest_end(skb, nest_parms); return 0; nla_put_failure: return -1; } static int dump_ct_seq_adj(struct sk_buff *skb, const struct nf_ct_seqadj *seq, int type) { struct nlattr *nest_parms; nest_parms = nla_nest_start(skb, type); if (!nest_parms) goto nla_put_failure; if (nla_put_be32(skb, CTA_SEQADJ_CORRECTION_POS, htonl(seq->correction_pos)) || nla_put_be32(skb, CTA_SEQADJ_OFFSET_BEFORE, htonl(seq->offset_before)) || nla_put_be32(skb, CTA_SEQADJ_OFFSET_AFTER, htonl(seq->offset_after))) goto nla_put_failure; nla_nest_end(skb, nest_parms); return 0; nla_put_failure: return -1; } static int ctnetlink_dump_ct_seq_adj(struct sk_buff *skb, struct nf_conn *ct) { struct nf_conn_seqadj *seqadj = nfct_seqadj(ct); struct nf_ct_seqadj *seq; if (!(ct->status & IPS_SEQ_ADJUST) || !seqadj) return 0; spin_lock_bh(&ct->lock); seq = &seqadj->seq[IP_CT_DIR_ORIGINAL]; if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_ORIG) == -1) goto err; seq = &seqadj->seq[IP_CT_DIR_REPLY]; if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_REPLY) == -1) goto err; spin_unlock_bh(&ct->lock); return 0; err: spin_unlock_bh(&ct->lock); return -1; } static int ctnetlink_dump_ct_synproxy(struct sk_buff *skb, struct nf_conn *ct) { struct nf_conn_synproxy *synproxy = nfct_synproxy(ct); struct nlattr *nest_parms; if (!synproxy) return 0; nest_parms = nla_nest_start(skb, CTA_SYNPROXY); if (!nest_parms) goto nla_put_failure; if (nla_put_be32(skb, CTA_SYNPROXY_ISN, htonl(synproxy->isn)) || nla_put_be32(skb, CTA_SYNPROXY_ITS, htonl(synproxy->its)) || nla_put_be32(skb, CTA_SYNPROXY_TSOFF, htonl(synproxy->tsoff))) goto nla_put_failure; nla_nest_end(skb, nest_parms); return 0; nla_put_failure: return -1; } static int ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct) { __be32 id = (__force __be32)nf_ct_get_id(ct); if (nla_put_be32(skb, CTA_ID, id)) goto nla_put_failure; return 0; nla_put_failure: return -1; } static int ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct) { if (nla_put_be32(skb, CTA_USE, htonl(refcount_read(&ct->ct_general.use)))) goto nla_put_failure; return 0; nla_put_failure: return -1; } /* all these functions access ct->ext. Caller must either hold a reference * on ct or prevent its deletion by holding either the bucket spinlock or * pcpu dying list lock. 
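 * (e.g. ctnetlink_dump_table() below holds the bucket spinlock while it
 * fills each entry, which satisfies this requirement.)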
*/ static int ctnetlink_dump_extinfo(struct sk_buff *skb, struct nf_conn *ct, u32 type) { if (ctnetlink_dump_acct(skb, ct, type) < 0 || ctnetlink_dump_timestamp(skb, ct) < 0 || ctnetlink_dump_helpinfo(skb, ct) < 0 || ctnetlink_dump_labels(skb, ct) < 0 || ctnetlink_dump_ct_seq_adj(skb, ct) < 0 || ctnetlink_dump_ct_synproxy(skb, ct) < 0) return -1; return 0; } static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct) { if (ctnetlink_dump_status(skb, ct) < 0 || ctnetlink_dump_mark(skb, ct, true) < 0 || ctnetlink_dump_secctx(skb, ct) < 0 || ctnetlink_dump_id(skb, ct) < 0 || ctnetlink_dump_use(skb, ct) < 0 || ctnetlink_dump_master(skb, ct) < 0) return -1; if (!test_bit(IPS_OFFLOAD_BIT, &ct->status) && (ctnetlink_dump_timeout(skb, ct, false) < 0 || ctnetlink_dump_protoinfo(skb, ct, false) < 0)) return -1; return 0; } static int ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, struct nf_conn *ct, bool extinfo, unsigned int flags) { const struct nf_conntrack_zone *zone; struct nlmsghdr *nlh; struct nlattr *nest_parms; unsigned int event; if (portid) flags |= NLM_F_MULTI; event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_NEW); nlh = nfnl_msg_put(skb, portid, seq, event, flags, nf_ct_l3num(ct), NFNETLINK_V0, 0); if (!nlh) goto nlmsg_failure; zone = nf_ct_zone(ct); nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG); if (!nest_parms) goto nla_put_failure; if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0) goto nla_put_failure; if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone, NF_CT_ZONE_DIR_ORIG) < 0) goto nla_put_failure; nla_nest_end(skb, nest_parms); nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY); if (!nest_parms) goto nla_put_failure; if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0) goto nla_put_failure; if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone, NF_CT_ZONE_DIR_REPL) < 0) goto nla_put_failure; nla_nest_end(skb, nest_parms); if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone, NF_CT_DEFAULT_ZONE_DIR) < 0) goto nla_put_failure; if (ctnetlink_dump_info(skb, ct) < 0) goto nla_put_failure; if (extinfo && ctnetlink_dump_extinfo(skb, ct, type) < 0) goto nla_put_failure; nlmsg_end(skb, nlh); return skb->len; nlmsg_failure: nla_put_failure: nlmsg_cancel(skb, nlh); return -1; } static const struct nla_policy cta_ip_nla_policy[CTA_IP_MAX + 1] = { [CTA_IP_V4_SRC] = { .type = NLA_U32 }, [CTA_IP_V4_DST] = { .type = NLA_U32 }, [CTA_IP_V6_SRC] = { .len = sizeof(__be32) * 4 }, [CTA_IP_V6_DST] = { .len = sizeof(__be32) * 4 }, }; #if defined(CONFIG_NETFILTER_NETLINK_GLUE_CT) || defined(CONFIG_NF_CONNTRACK_EVENTS) static size_t ctnetlink_proto_size(const struct nf_conn *ct) { const struct nf_conntrack_l4proto *l4proto; size_t len, len4 = 0; len = nla_policy_len(cta_ip_nla_policy, CTA_IP_MAX + 1); len *= 3u; /* ORIG, REPLY, MASTER */ l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct)); len += l4proto->nlattr_size; if (l4proto->nlattr_tuple_size) { len4 = l4proto->nlattr_tuple_size(); len4 *= 3u; /* ORIG, REPLY, MASTER */ } return len + len4; } #endif static inline size_t ctnetlink_acct_size(const struct nf_conn *ct) { if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT)) return 0; return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */ + 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */ + 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */ ; } static inline int ctnetlink_secctx_size(const struct nf_conn *ct) { #ifdef CONFIG_NF_CONNTRACK_SECMARK int len, ret; ret = 
security_secid_to_secctx(ct->secmark, NULL, &len); if (ret) return 0; return nla_total_size(0) /* CTA_SECCTX */ + nla_total_size(sizeof(char) * len); /* CTA_SECCTX_NAME */ #else return 0; #endif } static inline size_t ctnetlink_timestamp_size(const struct nf_conn *ct) { #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP)) return 0; return nla_total_size(0) + 2 * nla_total_size_64bit(sizeof(uint64_t)); #else return 0; #endif } #ifdef CONFIG_NF_CONNTRACK_EVENTS static size_t ctnetlink_nlmsg_size(const struct nf_conn *ct) { return NLMSG_ALIGN(sizeof(struct nfgenmsg)) + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */ + 3 * nla_total_size(0) /* CTA_TUPLE_IP */ + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */ + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */ + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */ + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */ + ctnetlink_acct_size(ct) + ctnetlink_timestamp_size(ct) + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */ + nla_total_size(0) /* CTA_PROTOINFO */ + nla_total_size(0) /* CTA_HELP */ + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */ + ctnetlink_secctx_size(ct) #if IS_ENABLED(CONFIG_NF_NAT) + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */ + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */ #endif #ifdef CONFIG_NF_CONNTRACK_MARK + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */ #endif #ifdef CONFIG_NF_CONNTRACK_ZONES + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */ #endif + ctnetlink_proto_size(ct) + ctnetlink_label_size(ct) ; } static int ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item) { const struct nf_conntrack_zone *zone; struct net *net; struct nlmsghdr *nlh; struct nlattr *nest_parms; struct nf_conn *ct = item->ct; struct sk_buff *skb; unsigned int type; unsigned int flags = 0, group; int err; if (events & (1 << IPCT_DESTROY)) { type = IPCTNL_MSG_CT_DELETE; group = NFNLGRP_CONNTRACK_DESTROY; } else if (events & ((1 << IPCT_NEW) | (1 << IPCT_RELATED))) { type = IPCTNL_MSG_CT_NEW; flags = NLM_F_CREATE|NLM_F_EXCL; group = NFNLGRP_CONNTRACK_NEW; } else if (events) { type = IPCTNL_MSG_CT_NEW; group = NFNLGRP_CONNTRACK_UPDATE; } else return 0; net = nf_ct_net(ct); if (!item->report && !nfnetlink_has_listeners(net, group)) return 0; skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC); if (skb == NULL) goto errout; type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, type); nlh = nfnl_msg_put(skb, item->portid, 0, type, flags, nf_ct_l3num(ct), NFNETLINK_V0, 0); if (!nlh) goto nlmsg_failure; zone = nf_ct_zone(ct); nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG); if (!nest_parms) goto nla_put_failure; if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0) goto nla_put_failure; if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone, NF_CT_ZONE_DIR_ORIG) < 0) goto nla_put_failure; nla_nest_end(skb, nest_parms); nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY); if (!nest_parms) goto nla_put_failure; if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0) goto nla_put_failure; if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone, NF_CT_ZONE_DIR_REPL) < 0) goto nla_put_failure; nla_nest_end(skb, nest_parms); if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone, NF_CT_DEFAULT_ZONE_DIR) < 0) goto nla_put_failure; if (ctnetlink_dump_id(skb, ct) < 0) goto nla_put_failure; if (ctnetlink_dump_status(skb, ct) < 0) goto nla_put_failure; if (events & (1 << IPCT_DESTROY)) { if (ctnetlink_dump_timeout(skb, ct, true) < 0) 
goto nla_put_failure; if (ctnetlink_dump_acct(skb, ct, type) < 0 || ctnetlink_dump_timestamp(skb, ct) < 0 || ctnetlink_dump_protoinfo(skb, ct, true) < 0) goto nla_put_failure; } else { if (ctnetlink_dump_timeout(skb, ct, false) < 0) goto nla_put_failure; if (events & (1 << IPCT_PROTOINFO) && ctnetlink_dump_protoinfo(skb, ct, false) < 0) goto nla_put_failure; if ((events & (1 << IPCT_HELPER) || nfct_help(ct)) && ctnetlink_dump_helpinfo(skb, ct) < 0) goto nla_put_failure; #ifdef CONFIG_NF_CONNTRACK_SECMARK if ((events & (1 << IPCT_SECMARK) || ct->secmark) && ctnetlink_dump_secctx(skb, ct) < 0) goto nla_put_failure; #endif if (events & (1 << IPCT_LABEL) && ctnetlink_dump_labels(skb, ct) < 0) goto nla_put_failure; if (events & (1 << IPCT_RELATED) && ctnetlink_dump_master(skb, ct) < 0) goto nla_put_failure; if (events & (1 << IPCT_SEQADJ) && ctnetlink_dump_ct_seq_adj(skb, ct) < 0) goto nla_put_failure; if (events & (1 << IPCT_SYNPROXY) && ctnetlink_dump_ct_synproxy(skb, ct) < 0) goto nla_put_failure; } #ifdef CONFIG_NF_CONNTRACK_MARK if (ctnetlink_dump_mark(skb, ct, events & (1 << IPCT_MARK))) goto nla_put_failure; #endif nlmsg_end(skb, nlh); err = nfnetlink_send(skb, net, item->portid, group, item->report, GFP_ATOMIC); if (err == -ENOBUFS || err == -EAGAIN) return -ENOBUFS; return 0; nla_put_failure: nlmsg_cancel(skb, nlh); nlmsg_failure: kfree_skb(skb); errout: if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0) return -ENOBUFS; return 0; } #endif /* CONFIG_NF_CONNTRACK_EVENTS */ static int ctnetlink_done(struct netlink_callback *cb) { if (cb->args[1]) nf_ct_put((struct nf_conn *)cb->args[1]); kfree(cb->data); return 0; } struct ctnetlink_filter_u32 { u32 val; u32 mask; }; struct ctnetlink_filter { u8 family; bool zone_filter; u_int32_t orig_flags; u_int32_t reply_flags; struct nf_conntrack_tuple orig; struct nf_conntrack_tuple reply; struct nf_conntrack_zone zone; struct ctnetlink_filter_u32 mark; struct ctnetlink_filter_u32 status; }; static const struct nla_policy cta_filter_nla_policy[CTA_FILTER_MAX + 1] = { [CTA_FILTER_ORIG_FLAGS] = { .type = NLA_U32 }, [CTA_FILTER_REPLY_FLAGS] = { .type = NLA_U32 }, }; static int ctnetlink_parse_filter(const struct nlattr *attr, struct ctnetlink_filter *filter) { struct nlattr *tb[CTA_FILTER_MAX + 1]; int ret = 0; ret = nla_parse_nested(tb, CTA_FILTER_MAX, attr, cta_filter_nla_policy, NULL); if (ret) return ret; if (tb[CTA_FILTER_ORIG_FLAGS]) { filter->orig_flags = nla_get_u32(tb[CTA_FILTER_ORIG_FLAGS]); if (filter->orig_flags & ~CTA_FILTER_F_ALL) return -EOPNOTSUPP; } if (tb[CTA_FILTER_REPLY_FLAGS]) { filter->reply_flags = nla_get_u32(tb[CTA_FILTER_REPLY_FLAGS]); if (filter->reply_flags & ~CTA_FILTER_F_ALL) return -EOPNOTSUPP; } return 0; } static int ctnetlink_parse_zone(const struct nlattr *attr, struct nf_conntrack_zone *zone); static int ctnetlink_parse_tuple_filter(const struct nlattr * const cda[], struct nf_conntrack_tuple *tuple, u32 type, u_int8_t l3num, struct nf_conntrack_zone *zone, u_int32_t flags); static int ctnetlink_filter_parse_mark(struct ctnetlink_filter_u32 *mark, const struct nlattr * const cda[]) { #ifdef CONFIG_NF_CONNTRACK_MARK if (cda[CTA_MARK]) { mark->val = ntohl(nla_get_be32(cda[CTA_MARK])); if (cda[CTA_MARK_MASK]) mark->mask = ntohl(nla_get_be32(cda[CTA_MARK_MASK])); else mark->mask = 0xffffffff; } else if (cda[CTA_MARK_MASK]) { return -EINVAL; } #endif return 0; } static int ctnetlink_filter_parse_status(struct ctnetlink_filter_u32 *status, const struct nlattr * const cda[]) { if (cda[CTA_STATUS]) { status->val = 
ntohl(nla_get_be32(cda[CTA_STATUS])); if (cda[CTA_STATUS_MASK]) status->mask = ntohl(nla_get_be32(cda[CTA_STATUS_MASK])); else status->mask = status->val; /* status->val == 0? always true, else always false. */ if (status->mask == 0) return -EINVAL; } else if (cda[CTA_STATUS_MASK]) { return -EINVAL; } /* CTA_STATUS is NLA_U32, if this fires UAPI needs to be extended */ BUILD_BUG_ON(__IPS_MAX_BIT >= 32); return 0; } static struct ctnetlink_filter * ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family) { struct ctnetlink_filter *filter; int err; #ifndef CONFIG_NF_CONNTRACK_MARK if (cda[CTA_MARK] || cda[CTA_MARK_MASK]) return ERR_PTR(-EOPNOTSUPP); #endif filter = kzalloc(sizeof(*filter), GFP_KERNEL); if (filter == NULL) return ERR_PTR(-ENOMEM); filter->family = family; err = ctnetlink_filter_parse_mark(&filter->mark, cda); if (err) goto err_filter; err = ctnetlink_filter_parse_status(&filter->status, cda); if (err) goto err_filter; if (cda[CTA_ZONE]) { err = ctnetlink_parse_zone(cda[CTA_ZONE], &filter->zone); if (err < 0) goto err_filter; filter->zone_filter = true; } if (!cda[CTA_FILTER]) return filter; err = ctnetlink_parse_filter(cda[CTA_FILTER], filter); if (err < 0) goto err_filter; if (filter->orig_flags) { if (!cda[CTA_TUPLE_ORIG]) { err = -EINVAL; goto err_filter; } err = ctnetlink_parse_tuple_filter(cda, &filter->orig, CTA_TUPLE_ORIG, filter->family, &filter->zone, filter->orig_flags); if (err < 0) goto err_filter; } if (filter->reply_flags) { if (!cda[CTA_TUPLE_REPLY]) { err = -EINVAL; goto err_filter; } err = ctnetlink_parse_tuple_filter(cda, &filter->reply, CTA_TUPLE_REPLY, filter->family, &filter->zone, filter->reply_flags); if (err < 0) goto err_filter; } return filter; err_filter: kfree(filter); return ERR_PTR(err); } static bool ctnetlink_needs_filter(u8 family, const struct nlattr * const *cda) { return family || cda[CTA_MARK] || cda[CTA_FILTER] || cda[CTA_STATUS] || cda[CTA_ZONE]; } static int ctnetlink_start(struct netlink_callback *cb) { const struct nlattr * const *cda = cb->data; struct ctnetlink_filter *filter = NULL; struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); u8 family = nfmsg->nfgen_family; if (ctnetlink_needs_filter(family, cda)) { filter = ctnetlink_alloc_filter(cda, family); if (IS_ERR(filter)) return PTR_ERR(filter); } cb->data = filter; return 0; } static int ctnetlink_filter_match_tuple(struct nf_conntrack_tuple *filter_tuple, struct nf_conntrack_tuple *ct_tuple, u_int32_t flags, int family) { switch (family) { case NFPROTO_IPV4: if ((flags & CTA_FILTER_FLAG(CTA_IP_SRC)) && filter_tuple->src.u3.ip != ct_tuple->src.u3.ip) return 0; if ((flags & CTA_FILTER_FLAG(CTA_IP_DST)) && filter_tuple->dst.u3.ip != ct_tuple->dst.u3.ip) return 0; break; case NFPROTO_IPV6: if ((flags & CTA_FILTER_FLAG(CTA_IP_SRC)) && !ipv6_addr_cmp(&filter_tuple->src.u3.in6, &ct_tuple->src.u3.in6)) return 0; if ((flags & CTA_FILTER_FLAG(CTA_IP_DST)) && !ipv6_addr_cmp(&filter_tuple->dst.u3.in6, &ct_tuple->dst.u3.in6)) return 0; break; } if ((flags & CTA_FILTER_FLAG(CTA_PROTO_NUM)) && filter_tuple->dst.protonum != ct_tuple->dst.protonum) return 0; switch (ct_tuple->dst.protonum) { case IPPROTO_TCP: case IPPROTO_UDP: if ((flags & CTA_FILTER_FLAG(CTA_PROTO_SRC_PORT)) && filter_tuple->src.u.tcp.port != ct_tuple->src.u.tcp.port) return 0; if ((flags & CTA_FILTER_FLAG(CTA_PROTO_DST_PORT)) && filter_tuple->dst.u.tcp.port != ct_tuple->dst.u.tcp.port) return 0; break; case IPPROTO_ICMP: if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_TYPE)) && filter_tuple->dst.u.icmp.type != 
ct_tuple->dst.u.icmp.type) return 0; if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_CODE)) && filter_tuple->dst.u.icmp.code != ct_tuple->dst.u.icmp.code) return 0; if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_ID)) && filter_tuple->src.u.icmp.id != ct_tuple->src.u.icmp.id) return 0; break; case IPPROTO_ICMPV6: if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_TYPE)) && filter_tuple->dst.u.icmp.type != ct_tuple->dst.u.icmp.type) return 0; if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_CODE)) && filter_tuple->dst.u.icmp.code != ct_tuple->dst.u.icmp.code) return 0; if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_ID)) && filter_tuple->src.u.icmp.id != ct_tuple->src.u.icmp.id) return 0; break; } return 1; } static int ctnetlink_filter_match(struct nf_conn *ct, void *data) { struct ctnetlink_filter *filter = data; struct nf_conntrack_tuple *tuple; u32 status; if (filter == NULL) goto out; /* Match entries of a given L3 protocol number. * If it is not specified, ie. l3proto == 0, * then match everything. */ if (filter->family && nf_ct_l3num(ct) != filter->family) goto ignore_entry; if (filter->zone_filter && !nf_ct_zone_equal_any(ct, &filter->zone)) goto ignore_entry; if (filter->orig_flags) { tuple = nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL); if (!ctnetlink_filter_match_tuple(&filter->orig, tuple, filter->orig_flags, filter->family)) goto ignore_entry; } if (filter->reply_flags) { tuple = nf_ct_tuple(ct, IP_CT_DIR_REPLY); if (!ctnetlink_filter_match_tuple(&filter->reply, tuple, filter->reply_flags, filter->family)) goto ignore_entry; } #ifdef CONFIG_NF_CONNTRACK_MARK if ((READ_ONCE(ct->mark) & filter->mark.mask) != filter->mark.val) goto ignore_entry; #endif status = (u32)READ_ONCE(ct->status); if ((status & filter->status.mask) != filter->status.val) goto ignore_entry; out: return 1; ignore_entry: return 0; } static int ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) { unsigned int flags = cb->data ? NLM_F_DUMP_FILTERED : 0; struct net *net = sock_net(skb->sk); struct nf_conn *ct, *last; struct nf_conntrack_tuple_hash *h; struct hlist_nulls_node *n; struct nf_conn *nf_ct_evict[8]; int res, i; spinlock_t *lockp; last = (struct nf_conn *)cb->args[1]; i = 0; local_bh_disable(); for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) { restart: while (i) { i--; if (nf_ct_should_gc(nf_ct_evict[i])) nf_ct_kill(nf_ct_evict[i]); nf_ct_put(nf_ct_evict[i]); } lockp = &nf_conntrack_locks[cb->args[0] % CONNTRACK_LOCKS]; nf_conntrack_lock(lockp); if (cb->args[0] >= nf_conntrack_htable_size) { spin_unlock(lockp); goto out; } hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[cb->args[0]], hnnode) { ct = nf_ct_tuplehash_to_ctrack(h); if (nf_ct_is_expired(ct)) { /* need to defer nf_ct_kill() until lock is released */ if (i < ARRAY_SIZE(nf_ct_evict) && refcount_inc_not_zero(&ct->ct_general.use)) nf_ct_evict[i++] = ct; continue; } if (!net_eq(net, nf_ct_net(ct))) continue; if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) continue; if (cb->args[1]) { if (ct != last) continue; cb->args[1] = 0; } if (!ctnetlink_filter_match(ct, cb->data)) continue; res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NFNL_MSG_TYPE(cb->nlh->nlmsg_type), ct, true, flags); if (res < 0) { nf_conntrack_get(&ct->ct_general); cb->args[1] = (unsigned long)ct; spin_unlock(lockp); goto out; } } spin_unlock(lockp); if (cb->args[1]) { cb->args[1] = 0; goto restart; } } out: local_bh_enable(); if (last) { /* nf ct hash resize happened, now clear the leftover. 
*/ if ((struct nf_conn *)cb->args[1] == last) cb->args[1] = 0; nf_ct_put(last); } while (i) { i--; if (nf_ct_should_gc(nf_ct_evict[i])) nf_ct_kill(nf_ct_evict[i]); nf_ct_put(nf_ct_evict[i]); } return skb->len; } static int ipv4_nlattr_to_tuple(struct nlattr *tb[], struct nf_conntrack_tuple *t, u_int32_t flags) { if (flags & CTA_FILTER_FLAG(CTA_IP_SRC)) { if (!tb[CTA_IP_V4_SRC]) return -EINVAL; t->src.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_SRC]); } if (flags & CTA_FILTER_FLAG(CTA_IP_DST)) { if (!tb[CTA_IP_V4_DST]) return -EINVAL; t->dst.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_DST]); } return 0; } static int ipv6_nlattr_to_tuple(struct nlattr *tb[], struct nf_conntrack_tuple *t, u_int32_t flags) { if (flags & CTA_FILTER_FLAG(CTA_IP_SRC)) { if (!tb[CTA_IP_V6_SRC]) return -EINVAL; t->src.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_SRC]); } if (flags & CTA_FILTER_FLAG(CTA_IP_DST)) { if (!tb[CTA_IP_V6_DST]) return -EINVAL; t->dst.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_DST]); } return 0; } static int ctnetlink_parse_tuple_ip(struct nlattr *attr, struct nf_conntrack_tuple *tuple, u_int32_t flags) { struct nlattr *tb[CTA_IP_MAX+1]; int ret = 0; ret = nla_parse_nested_deprecated(tb, CTA_IP_MAX, attr, cta_ip_nla_policy, NULL); if (ret < 0) return ret; switch (tuple->src.l3num) { case NFPROTO_IPV4: ret = ipv4_nlattr_to_tuple(tb, tuple, flags); break; case NFPROTO_IPV6: ret = ipv6_nlattr_to_tuple(tb, tuple, flags); break; } return ret; } static const struct nla_policy proto_nla_policy[CTA_PROTO_MAX+1] = { [CTA_PROTO_NUM] = { .type = NLA_U8 }, }; static int ctnetlink_parse_tuple_proto(struct nlattr *attr, struct nf_conntrack_tuple *tuple, u_int32_t flags) { const struct nf_conntrack_l4proto *l4proto; struct nlattr *tb[CTA_PROTO_MAX+1]; int ret = 0; ret = nla_parse_nested_deprecated(tb, CTA_PROTO_MAX, attr, proto_nla_policy, NULL); if (ret < 0) return ret; if (!(flags & CTA_FILTER_FLAG(CTA_PROTO_NUM))) return 0; if (!tb[CTA_PROTO_NUM]) return -EINVAL; tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]); rcu_read_lock(); l4proto = nf_ct_l4proto_find(tuple->dst.protonum); if (likely(l4proto->nlattr_to_tuple)) { ret = nla_validate_nested_deprecated(attr, CTA_PROTO_MAX, l4proto->nla_policy, NULL); if (ret == 0) ret = l4proto->nlattr_to_tuple(tb, tuple, flags); } rcu_read_unlock(); return ret; } static int ctnetlink_parse_zone(const struct nlattr *attr, struct nf_conntrack_zone *zone) { nf_ct_zone_init(zone, NF_CT_DEFAULT_ZONE_ID, NF_CT_DEFAULT_ZONE_DIR, 0); #ifdef CONFIG_NF_CONNTRACK_ZONES if (attr) zone->id = ntohs(nla_get_be16(attr)); #else if (attr) return -EOPNOTSUPP; #endif return 0; } static int ctnetlink_parse_tuple_zone(struct nlattr *attr, enum ctattr_type type, struct nf_conntrack_zone *zone) { int ret; if (zone->id != NF_CT_DEFAULT_ZONE_ID) return -EINVAL; ret = ctnetlink_parse_zone(attr, zone); if (ret < 0) return ret; if (type == CTA_TUPLE_REPLY) zone->dir = NF_CT_ZONE_DIR_REPL; else zone->dir = NF_CT_ZONE_DIR_ORIG; return 0; } static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = { [CTA_TUPLE_IP] = { .type = NLA_NESTED }, [CTA_TUPLE_PROTO] = { .type = NLA_NESTED }, [CTA_TUPLE_ZONE] = { .type = NLA_U16 }, }; #define CTA_FILTER_F_ALL_CTA_PROTO \ (CTA_FILTER_F_CTA_PROTO_SRC_PORT | \ CTA_FILTER_F_CTA_PROTO_DST_PORT | \ CTA_FILTER_F_CTA_PROTO_ICMP_TYPE | \ CTA_FILTER_F_CTA_PROTO_ICMP_CODE | \ CTA_FILTER_F_CTA_PROTO_ICMP_ID | \ CTA_FILTER_F_CTA_PROTO_ICMPV6_TYPE | \ CTA_FILTER_F_CTA_PROTO_ICMPV6_CODE | \ CTA_FILTER_F_CTA_PROTO_ICMPV6_ID) static int ctnetlink_parse_tuple_filter(const struct nlattr 
* const cda[], struct nf_conntrack_tuple *tuple, u32 type, u_int8_t l3num, struct nf_conntrack_zone *zone, u_int32_t flags) { struct nlattr *tb[CTA_TUPLE_MAX+1]; int err; memset(tuple, 0, sizeof(*tuple)); err = nla_parse_nested_deprecated(tb, CTA_TUPLE_MAX, cda[type], tuple_nla_policy, NULL); if (err < 0) return err; if (l3num != NFPROTO_IPV4 && l3num != NFPROTO_IPV6) return -EOPNOTSUPP; tuple->src.l3num = l3num; if (flags & CTA_FILTER_FLAG(CTA_IP_DST) || flags & CTA_FILTER_FLAG(CTA_IP_SRC)) { if (!tb[CTA_TUPLE_IP]) return -EINVAL; err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple, flags); if (err < 0) return err; } if (flags & CTA_FILTER_FLAG(CTA_PROTO_NUM)) { if (!tb[CTA_TUPLE_PROTO]) return -EINVAL; err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO], tuple, flags); if (err < 0) return err; } else if (flags & CTA_FILTER_FLAG(ALL_CTA_PROTO)) { /* Can't manage proto flags without a protonum */ return -EINVAL; } if ((flags & CTA_FILTER_FLAG(CTA_TUPLE_ZONE)) && tb[CTA_TUPLE_ZONE]) { if (!zone) return -EINVAL; err = ctnetlink_parse_tuple_zone(tb[CTA_TUPLE_ZONE], type, zone); if (err < 0) return err; } /* orig and expect tuples get DIR_ORIGINAL */ if (type == CTA_TUPLE_REPLY) tuple->dst.dir = IP_CT_DIR_REPLY; else tuple->dst.dir = IP_CT_DIR_ORIGINAL; return 0; } static int ctnetlink_parse_tuple(const struct nlattr * const cda[], struct nf_conntrack_tuple *tuple, u32 type, u_int8_t l3num, struct nf_conntrack_zone *zone) { return ctnetlink_parse_tuple_filter(cda, tuple, type, l3num, zone, CTA_FILTER_FLAG(ALL)); } static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = { [CTA_HELP_NAME] = { .type = NLA_NUL_STRING, .len = NF_CT_HELPER_NAME_LEN - 1 }, }; static int ctnetlink_parse_help(const struct nlattr *attr, char **helper_name, struct nlattr **helpinfo) { int err; struct nlattr *tb[CTA_HELP_MAX+1]; err = nla_parse_nested_deprecated(tb, CTA_HELP_MAX, attr, help_nla_policy, NULL); if (err < 0) return err; if (!tb[CTA_HELP_NAME]) return -EINVAL; *helper_name = nla_data(tb[CTA_HELP_NAME]); if (tb[CTA_HELP_INFO]) *helpinfo = tb[CTA_HELP_INFO]; return 0; } static const struct nla_policy ct_nla_policy[CTA_MAX+1] = { [CTA_TUPLE_ORIG] = { .type = NLA_NESTED }, [CTA_TUPLE_REPLY] = { .type = NLA_NESTED }, [CTA_STATUS] = { .type = NLA_U32 }, [CTA_PROTOINFO] = { .type = NLA_NESTED }, [CTA_HELP] = { .type = NLA_NESTED }, [CTA_NAT_SRC] = { .type = NLA_NESTED }, [CTA_TIMEOUT] = { .type = NLA_U32 }, [CTA_MARK] = { .type = NLA_U32 }, [CTA_ID] = { .type = NLA_U32 }, [CTA_NAT_DST] = { .type = NLA_NESTED }, [CTA_TUPLE_MASTER] = { .type = NLA_NESTED }, [CTA_NAT_SEQ_ADJ_ORIG] = { .type = NLA_NESTED }, [CTA_NAT_SEQ_ADJ_REPLY] = { .type = NLA_NESTED }, [CTA_ZONE] = { .type = NLA_U16 }, [CTA_MARK_MASK] = { .type = NLA_U32 }, [CTA_LABELS] = { .type = NLA_BINARY, .len = NF_CT_LABELS_MAX_SIZE }, [CTA_LABELS_MASK] = { .type = NLA_BINARY, .len = NF_CT_LABELS_MAX_SIZE }, [CTA_FILTER] = { .type = NLA_NESTED }, [CTA_STATUS_MASK] = { .type = NLA_U32 }, }; static int ctnetlink_flush_iterate(struct nf_conn *ct, void *data) { return ctnetlink_filter_match(ct, data); } static int ctnetlink_flush_conntrack(struct net *net, const struct nlattr * const cda[], u32 portid, int report, u8 family) { struct ctnetlink_filter *filter = NULL; struct nf_ct_iter_data iter = { .net = net, .portid = portid, .report = report, }; if (ctnetlink_needs_filter(family, cda)) { if (cda[CTA_FILTER]) return -EOPNOTSUPP; filter = ctnetlink_alloc_filter(cda, family); if (IS_ERR(filter)) return PTR_ERR(filter); iter.data = filter; } 
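/* walk the whole conntrack table and delete every entry the filter matches */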
nf_ct_iterate_cleanup_net(ctnetlink_flush_iterate, &iter); kfree(filter); return 0; } static int ctnetlink_del_conntrack(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const cda[]) { u8 family = info->nfmsg->nfgen_family; struct nf_conntrack_tuple_hash *h; struct nf_conntrack_tuple tuple; struct nf_conntrack_zone zone; struct nf_conn *ct; int err; err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone); if (err < 0) return err; if (cda[CTA_TUPLE_ORIG]) err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, family, &zone); else if (cda[CTA_TUPLE_REPLY]) err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, family, &zone); else { u_int8_t u3 = info->nfmsg->version ? family : AF_UNSPEC; return ctnetlink_flush_conntrack(info->net, cda, NETLINK_CB(skb).portid, nlmsg_report(info->nlh), u3); } if (err < 0) return err; h = nf_conntrack_find_get(info->net, &zone, &tuple); if (!h) return -ENOENT; ct = nf_ct_tuplehash_to_ctrack(h); if (cda[CTA_ID]) { __be32 id = nla_get_be32(cda[CTA_ID]); if (id != (__force __be32)nf_ct_get_id(ct)) { nf_ct_put(ct); return -ENOENT; } } nf_ct_delete(ct, NETLINK_CB(skb).portid, nlmsg_report(info->nlh)); nf_ct_put(ct); return 0; } static int ctnetlink_get_conntrack(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const cda[]) { u_int8_t u3 = info->nfmsg->nfgen_family; struct nf_conntrack_tuple_hash *h; struct nf_conntrack_tuple tuple; struct nf_conntrack_zone zone; struct sk_buff *skb2; struct nf_conn *ct; int err; if (info->nlh->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { .start = ctnetlink_start, .dump = ctnetlink_dump_table, .done = ctnetlink_done, .data = (void *)cda, }; return netlink_dump_start(info->sk, skb, info->nlh, &c); } err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone); if (err < 0) return err; if (cda[CTA_TUPLE_ORIG]) err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3, &zone); else if (cda[CTA_TUPLE_REPLY]) err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3, &zone); else return -EINVAL; if (err < 0) return err; h = nf_conntrack_find_get(info->net, &zone, &tuple); if (!h) return -ENOENT; ct = nf_ct_tuplehash_to_ctrack(h); skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb2) { nf_ct_put(ct); return -ENOMEM; } err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).portid, info->nlh->nlmsg_seq, NFNL_MSG_TYPE(info->nlh->nlmsg_type), ct, true, 0); nf_ct_put(ct); if (err <= 0) { kfree_skb(skb2); return -ENOMEM; } return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid); } static int ctnetlink_done_list(struct netlink_callback *cb) { struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx; if (ctx->last) nf_ct_put(ctx->last); return 0; } #ifdef CONFIG_NF_CONNTRACK_EVENTS static int ctnetlink_dump_one_entry(struct sk_buff *skb, struct netlink_callback *cb, struct nf_conn *ct, bool dying) { struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx; struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); u8 l3proto = nfmsg->nfgen_family; int res; if (l3proto && nf_ct_l3num(ct) != l3proto) return 0; if (ctx->last) { if (ct != ctx->last) return 0; ctx->last = NULL; } /* We can't dump extension info for the unconfirmed * list because unconfirmed conntracks can have * ct->ext reallocated (and thus freed). * * In the dying list case ct->ext can't be free'd * until after we drop pcpu->lock. 
*/ res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NFNL_MSG_TYPE(cb->nlh->nlmsg_type), ct, dying, 0); if (res < 0) { if (!refcount_inc_not_zero(&ct->ct_general.use)) return 0; ctx->last = ct; } return res; } #endif static int ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb) { return 0; } static int ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb) { struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx; struct nf_conn *last = ctx->last; #ifdef CONFIG_NF_CONNTRACK_EVENTS const struct net *net = sock_net(skb->sk); struct nf_conntrack_net_ecache *ecache_net; struct nf_conntrack_tuple_hash *h; struct hlist_nulls_node *n; #endif if (ctx->done) return 0; ctx->last = NULL; #ifdef CONFIG_NF_CONNTRACK_EVENTS ecache_net = nf_conn_pernet_ecache(net); spin_lock_bh(&ecache_net->dying_lock); hlist_nulls_for_each_entry(h, n, &ecache_net->dying_list, hnnode) { struct nf_conn *ct; int res; ct = nf_ct_tuplehash_to_ctrack(h); if (last && last != ct) continue; res = ctnetlink_dump_one_entry(skb, cb, ct, true); if (res < 0) { spin_unlock_bh(&ecache_net->dying_lock); nf_ct_put(last); return skb->len; } nf_ct_put(last); last = NULL; } spin_unlock_bh(&ecache_net->dying_lock); #endif ctx->done = true; nf_ct_put(last); return skb->len; } static int ctnetlink_get_ct_dying(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const cda[]) { if (info->nlh->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { .dump = ctnetlink_dump_dying, .done = ctnetlink_done_list, }; return netlink_dump_start(info->sk, skb, info->nlh, &c); } return -EOPNOTSUPP; } static int ctnetlink_get_ct_unconfirmed(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const cda[]) { if (info->nlh->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { .dump = ctnetlink_dump_unconfirmed, .done = ctnetlink_done_list, }; return netlink_dump_start(info->sk, skb, info->nlh, &c); } return -EOPNOTSUPP; } #if IS_ENABLED(CONFIG_NF_NAT) static int ctnetlink_parse_nat_setup(struct nf_conn *ct, enum nf_nat_manip_type manip, const struct nlattr *attr) __must_hold(RCU) { const struct nf_nat_hook *nat_hook; int err; nat_hook = rcu_dereference(nf_nat_hook); if (!nat_hook) { #ifdef CONFIG_MODULES rcu_read_unlock(); nfnl_unlock(NFNL_SUBSYS_CTNETLINK); if (request_module("nf-nat") < 0) { nfnl_lock(NFNL_SUBSYS_CTNETLINK); rcu_read_lock(); return -EOPNOTSUPP; } nfnl_lock(NFNL_SUBSYS_CTNETLINK); rcu_read_lock(); nat_hook = rcu_dereference(nf_nat_hook); if (nat_hook) return -EAGAIN; #endif return -EOPNOTSUPP; } err = nat_hook->parse_nat_setup(ct, manip, attr); if (err == -EAGAIN) { #ifdef CONFIG_MODULES rcu_read_unlock(); nfnl_unlock(NFNL_SUBSYS_CTNETLINK); if (request_module("nf-nat-%u", nf_ct_l3num(ct)) < 0) { nfnl_lock(NFNL_SUBSYS_CTNETLINK); rcu_read_lock(); return -EOPNOTSUPP; } nfnl_lock(NFNL_SUBSYS_CTNETLINK); rcu_read_lock(); #else err = -EOPNOTSUPP; #endif } return err; } #endif static int ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[]) { return nf_ct_change_status_common(ct, ntohl(nla_get_be32(cda[CTA_STATUS]))); } static int ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[]) { #if IS_ENABLED(CONFIG_NF_NAT) int ret; if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC]) return 0; ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST, cda[CTA_NAT_DST]); if (ret < 0) return ret; return ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_SRC, cda[CTA_NAT_SRC]); #else if (!cda[CTA_NAT_DST] && 
!cda[CTA_NAT_SRC]) return 0; return -EOPNOTSUPP; #endif } static int ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[]) { struct nf_conntrack_helper *helper; struct nf_conn_help *help = nfct_help(ct); char *helpname = NULL; struct nlattr *helpinfo = NULL; int err; err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo); if (err < 0) return err; /* don't change helper of sibling connections */ if (ct->master) { /* If we try to change the helper to the same thing twice, * treat the second attempt as a no-op instead of returning * an error. */ err = -EBUSY; if (help) { rcu_read_lock(); helper = rcu_dereference(help->helper); if (helper && !strcmp(helper->name, helpname)) err = 0; rcu_read_unlock(); } return err; } if (!strcmp(helpname, "")) { if (help && help->helper) { /* we had a helper before ... */ nf_ct_remove_expectations(ct); RCU_INIT_POINTER(help->helper, NULL); } return 0; } rcu_read_lock(); helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct), nf_ct_protonum(ct)); if (helper == NULL) { rcu_read_unlock(); return -EOPNOTSUPP; } if (help) { if (rcu_access_pointer(help->helper) == helper) { /* update private helper data if allowed. */ if (helper->from_nlattr) helper->from_nlattr(helpinfo, ct); err = 0; } else err = -EBUSY; } else { /* we cannot set a helper for an existing conntrack */ err = -EOPNOTSUPP; } rcu_read_unlock(); return err; } static int ctnetlink_change_timeout(struct nf_conn *ct, const struct nlattr * const cda[]) { return __nf_ct_change_timeout(ct, (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ); } #if defined(CONFIG_NF_CONNTRACK_MARK) static void ctnetlink_change_mark(struct nf_conn *ct, const struct nlattr * const cda[]) { u32 mark, newmark, mask = 0; if (cda[CTA_MARK_MASK]) mask = ~ntohl(nla_get_be32(cda[CTA_MARK_MASK])); mark = ntohl(nla_get_be32(cda[CTA_MARK])); newmark = (READ_ONCE(ct->mark) & mask) ^ mark; if (newmark != READ_ONCE(ct->mark)) WRITE_ONCE(ct->mark, newmark); } #endif static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = { [CTA_PROTOINFO_TCP] = { .type = NLA_NESTED }, [CTA_PROTOINFO_DCCP] = { .type = NLA_NESTED }, [CTA_PROTOINFO_SCTP] = { .type = NLA_NESTED }, }; static int ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[]) { const struct nlattr *attr = cda[CTA_PROTOINFO]; const struct nf_conntrack_l4proto *l4proto; struct nlattr *tb[CTA_PROTOINFO_MAX+1]; int err = 0; err = nla_parse_nested_deprecated(tb, CTA_PROTOINFO_MAX, attr, protoinfo_policy, NULL); if (err < 0) return err; l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct)); if (l4proto->from_nlattr) err = l4proto->from_nlattr(tb, ct); return err; } static const struct nla_policy seqadj_policy[CTA_SEQADJ_MAX+1] = { [CTA_SEQADJ_CORRECTION_POS] = { .type = NLA_U32 }, [CTA_SEQADJ_OFFSET_BEFORE] = { .type = NLA_U32 }, [CTA_SEQADJ_OFFSET_AFTER] = { .type = NLA_U32 }, }; static int change_seq_adj(struct nf_ct_seqadj *seq, const struct nlattr * const attr) { int err; struct nlattr *cda[CTA_SEQADJ_MAX+1]; err = nla_parse_nested_deprecated(cda, CTA_SEQADJ_MAX, attr, seqadj_policy, NULL); if (err < 0) return err; if (!cda[CTA_SEQADJ_CORRECTION_POS]) return -EINVAL; seq->correction_pos = ntohl(nla_get_be32(cda[CTA_SEQADJ_CORRECTION_POS])); if (!cda[CTA_SEQADJ_OFFSET_BEFORE]) return -EINVAL; seq->offset_before = ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_BEFORE])); if (!cda[CTA_SEQADJ_OFFSET_AFTER]) return -EINVAL; seq->offset_after = ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_AFTER])); return 0; } static int 
ctnetlink_change_seq_adj(struct nf_conn *ct, const struct nlattr * const cda[]) { struct nf_conn_seqadj *seqadj = nfct_seqadj(ct); int ret = 0; if (!seqadj) return 0; spin_lock_bh(&ct->lock); if (cda[CTA_SEQ_ADJ_ORIG]) { ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_ORIGINAL], cda[CTA_SEQ_ADJ_ORIG]); if (ret < 0) goto err; set_bit(IPS_SEQ_ADJUST_BIT, &ct->status); } if (cda[CTA_SEQ_ADJ_REPLY]) { ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_REPLY], cda[CTA_SEQ_ADJ_REPLY]); if (ret < 0) goto err; set_bit(IPS_SEQ_ADJUST_BIT, &ct->status); } spin_unlock_bh(&ct->lock); return 0; err: spin_unlock_bh(&ct->lock); return ret; } static const struct nla_policy synproxy_policy[CTA_SYNPROXY_MAX + 1] = { [CTA_SYNPROXY_ISN] = { .type = NLA_U32 }, [CTA_SYNPROXY_ITS] = { .type = NLA_U32 }, [CTA_SYNPROXY_TSOFF] = { .type = NLA_U32 }, }; static int ctnetlink_change_synproxy(struct nf_conn *ct, const struct nlattr * const cda[]) { struct nf_conn_synproxy *synproxy = nfct_synproxy(ct); struct nlattr *tb[CTA_SYNPROXY_MAX + 1]; int err; if (!synproxy) return 0; err = nla_parse_nested_deprecated(tb, CTA_SYNPROXY_MAX, cda[CTA_SYNPROXY], synproxy_policy, NULL); if (err < 0) return err; if (!tb[CTA_SYNPROXY_ISN] || !tb[CTA_SYNPROXY_ITS] || !tb[CTA_SYNPROXY_TSOFF]) return -EINVAL; synproxy->isn = ntohl(nla_get_be32(tb[CTA_SYNPROXY_ISN])); synproxy->its = ntohl(nla_get_be32(tb[CTA_SYNPROXY_ITS])); synproxy->tsoff = ntohl(nla_get_be32(tb[CTA_SYNPROXY_TSOFF])); return 0; } static int ctnetlink_attach_labels(struct nf_conn *ct, const struct nlattr * const cda[]) { #ifdef CONFIG_NF_CONNTRACK_LABELS size_t len = nla_len(cda[CTA_LABELS]); const void *mask = cda[CTA_LABELS_MASK]; if (len & (sizeof(u32)-1)) /* must be multiple of u32 */ return -EINVAL; if (mask) { if (nla_len(cda[CTA_LABELS_MASK]) == 0 || nla_len(cda[CTA_LABELS_MASK]) != len) return -EINVAL; mask = nla_data(cda[CTA_LABELS_MASK]); } len /= sizeof(u32); return nf_connlabels_replace(ct, nla_data(cda[CTA_LABELS]), mask, len); #else return -EOPNOTSUPP; #endif } static int ctnetlink_change_conntrack(struct nf_conn *ct, const struct nlattr * const cda[]) { int err; /* only allow NAT changes and master assignation for new conntracks */ if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST] || cda[CTA_TUPLE_MASTER]) return -EOPNOTSUPP; if (cda[CTA_HELP]) { err = ctnetlink_change_helper(ct, cda); if (err < 0) return err; } if (cda[CTA_TIMEOUT]) { err = ctnetlink_change_timeout(ct, cda); if (err < 0) return err; } if (cda[CTA_STATUS]) { err = ctnetlink_change_status(ct, cda); if (err < 0) return err; } if (cda[CTA_PROTOINFO]) { err = ctnetlink_change_protoinfo(ct, cda); if (err < 0) return err; } #if defined(CONFIG_NF_CONNTRACK_MARK) if (cda[CTA_MARK]) ctnetlink_change_mark(ct, cda); #endif if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) { err = ctnetlink_change_seq_adj(ct, cda); if (err < 0) return err; } if (cda[CTA_SYNPROXY]) { err = ctnetlink_change_synproxy(ct, cda); if (err < 0) return err; } if (cda[CTA_LABELS]) { err = ctnetlink_attach_labels(ct, cda); if (err < 0) return err; } return 0; } static struct nf_conn * ctnetlink_create_conntrack(struct net *net, const struct nf_conntrack_zone *zone, const struct nlattr * const cda[], struct nf_conntrack_tuple *otuple, struct nf_conntrack_tuple *rtuple, u8 u3) { struct nf_conn *ct; int err = -EINVAL; struct nf_conntrack_helper *helper; struct nf_conn_tstamp *tstamp; u64 timeout; ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC); if (IS_ERR(ct)) return ERR_PTR(-ENOMEM); if (!cda[CTA_TIMEOUT]) goto err1; 
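/* a timeout is mandatory for entries created from userspace (rejected above with -EINVAL); the helper lookup and NAT setup below run under rcu_read_lock() */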
rcu_read_lock(); if (cda[CTA_HELP]) { char *helpname = NULL; struct nlattr *helpinfo = NULL; err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo); if (err < 0) goto err2; helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct), nf_ct_protonum(ct)); if (helper == NULL) { rcu_read_unlock(); #ifdef CONFIG_MODULES if (request_module("nfct-helper-%s", helpname) < 0) { err = -EOPNOTSUPP; goto err1; } rcu_read_lock(); helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct), nf_ct_protonum(ct)); if (helper) { err = -EAGAIN; goto err2; } rcu_read_unlock(); #endif err = -EOPNOTSUPP; goto err1; } else { struct nf_conn_help *help; help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); if (help == NULL) { err = -ENOMEM; goto err2; } /* set private helper data if allowed. */ if (helper->from_nlattr) helper->from_nlattr(helpinfo, ct); /* disable helper auto-assignment for this entry */ ct->status |= IPS_HELPER; RCU_INIT_POINTER(help->helper, helper); } } err = ctnetlink_setup_nat(ct, cda); if (err < 0) goto err2; nf_ct_acct_ext_add(ct, GFP_ATOMIC); nf_ct_tstamp_ext_add(ct, GFP_ATOMIC); nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC); nf_ct_labels_ext_add(ct); nfct_seqadj_ext_add(ct); nfct_synproxy_ext_add(ct); /* we must add conntrack extensions before confirmation. */ ct->status |= IPS_CONFIRMED; timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ; __nf_ct_set_timeout(ct, timeout); if (cda[CTA_STATUS]) { err = ctnetlink_change_status(ct, cda); if (err < 0) goto err2; } if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) { err = ctnetlink_change_seq_adj(ct, cda); if (err < 0) goto err2; } memset(&ct->proto, 0, sizeof(ct->proto)); if (cda[CTA_PROTOINFO]) { err = ctnetlink_change_protoinfo(ct, cda); if (err < 0) goto err2; } if (cda[CTA_SYNPROXY]) { err = ctnetlink_change_synproxy(ct, cda); if (err < 0) goto err2; } #if defined(CONFIG_NF_CONNTRACK_MARK) if (cda[CTA_MARK]) ctnetlink_change_mark(ct, cda); #endif /* setup master conntrack: this is a confirmed expectation */ if (cda[CTA_TUPLE_MASTER]) { struct nf_conntrack_tuple master; struct nf_conntrack_tuple_hash *master_h; struct nf_conn *master_ct; err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER, u3, NULL); if (err < 0) goto err2; master_h = nf_conntrack_find_get(net, zone, &master); if (master_h == NULL) { err = -ENOENT; goto err2; } master_ct = nf_ct_tuplehash_to_ctrack(master_h); __set_bit(IPS_EXPECTED_BIT, &ct->status); ct->master = master_ct; } tstamp = nf_conn_tstamp_find(ct); if (tstamp) tstamp->start = ktime_get_real_ns(); err = nf_conntrack_hash_check_insert(ct); if (err < 0) goto err3; rcu_read_unlock(); return ct; err3: if (ct->master) nf_ct_put(ct->master); err2: rcu_read_unlock(); err1: nf_conntrack_free(ct); return ERR_PTR(err); } static int ctnetlink_new_conntrack(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const cda[]) { struct nf_conntrack_tuple otuple, rtuple; struct nf_conntrack_tuple_hash *h = NULL; u_int8_t u3 = info->nfmsg->nfgen_family; struct nf_conntrack_zone zone; struct nf_conn *ct; int err; err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone); if (err < 0) return err; if (cda[CTA_TUPLE_ORIG]) { err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3, &zone); if (err < 0) return err; } if (cda[CTA_TUPLE_REPLY]) { err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY, u3, &zone); if (err < 0) return err; } if (cda[CTA_TUPLE_ORIG]) h = nf_conntrack_find_get(info->net, &zone, &otuple); else if (cda[CTA_TUPLE_REPLY]) h = nf_conntrack_find_get(info->net, &zone, &rtuple); if 
(h == NULL) { err = -ENOENT; if (info->nlh->nlmsg_flags & NLM_F_CREATE) { enum ip_conntrack_events events; if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY]) return -EINVAL; if (otuple.dst.protonum != rtuple.dst.protonum) return -EINVAL; ct = ctnetlink_create_conntrack(info->net, &zone, cda, &otuple, &rtuple, u3); if (IS_ERR(ct)) return PTR_ERR(ct); err = 0; if (test_bit(IPS_EXPECTED_BIT, &ct->status)) events = 1 << IPCT_RELATED; else events = 1 << IPCT_NEW; if (cda[CTA_LABELS] && ctnetlink_attach_labels(ct, cda) == 0) events |= (1 << IPCT_LABEL); nf_conntrack_eventmask_report((1 << IPCT_REPLY) | (1 << IPCT_ASSURED) | (1 << IPCT_HELPER) | (1 << IPCT_PROTOINFO) | (1 << IPCT_SEQADJ) | (1 << IPCT_MARK) | (1 << IPCT_SYNPROXY) | events, ct, NETLINK_CB(skb).portid, nlmsg_report(info->nlh)); nf_ct_put(ct); } return err; } /* implicit 'else' */ err = -EEXIST; ct = nf_ct_tuplehash_to_ctrack(h); if (!(info->nlh->nlmsg_flags & NLM_F_EXCL)) { err = ctnetlink_change_conntrack(ct, cda); if (err == 0) { nf_conntrack_eventmask_report((1 << IPCT_REPLY) | (1 << IPCT_ASSURED) | (1 << IPCT_HELPER) | (1 << IPCT_LABEL) | (1 << IPCT_PROTOINFO) | (1 << IPCT_SEQADJ) | (1 << IPCT_MARK) | (1 << IPCT_SYNPROXY), ct, NETLINK_CB(skb).portid, nlmsg_report(info->nlh)); } } nf_ct_put(ct); return err; } static int ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 portid, u32 seq, __u16 cpu, const struct ip_conntrack_stat *st) { struct nlmsghdr *nlh; unsigned int flags = portid ? NLM_F_MULTI : 0, event; event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_GET_STATS_CPU); nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC, NFNETLINK_V0, htons(cpu)); if (!nlh) goto nlmsg_failure; if (nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) || nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) || nla_put_be32(skb, CTA_STATS_INSERT, htonl(st->insert)) || nla_put_be32(skb, CTA_STATS_INSERT_FAILED, htonl(st->insert_failed)) || nla_put_be32(skb, CTA_STATS_DROP, htonl(st->drop)) || nla_put_be32(skb, CTA_STATS_EARLY_DROP, htonl(st->early_drop)) || nla_put_be32(skb, CTA_STATS_ERROR, htonl(st->error)) || nla_put_be32(skb, CTA_STATS_SEARCH_RESTART, htonl(st->search_restart)) || nla_put_be32(skb, CTA_STATS_CLASH_RESOLVE, htonl(st->clash_resolve)) || nla_put_be32(skb, CTA_STATS_CHAIN_TOOLONG, htonl(st->chaintoolong))) goto nla_put_failure; nlmsg_end(skb, nlh); return skb->len; nla_put_failure: nlmsg_failure: nlmsg_cancel(skb, nlh); return -1; } static int ctnetlink_ct_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb) { int cpu; struct net *net = sock_net(skb->sk); if (cb->args[0] == nr_cpu_ids) return 0; for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) { const struct ip_conntrack_stat *st; if (!cpu_possible(cpu)) continue; st = per_cpu_ptr(net->ct.stat, cpu); if (ctnetlink_ct_stat_cpu_fill_info(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, cpu, st) < 0) break; } cb->args[0] = cpu; return skb->len; } static int ctnetlink_stat_ct_cpu(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const cda[]) { if (info->nlh->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { .dump = ctnetlink_ct_stat_cpu_dump, }; return netlink_dump_start(info->sk, skb, info->nlh, &c); } return 0; } static int ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, struct net *net) { unsigned int flags = portid ? 
NLM_F_MULTI : 0, event; unsigned int nr_conntracks; struct nlmsghdr *nlh; event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_GET_STATS); nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC, NFNETLINK_V0, 0); if (!nlh) goto nlmsg_failure; nr_conntracks = nf_conntrack_count(net); if (nla_put_be32(skb, CTA_STATS_GLOBAL_ENTRIES, htonl(nr_conntracks))) goto nla_put_failure; if (nla_put_be32(skb, CTA_STATS_GLOBAL_MAX_ENTRIES, htonl(nf_conntrack_max))) goto nla_put_failure; nlmsg_end(skb, nlh); return skb->len; nla_put_failure: nlmsg_failure: nlmsg_cancel(skb, nlh); return -1; } static int ctnetlink_stat_ct(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const cda[]) { struct sk_buff *skb2; int err; skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (skb2 == NULL) return -ENOMEM; err = ctnetlink_stat_ct_fill_info(skb2, NETLINK_CB(skb).portid, info->nlh->nlmsg_seq, NFNL_MSG_TYPE(info->nlh->nlmsg_type), sock_net(skb->sk)); if (err <= 0) { kfree_skb(skb2); return -ENOMEM; } return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid); } static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = { [CTA_EXPECT_MASTER] = { .type = NLA_NESTED }, [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED }, [CTA_EXPECT_MASK] = { .type = NLA_NESTED }, [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 }, [CTA_EXPECT_ID] = { .type = NLA_U32 }, [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING, .len = NF_CT_HELPER_NAME_LEN - 1 }, [CTA_EXPECT_ZONE] = { .type = NLA_U16 }, [CTA_EXPECT_FLAGS] = { .type = NLA_U32 }, [CTA_EXPECT_CLASS] = { .type = NLA_U32 }, [CTA_EXPECT_NAT] = { .type = NLA_NESTED }, [CTA_EXPECT_FN] = { .type = NLA_NUL_STRING }, }; static struct nf_conntrack_expect * ctnetlink_alloc_expect(const struct nlattr *const cda[], struct nf_conn *ct, struct nf_conntrack_helper *helper, struct nf_conntrack_tuple *tuple, struct nf_conntrack_tuple *mask); #ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT static size_t ctnetlink_glue_build_size(const struct nf_conn *ct) { return 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */ + 3 * nla_total_size(0) /* CTA_TUPLE_IP */ + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */ + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */ + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */ + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */ + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */ + nla_total_size(0) /* CTA_PROTOINFO */ + nla_total_size(0) /* CTA_HELP */ + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */ + ctnetlink_secctx_size(ct) + ctnetlink_acct_size(ct) + ctnetlink_timestamp_size(ct) #if IS_ENABLED(CONFIG_NF_NAT) + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */ + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */ #endif #ifdef CONFIG_NF_CONNTRACK_MARK + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */ #endif #ifdef CONFIG_NF_CONNTRACK_ZONES + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */ #endif + ctnetlink_proto_size(ct) ; } static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct) { const struct nf_conntrack_zone *zone; struct nlattr *nest_parms; zone = nf_ct_zone(ct); nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG); if (!nest_parms) goto nla_put_failure; if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0) goto nla_put_failure; if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone, NF_CT_ZONE_DIR_ORIG) < 0) goto nla_put_failure; nla_nest_end(skb, nest_parms); nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY); if (!nest_parms) goto nla_put_failure; if 
(ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0) goto nla_put_failure; if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone, NF_CT_ZONE_DIR_REPL) < 0) goto nla_put_failure; nla_nest_end(skb, nest_parms); if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone, NF_CT_DEFAULT_ZONE_DIR) < 0) goto nla_put_failure; if (ctnetlink_dump_id(skb, ct) < 0) goto nla_put_failure; if (ctnetlink_dump_status(skb, ct) < 0) goto nla_put_failure; if (ctnetlink_dump_timeout(skb, ct, false) < 0) goto nla_put_failure; if (ctnetlink_dump_protoinfo(skb, ct, false) < 0) goto nla_put_failure; if (ctnetlink_dump_acct(skb, ct, IPCTNL_MSG_CT_GET) < 0 || ctnetlink_dump_timestamp(skb, ct) < 0) goto nla_put_failure; if (ctnetlink_dump_helpinfo(skb, ct) < 0) goto nla_put_failure; #ifdef CONFIG_NF_CONNTRACK_SECMARK if (ct->secmark && ctnetlink_dump_secctx(skb, ct) < 0) goto nla_put_failure; #endif if (ct->master && ctnetlink_dump_master(skb, ct) < 0) goto nla_put_failure; if ((ct->status & IPS_SEQ_ADJUST) && ctnetlink_dump_ct_seq_adj(skb, ct) < 0) goto nla_put_failure; if (ctnetlink_dump_ct_synproxy(skb, ct) < 0) goto nla_put_failure; #ifdef CONFIG_NF_CONNTRACK_MARK if (ctnetlink_dump_mark(skb, ct, true) < 0) goto nla_put_failure; #endif if (ctnetlink_dump_labels(skb, ct) < 0) goto nla_put_failure; return 0; nla_put_failure: return -ENOSPC; } static int ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, u_int16_t ct_attr, u_int16_t ct_info_attr) { struct nlattr *nest_parms; nest_parms = nla_nest_start(skb, ct_attr); if (!nest_parms) goto nla_put_failure; if (__ctnetlink_glue_build(skb, ct) < 0) goto nla_put_failure; nla_nest_end(skb, nest_parms); if (nla_put_be32(skb, ct_info_attr, htonl(ctinfo))) goto nla_put_failure; return 0; nla_put_failure: return -ENOSPC; } static int ctnetlink_update_status(struct nf_conn *ct, const struct nlattr * const cda[]) { unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS])); unsigned long d = ct->status ^ status; if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY)) /* SEEN_REPLY bit can only be set */ return -EBUSY; if (d & IPS_ASSURED && !(status & IPS_ASSURED)) /* ASSURED bit can only be set */ return -EBUSY; /* This check is less strict than ctnetlink_change_status() * because callers often flip IPS_EXPECTED bits when sending * an NFQA_CT attribute to the kernel. So ignore the * unchangeable bits but do not error out. Also user programs * are allowed to clear the bits that they are allowed to change. 
*/ __nf_ct_change_status(ct, status, ~status); return 0; } static int ctnetlink_glue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct) { int err; if (cda[CTA_TIMEOUT]) { err = ctnetlink_change_timeout(ct, cda); if (err < 0) return err; } if (cda[CTA_STATUS]) { err = ctnetlink_update_status(ct, cda); if (err < 0) return err; } if (cda[CTA_HELP]) { err = ctnetlink_change_helper(ct, cda); if (err < 0) return err; } if (cda[CTA_LABELS]) { err = ctnetlink_attach_labels(ct, cda); if (err < 0) return err; } #if defined(CONFIG_NF_CONNTRACK_MARK) if (cda[CTA_MARK]) { ctnetlink_change_mark(ct, cda); } #endif return 0; } static int ctnetlink_glue_parse(const struct nlattr *attr, struct nf_conn *ct) { struct nlattr *cda[CTA_MAX+1]; int ret; ret = nla_parse_nested_deprecated(cda, CTA_MAX, attr, ct_nla_policy, NULL); if (ret < 0) return ret; return ctnetlink_glue_parse_ct((const struct nlattr **)cda, ct); } static int ctnetlink_glue_exp_parse(const struct nlattr * const *cda, const struct nf_conn *ct, struct nf_conntrack_tuple *tuple, struct nf_conntrack_tuple *mask) { int err; err = ctnetlink_parse_tuple(cda, tuple, CTA_EXPECT_TUPLE, nf_ct_l3num(ct), NULL); if (err < 0) return err; return ctnetlink_parse_tuple(cda, mask, CTA_EXPECT_MASK, nf_ct_l3num(ct), NULL); } static int ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct, u32 portid, u32 report) { struct nlattr *cda[CTA_EXPECT_MAX+1]; struct nf_conntrack_tuple tuple, mask; struct nf_conntrack_helper *helper = NULL; struct nf_conntrack_expect *exp; int err; err = nla_parse_nested_deprecated(cda, CTA_EXPECT_MAX, attr, exp_nla_policy, NULL); if (err < 0) return err; err = ctnetlink_glue_exp_parse((const struct nlattr * const *)cda, ct, &tuple, &mask); if (err < 0) return err; if (cda[CTA_EXPECT_HELP_NAME]) { const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]); helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct), nf_ct_protonum(ct)); if (helper == NULL) return -EOPNOTSUPP; } exp = ctnetlink_alloc_expect((const struct nlattr * const *)cda, ct, helper, &tuple, &mask); if (IS_ERR(exp)) return PTR_ERR(exp); err = nf_ct_expect_related_report(exp, portid, report, 0); nf_ct_expect_put(exp); return err; } static void ctnetlink_glue_seqadj(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, int diff) { if (!(ct->status & IPS_NAT_MASK)) return; nf_ct_tcp_seqadj_set(skb, ct, ctinfo, diff); } static const struct nfnl_ct_hook ctnetlink_glue_hook = { .build_size = ctnetlink_glue_build_size, .build = ctnetlink_glue_build, .parse = ctnetlink_glue_parse, .attach_expect = ctnetlink_glue_attach_expect, .seq_adjust = ctnetlink_glue_seqadj, }; #endif /* CONFIG_NETFILTER_NETLINK_GLUE_CT */ /*********************************************************************** * EXPECT ***********************************************************************/ static int ctnetlink_exp_dump_tuple(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple, u32 type) { struct nlattr *nest_parms; nest_parms = nla_nest_start(skb, type); if (!nest_parms) goto nla_put_failure; if (ctnetlink_dump_tuples(skb, tuple) < 0) goto nla_put_failure; nla_nest_end(skb, nest_parms); return 0; nla_put_failure: return -1; } static int ctnetlink_exp_dump_mask(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple, const struct nf_conntrack_tuple_mask *mask) { const struct nf_conntrack_l4proto *l4proto; struct nf_conntrack_tuple m; struct nlattr *nest_parms; int ret; memset(&m, 0xFF, sizeof(m)); memcpy(&m.src.u3, &mask->src.u3, 
sizeof(m.src.u3)); m.src.u.all = mask->src.u.all; m.src.l3num = tuple->src.l3num; m.dst.protonum = tuple->dst.protonum; nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK); if (!nest_parms) goto nla_put_failure; rcu_read_lock(); ret = ctnetlink_dump_tuples_ip(skb, &m); if (ret >= 0) { l4proto = nf_ct_l4proto_find(tuple->dst.protonum); ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto); } rcu_read_unlock(); if (unlikely(ret < 0)) goto nla_put_failure; nla_nest_end(skb, nest_parms); return 0; nla_put_failure: return -1; } #if IS_ENABLED(CONFIG_NF_NAT) static const union nf_inet_addr any_addr; #endif static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp) { static siphash_aligned_key_t exp_id_seed; unsigned long a, b, c, d; net_get_random_once(&exp_id_seed, sizeof(exp_id_seed)); a = (unsigned long)exp; b = (unsigned long)exp->helper; c = (unsigned long)exp->master; d = (unsigned long)siphash(&exp->tuple, sizeof(exp->tuple), &exp_id_seed); #ifdef CONFIG_64BIT return (__force __be32)siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &exp_id_seed); #else return (__force __be32)siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &exp_id_seed); #endif } static int ctnetlink_exp_dump_expect(struct sk_buff *skb, const struct nf_conntrack_expect *exp) { struct nf_conn *master = exp->master; long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ; struct nf_conn_help *help; #if IS_ENABLED(CONFIG_NF_NAT) struct nlattr *nest_parms; struct nf_conntrack_tuple nat_tuple = {}; #endif struct nf_ct_helper_expectfn *expfn; if (timeout < 0) timeout = 0; if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0) goto nla_put_failure; if (ctnetlink_exp_dump_mask(skb, &exp->tuple, &exp->mask) < 0) goto nla_put_failure; if (ctnetlink_exp_dump_tuple(skb, &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple, CTA_EXPECT_MASTER) < 0) goto nla_put_failure; #if IS_ENABLED(CONFIG_NF_NAT) if (!nf_inet_addr_cmp(&exp->saved_addr, &any_addr) || exp->saved_proto.all) { nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT); if (!nest_parms) goto nla_put_failure; if (nla_put_be32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir))) goto nla_put_failure; nat_tuple.src.l3num = nf_ct_l3num(master); nat_tuple.src.u3 = exp->saved_addr; nat_tuple.dst.protonum = nf_ct_protonum(master); nat_tuple.src.u = exp->saved_proto; if (ctnetlink_exp_dump_tuple(skb, &nat_tuple, CTA_EXPECT_NAT_TUPLE) < 0) goto nla_put_failure; nla_nest_end(skb, nest_parms); } #endif if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) || nla_put_be32(skb, CTA_EXPECT_ID, nf_expect_get_id(exp)) || nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) || nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class))) goto nla_put_failure; help = nfct_help(master); if (help) { struct nf_conntrack_helper *helper; helper = rcu_dereference(help->helper); if (helper && nla_put_string(skb, CTA_EXPECT_HELP_NAME, helper->name)) goto nla_put_failure; } expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn); if (expfn != NULL && nla_put_string(skb, CTA_EXPECT_FN, expfn->name)) goto nla_put_failure; return 0; nla_put_failure: return -1; } static int ctnetlink_exp_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int event, const struct nf_conntrack_expect *exp) { struct nlmsghdr *nlh; unsigned int flags = portid ? 
NLM_F_MULTI : 0; event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP, event); nlh = nfnl_msg_put(skb, portid, seq, event, flags, exp->tuple.src.l3num, NFNETLINK_V0, 0); if (!nlh) goto nlmsg_failure; if (ctnetlink_exp_dump_expect(skb, exp) < 0) goto nla_put_failure; nlmsg_end(skb, nlh); return skb->len; nlmsg_failure: nla_put_failure: nlmsg_cancel(skb, nlh); return -1; } #ifdef CONFIG_NF_CONNTRACK_EVENTS static int ctnetlink_expect_event(unsigned int events, const struct nf_exp_event *item) { struct nf_conntrack_expect *exp = item->exp; struct net *net = nf_ct_exp_net(exp); struct nlmsghdr *nlh; struct sk_buff *skb; unsigned int type, group; int flags = 0; if (events & (1 << IPEXP_DESTROY)) { type = IPCTNL_MSG_EXP_DELETE; group = NFNLGRP_CONNTRACK_EXP_DESTROY; } else if (events & (1 << IPEXP_NEW)) { type = IPCTNL_MSG_EXP_NEW; flags = NLM_F_CREATE|NLM_F_EXCL; group = NFNLGRP_CONNTRACK_EXP_NEW; } else return 0; if (!item->report && !nfnetlink_has_listeners(net, group)) return 0; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (skb == NULL) goto errout; type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP, type); nlh = nfnl_msg_put(skb, item->portid, 0, type, flags, exp->tuple.src.l3num, NFNETLINK_V0, 0); if (!nlh) goto nlmsg_failure; if (ctnetlink_exp_dump_expect(skb, exp) < 0) goto nla_put_failure; nlmsg_end(skb, nlh); nfnetlink_send(skb, net, item->portid, group, item->report, GFP_ATOMIC); return 0; nla_put_failure: nlmsg_cancel(skb, nlh); nlmsg_failure: kfree_skb(skb); errout: nfnetlink_set_err(net, 0, 0, -ENOBUFS); return 0; } #endif static int ctnetlink_exp_done(struct netlink_callback *cb) { if (cb->args[1]) nf_ct_expect_put((struct nf_conntrack_expect *)cb->args[1]); return 0; } static int ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); struct nf_conntrack_expect *exp, *last; struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); u_int8_t l3proto = nfmsg->nfgen_family; rcu_read_lock(); last = (struct nf_conntrack_expect *)cb->args[1]; for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) { restart: hlist_for_each_entry_rcu(exp, &nf_ct_expect_hash[cb->args[0]], hnode) { if (l3proto && exp->tuple.src.l3num != l3proto) continue; if (!net_eq(nf_ct_net(exp->master), net)) continue; if (cb->args[1]) { if (exp != last) continue; cb->args[1] = 0; } if (ctnetlink_exp_fill_info(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp) < 0) { if (!refcount_inc_not_zero(&exp->use)) continue; cb->args[1] = (unsigned long)exp; goto out; } } if (cb->args[1]) { cb->args[1] = 0; goto restart; } } out: rcu_read_unlock(); if (last) nf_ct_expect_put(last); return skb->len; } static int ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb) { struct nf_conntrack_expect *exp, *last; struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); struct nf_conn *ct = cb->data; struct nf_conn_help *help = nfct_help(ct); u_int8_t l3proto = nfmsg->nfgen_family; if (cb->args[0]) return 0; rcu_read_lock(); last = (struct nf_conntrack_expect *)cb->args[1]; restart: hlist_for_each_entry_rcu(exp, &help->expectations, lnode) { if (l3proto && exp->tuple.src.l3num != l3proto) continue; if (cb->args[1]) { if (exp != last) continue; cb->args[1] = 0; } if (ctnetlink_exp_fill_info(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp) < 0) { if (!refcount_inc_not_zero(&exp->use)) continue; cb->args[1] = (unsigned long)exp; goto out; } } if (cb->args[1]) { cb->args[1] = 0; goto restart; } cb->args[0] = 1; out: 
rcu_read_unlock(); if (last) nf_ct_expect_put(last); return skb->len; } static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const cda[], struct netlink_ext_ack *extack) { int err; struct nfgenmsg *nfmsg = nlmsg_data(nlh); u_int8_t u3 = nfmsg->nfgen_family; struct nf_conntrack_tuple tuple; struct nf_conntrack_tuple_hash *h; struct nf_conn *ct; struct nf_conntrack_zone zone; struct netlink_dump_control c = { .dump = ctnetlink_exp_ct_dump_table, .done = ctnetlink_exp_done, }; err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3, NULL); if (err < 0) return err; err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); if (err < 0) return err; h = nf_conntrack_find_get(net, &zone, &tuple); if (!h) return -ENOENT; ct = nf_ct_tuplehash_to_ctrack(h); /* No expectation linked to this connection tracking. */ if (!nfct_help(ct)) { nf_ct_put(ct); return 0; } c.data = ct; err = netlink_dump_start(ctnl, skb, nlh, &c); nf_ct_put(ct); return err; } static int ctnetlink_get_expect(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const cda[]) { u_int8_t u3 = info->nfmsg->nfgen_family; struct nf_conntrack_tuple tuple; struct nf_conntrack_expect *exp; struct nf_conntrack_zone zone; struct sk_buff *skb2; int err; if (info->nlh->nlmsg_flags & NLM_F_DUMP) { if (cda[CTA_EXPECT_MASTER]) return ctnetlink_dump_exp_ct(info->net, info->sk, skb, info->nlh, cda, info->extack); else { struct netlink_dump_control c = { .dump = ctnetlink_exp_dump_table, .done = ctnetlink_exp_done, }; return netlink_dump_start(info->sk, skb, info->nlh, &c); } } err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); if (err < 0) return err; if (cda[CTA_EXPECT_TUPLE]) err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3, NULL); else if (cda[CTA_EXPECT_MASTER]) err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3, NULL); else return -EINVAL; if (err < 0) return err; exp = nf_ct_expect_find_get(info->net, &zone, &tuple); if (!exp) return -ENOENT; if (cda[CTA_EXPECT_ID]) { __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]); if (id != nf_expect_get_id(exp)) { nf_ct_expect_put(exp); return -ENOENT; } } skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb2) { nf_ct_expect_put(exp); return -ENOMEM; } rcu_read_lock(); err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).portid, info->nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp); rcu_read_unlock(); nf_ct_expect_put(exp); if (err <= 0) { kfree_skb(skb2); return -ENOMEM; } return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid); } static bool expect_iter_name(struct nf_conntrack_expect *exp, void *data) { struct nf_conntrack_helper *helper; const struct nf_conn_help *m_help; const char *name = data; m_help = nfct_help(exp->master); helper = rcu_dereference(m_help->helper); if (!helper) return false; return strcmp(helper->name, name) == 0; } static bool expect_iter_all(struct nf_conntrack_expect *exp, void *data) { return true; } static int ctnetlink_del_expect(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const cda[]) { u_int8_t u3 = info->nfmsg->nfgen_family; struct nf_conntrack_expect *exp; struct nf_conntrack_tuple tuple; struct nf_conntrack_zone zone; int err; if (cda[CTA_EXPECT_TUPLE]) { /* delete a single expect by tuple */ err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); if (err < 0) return err; err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3, NULL); if (err < 0) return err; /* bump usage count to 2 */ 
exp = nf_ct_expect_find_get(info->net, &zone, &tuple); if (!exp) return -ENOENT; if (cda[CTA_EXPECT_ID]) { __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]); if (ntohl(id) != (u32)(unsigned long)exp) { nf_ct_expect_put(exp); return -ENOENT; } } /* after list removal, usage count == 1 */ spin_lock_bh(&nf_conntrack_expect_lock); if (del_timer(&exp->timeout)) { nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).portid, nlmsg_report(info->nlh)); nf_ct_expect_put(exp); } spin_unlock_bh(&nf_conntrack_expect_lock); /* have to put what we 'get' above. * after this line usage count == 0 */ nf_ct_expect_put(exp); } else if (cda[CTA_EXPECT_HELP_NAME]) { char *name = nla_data(cda[CTA_EXPECT_HELP_NAME]); nf_ct_expect_iterate_net(info->net, expect_iter_name, name, NETLINK_CB(skb).portid, nlmsg_report(info->nlh)); } else { /* This basically means we have to flush everything*/ nf_ct_expect_iterate_net(info->net, expect_iter_all, NULL, NETLINK_CB(skb).portid, nlmsg_report(info->nlh)); } return 0; } static int ctnetlink_change_expect(struct nf_conntrack_expect *x, const struct nlattr * const cda[]) { if (cda[CTA_EXPECT_TIMEOUT]) { if (!del_timer(&x->timeout)) return -ETIME; x->timeout.expires = jiffies + ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ; add_timer(&x->timeout); } return 0; } #if IS_ENABLED(CONFIG_NF_NAT) static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = { [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 }, [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED }, }; #endif static int ctnetlink_parse_expect_nat(const struct nlattr *attr, struct nf_conntrack_expect *exp, u_int8_t u3) { #if IS_ENABLED(CONFIG_NF_NAT) struct nlattr *tb[CTA_EXPECT_NAT_MAX+1]; struct nf_conntrack_tuple nat_tuple = {}; int err; err = nla_parse_nested_deprecated(tb, CTA_EXPECT_NAT_MAX, attr, exp_nat_nla_policy, NULL); if (err < 0) return err; if (!tb[CTA_EXPECT_NAT_DIR] || !tb[CTA_EXPECT_NAT_TUPLE]) return -EINVAL; err = ctnetlink_parse_tuple((const struct nlattr * const *)tb, &nat_tuple, CTA_EXPECT_NAT_TUPLE, u3, NULL); if (err < 0) return err; exp->saved_addr = nat_tuple.src.u3; exp->saved_proto = nat_tuple.src.u; exp->dir = ntohl(nla_get_be32(tb[CTA_EXPECT_NAT_DIR])); return 0; #else return -EOPNOTSUPP; #endif } static struct nf_conntrack_expect * ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct, struct nf_conntrack_helper *helper, struct nf_conntrack_tuple *tuple, struct nf_conntrack_tuple *mask) { u_int32_t class = 0; struct nf_conntrack_expect *exp; struct nf_conn_help *help; int err; help = nfct_help(ct); if (!help) return ERR_PTR(-EOPNOTSUPP); if (cda[CTA_EXPECT_CLASS] && helper) { class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS])); if (class > helper->expect_class_max) return ERR_PTR(-EINVAL); } exp = nf_ct_expect_alloc(ct); if (!exp) return ERR_PTR(-ENOMEM); if (cda[CTA_EXPECT_FLAGS]) { exp->flags = ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS])); exp->flags &= ~NF_CT_EXPECT_USERSPACE; } else { exp->flags = 0; } if (cda[CTA_EXPECT_FN]) { const char *name = nla_data(cda[CTA_EXPECT_FN]); struct nf_ct_helper_expectfn *expfn; expfn = nf_ct_helper_expectfn_find_by_name(name); if (expfn == NULL) { err = -EINVAL; goto err_out; } exp->expectfn = expfn->expectfn; } else exp->expectfn = NULL; exp->class = class; exp->master = ct; exp->helper = helper; exp->tuple = *tuple; exp->mask.src.u3 = mask->src.u3; exp->mask.src.u.all = mask->src.u.all; if (cda[CTA_EXPECT_NAT]) { err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT], exp, nf_ct_l3num(ct)); if (err < 0) goto err_out; } return exp; err_out: 
nf_ct_expect_put(exp); return ERR_PTR(err); } static int ctnetlink_create_expect(struct net *net, const struct nf_conntrack_zone *zone, const struct nlattr * const cda[], u_int8_t u3, u32 portid, int report) { struct nf_conntrack_tuple tuple, mask, master_tuple; struct nf_conntrack_tuple_hash *h = NULL; struct nf_conntrack_helper *helper = NULL; struct nf_conntrack_expect *exp; struct nf_conn *ct; int err; /* caller guarantees that those three CTA_EXPECT_* exist */ err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3, NULL); if (err < 0) return err; err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3, NULL); if (err < 0) return err; err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3, NULL); if (err < 0) return err; /* Look for master conntrack of this expectation */ h = nf_conntrack_find_get(net, zone, &master_tuple); if (!h) return -ENOENT; ct = nf_ct_tuplehash_to_ctrack(h); rcu_read_lock(); if (cda[CTA_EXPECT_HELP_NAME]) { const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]); helper = __nf_conntrack_helper_find(helpname, u3, nf_ct_protonum(ct)); if (helper == NULL) { rcu_read_unlock(); #ifdef CONFIG_MODULES if (request_module("nfct-helper-%s", helpname) < 0) { err = -EOPNOTSUPP; goto err_ct; } rcu_read_lock(); helper = __nf_conntrack_helper_find(helpname, u3, nf_ct_protonum(ct)); if (helper) { err = -EAGAIN; goto err_rcu; } rcu_read_unlock(); #endif err = -EOPNOTSUPP; goto err_ct; } } exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask); if (IS_ERR(exp)) { err = PTR_ERR(exp); goto err_rcu; } err = nf_ct_expect_related_report(exp, portid, report, 0); nf_ct_expect_put(exp); err_rcu: rcu_read_unlock(); err_ct: nf_ct_put(ct); return err; } static int ctnetlink_new_expect(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const cda[]) { u_int8_t u3 = info->nfmsg->nfgen_family; struct nf_conntrack_tuple tuple; struct nf_conntrack_expect *exp; struct nf_conntrack_zone zone; int err; if (!cda[CTA_EXPECT_TUPLE] || !cda[CTA_EXPECT_MASK] || !cda[CTA_EXPECT_MASTER]) return -EINVAL; err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); if (err < 0) return err; err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3, NULL); if (err < 0) return err; spin_lock_bh(&nf_conntrack_expect_lock); exp = __nf_ct_expect_find(info->net, &zone, &tuple); if (!exp) { spin_unlock_bh(&nf_conntrack_expect_lock); err = -ENOENT; if (info->nlh->nlmsg_flags & NLM_F_CREATE) { err = ctnetlink_create_expect(info->net, &zone, cda, u3, NETLINK_CB(skb).portid, nlmsg_report(info->nlh)); } return err; } err = -EEXIST; if (!(info->nlh->nlmsg_flags & NLM_F_EXCL)) err = ctnetlink_change_expect(exp, cda); spin_unlock_bh(&nf_conntrack_expect_lock); return err; } static int ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu, const struct ip_conntrack_stat *st) { struct nlmsghdr *nlh; unsigned int flags = portid ? 
NLM_F_MULTI : 0, event; event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_EXP_GET_STATS_CPU); nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC, NFNETLINK_V0, htons(cpu)); if (!nlh) goto nlmsg_failure; if (nla_put_be32(skb, CTA_STATS_EXP_NEW, htonl(st->expect_new)) || nla_put_be32(skb, CTA_STATS_EXP_CREATE, htonl(st->expect_create)) || nla_put_be32(skb, CTA_STATS_EXP_DELETE, htonl(st->expect_delete))) goto nla_put_failure; nlmsg_end(skb, nlh); return skb->len; nla_put_failure: nlmsg_failure: nlmsg_cancel(skb, nlh); return -1; } static int ctnetlink_exp_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb) { int cpu; struct net *net = sock_net(skb->sk); if (cb->args[0] == nr_cpu_ids) return 0; for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) { const struct ip_conntrack_stat *st; if (!cpu_possible(cpu)) continue; st = per_cpu_ptr(net->ct.stat, cpu); if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, cpu, st) < 0) break; } cb->args[0] = cpu; return skb->len; } static int ctnetlink_stat_exp_cpu(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const cda[]) { if (info->nlh->nlmsg_flags & NLM_F_DUMP) { struct netlink_dump_control c = { .dump = ctnetlink_exp_stat_cpu_dump, }; return netlink_dump_start(info->sk, skb, info->nlh, &c); } return 0; } #ifdef CONFIG_NF_CONNTRACK_EVENTS static struct nf_ct_event_notifier ctnl_notifier = { .ct_event = ctnetlink_conntrack_event, .exp_event = ctnetlink_expect_event, }; #endif static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = { [IPCTNL_MSG_CT_NEW] = { .call = ctnetlink_new_conntrack, .type = NFNL_CB_MUTEX, .attr_count = CTA_MAX, .policy = ct_nla_policy }, [IPCTNL_MSG_CT_GET] = { .call = ctnetlink_get_conntrack, .type = NFNL_CB_MUTEX, .attr_count = CTA_MAX, .policy = ct_nla_policy }, [IPCTNL_MSG_CT_DELETE] = { .call = ctnetlink_del_conntrack, .type = NFNL_CB_MUTEX, .attr_count = CTA_MAX, .policy = ct_nla_policy }, [IPCTNL_MSG_CT_GET_CTRZERO] = { .call = ctnetlink_get_conntrack, .type = NFNL_CB_MUTEX, .attr_count = CTA_MAX, .policy = ct_nla_policy }, [IPCTNL_MSG_CT_GET_STATS_CPU] = { .call = ctnetlink_stat_ct_cpu, .type = NFNL_CB_MUTEX, }, [IPCTNL_MSG_CT_GET_STATS] = { .call = ctnetlink_stat_ct, .type = NFNL_CB_MUTEX, }, [IPCTNL_MSG_CT_GET_DYING] = { .call = ctnetlink_get_ct_dying, .type = NFNL_CB_MUTEX, }, [IPCTNL_MSG_CT_GET_UNCONFIRMED] = { .call = ctnetlink_get_ct_unconfirmed, .type = NFNL_CB_MUTEX, }, }; static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = { [IPCTNL_MSG_EXP_GET] = { .call = ctnetlink_get_expect, .type = NFNL_CB_MUTEX, .attr_count = CTA_EXPECT_MAX, .policy = exp_nla_policy }, [IPCTNL_MSG_EXP_NEW] = { .call = ctnetlink_new_expect, .type = NFNL_CB_MUTEX, .attr_count = CTA_EXPECT_MAX, .policy = exp_nla_policy }, [IPCTNL_MSG_EXP_DELETE] = { .call = ctnetlink_del_expect, .type = NFNL_CB_MUTEX, .attr_count = CTA_EXPECT_MAX, .policy = exp_nla_policy }, [IPCTNL_MSG_EXP_GET_STATS_CPU] = { .call = ctnetlink_stat_exp_cpu, .type = NFNL_CB_MUTEX, }, }; static const struct nfnetlink_subsystem ctnl_subsys = { .name = "conntrack", .subsys_id = NFNL_SUBSYS_CTNETLINK, .cb_count = IPCTNL_MSG_MAX, .cb = ctnl_cb, }; static const struct nfnetlink_subsystem ctnl_exp_subsys = { .name = "conntrack_expect", .subsys_id = NFNL_SUBSYS_CTNETLINK_EXP, .cb_count = IPCTNL_MSG_EXP_MAX, .cb = ctnl_exp_cb, }; MODULE_ALIAS("ip_conntrack_netlink"); MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK); MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP); static int __net_init 
ctnetlink_net_init(struct net *net) { #ifdef CONFIG_NF_CONNTRACK_EVENTS nf_conntrack_register_notifier(net, &ctnl_notifier); #endif return 0; } static void ctnetlink_net_pre_exit(struct net *net) { #ifdef CONFIG_NF_CONNTRACK_EVENTS nf_conntrack_unregister_notifier(net); #endif } static struct pernet_operations ctnetlink_net_ops = { .init = ctnetlink_net_init, .pre_exit = ctnetlink_net_pre_exit, }; static int __init ctnetlink_init(void) { int ret; NL_ASSERT_DUMP_CTX_FITS(struct ctnetlink_list_dump_ctx); ret = nfnetlink_subsys_register(&ctnl_subsys); if (ret < 0) { pr_err("ctnetlink_init: cannot register with nfnetlink.\n"); goto err_out; } ret = nfnetlink_subsys_register(&ctnl_exp_subsys); if (ret < 0) { pr_err("ctnetlink_init: cannot register exp with nfnetlink.\n"); goto err_unreg_subsys; } ret = register_pernet_subsys(&ctnetlink_net_ops); if (ret < 0) { pr_err("ctnetlink_init: cannot register pernet operations\n"); goto err_unreg_exp_subsys; } #ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT /* setup interaction between nf_queue and nf_conntrack_netlink. */ RCU_INIT_POINTER(nfnl_ct_hook, &ctnetlink_glue_hook); #endif return 0; err_unreg_exp_subsys: nfnetlink_subsys_unregister(&ctnl_exp_subsys); err_unreg_subsys: nfnetlink_subsys_unregister(&ctnl_subsys); err_out: return ret; } static void __exit ctnetlink_exit(void) { unregister_pernet_subsys(&ctnetlink_net_ops); nfnetlink_subsys_unregister(&ctnl_exp_subsys); nfnetlink_subsys_unregister(&ctnl_subsys); #ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT RCU_INIT_POINTER(nfnl_ct_hook, NULL); #endif synchronize_rcu(); } module_init(ctnetlink_init); module_exit(ctnetlink_exit);
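/* Illustrative userspace sketch (not part of the kernel tree): dump the
 * per-CPU conntrack statistics that ctnetlink_stat_ct_cpu() above serves.
 * It assumes libmnl is available; names such as stats_msg_cb are hypothetical
 * and error handling is trimmed. Build e.g. with: gcc ct_stats.c -lmnl
 */
#include <stdio.h>
#include <time.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <libmnl/libmnl.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static int stats_attr_cb(const struct nlattr *attr, void *data)
{
	/* Every CTA_STATS_* payload is a 32-bit big-endian counter. */
	if (mnl_attr_type_valid(attr, CTA_STATS_MAX) < 0)
		return MNL_CB_OK;
	printf("  attr %u = %u\n", mnl_attr_get_type(attr),
	       ntohl(mnl_attr_get_u32(attr)));
	return MNL_CB_OK;
}

static int stats_msg_cb(const struct nlmsghdr *nlh, void *data)
{
	const struct nfgenmsg *nfg = mnl_nlmsg_get_payload(nlh);

	/* res_id carries the CPU number, see htons(cpu) in the fill function. */
	printf("cpu %u:\n", ntohs(nfg->res_id));
	mnl_attr_parse(nlh, sizeof(*nfg), stats_attr_cb, NULL);
	return MNL_CB_OK;
}

int main(void)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	unsigned int seq = time(NULL), portid;
	struct mnl_socket *nl;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfg;
	ssize_t ret;

	nl = mnl_socket_open(NETLINK_NETFILTER);
	if (!nl)
		return 1;
	mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID);
	portid = mnl_socket_get_portid(nl);

	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type = (NFNL_SUBSYS_CTNETLINK << 8) | IPCTNL_MSG_CT_GET_STATS_CPU;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	nlh->nlmsg_seq = seq;
	nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
	nfg->nfgen_family = AF_UNSPEC;
	nfg->version = NFNETLINK_V0;
	nfg->res_id = 0;

	mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);
	while ((ret = mnl_socket_recvfrom(nl, buf, sizeof(buf))) > 0) {
		ret = mnl_cb_run(buf, ret, seq, portid, stats_msg_cb, NULL);
		if (ret <= MNL_CB_STOP)
			break;
	}
	mnl_socket_close(nl);
	return 0;
}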
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef RQ_QOS_H #define RQ_QOS_H #include <linux/kernel.h> #include <linux/blkdev.h> #include <linux/blk_types.h> #include <linux/atomic.h> #include <linux/wait.h> #include <linux/blk-mq.h> #include "blk-mq-debugfs.h" struct blk_mq_debugfs_attr; enum rq_qos_id { RQ_QOS_WBT, RQ_QOS_LATENCY, RQ_QOS_COST, }; struct rq_wait { wait_queue_head_t wait; atomic_t inflight; }; struct rq_qos { const struct rq_qos_ops *ops; struct gendisk *disk; enum rq_qos_id id; struct rq_qos *next; #ifdef CONFIG_BLK_DEBUG_FS struct dentry *debugfs_dir; #endif }; struct rq_qos_ops { void (*throttle)(struct rq_qos *, struct bio *); void (*track)(struct rq_qos *, struct request *, struct bio *); void (*merge)(struct rq_qos *, struct request *, struct bio *); void (*issue)(struct rq_qos *, struct request *); void (*requeue)(struct rq_qos *, struct request *); void (*done)(struct rq_qos *, struct request *); void (*done_bio)(struct rq_qos *, struct bio *); void (*cleanup)(struct rq_qos *, struct bio *); void (*queue_depth_changed)(struct rq_qos *); void (*exit)(struct rq_qos *); const struct blk_mq_debugfs_attr *debugfs_attrs; }; struct rq_depth { unsigned int max_depth; int scale_step; bool scaled_max; unsigned int queue_depth; unsigned int default_depth; }; static inline struct rq_qos *rq_qos_id(struct request_queue *q, enum rq_qos_id id) { struct rq_qos *rqos; for (rqos = q->rq_qos; rqos; rqos = rqos->next) { if (rqos->id == id) break; } return rqos; } static inline struct rq_qos *wbt_rq_qos(struct request_queue *q) { return rq_qos_id(q, RQ_QOS_WBT); } static inline struct rq_qos *iolat_rq_qos(struct request_queue *q) { return rq_qos_id(q, RQ_QOS_LATENCY); } static inline void rq_wait_init(struct rq_wait *rq_wait) { atomic_set(&rq_wait->inflight, 0); init_waitqueue_head(&rq_wait->wait); } int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id, const struct rq_qos_ops *ops); void rq_qos_del(struct rq_qos *rqos); typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data); typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data); void rq_qos_wait(struct rq_wait *rqw, void *private_data, acquire_inflight_cb_t *acquire_inflight_cb, cleanup_cb_t *cleanup_cb); bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit); bool rq_depth_scale_up(struct rq_depth *rqd); bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle); bool rq_depth_calc_max_depth(struct rq_depth *rqd); void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio); void __rq_qos_done(struct rq_qos *rqos, struct request *rq); void __rq_qos_issue(struct rq_qos *rqos, struct request *rq); void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq); void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio); void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio); void __rq_qos_merge(struct
rq_qos *rqos, struct request *rq, struct bio *bio); void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio); void __rq_qos_queue_depth_changed(struct rq_qos *rqos); static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio) { if (q->rq_qos) __rq_qos_cleanup(q->rq_qos, bio); } static inline void rq_qos_done(struct request_queue *q, struct request *rq) { if (q->rq_qos && !blk_rq_is_passthrough(rq)) __rq_qos_done(q->rq_qos, rq); } static inline void rq_qos_issue(struct request_queue *q, struct request *rq) { if (q->rq_qos) __rq_qos_issue(q->rq_qos, rq); } static inline void rq_qos_requeue(struct request_queue *q, struct request *rq) { if (q->rq_qos) __rq_qos_requeue(q->rq_qos, rq); } static inline void rq_qos_done_bio(struct bio *bio) { if (bio->bi_bdev && (bio_flagged(bio, BIO_QOS_THROTTLED) || bio_flagged(bio, BIO_QOS_MERGED))) { struct request_queue *q = bdev_get_queue(bio->bi_bdev); if (q->rq_qos) __rq_qos_done_bio(q->rq_qos, bio); } } static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio) { if (q->rq_qos) { bio_set_flag(bio, BIO_QOS_THROTTLED); __rq_qos_throttle(q->rq_qos, bio); } } static inline void rq_qos_track(struct request_queue *q, struct request *rq, struct bio *bio) { if (q->rq_qos) __rq_qos_track(q->rq_qos, rq, bio); } static inline void rq_qos_merge(struct request_queue *q, struct request *rq, struct bio *bio) { if (q->rq_qos) { bio_set_flag(bio, BIO_QOS_MERGED); __rq_qos_merge(q->rq_qos, rq, bio); } } static inline void rq_qos_queue_depth_changed(struct request_queue *q) { if (q->rq_qos) __rq_qos_queue_depth_changed(q->rq_qos); } void rq_qos_exit(struct request_queue *); #endif
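/* Illustrative sketch only, not part of blk-rq-qos.h: the minimal shape of an
 * rq_qos policy built on the hooks declared above. It caps the number of
 * in-flight throttled bios using rq_qos_wait()/rq_wait_inc_below(); merge
 * accounting is ignored for brevity. All demo_* names and DEMO_MAX_INFLIGHT
 * are hypothetical, and a real policy would need its own enum rq_qos_id
 * value; RQ_QOS_LATENCY is reused here purely as a placeholder.
 */
#include <linux/slab.h>
#include "blk-rq-qos.h"

#define DEMO_MAX_INFLIGHT	64

struct demo_qos {
	struct rq_qos rqos;
	struct rq_wait rqw;
};

static inline struct demo_qos *rqos_to_demo(struct rq_qos *rqos)
{
	return container_of(rqos, struct demo_qos, rqos);
}

/* acquire_inflight_cb_t: try to take one slot of the in-flight budget. */
static bool demo_inflight_cb(struct rq_wait *rqw, void *private_data)
{
	return rq_wait_inc_below(rqw, DEMO_MAX_INFLIGHT);
}

/* cleanup_cb_t: give the slot back if the waiter is aborted. */
static void demo_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
	atomic_dec(&rqw->inflight);
}

static void demo_throttle(struct rq_qos *rqos, struct bio *bio)
{
	struct demo_qos *dq = rqos_to_demo(rqos);

	/* Sleeps until demo_inflight_cb() succeeds. */
	rq_qos_wait(&dq->rqw, dq, demo_inflight_cb, demo_cleanup_cb);
}

/* Paired with throttle: called for bios flagged BIO_QOS_THROTTLED. */
static void demo_done_bio(struct rq_qos *rqos, struct bio *bio)
{
	struct demo_qos *dq = rqos_to_demo(rqos);

	if (atomic_dec_return(&dq->rqw.inflight) < DEMO_MAX_INFLIGHT)
		wake_up_all(&dq->rqw.wait);
}

static void demo_exit(struct rq_qos *rqos)
{
	kfree(rqos_to_demo(rqos));
}

static const struct rq_qos_ops demo_ops = {
	.throttle	= demo_throttle,
	.done_bio	= demo_done_bio,
	.exit		= demo_exit,
};

static int demo_qos_attach(struct gendisk *disk)
{
	struct demo_qos *dq = kzalloc(sizeof(*dq), GFP_KERNEL);
	int ret;

	if (!dq)
		return -ENOMEM;
	rq_wait_init(&dq->rqw);
	/* Placeholder id, see the comment above. */
	ret = rq_qos_add(&dq->rqos, disk, RQ_QOS_LATENCY, &demo_ops);
	if (ret)
		kfree(dq);
	return ret;
}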
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/module.h> #include <linux/netfilter/nf_tables.h> #include <net/netfilter/nf_tables.h> #include <net/netfilter/nf_tables_core.h> #include <net/netfilter/nf_socket.h> #include <net/inet_sock.h> #include <net/tcp.h> struct nft_socket { enum nft_socket_keys key:8; u8 level; u8 len; union { u8 dreg; }; }; static void nft_socket_wildcard(const struct nft_pktinfo *pkt, struct nft_regs *regs, struct sock *sk, u32 *dest) { switch (nft_pf(pkt)) { case NFPROTO_IPV4: nft_reg_store8(dest, inet_sk(sk)->inet_rcv_saddr == 0); break; #if IS_ENABLED(CONFIG_NF_TABLES_IPV6) case NFPROTO_IPV6: nft_reg_store8(dest, ipv6_addr_any(&sk->sk_v6_rcv_saddr)); break; #endif default: regs->verdict.code = NFT_BREAK; return; } } #ifdef CONFIG_SOCK_CGROUP_DATA static noinline bool nft_sock_get_eval_cgroupv2(u32 *dest, struct sock *sk, const struct nft_pktinfo *pkt, u32 level) { struct cgroup *cgrp; u64 cgid; if (!sk_fullsock(sk)) return false; cgrp = cgroup_ancestor(sock_cgroup_ptr(&sk->sk_cgrp_data), level); if (!cgrp) return false; cgid = cgroup_id(cgrp); memcpy(dest, &cgid, sizeof(u64)); return true; } #endif static struct sock *nft_socket_do_lookup(const struct nft_pktinfo *pkt) { const struct net_device *indev = nft_in(pkt); const struct sk_buff *skb = pkt->skb; struct sock *sk = NULL; if (!indev) return NULL; switch (nft_pf(pkt)) { case NFPROTO_IPV4: sk = nf_sk_lookup_slow_v4(nft_net(pkt), skb, indev); break; #if IS_ENABLED(CONFIG_NF_TABLES_IPV6) case NFPROTO_IPV6: sk = nf_sk_lookup_slow_v6(nft_net(pkt), skb, indev); break; #endif default: WARN_ON_ONCE(1); break; } return sk; } static void nft_socket_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { const struct nft_socket *priv = nft_expr_priv(expr); struct sk_buff *skb = pkt->skb; struct sock *sk = skb->sk; u32 *dest = &regs->data[priv->dreg]; if (sk && !net_eq(nft_net(pkt), sock_net(sk))) sk = NULL; if (!sk) sk = nft_socket_do_lookup(pkt); if (!sk) { regs->verdict.code = NFT_BREAK; return; } switch(priv->key) { case NFT_SOCKET_TRANSPARENT: nft_reg_store8(dest, inet_sk_transparent(sk)); break; case NFT_SOCKET_MARK: if (sk_fullsock(sk)) { *dest = READ_ONCE(sk->sk_mark); } else { regs->verdict.code = NFT_BREAK; return; } break; case NFT_SOCKET_WILDCARD: if (!sk_fullsock(sk)) { regs->verdict.code = NFT_BREAK; return; } nft_socket_wildcard(pkt, regs, sk, dest); break; #ifdef CONFIG_SOCK_CGROUP_DATA case NFT_SOCKET_CGROUPV2:
if (!nft_sock_get_eval_cgroupv2(dest, sk, pkt, priv->level)) { regs->verdict.code = NFT_BREAK; return; } break; #endif default: WARN_ON(1); regs->verdict.code = NFT_BREAK; } if (sk != skb->sk) sock_gen_put(sk); } static const struct nla_policy nft_socket_policy[NFTA_SOCKET_MAX + 1] = { [NFTA_SOCKET_KEY] = NLA_POLICY_MAX(NLA_BE32, 255), [NFTA_SOCKET_DREG] = { .type = NLA_U32 }, [NFTA_SOCKET_LEVEL] = NLA_POLICY_MAX(NLA_BE32, 255), }; static int nft_socket_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_socket *priv = nft_expr_priv(expr); unsigned int len; if (!tb[NFTA_SOCKET_DREG] || !tb[NFTA_SOCKET_KEY]) return -EINVAL; switch(ctx->family) { case NFPROTO_IPV4: #if IS_ENABLED(CONFIG_NF_TABLES_IPV6) case NFPROTO_IPV6: #endif case NFPROTO_INET: break; default: return -EOPNOTSUPP; } priv->key = ntohl(nla_get_be32(tb[NFTA_SOCKET_KEY])); switch(priv->key) { case NFT_SOCKET_TRANSPARENT: case NFT_SOCKET_WILDCARD: len = sizeof(u8); break; case NFT_SOCKET_MARK: len = sizeof(u32); break; #ifdef CONFIG_CGROUPS case NFT_SOCKET_CGROUPV2: { unsigned int level; if (!tb[NFTA_SOCKET_LEVEL]) return -EINVAL; level = ntohl(nla_get_be32(tb[NFTA_SOCKET_LEVEL])); if (level > 255) return -EOPNOTSUPP; priv->level = level; len = sizeof(u64); break; } #endif default: return -EOPNOTSUPP; } priv->len = len; return nft_parse_register_store(ctx, tb[NFTA_SOCKET_DREG], &priv->dreg, NULL, NFT_DATA_VALUE, len); } static int nft_socket_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset) { const struct nft_socket *priv = nft_expr_priv(expr); if (nla_put_be32(skb, NFTA_SOCKET_KEY, htonl(priv->key))) return -1; if (nft_dump_register(skb, NFTA_SOCKET_DREG, priv->dreg)) return -1; if (priv->key == NFT_SOCKET_CGROUPV2 && nla_put_be32(skb, NFTA_SOCKET_LEVEL, htonl(priv->level))) return -1; return 0; } static bool nft_socket_reduce(struct nft_regs_track *track, const struct nft_expr *expr) { const struct nft_socket *priv = nft_expr_priv(expr); const struct nft_socket *socket; if (!nft_reg_track_cmp(track, expr, priv->dreg)) { nft_reg_track_update(track, expr, priv->dreg, priv->len); return false; } socket = nft_expr_priv(track->regs[priv->dreg].selector); if (priv->key != socket->key || priv->dreg != socket->dreg || priv->level != socket->level) { nft_reg_track_update(track, expr, priv->dreg, priv->len); return false; } if (!track->regs[priv->dreg].bitwise) return true; return nft_expr_reduce_bitwise(track, expr); } static int nft_socket_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nft_data **data) { if (ctx->family != NFPROTO_IPV4 && ctx->family != NFPROTO_IPV6 && ctx->family != NFPROTO_INET) return -EOPNOTSUPP; return nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_LOCAL_OUT)); } static struct nft_expr_type nft_socket_type; static const struct nft_expr_ops nft_socket_ops = { .type = &nft_socket_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_socket)), .eval = nft_socket_eval, .init = nft_socket_init, .dump = nft_socket_dump, .validate = nft_socket_validate, .reduce = nft_socket_reduce, }; static struct nft_expr_type nft_socket_type __read_mostly = { .name = "socket", .ops = &nft_socket_ops, .policy = nft_socket_policy, .maxattr = NFTA_SOCKET_MAX, .owner = THIS_MODULE, }; static int __init nft_socket_module_init(void) { return nft_register_expr(&nft_socket_type); } static void __exit nft_socket_module_exit(void) { nft_unregister_expr(&nft_socket_type); } 
module_init(nft_socket_module_init); module_exit(nft_socket_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Máté Eckl"); MODULE_DESCRIPTION("nf_tables socket match module"); MODULE_ALIAS_NFT_EXPR("socket");
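/* Usage notes (illustrative, not part of this file): the expression above is
 * typically driven from nft(8) rules; the exact syntax depends on the
 * userspace nftables version, so treat these as sketches:
 *
 *   nft add rule inet filter prerouting socket transparent 1 mark set 0x1
 *   nft add rule inet filter output socket cgroupv2 level 1 "user.slice" counter
 *
 * NFT_SOCKET_TRANSPARENT and NFT_SOCKET_WILDCARD load a single byte,
 * NFT_SOCKET_MARK loads a u32, and NFT_SOCKET_CGROUPV2 loads the 64-bit
 * cgroup id at the requested ancestor level, matching the register lengths
 * chosen in nft_socket_init().
 */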
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2017 - 2018 Covalent IO, Inc. 
http://covalent.io */ #include <linux/skmsg.h> #include <linux/filter.h> #include <linux/bpf.h> #include <linux/init.h> #include <linux/wait.h> #include <linux/util_macros.h> #include <net/inet_common.h> #include <net/tls.h> void tcp_eat_skb(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tcp; int copied; if (!skb || !skb->len || !sk_is_tcp(sk)) return; if (skb_bpf_strparser(skb)) return; tcp = tcp_sk(sk); copied = tcp->copied_seq + skb->len; WRITE_ONCE(tcp->copied_seq, copied); tcp_rcv_space_adjust(sk); __tcp_cleanup_rbuf(sk, skb->len); } static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock, struct sk_msg *msg, u32 apply_bytes, int flags) { bool apply = apply_bytes; struct scatterlist *sge; u32 size, copied = 0; struct sk_msg *tmp; int i, ret = 0; tmp = kzalloc(sizeof(*tmp), __GFP_NOWARN | GFP_KERNEL); if (unlikely(!tmp)) return -ENOMEM; lock_sock(sk); tmp->sg.start = msg->sg.start; i = msg->sg.start; do { sge = sk_msg_elem(msg, i); size = (apply && apply_bytes < sge->length) ? apply_bytes : sge->length; if (!sk_wmem_schedule(sk, size)) { if (!copied) ret = -ENOMEM; break; } sk_mem_charge(sk, size); sk_msg_xfer(tmp, msg, i, size); copied += size; if (sge->length) get_page(sk_msg_page(tmp, i)); sk_msg_iter_var_next(i); tmp->sg.end = i; if (apply) { apply_bytes -= size; if (!apply_bytes) { if (sge->length) sk_msg_iter_var_prev(i); break; } } } while (i != msg->sg.end); if (!ret) { msg->sg.start = i; sk_psock_queue_msg(psock, tmp); sk_psock_data_ready(sk, psock); } else { sk_msg_free(sk, tmp); kfree(tmp); } release_sock(sk); return ret; } static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes, int flags, bool uncharge) { struct msghdr msghdr = {}; bool apply = apply_bytes; struct scatterlist *sge; struct page *page; int size, ret = 0; u32 off; while (1) { struct bio_vec bvec; bool has_tx_ulp; sge = sk_msg_elem(msg, msg->sg.start); size = (apply && apply_bytes < sge->length) ? apply_bytes : sge->length; off = sge->offset; page = sg_page(sge); tcp_rate_check_app_limited(sk); retry: msghdr.msg_flags = flags | MSG_SPLICE_PAGES; has_tx_ulp = tls_sw_has_ctx_tx(sk); if (has_tx_ulp) msghdr.msg_flags |= MSG_SENDPAGE_NOPOLICY; if (size < sge->length && msg->sg.start != msg->sg.end) msghdr.msg_flags |= MSG_MORE; bvec_set_page(&bvec, page, size, off); iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, size); ret = tcp_sendmsg_locked(sk, &msghdr, size); if (ret <= 0) return ret; if (apply) apply_bytes -= ret; msg->sg.size -= ret; sge->offset += ret; sge->length -= ret; if (uncharge) sk_mem_uncharge(sk, ret); if (ret != size) { size -= ret; off += ret; goto retry; } if (!sge->length) { put_page(page); sk_msg_iter_next(msg, start); sg_init_table(sge, 1); if (msg->sg.start == msg->sg.end) break; } if (apply && !apply_bytes) break; } return 0; } static int tcp_bpf_push_locked(struct sock *sk, struct sk_msg *msg, u32 apply_bytes, int flags, bool uncharge) { int ret; lock_sock(sk); ret = tcp_bpf_push(sk, msg, apply_bytes, flags, uncharge); release_sock(sk); return ret; } int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress, struct sk_msg *msg, u32 bytes, int flags) { struct sk_psock *psock = sk_psock_get(sk); int ret; if (unlikely(!psock)) return -EPIPE; ret = ingress ? 
bpf_tcp_ingress(sk, psock, msg, bytes, flags) : tcp_bpf_push_locked(sk, msg, bytes, flags, false); sk_psock_put(sk, psock); return ret; } EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir); #ifdef CONFIG_BPF_SYSCALL static int tcp_msg_wait_data(struct sock *sk, struct sk_psock *psock, long timeo) { DEFINE_WAIT_FUNC(wait, woken_wake_function); int ret = 0; if (sk->sk_shutdown & RCV_SHUTDOWN) return 1; if (!timeo) return ret; add_wait_queue(sk_sleep(sk), &wait); sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); ret = sk_wait_event(sk, &timeo, !list_empty(&psock->ingress_msg) || !skb_queue_empty_lockless(&sk->sk_receive_queue), &wait); sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); remove_wait_queue(sk_sleep(sk), &wait); return ret; } static bool is_next_msg_fin(struct sk_psock *psock) { struct scatterlist *sge; struct sk_msg *msg_rx; int i; msg_rx = sk_psock_peek_msg(psock); i = msg_rx->sg.start; sge = sk_msg_elem(msg_rx, i); if (!sge->length) { struct sk_buff *skb = msg_rx->skb; if (skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) return true; } return false; } static int tcp_bpf_recvmsg_parser(struct sock *sk, struct msghdr *msg, size_t len, int flags, int *addr_len) { struct tcp_sock *tcp = tcp_sk(sk); int peek = flags & MSG_PEEK; u32 seq = tcp->copied_seq; struct sk_psock *psock; int copied = 0; if (unlikely(flags & MSG_ERRQUEUE)) return inet_recv_error(sk, msg, len, addr_len); if (!len) return 0; psock = sk_psock_get(sk); if (unlikely(!psock)) return tcp_recvmsg(sk, msg, len, flags, addr_len); lock_sock(sk); /* We may have received data on the sk_receive_queue pre-accept and * then we can not use read_skb in this context because we haven't * assigned a sk_socket yet so have no link to the ops. The work-around * is to check the sk_receive_queue and in these cases read skbs off * queue again. The read_skb hook is not running at this point because * of lock_sock so we avoid having multiple runners in read_skb. */ if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) { tcp_data_ready(sk); /* This handles the ENOMEM errors if we both receive data * pre accept and are already under memory pressure. At least * let user know to retry. */ if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) { copied = -EAGAIN; goto out; } } msg_bytes_ready: copied = sk_msg_recvmsg(sk, psock, msg, len, flags); /* The typical case for EFAULT is the socket was gracefully * shutdown with a FIN pkt. So check here the other case is * some error on copy_page_to_iter which would be unexpected. * On fin return correct return code to zero. 
*/ if (copied == -EFAULT) { bool is_fin = is_next_msg_fin(psock); if (is_fin) { copied = 0; seq++; goto out; } } seq += copied; if (!copied) { long timeo; int data; if (sock_flag(sk, SOCK_DONE)) goto out; if (sk->sk_err) { copied = sock_error(sk); goto out; } if (sk->sk_shutdown & RCV_SHUTDOWN) goto out; if (sk->sk_state == TCP_CLOSE) { copied = -ENOTCONN; goto out; } timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); if (!timeo) { copied = -EAGAIN; goto out; } if (signal_pending(current)) { copied = sock_intr_errno(timeo); goto out; } data = tcp_msg_wait_data(sk, psock, timeo); if (data < 0) { copied = data; goto unlock; } if (data && !sk_psock_queue_empty(psock)) goto msg_bytes_ready; copied = -EAGAIN; } out: if (!peek) WRITE_ONCE(tcp->copied_seq, seq); tcp_rcv_space_adjust(sk); if (copied > 0) __tcp_cleanup_rbuf(sk, copied); unlock: release_sock(sk); sk_psock_put(sk, psock); return copied; } static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, int *addr_len) { struct sk_psock *psock; int copied, ret; if (unlikely(flags & MSG_ERRQUEUE)) return inet_recv_error(sk, msg, len, addr_len); if (!len) return 0; psock = sk_psock_get(sk); if (unlikely(!psock)) return tcp_recvmsg(sk, msg, len, flags, addr_len); if (!skb_queue_empty(&sk->sk_receive_queue) && sk_psock_queue_empty(psock)) { sk_psock_put(sk, psock); return tcp_recvmsg(sk, msg, len, flags, addr_len); } lock_sock(sk); msg_bytes_ready: copied = sk_msg_recvmsg(sk, psock, msg, len, flags); if (!copied) { long timeo; int data; timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); data = tcp_msg_wait_data(sk, psock, timeo); if (data < 0) { ret = data; goto unlock; } if (data) { if (!sk_psock_queue_empty(psock)) goto msg_bytes_ready; release_sock(sk); sk_psock_put(sk, psock); return tcp_recvmsg(sk, msg, len, flags, addr_len); } copied = -EAGAIN; } ret = copied; unlock: release_sock(sk); sk_psock_put(sk, psock); return ret; } static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock, struct sk_msg *msg, int *copied, int flags) { bool cork = false, enospc = sk_msg_full(msg), redir_ingress; struct sock *sk_redir; u32 tosend, origsize, sent, delta = 0; u32 eval; int ret; more_data: if (psock->eval == __SK_NONE) { /* Track delta in msg size to add/subtract it on SK_DROP from * returned to user copied size. This ensures user doesn't * get a positive return code with msg_cut_data and SK_DROP * verdict. */ delta = msg->sg.size; psock->eval = sk_psock_msg_verdict(sk, psock, msg); delta -= msg->sg.size; } if (msg->cork_bytes && msg->cork_bytes > msg->sg.size && !enospc) { psock->cork_bytes = msg->cork_bytes - msg->sg.size; if (!psock->cork) { psock->cork = kzalloc(sizeof(*psock->cork), GFP_ATOMIC | __GFP_NOWARN); if (!psock->cork) return -ENOMEM; } memcpy(psock->cork, msg, sizeof(*msg)); return 0; } tosend = msg->sg.size; if (psock->apply_bytes && psock->apply_bytes < tosend) tosend = psock->apply_bytes; eval = __SK_NONE; switch (psock->eval) { case __SK_PASS: ret = tcp_bpf_push(sk, msg, tosend, flags, true); if (unlikely(ret)) { *copied -= sk_msg_free(sk, msg); break; } sk_msg_apply_bytes(psock, tosend); break; case __SK_REDIRECT: redir_ingress = psock->redir_ingress; sk_redir = psock->sk_redir; sk_msg_apply_bytes(psock, tosend); if (!psock->apply_bytes) { /* Clean up before releasing the sock lock. 
*/ eval = psock->eval; psock->eval = __SK_NONE; psock->sk_redir = NULL; } if (psock->cork) { cork = true; psock->cork = NULL; } sk_msg_return(sk, msg, tosend); release_sock(sk); origsize = msg->sg.size; ret = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress, msg, tosend, flags); sent = origsize - msg->sg.size; if (eval == __SK_REDIRECT) sock_put(sk_redir); lock_sock(sk); if (unlikely(ret < 0)) { int free = sk_msg_free_nocharge(sk, msg); if (!cork) *copied -= free; } if (cork) { sk_msg_free(sk, msg); kfree(msg); msg = NULL; ret = 0; } break; case __SK_DROP: default: sk_msg_free_partial(sk, msg, tosend); sk_msg_apply_bytes(psock, tosend); *copied -= (tosend + delta); return -EACCES; } if (likely(!ret)) { if (!psock->apply_bytes) { psock->eval = __SK_NONE; if (psock->sk_redir) { sock_put(psock->sk_redir); psock->sk_redir = NULL; } } if (msg && msg->sg.data[msg->sg.start].page_link && msg->sg.data[msg->sg.start].length) { if (eval == __SK_REDIRECT) sk_mem_charge(sk, tosend - sent); goto more_data; } } return ret; } static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) { struct sk_msg tmp, *msg_tx = NULL; int copied = 0, err = 0; struct sk_psock *psock; long timeo; int flags; /* Don't let internal flags through */ flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED); flags |= MSG_NO_SHARED_FRAGS; psock = sk_psock_get(sk); if (unlikely(!psock)) return tcp_sendmsg(sk, msg, size); lock_sock(sk); timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); while (msg_data_left(msg)) { bool enospc = false; u32 copy, osize; if (sk->sk_err) { err = -sk->sk_err; goto out_err; } copy = msg_data_left(msg); if (!sk_stream_memory_free(sk)) goto wait_for_sndbuf; if (psock->cork) { msg_tx = psock->cork; } else { msg_tx = &tmp; sk_msg_init(msg_tx); } osize = msg_tx->sg.size; err = sk_msg_alloc(sk, msg_tx, msg_tx->sg.size + copy, msg_tx->sg.end - 1); if (err) { if (err != -ENOSPC) goto wait_for_memory; enospc = true; copy = msg_tx->sg.size - osize; } err = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_tx, copy); if (err < 0) { sk_msg_trim(sk, msg_tx, osize); goto out_err; } copied += copy; if (psock->cork_bytes) { if (size > psock->cork_bytes) psock->cork_bytes = 0; else psock->cork_bytes -= size; if (psock->cork_bytes && !enospc) goto out_err; /* All cork bytes are accounted, rerun the prog. */ psock->eval = __SK_NONE; psock->cork_bytes = 0; } err = tcp_bpf_send_verdict(sk, psock, msg_tx, &copied, flags); if (unlikely(err < 0)) goto out_err; continue; wait_for_sndbuf: set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); wait_for_memory: err = sk_stream_wait_memory(sk, &timeo); if (err) { if (msg_tx && msg_tx != psock->cork) sk_msg_free(sk, msg_tx); goto out_err; } } out_err: if (err < 0) err = sk_stream_error(sk, msg->msg_flags, err); release_sock(sk); sk_psock_put(sk, psock); return copied ? 
copied : err; } enum { TCP_BPF_IPV4, TCP_BPF_IPV6, TCP_BPF_NUM_PROTS, }; enum { TCP_BPF_BASE, TCP_BPF_TX, TCP_BPF_RX, TCP_BPF_TXRX, TCP_BPF_NUM_CFGS, }; static struct proto *tcpv6_prot_saved __read_mostly; static DEFINE_SPINLOCK(tcpv6_prot_lock); static struct proto tcp_bpf_prots[TCP_BPF_NUM_PROTS][TCP_BPF_NUM_CFGS]; static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS], struct proto *base) { prot[TCP_BPF_BASE] = *base; prot[TCP_BPF_BASE].destroy = sock_map_destroy; prot[TCP_BPF_BASE].close = sock_map_close; prot[TCP_BPF_BASE].recvmsg = tcp_bpf_recvmsg; prot[TCP_BPF_BASE].sock_is_readable = sk_msg_is_readable; prot[TCP_BPF_TX] = prot[TCP_BPF_BASE]; prot[TCP_BPF_TX].sendmsg = tcp_bpf_sendmsg; prot[TCP_BPF_RX] = prot[TCP_BPF_BASE]; prot[TCP_BPF_RX].recvmsg = tcp_bpf_recvmsg_parser; prot[TCP_BPF_TXRX] = prot[TCP_BPF_TX]; prot[TCP_BPF_TXRX].recvmsg = tcp_bpf_recvmsg_parser; } static void tcp_bpf_check_v6_needs_rebuild(struct proto *ops) { if (unlikely(ops != smp_load_acquire(&tcpv6_prot_saved))) { spin_lock_bh(&tcpv6_prot_lock); if (likely(ops != tcpv6_prot_saved)) { tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV6], ops); smp_store_release(&tcpv6_prot_saved, ops); } spin_unlock_bh(&tcpv6_prot_lock); } } static int __init tcp_bpf_v4_build_proto(void) { tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV4], &tcp_prot); return 0; } late_initcall(tcp_bpf_v4_build_proto); static int tcp_bpf_assert_proto_ops(struct proto *ops) { /* In order to avoid retpoline, we make assumptions when we call * into ops if e.g. a psock is not present. Make sure they are * indeed valid assumptions. */ return ops->recvmsg == tcp_recvmsg && ops->sendmsg == tcp_sendmsg ? 0 : -ENOTSUPP; } int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore) { int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4; int config = psock->progs.msg_parser ? TCP_BPF_TX : TCP_BPF_BASE; if (psock->progs.stream_verdict || psock->progs.skb_verdict) { config = (config == TCP_BPF_TX) ? TCP_BPF_TXRX : TCP_BPF_RX; } if (restore) { if (inet_csk_has_ulp(sk)) { /* TLS does not have an unhash proto in SW cases, * but we need to ensure we stop using the sock_map * unhash routine because the associated psock is being * removed. So use the original unhash handler. */ WRITE_ONCE(sk->sk_prot->unhash, psock->saved_unhash); tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space); } else { sk->sk_write_space = psock->saved_write_space; /* Pairs with lockless read in sk_clone_lock() */ sock_replace_proto(sk, psock->sk_proto); } return 0; } if (sk->sk_family == AF_INET6) { if (tcp_bpf_assert_proto_ops(psock->sk_proto)) return -EINVAL; tcp_bpf_check_v6_needs_rebuild(psock->sk_proto); } /* Pairs with lockless read in sk_clone_lock() */ sock_replace_proto(sk, &tcp_bpf_prots[family][config]); return 0; } EXPORT_SYMBOL_GPL(tcp_bpf_update_proto); /* If a child got cloned from a listening socket that had tcp_bpf * protocol callbacks installed, we need to restore the callbacks to * the default ones because the child does not inherit the psock state * that tcp_bpf callbacks expect. */ void tcp_bpf_clone(const struct sock *sk, struct sock *newsk) { struct proto *prot = newsk->sk_prot; if (is_insidevar(prot, tcp_bpf_prots)) newsk->sk_prot = sk->sk_prot_creator; } #endif /* CONFIG_BPF_SYSCALL */
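/* Illustrative BPF-side sketch (not part of tcp_bpf.c): the tcp_bpf_prots
 * above are installed once a socket is added to a sockmap that has an sk_msg
 * verdict program attached. The map and program names below are hypothetical;
 * attaching would be done from userspace, e.g. with bpf_prog_attach(prog_fd,
 * map_fd, BPF_SK_MSG_VERDICT, 0) or bpftool.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 2);
	__type(key, __u32);
	__type(value, __u64);
} demo_sock_map SEC(".maps");

SEC("sk_msg")
int demo_msg_verdict(struct sk_msg_md *msg)
{
	/* Redirect every egress byte to the socket stored at key 0 and queue
	 * it on that socket's ingress side (BPF_F_INGRESS), which lands in
	 * bpf_tcp_ingress() above. Returning SK_PASS would instead let the
	 * data continue on the original socket via tcp_bpf_push().
	 */
	return bpf_msg_redirect_map(msg, &demo_sock_map, 0, BPF_F_INGRESS);
}

char _license[] SEC("license") = "GPL";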
// SPDX-License-Identifier: GPL-2.0 /* Multipath TCP * * Copyright (c) 2019, Tessares SA. */ #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> #endif #include <net/net_namespace.h> #include <net/netns/generic.h> #include "protocol.h" #define MPTCP_SYSCTL_PATH "net/mptcp" static int mptcp_pernet_id; #ifdef CONFIG_SYSCTL static int mptcp_pm_type_max = __MPTCP_PM_TYPE_MAX; #endif struct mptcp_pernet { #ifdef CONFIG_SYSCTL struct ctl_table_header *ctl_table_hdr; #endif unsigned int add_addr_timeout; unsigned int close_timeout; unsigned int stale_loss_cnt; u8 mptcp_enabled; u8 checksum_enabled; u8 allow_join_initial_addr_port; u8 pm_type; char scheduler[MPTCP_SCHED_NAME_MAX]; }; static struct mptcp_pernet *mptcp_get_pernet(const struct net *net) { return net_generic(net, mptcp_pernet_id); } int mptcp_is_enabled(const struct net *net) { return mptcp_get_pernet(net)->mptcp_enabled; } unsigned int mptcp_get_add_addr_timeout(const struct net *net) { return mptcp_get_pernet(net)->add_addr_timeout; } int mptcp_is_checksum_enabled(const struct net *net) { return mptcp_get_pernet(net)->checksum_enabled; } int mptcp_allow_join_id0(const struct net *net) { return mptcp_get_pernet(net)->allow_join_initial_addr_port; } unsigned int mptcp_stale_loss_cnt(const struct net *net) { return mptcp_get_pernet(net)->stale_loss_cnt; } unsigned int mptcp_close_timeout(const struct sock *sk) { if (sock_flag(sk, SOCK_DEAD)) return TCP_TIMEWAIT_LEN; return mptcp_get_pernet(sock_net(sk))->close_timeout; } int mptcp_get_pm_type(const struct net *net) { return mptcp_get_pernet(net)->pm_type; } const char *mptcp_get_scheduler(const struct net *net) { return mptcp_get_pernet(net)->scheduler; } static void mptcp_pernet_set_defaults(struct mptcp_pernet *pernet) { pernet->mptcp_enabled = 1; pernet->add_addr_timeout = TCP_RTO_MAX; pernet->close_timeout = TCP_TIMEWAIT_LEN; pernet->checksum_enabled = 0; pernet->allow_join_initial_addr_port = 1; pernet->stale_loss_cnt = 4; pernet->pm_type = MPTCP_PM_TYPE_KERNEL; strcpy(pernet->scheduler, "default"); } #ifdef CONFIG_SYSCTL static struct ctl_table mptcp_sysctl_table[] = { { .procname = "enabled", .maxlen = sizeof(u8), .mode = 0644, /* users with CAP_NET_ADMIN or root (not and) can change this * value, same as other sysctl or the 'net' tree.
*/ .proc_handler = proc_dou8vec_minmax, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE }, { .procname = "add_addr_timeout", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "checksum_enabled", .maxlen = sizeof(u8), .mode = 0644, .proc_handler = proc_dou8vec_minmax, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE }, { .procname = "allow_join_initial_addr_port", .maxlen = sizeof(u8), .mode = 0644, .proc_handler = proc_dou8vec_minmax, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE }, { .procname = "stale_loss_cnt", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_douintvec_minmax, }, { .procname = "pm_type", .maxlen = sizeof(u8), .mode = 0644, .proc_handler = proc_dou8vec_minmax, .extra1 = SYSCTL_ZERO, .extra2 = &mptcp_pm_type_max }, { .procname = "scheduler", .maxlen = MPTCP_SCHED_NAME_MAX, .mode = 0644, .proc_handler = proc_dostring, }, { .procname = "close_timeout", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, {} }; static int mptcp_pernet_new_table(struct net *net, struct mptcp_pernet *pernet) { struct ctl_table_header *hdr; struct ctl_table *table; table = mptcp_sysctl_table; if (!net_eq(net, &init_net)) { table = kmemdup(table, sizeof(mptcp_sysctl_table), GFP_KERNEL); if (!table) goto err_alloc; } table[0].data = &pernet->mptcp_enabled; table[1].data = &pernet->add_addr_timeout; table[2].data = &pernet->checksum_enabled; table[3].data = &pernet->allow_join_initial_addr_port; table[4].data = &pernet->stale_loss_cnt; table[5].data = &pernet->pm_type; table[6].data = &pernet->scheduler; table[7].data = &pernet->close_timeout; hdr = register_net_sysctl_sz(net, MPTCP_SYSCTL_PATH, table, ARRAY_SIZE(mptcp_sysctl_table)); if (!hdr) goto err_reg; pernet->ctl_table_hdr = hdr; return 0; err_reg: if (!net_eq(net, &init_net)) kfree(table); err_alloc: return -ENOMEM; } static void mptcp_pernet_del_table(struct mptcp_pernet *pernet) { struct ctl_table *table = pernet->ctl_table_hdr->ctl_table_arg; unregister_net_sysctl_table(pernet->ctl_table_hdr); kfree(table); } #else static int mptcp_pernet_new_table(struct net *net, struct mptcp_pernet *pernet) { return 0; } static void mptcp_pernet_del_table(struct mptcp_pernet *pernet) {} #endif /* CONFIG_SYSCTL */ static int __net_init mptcp_net_init(struct net *net) { struct mptcp_pernet *pernet = mptcp_get_pernet(net); mptcp_pernet_set_defaults(pernet); return mptcp_pernet_new_table(net, pernet); } /* Note: the callback will only be called per extra netns */ static void __net_exit mptcp_net_exit(struct net *net) { struct mptcp_pernet *pernet = mptcp_get_pernet(net); mptcp_pernet_del_table(pernet); } static struct pernet_operations mptcp_pernet_ops = { .init = mptcp_net_init, .exit = mptcp_net_exit, .id = &mptcp_pernet_id, .size = sizeof(struct mptcp_pernet), }; void __init mptcp_init(void) { mptcp_join_cookie_init(); mptcp_proto_init(); if (register_pernet_subsys(&mptcp_pernet_ops) < 0) panic("Failed to register MPTCP pernet subsystem.\n"); } #if IS_ENABLED(CONFIG_MPTCP_IPV6) int __init mptcpv6_init(void) { int err; err = mptcp_proto_v6_init(); return err; } #endif
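Because the table above is registered under MPTCP_SYSCTL_PATH ("net/mptcp"), every entry is reachable from userspace through procfs. A minimal reader for the "enabled" knob, assuming the conventional /proc/sys mount (sketch only, not part of ctrl.c):

/* Read net.mptcp.enabled the same way sysctl(8) would. */
#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/proc/sys/net/mptcp/enabled", "r");

	if (!f) {
		perror("fopen");	/* kernel without CONFIG_MPTCP, or no procfs */
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("net.mptcp.enabled = %s", buf);
	fclose(f);
	return 0;
}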
// SPDX-License-Identifier: GPL-2.0-or-later /* net/sched/sch_ingress.c - Ingress and clsact qdisc * * Authors: Jamal Hadi Salim 1999 */ #include <linux/module.h> #include <linux/types.h> #include <linux/list.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <net/netlink.h> #include <net/pkt_sched.h> #include <net/pkt_cls.h> #include <net/tcx.h> struct ingress_sched_data { struct tcf_block *block; struct tcf_block_ext_info block_info; struct mini_Qdisc_pair miniqp; }; static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg) { return NULL; } static unsigned long ingress_find(struct Qdisc *sch, u32 classid) { return TC_H_MIN(classid) + 1; } static unsigned long ingress_bind_filter(struct Qdisc *sch, unsigned long parent, u32 classid) { return ingress_find(sch, classid); } static void ingress_unbind_filter(struct Qdisc *sch, unsigned long cl) { } static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker) { } static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl, struct netlink_ext_ack *extack) { struct ingress_sched_data *q = qdisc_priv(sch); return q->block; } static void clsact_chain_head_change(struct tcf_proto *tp_head, void *priv) { struct mini_Qdisc_pair *miniqp = priv; mini_qdisc_pair_swap(miniqp, tp_head); }; static void ingress_ingress_block_set(struct Qdisc *sch, u32 block_index) { struct ingress_sched_data *q = qdisc_priv(sch); q->block_info.block_index = block_index; } static u32 ingress_ingress_block_get(struct Qdisc *sch) { struct ingress_sched_data *q = qdisc_priv(sch); return q->block_info.block_index; } static int ingress_init(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { struct ingress_sched_data *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); struct bpf_mprog_entry *entry; bool created; int err; if (sch->parent != TC_H_INGRESS) return -EOPNOTSUPP; net_inc_ingress_queue(); entry = tcx_entry_fetch_or_create(dev, true, &created); if (!entry) return -ENOMEM; tcx_miniq_set_active(entry, true);
mini_qdisc_pair_init(&q->miniqp, sch, &tcx_entry(entry)->miniq); if (created) tcx_entry_update(dev, entry, true); q->block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS; q->block_info.chain_head_change = clsact_chain_head_change; q->block_info.chain_head_change_priv = &q->miniqp; err = tcf_block_get_ext(&q->block, sch, &q->block_info, extack); if (err) return err; mini_qdisc_pair_block_init(&q->miniqp, q->block); return 0; } static void ingress_destroy(struct Qdisc *sch) { struct ingress_sched_data *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); struct bpf_mprog_entry *entry = rtnl_dereference(dev->tcx_ingress); if (sch->parent != TC_H_INGRESS) return; tcf_block_put_ext(q->block, sch, &q->block_info); if (entry) { tcx_miniq_set_active(entry, false); if (!tcx_entry_is_active(entry)) { tcx_entry_update(dev, NULL, true); tcx_entry_free(entry); } } net_dec_ingress_queue(); } static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb) { struct nlattr *nest; nest = nla_nest_start_noflag(skb, TCA_OPTIONS); if (nest == NULL) goto nla_put_failure; return nla_nest_end(skb, nest); nla_put_failure: nla_nest_cancel(skb, nest); return -1; } static const struct Qdisc_class_ops ingress_class_ops = { .flags = QDISC_CLASS_OPS_DOIT_UNLOCKED, .leaf = ingress_leaf, .find = ingress_find, .walk = ingress_walk, .tcf_block = ingress_tcf_block, .bind_tcf = ingress_bind_filter, .unbind_tcf = ingress_unbind_filter, }; static struct Qdisc_ops ingress_qdisc_ops __read_mostly = { .cl_ops = &ingress_class_ops, .id = "ingress", .priv_size = sizeof(struct ingress_sched_data), .static_flags = TCQ_F_INGRESS | TCQ_F_CPUSTATS, .init = ingress_init, .destroy = ingress_destroy, .dump = ingress_dump, .ingress_block_set = ingress_ingress_block_set, .ingress_block_get = ingress_ingress_block_get, .owner = THIS_MODULE, }; MODULE_ALIAS_NET_SCH("ingress"); struct clsact_sched_data { struct tcf_block *ingress_block; struct tcf_block *egress_block; struct tcf_block_ext_info ingress_block_info; struct tcf_block_ext_info egress_block_info; struct mini_Qdisc_pair miniqp_ingress; struct mini_Qdisc_pair miniqp_egress; }; static unsigned long clsact_find(struct Qdisc *sch, u32 classid) { switch (TC_H_MIN(classid)) { case TC_H_MIN(TC_H_MIN_INGRESS): case TC_H_MIN(TC_H_MIN_EGRESS): return TC_H_MIN(classid); default: return 0; } } static unsigned long clsact_bind_filter(struct Qdisc *sch, unsigned long parent, u32 classid) { return clsact_find(sch, classid); } static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl, struct netlink_ext_ack *extack) { struct clsact_sched_data *q = qdisc_priv(sch); switch (cl) { case TC_H_MIN(TC_H_MIN_INGRESS): return q->ingress_block; case TC_H_MIN(TC_H_MIN_EGRESS): return q->egress_block; default: return NULL; } } static void clsact_ingress_block_set(struct Qdisc *sch, u32 block_index) { struct clsact_sched_data *q = qdisc_priv(sch); q->ingress_block_info.block_index = block_index; } static void clsact_egress_block_set(struct Qdisc *sch, u32 block_index) { struct clsact_sched_data *q = qdisc_priv(sch); q->egress_block_info.block_index = block_index; } static u32 clsact_ingress_block_get(struct Qdisc *sch) { struct clsact_sched_data *q = qdisc_priv(sch); return q->ingress_block_info.block_index; } static u32 clsact_egress_block_get(struct Qdisc *sch) { struct clsact_sched_data *q = qdisc_priv(sch); return q->egress_block_info.block_index; } static int clsact_init(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { struct clsact_sched_data *q 
= qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); struct bpf_mprog_entry *entry; bool created; int err; if (sch->parent != TC_H_CLSACT) return -EOPNOTSUPP; net_inc_ingress_queue(); net_inc_egress_queue(); entry = tcx_entry_fetch_or_create(dev, true, &created); if (!entry) return -ENOMEM; tcx_miniq_set_active(entry, true); mini_qdisc_pair_init(&q->miniqp_ingress, sch, &tcx_entry(entry)->miniq); if (created) tcx_entry_update(dev, entry, true); q->ingress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS; q->ingress_block_info.chain_head_change = clsact_chain_head_change; q->ingress_block_info.chain_head_change_priv = &q->miniqp_ingress; err = tcf_block_get_ext(&q->ingress_block, sch, &q->ingress_block_info, extack); if (err) return err; mini_qdisc_pair_block_init(&q->miniqp_ingress, q->ingress_block); entry = tcx_entry_fetch_or_create(dev, false, &created); if (!entry) return -ENOMEM; tcx_miniq_set_active(entry, true); mini_qdisc_pair_init(&q->miniqp_egress, sch, &tcx_entry(entry)->miniq); if (created) tcx_entry_update(dev, entry, false); q->egress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS; q->egress_block_info.chain_head_change = clsact_chain_head_change; q->egress_block_info.chain_head_change_priv = &q->miniqp_egress; return tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info, extack); } static void clsact_destroy(struct Qdisc *sch) { struct clsact_sched_data *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); struct bpf_mprog_entry *ingress_entry = rtnl_dereference(dev->tcx_ingress); struct bpf_mprog_entry *egress_entry = rtnl_dereference(dev->tcx_egress); if (sch->parent != TC_H_CLSACT) return; tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info); tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info); if (ingress_entry) { tcx_miniq_set_active(ingress_entry, false); if (!tcx_entry_is_active(ingress_entry)) { tcx_entry_update(dev, NULL, true); tcx_entry_free(ingress_entry); } } if (egress_entry) { tcx_miniq_set_active(egress_entry, false); if (!tcx_entry_is_active(egress_entry)) { tcx_entry_update(dev, NULL, false); tcx_entry_free(egress_entry); } } net_dec_ingress_queue(); net_dec_egress_queue(); } static const struct Qdisc_class_ops clsact_class_ops = { .flags = QDISC_CLASS_OPS_DOIT_UNLOCKED, .leaf = ingress_leaf, .find = clsact_find, .walk = ingress_walk, .tcf_block = clsact_tcf_block, .bind_tcf = clsact_bind_filter, .unbind_tcf = ingress_unbind_filter, }; static struct Qdisc_ops clsact_qdisc_ops __read_mostly = { .cl_ops = &clsact_class_ops, .id = "clsact", .priv_size = sizeof(struct clsact_sched_data), .static_flags = TCQ_F_INGRESS | TCQ_F_CPUSTATS, .init = clsact_init, .destroy = clsact_destroy, .dump = ingress_dump, .ingress_block_set = clsact_ingress_block_set, .egress_block_set = clsact_egress_block_set, .ingress_block_get = clsact_ingress_block_get, .egress_block_get = clsact_egress_block_get, .owner = THIS_MODULE, }; MODULE_ALIAS_NET_SCH("clsact"); static int __init ingress_module_init(void) { int ret; ret = register_qdisc(&ingress_qdisc_ops); if (!ret) { ret = register_qdisc(&clsact_qdisc_ops); if (ret) unregister_qdisc(&ingress_qdisc_ops); } return ret; } static void __exit ingress_module_exit(void) { unregister_qdisc(&ingress_qdisc_ops); unregister_qdisc(&clsact_qdisc_ops); } module_init(ingress_module_init); module_exit(ingress_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Ingress and clsact based ingress and egress qdiscs");
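clsact exposes exactly two pseudo-classes, distinguished only by the minor part of the classid. The standalone sketch below mirrors clsact_find() using the UAPI constants from <linux/pkt_sched.h>; it is an illustration, not part of sch_ingress.c:

/* Mirrors clsact_find(): only the ingress and egress minors resolve. */
#include <stdio.h>
#include <linux/pkt_sched.h>

static unsigned long sketch_clsact_find(unsigned int classid)
{
	switch (TC_H_MIN(classid)) {
	case TC_H_MIN(TC_H_MIN_INGRESS):
	case TC_H_MIN(TC_H_MIN_EGRESS):
		return TC_H_MIN(classid);
	default:
		return 0;
	}
}

int main(void)
{
	printf("ingress -> %#lx\n",
	       sketch_clsact_find(TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS)));
	printf("egress  -> %#lx\n",
	       sketch_clsact_find(TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_EGRESS)));
	printf("other   -> %#lx\n",
	       sketch_clsact_find(TC_H_MAKE(TC_H_CLSACT, 1)));
	return 0;
}

With the stock header values this prints 0xfff2, 0xfff3 and 0, matching the familiar ffff:fff2 (ingress) and ffff:fff3 (egress) parents used when attaching tc filters to a clsact qdisc.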
1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 // SPDX-License-Identifier: ISC /* * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. * Copyright (c) 2018, The Linux Foundation. All rights reserved. * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. */ #include "coredump.h" #include <linux/devcoredump.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/utsname.h> #include "debug.h" #include "hw.h" static const struct ath10k_mem_section qca6174_hw21_register_sections[] = { {0x800, 0x810}, {0x820, 0x82C}, {0x830, 0x8F4}, {0x90C, 0x91C}, {0xA14, 0xA18}, {0xA84, 0xA94}, {0xAA8, 0xAD4}, {0xADC, 0xB40}, {0x1000, 0x10A4}, {0x10BC, 0x111C}, {0x1134, 0x1138}, {0x1144, 0x114C}, {0x1150, 0x115C}, {0x1160, 0x1178}, {0x1240, 0x1260}, {0x2000, 0x207C}, {0x3000, 0x3014}, {0x4000, 0x4014}, {0x5000, 0x5124}, {0x6000, 0x6040}, {0x6080, 0x60CC}, {0x6100, 0x611C}, {0x6140, 0x61D8}, {0x6200, 0x6238}, {0x6240, 0x628C}, {0x62C0, 0x62EC}, {0x6380, 0x63E8}, {0x6400, 0x6440}, {0x6480, 0x64CC}, {0x6500, 0x651C}, {0x6540, 0x6580}, {0x6600, 0x6638}, {0x6640, 0x668C}, {0x66C0, 0x66EC}, {0x6780, 0x67E8}, {0x7080, 0x708C}, {0x70C0, 0x70C8}, {0x7400, 0x741C}, {0x7440, 0x7454}, {0x7800, 0x7818}, {0x8000, 0x8004}, {0x8010, 0x8064}, {0x8080, 0x8084}, {0x80A0, 0x80A4}, {0x80C0, 0x80C4}, {0x80E0, 0x80F4}, {0x8100, 0x8104}, {0x8110, 0x812C}, {0x9000, 0x9004}, {0x9800, 0x982C}, {0x9830, 0x9838}, {0x9840, 0x986C}, {0x9870, 0x9898}, {0x9A00, 0x9C00}, {0xD580, 0xD59C}, {0xF000, 0xF0E0}, {0xF140, 0xF190}, {0xF250, 0xF25C}, {0xF260, 0xF268}, {0xF26C, 0xF2A8}, {0x10008, 0x1000C}, {0x10014, 0x10018}, {0x1001C, 0x10020}, {0x10024, 0x10028}, {0x10030, 0x10034}, {0x10040, 0x10054}, {0x10058, 0x1007C}, {0x10080, 0x100C4}, {0x100C8, 0x10114}, {0x1012C, 0x10130}, {0x10138, 0x10144}, {0x10200, 0x10220}, {0x10230, 0x10250}, {0x10260, 0x10280}, {0x10290, 0x102B0}, {0x102C0, 0x102DC}, {0x102E0, 0x102F4}, {0x102FC, 0x1037C}, {0x10380, 0x10390}, {0x10800, 0x10828}, {0x10840, 0x10844}, {0x10880, 0x10884}, {0x108C0, 0x108E8}, {0x10900, 0x10928}, {0x10940, 0x10944}, {0x10980, 0x10984}, {0x109C0, 0x109E8}, {0x10A00, 0x10A28}, {0x10A40, 0x10A50}, {0x11000, 0x11028}, {0x11030, 0x11034}, {0x11038, 0x11068}, {0x11070, 0x11074}, {0x11078, 0x110A8}, {0x110B0, 0x110B4}, {0x110B8, 0x110E8}, {0x110F0, 0x110F4}, {0x110F8, 0x11128}, {0x11138, 0x11144}, {0x11178, 0x11180}, {0x111B8, 0x111C0}, {0x111F8, 0x11200}, {0x11238, 0x1123C}, {0x11270, 0x11274}, {0x11278, 0x1127C}, {0x112B0, 0x112B4}, {0x112B8, 0x112BC}, {0x112F0, 0x112F4}, {0x112F8, 0x112FC}, {0x11338, 0x1133C}, {0x11378, 0x1137C}, {0x113B8, 0x113BC}, {0x113F8, 0x113FC}, {0x11438, 0x11440}, {0x11478, 0x11480}, {0x114B8, 0x114BC}, {0x114F8, 0x114FC}, {0x11538, 0x1153C}, {0x11578, 0x1157C}, {0x115B8, 0x115BC}, {0x115F8, 0x115FC}, {0x11638, 0x1163C}, {0x11678, 0x1167C}, {0x116B8, 0x116BC}, {0x116F8, 0x116FC}, {0x11738, 0x1173C}, {0x11778, 0x1177C}, {0x117B8, 0x117BC}, {0x117F8, 0x117FC}, {0x17000, 0x1701C}, {0x17020, 0x170AC}, {0x18000, 0x18050}, {0x18054, 0x18074}, {0x18080, 0x180D4}, {0x180DC, 0x18104}, {0x18108, 0x1813C}, {0x18144, 0x18148}, {0x18168, 0x18174}, {0x18178, 0x18180}, {0x181C8, 0x181E0}, {0x181E4, 0x181E8}, {0x181EC, 0x1820C}, {0x1825C, 0x18280}, {0x18284, 0x18290}, {0x18294, 0x182A0}, {0x18300, 0x18304}, {0x18314, 0x18320}, {0x18328, 0x18350}, {0x1835C, 0x1836C}, {0x18370, 0x18390}, {0x18398, 0x183AC}, {0x183BC, 0x183D8}, {0x183DC, 0x183F4}, {0x18400, 0x186F4}, {0x186F8, 0x1871C}, 
{0x18720, 0x18790}, {0x19800, 0x19830}, {0x19834, 0x19840}, {0x19880, 0x1989C}, {0x198A4, 0x198B0}, {0x198BC, 0x19900}, {0x19C00, 0x19C88}, {0x19D00, 0x19D20}, {0x19E00, 0x19E7C}, {0x19E80, 0x19E94}, {0x19E98, 0x19EAC}, {0x19EB0, 0x19EBC}, {0x19F70, 0x19F74}, {0x19F80, 0x19F8C}, {0x19FA0, 0x19FB4}, {0x19FC0, 0x19FD8}, {0x1A000, 0x1A200}, {0x1A204, 0x1A210}, {0x1A228, 0x1A22C}, {0x1A230, 0x1A248}, {0x1A250, 0x1A270}, {0x1A280, 0x1A290}, {0x1A2A0, 0x1A2A4}, {0x1A2C0, 0x1A2EC}, {0x1A300, 0x1A3BC}, {0x1A3F0, 0x1A3F4}, {0x1A3F8, 0x1A434}, {0x1A438, 0x1A444}, {0x1A448, 0x1A468}, {0x1A580, 0x1A58C}, {0x1A644, 0x1A654}, {0x1A670, 0x1A698}, {0x1A6AC, 0x1A6B0}, {0x1A6D0, 0x1A6D4}, {0x1A6EC, 0x1A70C}, {0x1A710, 0x1A738}, {0x1A7C0, 0x1A7D0}, {0x1A7D4, 0x1A7D8}, {0x1A7DC, 0x1A7E4}, {0x1A7F0, 0x1A7F8}, {0x1A888, 0x1A89C}, {0x1A8A8, 0x1A8AC}, {0x1A8C0, 0x1A8DC}, {0x1A8F0, 0x1A8FC}, {0x1AE04, 0x1AE08}, {0x1AE18, 0x1AE24}, {0x1AF80, 0x1AF8C}, {0x1AFA0, 0x1AFB4}, {0x1B000, 0x1B200}, {0x1B284, 0x1B288}, {0x1B2D0, 0x1B2D8}, {0x1B2DC, 0x1B2EC}, {0x1B300, 0x1B340}, {0x1B374, 0x1B378}, {0x1B380, 0x1B384}, {0x1B388, 0x1B38C}, {0x1B404, 0x1B408}, {0x1B420, 0x1B428}, {0x1B440, 0x1B444}, {0x1B448, 0x1B44C}, {0x1B450, 0x1B458}, {0x1B45C, 0x1B468}, {0x1B584, 0x1B58C}, {0x1B68C, 0x1B690}, {0x1B6AC, 0x1B6B0}, {0x1B7F0, 0x1B7F8}, {0x1C800, 0x1CC00}, {0x1CE00, 0x1CE04}, {0x1CF80, 0x1CF84}, {0x1D200, 0x1D800}, {0x1E000, 0x20014}, {0x20100, 0x20124}, {0x21400, 0x217A8}, {0x21800, 0x21BA8}, {0x21C00, 0x21FA8}, {0x22000, 0x223A8}, {0x22400, 0x227A8}, {0x22800, 0x22BA8}, {0x22C00, 0x22FA8}, {0x23000, 0x233A8}, {0x24000, 0x24034}, {0x26000, 0x26064}, {0x27000, 0x27024}, {0x34000, 0x3400C}, {0x34400, 0x3445C}, {0x34800, 0x3485C}, {0x34C00, 0x34C5C}, {0x35000, 0x3505C}, {0x35400, 0x3545C}, {0x35800, 0x3585C}, {0x35C00, 0x35C5C}, {0x36000, 0x3605C}, {0x38000, 0x38064}, {0x38070, 0x380E0}, {0x3A000, 0x3A064}, {0x40000, 0x400A4}, {0x80000, 0x8000C}, {0x80010, 0x80020}, }; static const struct ath10k_mem_section qca6174_hw30_sdio_register_sections[] = { {0x800, 0x810}, {0x820, 0x82C}, {0x830, 0x8F4}, {0x90C, 0x91C}, {0xA14, 0xA18}, {0xA84, 0xA94}, {0xAA8, 0xAD4}, {0xADC, 0xB40}, {0x1000, 0x10A4}, {0x10BC, 0x111C}, {0x1134, 0x1138}, {0x1144, 0x114C}, {0x1150, 0x115C}, {0x1160, 0x1178}, {0x1240, 0x1260}, {0x2000, 0x207C}, {0x3000, 0x3014}, {0x4000, 0x4014}, {0x5000, 0x5124}, {0x6000, 0x6040}, {0x6080, 0x60CC}, {0x6100, 0x611C}, {0x6140, 0x61D8}, {0x6200, 0x6238}, {0x6240, 0x628C}, {0x62C0, 0x62EC}, {0x6380, 0x63E8}, {0x6400, 0x6440}, {0x6480, 0x64CC}, {0x6500, 0x651C}, {0x6540, 0x6580}, {0x6600, 0x6638}, {0x6640, 0x668C}, {0x66C0, 0x66EC}, {0x6780, 0x67E8}, {0x7080, 0x708C}, {0x70C0, 0x70C8}, {0x7400, 0x741C}, {0x7440, 0x7454}, {0x7800, 0x7818}, {0x8010, 0x8060}, {0x8080, 0x8084}, {0x80A0, 0x80A4}, {0x80C0, 0x80C4}, {0x80E0, 0x80ec}, {0x8110, 0x8128}, {0x9000, 0x9004}, {0xF000, 0xF0E0}, {0xF140, 0xF190}, {0xF250, 0xF25C}, {0xF260, 0xF268}, {0xF26C, 0xF2A8}, {0x10008, 0x1000C}, {0x10014, 0x10018}, {0x1001C, 0x10020}, {0x10024, 0x10028}, {0x10030, 0x10034}, {0x10040, 0x10054}, {0x10058, 0x1007C}, {0x10080, 0x100C4}, {0x100C8, 0x10114}, {0x1012C, 0x10130}, {0x10138, 0x10144}, {0x10200, 0x10220}, {0x10230, 0x10250}, {0x10260, 0x10280}, {0x10290, 0x102B0}, {0x102C0, 0x102DC}, {0x102E0, 0x102F4}, {0x102FC, 0x1037C}, {0x10380, 0x10390}, {0x10800, 0x10828}, {0x10840, 0x10844}, {0x10880, 0x10884}, {0x108C0, 0x108E8}, {0x10900, 0x10928}, {0x10940, 0x10944}, {0x10980, 0x10984}, {0x109C0, 0x109E8}, {0x10A00, 0x10A28}, {0x10A40, 0x10A50}, {0x11000, 
0x11028}, {0x11030, 0x11034}, {0x11038, 0x11068}, {0x11070, 0x11074}, {0x11078, 0x110A8}, {0x110B0, 0x110B4}, {0x110B8, 0x110E8}, {0x110F0, 0x110F4}, {0x110F8, 0x11128}, {0x11138, 0x11144}, {0x11178, 0x11180}, {0x111B8, 0x111C0}, {0x111F8, 0x11200}, {0x11238, 0x1123C}, {0x11270, 0x11274}, {0x11278, 0x1127C}, {0x112B0, 0x112B4}, {0x112B8, 0x112BC}, {0x112F0, 0x112F4}, {0x112F8, 0x112FC}, {0x11338, 0x1133C}, {0x11378, 0x1137C}, {0x113B8, 0x113BC}, {0x113F8, 0x113FC}, {0x11438, 0x11440}, {0x11478, 0x11480}, {0x114B8, 0x114BC}, {0x114F8, 0x114FC}, {0x11538, 0x1153C}, {0x11578, 0x1157C}, {0x115B8, 0x115BC}, {0x115F8, 0x115FC}, {0x11638, 0x1163C}, {0x11678, 0x1167C}, {0x116B8, 0x116BC}, {0x116F8, 0x116FC}, {0x11738, 0x1173C}, {0x11778, 0x1177C}, {0x117B8, 0x117BC}, {0x117F8, 0x117FC}, {0x17000, 0x1701C}, {0x17020, 0x170AC}, {0x18000, 0x18050}, {0x18054, 0x18074}, {0x18080, 0x180D4}, {0x180DC, 0x18104}, {0x18108, 0x1813C}, {0x18144, 0x18148}, {0x18168, 0x18174}, {0x18178, 0x18180}, {0x181C8, 0x181E0}, {0x181E4, 0x181E8}, {0x181EC, 0x1820C}, {0x1825C, 0x18280}, {0x18284, 0x18290}, {0x18294, 0x182A0}, {0x18300, 0x18304}, {0x18314, 0x18320}, {0x18328, 0x18350}, {0x1835C, 0x1836C}, {0x18370, 0x18390}, {0x18398, 0x183AC}, {0x183BC, 0x183D8}, {0x183DC, 0x183F4}, {0x18400, 0x186F4}, {0x186F8, 0x1871C}, {0x18720, 0x18790}, {0x19800, 0x19830}, {0x19834, 0x19840}, {0x19880, 0x1989C}, {0x198A4, 0x198B0}, {0x198BC, 0x19900}, {0x19C00, 0x19C88}, {0x19D00, 0x19D20}, {0x19E00, 0x19E7C}, {0x19E80, 0x19E94}, {0x19E98, 0x19EAC}, {0x19EB0, 0x19EBC}, {0x19F70, 0x19F74}, {0x19F80, 0x19F8C}, {0x19FA0, 0x19FB4}, {0x19FC0, 0x19FD8}, {0x1A000, 0x1A200}, {0x1A204, 0x1A210}, {0x1A228, 0x1A22C}, {0x1A230, 0x1A248}, {0x1A250, 0x1A270}, {0x1A280, 0x1A290}, {0x1A2A0, 0x1A2A4}, {0x1A2C0, 0x1A2EC}, {0x1A300, 0x1A3BC}, {0x1A3F0, 0x1A3F4}, {0x1A3F8, 0x1A434}, {0x1A438, 0x1A444}, {0x1A448, 0x1A468}, {0x1A580, 0x1A58C}, {0x1A644, 0x1A654}, {0x1A670, 0x1A698}, {0x1A6AC, 0x1A6B0}, {0x1A6D0, 0x1A6D4}, {0x1A6EC, 0x1A70C}, {0x1A710, 0x1A738}, {0x1A7C0, 0x1A7D0}, {0x1A7D4, 0x1A7D8}, {0x1A7DC, 0x1A7E4}, {0x1A7F0, 0x1A7F8}, {0x1A888, 0x1A89C}, {0x1A8A8, 0x1A8AC}, {0x1A8C0, 0x1A8DC}, {0x1A8F0, 0x1A8FC}, {0x1AE04, 0x1AE08}, {0x1AE18, 0x1AE24}, {0x1AF80, 0x1AF8C}, {0x1AFA0, 0x1AFB4}, {0x1B000, 0x1B200}, {0x1B284, 0x1B288}, {0x1B2D0, 0x1B2D8}, {0x1B2DC, 0x1B2EC}, {0x1B300, 0x1B340}, {0x1B374, 0x1B378}, {0x1B380, 0x1B384}, {0x1B388, 0x1B38C}, {0x1B404, 0x1B408}, {0x1B420, 0x1B428}, {0x1B440, 0x1B444}, {0x1B448, 0x1B44C}, {0x1B450, 0x1B458}, {0x1B45C, 0x1B468}, {0x1B584, 0x1B58C}, {0x1B68C, 0x1B690}, {0x1B6AC, 0x1B6B0}, {0x1B7F0, 0x1B7F8}, {0x1C800, 0x1CC00}, {0x1CE00, 0x1CE04}, {0x1CF80, 0x1CF84}, {0x1D200, 0x1D800}, {0x1E000, 0x20014}, {0x20100, 0x20124}, {0x21400, 0x217A8}, {0x21800, 0x21BA8}, {0x21C00, 0x21FA8}, {0x22000, 0x223A8}, {0x22400, 0x227A8}, {0x22800, 0x22BA8}, {0x22C00, 0x22FA8}, {0x23000, 0x233A8}, {0x24000, 0x24034}, /* EFUSE0,1,2 is disabled here * because its state may be reset * * {0x24800, 0x24804}, * {0x25000, 0x25004}, * {0x25800, 0x25804}, */ {0x26000, 0x26064}, {0x27000, 0x27024}, {0x34000, 0x3400C}, {0x34400, 0x3445C}, {0x34800, 0x3485C}, {0x34C00, 0x34C5C}, {0x35000, 0x3505C}, {0x35400, 0x3545C}, {0x35800, 0x3585C}, {0x35C00, 0x35C5C}, {0x36000, 0x3605C}, {0x38000, 0x38064}, {0x38070, 0x380E0}, {0x3A000, 0x3A074}, /* DBI windows is skipped here, it can be only accessed when pcie * is active (not in reset) and CORE_CTRL_PCIE_LTSSM_EN = 0 && * PCIE_CTRL_APP_LTSSM_ENALBE=0. 
* {0x3C000 , 0x3C004}, */ {0x40000, 0x400A4}, /* SI register is skipped here. * Because it will cause bus hang * * {0x50000, 0x50018}, */ {0x80000, 0x8000C}, {0x80010, 0x80020}, }; static const struct ath10k_mem_section qca6174_hw30_register_sections[] = { {0x800, 0x810}, {0x820, 0x82C}, {0x830, 0x8F4}, {0x90C, 0x91C}, {0xA14, 0xA18}, {0xA84, 0xA94}, {0xAA8, 0xAD4}, {0xADC, 0xB40}, {0x1000, 0x10A4}, {0x10BC, 0x111C}, {0x1134, 0x1138}, {0x1144, 0x114C}, {0x1150, 0x115C}, {0x1160, 0x1178}, {0x1240, 0x1260}, {0x2000, 0x207C}, {0x3000, 0x3014}, {0x4000, 0x4014}, {0x5000, 0x5124}, {0x6000, 0x6040}, {0x6080, 0x60CC}, {0x6100, 0x611C}, {0x6140, 0x61D8}, {0x6200, 0x6238}, {0x6240, 0x628C}, {0x62C0, 0x62EC}, {0x6380, 0x63E8}, {0x6400, 0x6440}, {0x6480, 0x64CC}, {0x6500, 0x651C}, {0x6540, 0x6580}, {0x6600, 0x6638}, {0x6640, 0x668C}, {0x66C0, 0x66EC}, {0x6780, 0x67E8}, {0x7080, 0x708C}, {0x70C0, 0x70C8}, {0x7400, 0x741C}, {0x7440, 0x7454}, {0x7800, 0x7818}, {0x8000, 0x8004}, {0x8010, 0x8064}, {0x8080, 0x8084}, {0x80A0, 0x80A4}, {0x80C0, 0x80C4}, {0x80E0, 0x80F4}, {0x8100, 0x8104}, {0x8110, 0x812C}, {0x9000, 0x9004}, {0x9800, 0x982C}, {0x9830, 0x9838}, {0x9840, 0x986C}, {0x9870, 0x9898}, {0x9A00, 0x9C00}, {0xD580, 0xD59C}, {0xF000, 0xF0E0}, {0xF140, 0xF190}, {0xF250, 0xF25C}, {0xF260, 0xF268}, {0xF26C, 0xF2A8}, {0x10008, 0x1000C}, {0x10014, 0x10018}, {0x1001C, 0x10020}, {0x10024, 0x10028}, {0x10030, 0x10034}, {0x10040, 0x10054}, {0x10058, 0x1007C}, {0x10080, 0x100C4}, {0x100C8, 0x10114}, {0x1012C, 0x10130}, {0x10138, 0x10144}, {0x10200, 0x10220}, {0x10230, 0x10250}, {0x10260, 0x10280}, {0x10290, 0x102B0}, {0x102C0, 0x102DC}, {0x102E0, 0x102F4}, {0x102FC, 0x1037C}, {0x10380, 0x10390}, {0x10800, 0x10828}, {0x10840, 0x10844}, {0x10880, 0x10884}, {0x108C0, 0x108E8}, {0x10900, 0x10928}, {0x10940, 0x10944}, {0x10980, 0x10984}, {0x109C0, 0x109E8}, {0x10A00, 0x10A28}, {0x10A40, 0x10A50}, {0x11000, 0x11028}, {0x11030, 0x11034}, {0x11038, 0x11068}, {0x11070, 0x11074}, {0x11078, 0x110A8}, {0x110B0, 0x110B4}, {0x110B8, 0x110E8}, {0x110F0, 0x110F4}, {0x110F8, 0x11128}, {0x11138, 0x11144}, {0x11178, 0x11180}, {0x111B8, 0x111C0}, {0x111F8, 0x11200}, {0x11238, 0x1123C}, {0x11270, 0x11274}, {0x11278, 0x1127C}, {0x112B0, 0x112B4}, {0x112B8, 0x112BC}, {0x112F0, 0x112F4}, {0x112F8, 0x112FC}, {0x11338, 0x1133C}, {0x11378, 0x1137C}, {0x113B8, 0x113BC}, {0x113F8, 0x113FC}, {0x11438, 0x11440}, {0x11478, 0x11480}, {0x114B8, 0x114BC}, {0x114F8, 0x114FC}, {0x11538, 0x1153C}, {0x11578, 0x1157C}, {0x115B8, 0x115BC}, {0x115F8, 0x115FC}, {0x11638, 0x1163C}, {0x11678, 0x1167C}, {0x116B8, 0x116BC}, {0x116F8, 0x116FC}, {0x11738, 0x1173C}, {0x11778, 0x1177C}, {0x117B8, 0x117BC}, {0x117F8, 0x117FC}, {0x17000, 0x1701C}, {0x17020, 0x170AC}, {0x18000, 0x18050}, {0x18054, 0x18074}, {0x18080, 0x180D4}, {0x180DC, 0x18104}, {0x18108, 0x1813C}, {0x18144, 0x18148}, {0x18168, 0x18174}, {0x18178, 0x18180}, {0x181C8, 0x181E0}, {0x181E4, 0x181E8}, {0x181EC, 0x1820C}, {0x1825C, 0x18280}, {0x18284, 0x18290}, {0x18294, 0x182A0}, {0x18300, 0x18304}, {0x18314, 0x18320}, {0x18328, 0x18350}, {0x1835C, 0x1836C}, {0x18370, 0x18390}, {0x18398, 0x183AC}, {0x183BC, 0x183D8}, {0x183DC, 0x183F4}, {0x18400, 0x186F4}, {0x186F8, 0x1871C}, {0x18720, 0x18790}, {0x19800, 0x19830}, {0x19834, 0x19840}, {0x19880, 0x1989C}, {0x198A4, 0x198B0}, {0x198BC, 0x19900}, {0x19C00, 0x19C88}, {0x19D00, 0x19D20}, {0x19E00, 0x19E7C}, {0x19E80, 0x19E94}, {0x19E98, 0x19EAC}, {0x19EB0, 0x19EBC}, {0x19F70, 0x19F74}, {0x19F80, 0x19F8C}, {0x19FA0, 0x19FB4}, {0x19FC0, 0x19FD8}, {0x1A000, 
0x1A200}, {0x1A204, 0x1A210}, {0x1A228, 0x1A22C}, {0x1A230, 0x1A248}, {0x1A250, 0x1A270}, {0x1A280, 0x1A290}, {0x1A2A0, 0x1A2A4}, {0x1A2C0, 0x1A2EC}, {0x1A300, 0x1A3BC}, {0x1A3F0, 0x1A3F4}, {0x1A3F8, 0x1A434}, {0x1A438, 0x1A444}, {0x1A448, 0x1A468}, {0x1A580, 0x1A58C}, {0x1A644, 0x1A654}, {0x1A670, 0x1A698}, {0x1A6AC, 0x1A6B0}, {0x1A6D0, 0x1A6D4}, {0x1A6EC, 0x1A70C}, {0x1A710, 0x1A738}, {0x1A7C0, 0x1A7D0}, {0x1A7D4, 0x1A7D8}, {0x1A7DC, 0x1A7E4}, {0x1A7F0, 0x1A7F8}, {0x1A888, 0x1A89C}, {0x1A8A8, 0x1A8AC}, {0x1A8C0, 0x1A8DC}, {0x1A8F0, 0x1A8FC}, {0x1AE04, 0x1AE08}, {0x1AE18, 0x1AE24}, {0x1AF80, 0x1AF8C}, {0x1AFA0, 0x1AFB4}, {0x1B000, 0x1B200}, {0x1B284, 0x1B288}, {0x1B2D0, 0x1B2D8}, {0x1B2DC, 0x1B2EC}, {0x1B300, 0x1B340}, {0x1B374, 0x1B378}, {0x1B380, 0x1B384}, {0x1B388, 0x1B38C}, {0x1B404, 0x1B408}, {0x1B420, 0x1B428}, {0x1B440, 0x1B444}, {0x1B448, 0x1B44C}, {0x1B450, 0x1B458}, {0x1B45C, 0x1B468}, {0x1B584, 0x1B58C}, {0x1B68C, 0x1B690}, {0x1B6AC, 0x1B6B0}, {0x1B7F0, 0x1B7F8}, {0x1C800, 0x1CC00}, {0x1CE00, 0x1CE04}, {0x1CF80, 0x1CF84}, {0x1D200, 0x1D800}, {0x1E000, 0x20014}, {0x20100, 0x20124}, {0x21400, 0x217A8}, {0x21800, 0x21BA8}, {0x21C00, 0x21FA8}, {0x22000, 0x223A8}, {0x22400, 0x227A8}, {0x22800, 0x22BA8}, {0x22C00, 0x22FA8}, {0x23000, 0x233A8}, {0x24000, 0x24034}, {0x26000, 0x26064}, {0x27000, 0x27024}, {0x34000, 0x3400C}, {0x34400, 0x3445C}, {0x34800, 0x3485C}, {0x34C00, 0x34C5C}, {0x35000, 0x3505C}, {0x35400, 0x3545C}, {0x35800, 0x3585C}, {0x35C00, 0x35C5C}, {0x36000, 0x3605C}, {0x38000, 0x38064}, {0x38070, 0x380E0}, {0x3A000, 0x3A074}, {0x40000, 0x400A4}, {0x80000, 0x8000C}, {0x80010, 0x80020}, }; static const struct ath10k_mem_region qca6174_hw10_mem_regions[] = { { .type = ATH10K_MEM_REGION_TYPE_DRAM, .start = 0x400000, .len = 0x70000, .name = "DRAM", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_REG, /* RTC_SOC_BASE_ADDRESS */ .start = 0x0, /* WLAN_MBOX_BASE_ADDRESS - RTC_SOC_BASE_ADDRESS */ .len = 0x800 - 0x0, .name = "REG_PART1", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_REG, /* STEREO_BASE_ADDRESS */ .start = 0x27000, /* USB_BASE_ADDRESS - STEREO_BASE_ADDRESS */ .len = 0x60000 - 0x27000, .name = "REG_PART2", .section_table = { .sections = NULL, .size = 0, }, }, }; static const struct ath10k_mem_region qca6174_hw21_mem_regions[] = { { .type = ATH10K_MEM_REGION_TYPE_DRAM, .start = 0x400000, .len = 0x70000, .name = "DRAM", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_AXI, .start = 0xa0000, .len = 0x18000, .name = "AXI", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_REG, .start = 0x800, .len = 0x80020 - 0x800, .name = "REG_TOTAL", .section_table = { .sections = qca6174_hw21_register_sections, .size = ARRAY_SIZE(qca6174_hw21_register_sections), }, }, }; static const struct ath10k_mem_region qca6174_hw30_sdio_mem_regions[] = { { .type = ATH10K_MEM_REGION_TYPE_DRAM, .start = 0x400000, .len = 0xa8000, .name = "DRAM", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_AXI, .start = 0xa0000, .len = 0x18000, .name = "AXI", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IRAM1, .start = 0x00980000, .len = 0x00080000, .name = "IRAM1", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IRAM2, .start = 0x00a00000, .len = 0x00040000, .name = "IRAM2", .section_table = { .sections = NULL, .size = 0, }, }, { .type = 
ATH10K_MEM_REGION_TYPE_REG, .start = 0x800, .len = 0x80020 - 0x800, .name = "REG_TOTAL", .section_table = { .sections = qca6174_hw30_sdio_register_sections, .size = ARRAY_SIZE(qca6174_hw30_sdio_register_sections), }, }, }; static const struct ath10k_mem_region qca6174_hw30_mem_regions[] = { { .type = ATH10K_MEM_REGION_TYPE_DRAM, .start = 0x400000, .len = 0xa8000, .name = "DRAM", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_AXI, .start = 0xa0000, .len = 0x18000, .name = "AXI", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_REG, .start = 0x800, .len = 0x80020 - 0x800, .name = "REG_TOTAL", .section_table = { .sections = qca6174_hw30_register_sections, .size = ARRAY_SIZE(qca6174_hw30_register_sections), }, }, /* IRAM dump must be put last */ { .type = ATH10K_MEM_REGION_TYPE_IRAM1, .start = 0x00980000, .len = 0x00080000, .name = "IRAM1", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IRAM2, .start = 0x00a00000, .len = 0x00040000, .name = "IRAM2", .section_table = { .sections = NULL, .size = 0, }, }, }; static const struct ath10k_mem_region qca988x_hw20_mem_regions[] = { { .type = ATH10K_MEM_REGION_TYPE_DRAM, .start = 0x400000, .len = 0x50000, .name = "DRAM", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_REG, .start = 0x4000, .len = 0x2000, .name = "REG_PART1", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_REG, .start = 0x8000, .len = 0x58000, .name = "REG_PART2", .section_table = { .sections = NULL, .size = 0, }, }, }; static const struct ath10k_mem_region qca99x0_hw20_mem_regions[] = { { .type = ATH10K_MEM_REGION_TYPE_DRAM, .start = 0x400000, .len = 0x60000, .name = "DRAM", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_REG, .start = 0x980000, .len = 0x50000, .name = "IRAM", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOSRAM, .start = 0xC0000, .len = 0x40000, .name = "SRAM", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOREG, .start = 0x30000, .len = 0x7000, .name = "APB REG 1", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOREG, .start = 0x3f000, .len = 0x3000, .name = "APB REG 2", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOREG, .start = 0x43000, .len = 0x3000, .name = "WIFI REG", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOREG, .start = 0x4A000, .len = 0x5000, .name = "CE REG", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOREG, .start = 0x80000, .len = 0x6000, .name = "SOC REG", .section_table = { .sections = NULL, .size = 0, }, }, }; static const struct ath10k_mem_region qca9984_hw10_mem_regions[] = { { .type = ATH10K_MEM_REGION_TYPE_DRAM, .start = 0x400000, .len = 0x80000, .name = "DRAM", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_REG, .start = 0x980000, .len = 0x50000, .name = "IRAM", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOSRAM, .start = 0xC0000, .len = 0x40000, .name = "SRAM", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOREG, .start = 0x30000, .len = 0x7000, .name = "APB REG 1", .section_table = { .sections = NULL, .size = 0, }, }, { .type = 
ATH10K_MEM_REGION_TYPE_IOREG, .start = 0x3f000, .len = 0x3000, .name = "APB REG 2", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOREG, .start = 0x43000, .len = 0x3000, .name = "WIFI REG", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOREG, .start = 0x4A000, .len = 0x5000, .name = "CE REG", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOREG, .start = 0x80000, .len = 0x6000, .name = "SOC REG", .section_table = { .sections = NULL, .size = 0, }, }, }; static const struct ath10k_mem_section ipq4019_soc_reg_range[] = { {0x080000, 0x080004}, {0x080020, 0x080024}, {0x080028, 0x080050}, {0x0800d4, 0x0800ec}, {0x08010c, 0x080118}, {0x080284, 0x080290}, {0x0802a8, 0x0802b8}, {0x0802dc, 0x08030c}, {0x082000, 0x083fff} }; static const struct ath10k_mem_region qca4019_hw10_mem_regions[] = { { .type = ATH10K_MEM_REGION_TYPE_DRAM, .start = 0x400000, .len = 0x68000, .name = "DRAM", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_REG, .start = 0xC0000, .len = 0x40000, .name = "SRAM", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_REG, .start = 0x980000, .len = 0x50000, .name = "IRAM", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOREG, .start = 0x30000, .len = 0x7000, .name = "APB REG 1", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOREG, .start = 0x3f000, .len = 0x3000, .name = "APB REG 2", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOREG, .start = 0x43000, .len = 0x3000, .name = "WIFI REG", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_IOREG, .start = 0x4A000, .len = 0x5000, .name = "CE REG", .section_table = { .sections = NULL, .size = 0, }, }, { .type = ATH10K_MEM_REGION_TYPE_REG, .start = 0x080000, .len = 0x083fff - 0x080000, .name = "REG_TOTAL", .section_table = { .sections = ipq4019_soc_reg_range, .size = ARRAY_SIZE(ipq4019_soc_reg_range), }, }, }; static const struct ath10k_mem_region wcn399x_hw10_mem_regions[] = { { /* MSA region start is not fixed, hence it is assigned at runtime */ .type = ATH10K_MEM_REGION_TYPE_MSA, .len = 0x100000, .name = "DRAM", .section_table = { .sections = NULL, .size = 0, }, }, }; static const struct ath10k_hw_mem_layout hw_mem_layouts[] = { { .hw_id = QCA6174_HW_1_0_VERSION, .hw_rev = ATH10K_HW_QCA6174, .bus = ATH10K_BUS_PCI, .region_table = { .regions = qca6174_hw10_mem_regions, .size = ARRAY_SIZE(qca6174_hw10_mem_regions), }, }, { .hw_id = QCA6174_HW_1_1_VERSION, .hw_rev = ATH10K_HW_QCA6174, .bus = ATH10K_BUS_PCI, .region_table = { .regions = qca6174_hw10_mem_regions, .size = ARRAY_SIZE(qca6174_hw10_mem_regions), }, }, { .hw_id = QCA6174_HW_1_3_VERSION, .hw_rev = ATH10K_HW_QCA6174, .bus = ATH10K_BUS_PCI, .region_table = { .regions = qca6174_hw10_mem_regions, .size = ARRAY_SIZE(qca6174_hw10_mem_regions), }, }, { .hw_id = QCA6174_HW_2_1_VERSION, .hw_rev = ATH10K_HW_QCA6174, .bus = ATH10K_BUS_PCI, .region_table = { .regions = qca6174_hw21_mem_regions, .size = ARRAY_SIZE(qca6174_hw21_mem_regions), }, }, { .hw_id = QCA6174_HW_3_0_VERSION, .hw_rev = ATH10K_HW_QCA6174, .bus = ATH10K_BUS_PCI, .region_table = { .regions = qca6174_hw30_mem_regions, .size = ARRAY_SIZE(qca6174_hw30_mem_regions), }, }, { .hw_id = QCA6174_HW_3_2_VERSION, .hw_rev = ATH10K_HW_QCA6174, .bus = ATH10K_BUS_PCI, .region_table = { 
.regions = qca6174_hw30_mem_regions, .size = ARRAY_SIZE(qca6174_hw30_mem_regions), }, }, { .hw_id = QCA6174_HW_3_2_VERSION, .hw_rev = ATH10K_HW_QCA6174, .bus = ATH10K_BUS_SDIO, .region_table = { .regions = qca6174_hw30_sdio_mem_regions, .size = ARRAY_SIZE(qca6174_hw30_sdio_mem_regions), }, }, { .hw_id = QCA9377_HW_1_1_DEV_VERSION, .hw_rev = ATH10K_HW_QCA9377, .bus = ATH10K_BUS_PCI, .region_table = { .regions = qca6174_hw30_mem_regions, .size = ARRAY_SIZE(qca6174_hw30_mem_regions), }, }, { .hw_id = QCA988X_HW_2_0_VERSION, .hw_rev = ATH10K_HW_QCA988X, .bus = ATH10K_BUS_PCI, .region_table = { .regions = qca988x_hw20_mem_regions, .size = ARRAY_SIZE(qca988x_hw20_mem_regions), }, }, { .hw_id = QCA9984_HW_1_0_DEV_VERSION, .hw_rev = ATH10K_HW_QCA9984, .bus = ATH10K_BUS_PCI, .region_table = { .regions = qca9984_hw10_mem_regions, .size = ARRAY_SIZE(qca9984_hw10_mem_regions), }, }, { .hw_id = QCA9888_HW_2_0_DEV_VERSION, .hw_rev = ATH10K_HW_QCA9888, .bus = ATH10K_BUS_PCI, .region_table = { .regions = qca9984_hw10_mem_regions, .size = ARRAY_SIZE(qca9984_hw10_mem_regions), }, }, { .hw_id = QCA99X0_HW_2_0_DEV_VERSION, .hw_rev = ATH10K_HW_QCA99X0, .bus = ATH10K_BUS_PCI, .region_table = { .regions = qca99x0_hw20_mem_regions, .size = ARRAY_SIZE(qca99x0_hw20_mem_regions), }, }, { .hw_id = QCA4019_HW_1_0_DEV_VERSION, .hw_rev = ATH10K_HW_QCA4019, .bus = ATH10K_BUS_AHB, .region_table = { .regions = qca4019_hw10_mem_regions, .size = ARRAY_SIZE(qca4019_hw10_mem_regions), }, }, { .hw_id = WCN3990_HW_1_0_DEV_VERSION, .hw_rev = ATH10K_HW_WCN3990, .bus = ATH10K_BUS_SNOC, .region_table = { .regions = wcn399x_hw10_mem_regions, .size = ARRAY_SIZE(wcn399x_hw10_mem_regions), }, }, }; static u32 ath10k_coredump_get_ramdump_size(struct ath10k *ar) { const struct ath10k_hw_mem_layout *hw; const struct ath10k_mem_region *mem_region; size_t size = 0; int i; hw = ath10k_coredump_get_mem_layout(ar); if (!hw) return 0; mem_region = &hw->region_table.regions[0]; for (i = 0; i < hw->region_table.size; i++) { size += mem_region->len; mem_region++; } /* reserve space for the headers */ size += hw->region_table.size * sizeof(struct ath10k_dump_ram_data_hdr); /* make sure it is aligned 16 bytes for debug message print out */ size = ALIGN(size, 16); return size; } const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar) { if (!test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) return NULL; return _ath10k_coredump_get_mem_layout(ar); } EXPORT_SYMBOL(ath10k_coredump_get_mem_layout); const struct ath10k_hw_mem_layout *_ath10k_coredump_get_mem_layout(struct ath10k *ar) { int i; if (WARN_ON(ar->target_version == 0)) return NULL; for (i = 0; i < ARRAY_SIZE(hw_mem_layouts); i++) { if (ar->target_version == hw_mem_layouts[i].hw_id && ar->hw_rev == hw_mem_layouts[i].hw_rev && hw_mem_layouts[i].bus == ar->hif.bus) return &hw_mem_layouts[i]; } return NULL; } struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar) { struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data; lockdep_assert_held(&ar->dump_mutex); if (ath10k_coredump_mask == 0) /* coredump disabled */ return NULL; guid_gen(&crash_data->guid); ktime_get_real_ts64(&crash_data->timestamp); return crash_data; } EXPORT_SYMBOL(ath10k_coredump_new); static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar) { struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data; struct ath10k_ce_crash_hdr *ce_hdr; struct ath10k_dump_file_data *dump_data; struct ath10k_tlv_dump_data *dump_tlv; size_t hdr_len = 
sizeof(*dump_data); size_t len, sofar = 0; unsigned char *buf; len = hdr_len; if (test_bit(ATH10K_FW_CRASH_DUMP_REGISTERS, &ath10k_coredump_mask)) len += sizeof(*dump_tlv) + sizeof(crash_data->registers); if (test_bit(ATH10K_FW_CRASH_DUMP_CE_DATA, &ath10k_coredump_mask)) len += sizeof(*dump_tlv) + sizeof(*ce_hdr) + CE_COUNT * sizeof(ce_hdr->entries[0]); if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) len += sizeof(*dump_tlv) + crash_data->ramdump_buf_len; sofar += hdr_len; /* This is going to get big when we start dumping FW RAM and such, * so go ahead and use vmalloc. */ buf = vzalloc(len); if (!buf) return NULL; mutex_lock(&ar->dump_mutex); dump_data = (struct ath10k_dump_file_data *)(buf); strscpy(dump_data->df_magic, "ATH10K-FW-DUMP", sizeof(dump_data->df_magic)); dump_data->len = cpu_to_le32(len); dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION); guid_copy(&dump_data->guid, &crash_data->guid); dump_data->chip_id = cpu_to_le32(ar->bus_param.chip_id); dump_data->bus_type = cpu_to_le32(0); dump_data->target_version = cpu_to_le32(ar->target_version); dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major); dump_data->fw_version_minor = cpu_to_le32(ar->fw_version_minor); dump_data->fw_version_release = cpu_to_le32(ar->fw_version_release); dump_data->fw_version_build = cpu_to_le32(ar->fw_version_build); dump_data->phy_capability = cpu_to_le32(ar->phy_capability); dump_data->hw_min_tx_power = cpu_to_le32(ar->hw_min_tx_power); dump_data->hw_max_tx_power = cpu_to_le32(ar->hw_max_tx_power); dump_data->ht_cap_info = cpu_to_le32(ar->ht_cap_info); dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info); dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains); strscpy(dump_data->fw_ver, ar->hw->wiphy->fw_version, sizeof(dump_data->fw_ver)); dump_data->kernel_ver_code = 0; strscpy(dump_data->kernel_ver, init_utsname()->release, sizeof(dump_data->kernel_ver)); dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec); dump_data->tv_nsec = cpu_to_le64(crash_data->timestamp.tv_nsec); if (test_bit(ATH10K_FW_CRASH_DUMP_REGISTERS, &ath10k_coredump_mask)) { dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar); dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_REGISTERS); dump_tlv->tlv_len = cpu_to_le32(sizeof(crash_data->registers)); memcpy(dump_tlv->tlv_data, &crash_data->registers, sizeof(crash_data->registers)); sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers); } if (test_bit(ATH10K_FW_CRASH_DUMP_CE_DATA, &ath10k_coredump_mask)) { dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar); dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_CE_DATA); dump_tlv->tlv_len = cpu_to_le32(struct_size(ce_hdr, entries, CE_COUNT)); ce_hdr = (struct ath10k_ce_crash_hdr *)(dump_tlv->tlv_data); ce_hdr->ce_count = cpu_to_le32(CE_COUNT); memset(ce_hdr->reserved, 0, sizeof(ce_hdr->reserved)); memcpy(ce_hdr->entries, crash_data->ce_crash_data, CE_COUNT * sizeof(ce_hdr->entries[0])); sofar += sizeof(*dump_tlv) + sizeof(*ce_hdr) + CE_COUNT * sizeof(ce_hdr->entries[0]); } /* Gather ram dump */ if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) { dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar); dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_RAM_DATA); dump_tlv->tlv_len = cpu_to_le32(crash_data->ramdump_buf_len); if (crash_data->ramdump_buf_len) { memcpy(dump_tlv->tlv_data, crash_data->ramdump_buf, crash_data->ramdump_buf_len); sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len; } } mutex_unlock(&ar->dump_mutex); return dump_data; } int 
ath10k_coredump_submit(struct ath10k *ar) { struct ath10k_dump_file_data *dump; if (ath10k_coredump_mask == 0) /* coredump disabled */ return 0; dump = ath10k_coredump_build(ar); if (!dump) { ath10k_warn(ar, "no crash dump data found for devcoredump"); return -ENODATA; } dev_coredumpv(ar->dev, dump, le32_to_cpu(dump->len), GFP_KERNEL); return 0; } int ath10k_coredump_create(struct ath10k *ar) { if (ath10k_coredump_mask == 0) /* coredump disabled */ return 0; ar->coredump.fw_crash_data = vzalloc(sizeof(*ar->coredump.fw_crash_data)); if (!ar->coredump.fw_crash_data) return -ENOMEM; return 0; } int ath10k_coredump_register(struct ath10k *ar) { struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data; if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) { crash_data->ramdump_buf_len = ath10k_coredump_get_ramdump_size(ar); if (!crash_data->ramdump_buf_len) return 0; crash_data->ramdump_buf = vzalloc(crash_data->ramdump_buf_len); if (!crash_data->ramdump_buf) return -ENOMEM; } return 0; } void ath10k_coredump_unregister(struct ath10k *ar) { struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data; vfree(crash_data->ramdump_buf); } void ath10k_coredump_destroy(struct ath10k *ar) { if (ar->coredump.fw_crash_data->ramdump_buf) { vfree(ar->coredump.fw_crash_data->ramdump_buf); ar->coredump.fw_crash_data->ramdump_buf = NULL; ar->coredump.fw_crash_data->ramdump_buf_len = 0; } vfree(ar->coredump.fw_crash_data); ar->coredump.fw_crash_data = NULL; }
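The dump built above is a fixed file header followed by a sequence of type/length/value records. The sketch below shows how a userspace consumer might walk that TLV area; the struct dump_tlv layout is an assumed mirror of ath10k_tlv_dump_data (32-bit type, 32-bit payload length, payload), and the 16-byte fake header plus the little-endian host are assumptions of the example only (the real file starts with struct ath10k_dump_file_data):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct dump_tlv {
	uint32_t type;		/* an ATH10K_FW_CRASH_DUMP_* value */
	uint32_t tlv_len;	/* payload length in bytes */
	uint8_t  data[];	/* payload */
};

/* Walk the TLV area that follows the fixed dump-file header. */
static void walk_tlvs(const uint8_t *buf, size_t total, size_t hdr_len)
{
	size_t off = hdr_len;

	while (off + sizeof(struct dump_tlv) <= total) {
		struct dump_tlv tlv;

		memcpy(&tlv, buf + off, sizeof(tlv));	/* avoids misaligned loads */
		printf("tlv type %u, %u byte(s) of payload\n",
		       (unsigned int)tlv.type, (unsigned int)tlv.tlv_len);
		off += sizeof(tlv) + tlv.tlv_len;
	}
}

int main(void)
{
	/* Fake dump: a 16-byte pretend header followed by two empty TLVs. */
	uint8_t buf[16 + 2 * sizeof(struct dump_tlv)] = { 0 };
	struct dump_tlv t0 = { .type = 0, .tlv_len = 0 };
	struct dump_tlv t1 = { .type = 1, .tlv_len = 0 };

	memcpy(buf + 16, &t0, sizeof(t0));
	memcpy(buf + 16 + sizeof(t0), &t1, sizeof(t1));
	walk_tlvs(buf, sizeof(buf), 16);
	return 0;
}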
// SPDX-License-Identifier: GPL-2.0-only #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/netlink.h> #include <linux/netfilter.h> #include <linux/netfilter/nf_tables.h> #include <net/netfilter/nf_tables_core.h> #include <net/netfilter/nf_tables.h> #include <net/netfilter/nft_fib.h> #include <net/ip_fib.h> #include <net/route.h> /* don't try to find route from mcast/bcast/zeronet */ static __be32 get_saddr(__be32 addr) { if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr) || ipv4_is_zeronet(addr)) return 0; return addr; } #define DSCP_BITS 0xfc void nft_fib4_eval_type(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { const struct nft_fib *priv = nft_expr_priv(expr); int noff = skb_network_offset(pkt->skb); u32 *dst = &regs->data[priv->dreg]; const struct net_device *dev = NULL; struct iphdr *iph, _iph; __be32 addr; if (priv->flags & NFTA_FIB_F_IIF) dev = nft_in(pkt); else if (priv->flags & NFTA_FIB_F_OIF) dev = nft_out(pkt); iph = skb_header_pointer(pkt->skb, noff, sizeof(_iph), &_iph); if (!iph) { regs->verdict.code = NFT_BREAK; return; } if (priv->flags & NFTA_FIB_F_DADDR) addr = iph->daddr; else addr = iph->saddr; *dst = inet_dev_addr_type(nft_net(pkt), dev, addr); } EXPORT_SYMBOL_GPL(nft_fib4_eval_type); void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { const struct nft_fib *priv = nft_expr_priv(expr); int noff = skb_network_offset(pkt->skb); u32 *dest = &regs->data[priv->dreg]; struct iphdr *iph, _iph; struct fib_result res; struct flowi4 fl4 = { .flowi4_scope = RT_SCOPE_UNIVERSE, .flowi4_iif = LOOPBACK_IFINDEX, .flowi4_uid = sock_net_uid(nft_net(pkt), NULL), }; const struct net_device *oif; const struct net_device *found; /* * Do not set flowi4_oif, it restricts results (for example, asking * for oif 3 will get RTN_UNICAST result even if the daddr exits * on another interface. * * Search results for the desired outinterface instead.
*/ if (priv->flags & NFTA_FIB_F_OIF) oif = nft_out(pkt); else if (priv->flags & NFTA_FIB_F_IIF) oif = nft_in(pkt); else oif = NULL; if (priv->flags & NFTA_FIB_F_IIF) fl4.flowi4_l3mdev = l3mdev_master_ifindex_rcu(oif); if (nft_hook(pkt) == NF_INET_PRE_ROUTING && nft_fib_is_loopback(pkt->skb, nft_in(pkt))) { nft_fib_store_result(dest, priv, nft_in(pkt)); return; } iph = skb_header_pointer(pkt->skb, noff, sizeof(_iph), &_iph); if (!iph) { regs->verdict.code = NFT_BREAK; return; } if (ipv4_is_zeronet(iph->saddr)) { if (ipv4_is_lbcast(iph->daddr) || ipv4_is_local_multicast(iph->daddr)) { nft_fib_store_result(dest, priv, pkt->skb->dev); return; } } if (priv->flags & NFTA_FIB_F_MARK) fl4.flowi4_mark = pkt->skb->mark; fl4.flowi4_tos = iph->tos & DSCP_BITS; if (priv->flags & NFTA_FIB_F_DADDR) { fl4.daddr = iph->daddr; fl4.saddr = get_saddr(iph->saddr); } else { if (nft_hook(pkt) == NF_INET_FORWARD && priv->flags & NFTA_FIB_F_IIF) fl4.flowi4_iif = nft_out(pkt)->ifindex; fl4.daddr = iph->saddr; fl4.saddr = get_saddr(iph->daddr); } *dest = 0; if (fib_lookup(nft_net(pkt), &fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE)) return; switch (res.type) { case RTN_UNICAST: break; case RTN_LOCAL: /* Should not see RTN_LOCAL here */ return; default: break; } if (!oif) { found = FIB_RES_DEV(res); } else { if (!fib_info_nh_uses_dev(res.fi, oif)) return; found = oif; } nft_fib_store_result(dest, priv, found); } EXPORT_SYMBOL_GPL(nft_fib4_eval); static struct nft_expr_type nft_fib4_type; static const struct nft_expr_ops nft_fib4_type_ops = { .type = &nft_fib4_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_fib)), .eval = nft_fib4_eval_type, .init = nft_fib_init, .dump = nft_fib_dump, .validate = nft_fib_validate, .reduce = nft_fib_reduce, }; static const struct nft_expr_ops nft_fib4_ops = { .type = &nft_fib4_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_fib)), .eval = nft_fib4_eval, .init = nft_fib_init, .dump = nft_fib_dump, .validate = nft_fib_validate, .reduce = nft_fib_reduce, }; static const struct nft_expr_ops * nft_fib4_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) { enum nft_fib_result result; if (!tb[NFTA_FIB_RESULT]) return ERR_PTR(-EINVAL); result = ntohl(nla_get_be32(tb[NFTA_FIB_RESULT])); switch (result) { case NFT_FIB_RESULT_OIF: return &nft_fib4_ops; case NFT_FIB_RESULT_OIFNAME: return &nft_fib4_ops; case NFT_FIB_RESULT_ADDRTYPE: return &nft_fib4_type_ops; default: return ERR_PTR(-EOPNOTSUPP); } } static struct nft_expr_type nft_fib4_type __read_mostly = { .name = "fib", .select_ops = nft_fib4_select_ops, .policy = nft_fib_policy, .maxattr = NFTA_FIB_MAX, .family = NFPROTO_IPV4, .owner = THIS_MODULE, }; static int __init nft_fib4_module_init(void) { return nft_register_expr(&nft_fib4_type); } static void __exit nft_fib4_module_exit(void) { nft_unregister_expr(&nft_fib4_type); } module_init(nft_fib4_module_init); module_exit(nft_fib4_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Florian Westphal <fw@strlen.de>"); MODULE_ALIAS_NFT_AF_EXPR(2, "fib"); MODULE_DESCRIPTION("nftables fib / ip route lookup support");
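/*
 * Illustrative only: nft_fib4_select_ops() above chooses the eval routine
 * from the NFTA_FIB_RESULT attribute that nft(8) encodes for rules such as a
 * reverse-path filter ("fib saddr . iif oif missing drop") or an address-type
 * match ("fib daddr type local"); the rule syntax is quoted from nftables
 * documentation, not from this file. A stand-alone sketch of that dispatch,
 * using hypothetical demo_* names:
 */
#include <stddef.h>

enum demo_fib_result { DEMO_FIB_OIF, DEMO_FIB_OIFNAME, DEMO_FIB_ADDRTYPE };

typedef void (*demo_eval_fn)(void);

static void demo_eval_route(void) { /* would perform the full fib_lookup() */ }
static void demo_eval_type(void) { /* would call inet_dev_addr_type() only */ }

static demo_eval_fn demo_select_eval(enum demo_fib_result result)
{
	switch (result) {
	case DEMO_FIB_OIF:
	case DEMO_FIB_OIFNAME:
		return demo_eval_route;	/* both need a route lookup */
	case DEMO_FIB_ADDRTYPE:
		return demo_eval_type;	/* address type alone is enough */
	default:
		return NULL;		/* the kernel returns ERR_PTR(-EOPNOTSUPP) */
	}
}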
/* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM io_uring #if !defined(_TRACE_IO_URING_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_IO_URING_H #include <linux/tracepoint.h> #include <uapi/linux/io_uring.h> #include <linux/io_uring_types.h> #include <linux/io_uring.h> struct io_wq_work; /** * io_uring_create - called after a new io_uring context was prepared * * @fd: corresponding file descriptor * @ctx: pointer to a ring context structure * @sq_entries: actual SQ size * @cq_entries: actual CQ size * @flags: SQ ring flags, provided to io_uring_setup(2) * * Allows to trace io_uring creation and provide pointer to a context, that can * be used later to find correlated events.
*/ TRACE_EVENT(io_uring_create, TP_PROTO(int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags), TP_ARGS(fd, ctx, sq_entries, cq_entries, flags), TP_STRUCT__entry ( __field( int, fd ) __field( void *, ctx ) __field( u32, sq_entries ) __field( u32, cq_entries ) __field( u32, flags ) ), TP_fast_assign( __entry->fd = fd; __entry->ctx = ctx; __entry->sq_entries = sq_entries; __entry->cq_entries = cq_entries; __entry->flags = flags; ), TP_printk("ring %p, fd %d sq size %d, cq size %d, flags 0x%x", __entry->ctx, __entry->fd, __entry->sq_entries, __entry->cq_entries, __entry->flags) ); /** * io_uring_register - called after a buffer/file/eventfd was successfully * registered for a ring * * @ctx: pointer to a ring context structure * @opcode: describes which operation to perform * @nr_user_files: number of registered files * @nr_user_bufs: number of registered buffers * @ret: return code * * Allows to trace fixed files/buffers, that could be registered to * avoid an overhead of getting references to them for every operation. This * event, together with io_uring_file_get, can provide a full picture of how * much overhead one can reduce via fixing. */ TRACE_EVENT(io_uring_register, TP_PROTO(void *ctx, unsigned opcode, unsigned nr_files, unsigned nr_bufs, long ret), TP_ARGS(ctx, opcode, nr_files, nr_bufs, ret), TP_STRUCT__entry ( __field( void *, ctx ) __field( unsigned, opcode ) __field( unsigned, nr_files) __field( unsigned, nr_bufs ) __field( long, ret ) ), TP_fast_assign( __entry->ctx = ctx; __entry->opcode = opcode; __entry->nr_files = nr_files; __entry->nr_bufs = nr_bufs; __entry->ret = ret; ), TP_printk("ring %p, opcode %d, nr_user_files %d, nr_user_bufs %d, " "ret %ld", __entry->ctx, __entry->opcode, __entry->nr_files, __entry->nr_bufs, __entry->ret) ); /** * io_uring_file_get - called before getting references to an SQE file * * @req: pointer to a submitted request * @fd: SQE file descriptor * * Allows to trace out how often an SQE file reference is obtained, which can * help figuring out if it makes sense to use fixed files, or check that fixed * files are used correctly. */ TRACE_EVENT(io_uring_file_get, TP_PROTO(struct io_kiocb *req, int fd), TP_ARGS(req, fd), TP_STRUCT__entry ( __field( void *, ctx ) __field( void *, req ) __field( u64, user_data ) __field( int, fd ) ), TP_fast_assign( __entry->ctx = req->ctx; __entry->req = req; __entry->user_data = req->cqe.user_data; __entry->fd = fd; ), TP_printk("ring %p, req %p, user_data 0x%llx, fd %d", __entry->ctx, __entry->req, __entry->user_data, __entry->fd) ); /** * io_uring_queue_async_work - called before submitting a new async work * * @req: pointer to a submitted request * @rw: type of workqueue, hashed or normal * * Allows to trace asynchronous work submission. 
*/ TRACE_EVENT(io_uring_queue_async_work, TP_PROTO(struct io_kiocb *req, int rw), TP_ARGS(req, rw), TP_STRUCT__entry ( __field( void *, ctx ) __field( void *, req ) __field( u64, user_data ) __field( u8, opcode ) __field( unsigned long long, flags ) __field( struct io_wq_work *, work ) __field( int, rw ) __string( op_str, io_uring_get_opcode(req->opcode) ) ), TP_fast_assign( __entry->ctx = req->ctx; __entry->req = req; __entry->user_data = req->cqe.user_data; __entry->flags = (__force unsigned long long) req->flags; __entry->opcode = req->opcode; __entry->work = &req->work; __entry->rw = rw; __assign_str(op_str, io_uring_get_opcode(req->opcode)); ), TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, flags 0x%llx, %s queue, work %p", __entry->ctx, __entry->req, __entry->user_data, __get_str(op_str), __entry->flags, __entry->rw ? "hashed" : "normal", __entry->work) ); /** * io_uring_defer - called when an io_uring request is deferred * * @req: pointer to a deferred request * * Allows to track deferred requests, to get an insight about what requests are * not started immediately. */ TRACE_EVENT(io_uring_defer, TP_PROTO(struct io_kiocb *req), TP_ARGS(req), TP_STRUCT__entry ( __field( void *, ctx ) __field( void *, req ) __field( unsigned long long, data ) __field( u8, opcode ) __string( op_str, io_uring_get_opcode(req->opcode) ) ), TP_fast_assign( __entry->ctx = req->ctx; __entry->req = req; __entry->data = req->cqe.user_data; __entry->opcode = req->opcode; __assign_str(op_str, io_uring_get_opcode(req->opcode)); ), TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s", __entry->ctx, __entry->req, __entry->data, __get_str(op_str)) ); /** * io_uring_link - called before the io_uring request added into link_list of * another request * * @req: pointer to a linked request * @target_req: pointer to a previous request, that would contain @req * * Allows to track linked requests, to understand dependencies between requests * and how does it influence their execution flow. */ TRACE_EVENT(io_uring_link, TP_PROTO(struct io_kiocb *req, struct io_kiocb *target_req), TP_ARGS(req, target_req), TP_STRUCT__entry ( __field( void *, ctx ) __field( void *, req ) __field( void *, target_req ) ), TP_fast_assign( __entry->ctx = req->ctx; __entry->req = req; __entry->target_req = target_req; ), TP_printk("ring %p, request %p linked after %p", __entry->ctx, __entry->req, __entry->target_req) ); /** * io_uring_cqring_wait - called before start waiting for an available CQE * * @ctx: pointer to a ring context structure * @min_events: minimal number of events to wait for * * Allows to track waiting for CQE, so that we can e.g. troubleshoot * situations, when an application wants to wait for an event, that never * comes. */ TRACE_EVENT(io_uring_cqring_wait, TP_PROTO(void *ctx, int min_events), TP_ARGS(ctx, min_events), TP_STRUCT__entry ( __field( void *, ctx ) __field( int, min_events ) ), TP_fast_assign( __entry->ctx = ctx; __entry->min_events = min_events; ), TP_printk("ring %p, min_events %d", __entry->ctx, __entry->min_events) ); /** * io_uring_fail_link - called before failing a linked request * * @req: request, which links were cancelled * @link: cancelled link * * Allows to track linked requests cancellation, to see not only that some work * was cancelled, but also which request was the reason. 
*/ TRACE_EVENT(io_uring_fail_link, TP_PROTO(struct io_kiocb *req, struct io_kiocb *link), TP_ARGS(req, link), TP_STRUCT__entry ( __field( void *, ctx ) __field( void *, req ) __field( unsigned long long, user_data ) __field( u8, opcode ) __field( void *, link ) __string( op_str, io_uring_get_opcode(req->opcode) ) ), TP_fast_assign( __entry->ctx = req->ctx; __entry->req = req; __entry->user_data = req->cqe.user_data; __entry->opcode = req->opcode; __entry->link = link; __assign_str(op_str, io_uring_get_opcode(req->opcode)); ), TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, link %p", __entry->ctx, __entry->req, __entry->user_data, __get_str(op_str), __entry->link) ); /** * io_uring_complete - called when completing an SQE * * @ctx: pointer to a ring context structure * @req: pointer to a submitted request * @user_data: user data associated with the request * @res: result of the request * @cflags: completion flags * @extra1: extra 64-bit data for CQE32 * @extra2: extra 64-bit data for CQE32 * */ TRACE_EVENT(io_uring_complete, TP_PROTO(void *ctx, void *req, u64 user_data, int res, unsigned cflags, u64 extra1, u64 extra2), TP_ARGS(ctx, req, user_data, res, cflags, extra1, extra2), TP_STRUCT__entry ( __field( void *, ctx ) __field( void *, req ) __field( u64, user_data ) __field( int, res ) __field( unsigned, cflags ) __field( u64, extra1 ) __field( u64, extra2 ) ), TP_fast_assign( __entry->ctx = ctx; __entry->req = req; __entry->user_data = user_data; __entry->res = res; __entry->cflags = cflags; __entry->extra1 = extra1; __entry->extra2 = extra2; ), TP_printk("ring %p, req %p, user_data 0x%llx, result %d, cflags 0x%x " "extra1 %llu extra2 %llu ", __entry->ctx, __entry->req, __entry->user_data, __entry->res, __entry->cflags, (unsigned long long) __entry->extra1, (unsigned long long) __entry->extra2) ); /** * io_uring_submit_req - called before submitting a request * * @req: pointer to a submitted request * * Allows to track SQE submitting, to understand what was the source of it, SQ * thread or io_uring_enter call. */ TRACE_EVENT(io_uring_submit_req, TP_PROTO(struct io_kiocb *req), TP_ARGS(req), TP_STRUCT__entry ( __field( void *, ctx ) __field( void *, req ) __field( unsigned long long, user_data ) __field( u8, opcode ) __field( unsigned long long, flags ) __field( bool, sq_thread ) __string( op_str, io_uring_get_opcode(req->opcode) ) ), TP_fast_assign( __entry->ctx = req->ctx; __entry->req = req; __entry->user_data = req->cqe.user_data; __entry->opcode = req->opcode; __entry->flags = (__force unsigned long long) req->flags; __entry->sq_thread = req->ctx->flags & IORING_SETUP_SQPOLL; __assign_str(op_str, io_uring_get_opcode(req->opcode)); ), TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, flags 0x%llx, " "sq_thread %d", __entry->ctx, __entry->req, __entry->user_data, __get_str(op_str), __entry->flags, __entry->sq_thread) ); /* * io_uring_poll_arm - called after arming a poll wait if successful * * @req: pointer to the armed request * @mask: request poll events mask * @events: registered events of interest * * Allows to track which fds are waiting for and what are the events of * interest. 
*/ TRACE_EVENT(io_uring_poll_arm, TP_PROTO(struct io_kiocb *req, int mask, int events), TP_ARGS(req, mask, events), TP_STRUCT__entry ( __field( void *, ctx ) __field( void *, req ) __field( unsigned long long, user_data ) __field( u8, opcode ) __field( int, mask ) __field( int, events ) __string( op_str, io_uring_get_opcode(req->opcode) ) ), TP_fast_assign( __entry->ctx = req->ctx; __entry->req = req; __entry->user_data = req->cqe.user_data; __entry->opcode = req->opcode; __entry->mask = mask; __entry->events = events; __assign_str(op_str, io_uring_get_opcode(req->opcode)); ), TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask 0x%x, events 0x%x", __entry->ctx, __entry->req, __entry->user_data, __get_str(op_str), __entry->mask, __entry->events) ); /* * io_uring_task_add - called after adding a task * * @req: pointer to request * @mask: request poll events mask * */ TRACE_EVENT(io_uring_task_add, TP_PROTO(struct io_kiocb *req, int mask), TP_ARGS(req, mask), TP_STRUCT__entry ( __field( void *, ctx ) __field( void *, req ) __field( unsigned long long, user_data ) __field( u8, opcode ) __field( int, mask ) __string( op_str, io_uring_get_opcode(req->opcode) ) ), TP_fast_assign( __entry->ctx = req->ctx; __entry->req = req; __entry->user_data = req->cqe.user_data; __entry->opcode = req->opcode; __entry->mask = mask; __assign_str(op_str, io_uring_get_opcode(req->opcode)); ), TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, mask %x", __entry->ctx, __entry->req, __entry->user_data, __get_str(op_str), __entry->mask) ); /* * io_uring_req_failed - called when an sqe is errored dring submission * * @sqe: pointer to the io_uring_sqe that failed * @req: pointer to request * @error: error it failed with * * Allows easier diagnosing of malformed requests in production systems. 
*/ TRACE_EVENT(io_uring_req_failed, TP_PROTO(const struct io_uring_sqe *sqe, struct io_kiocb *req, int error), TP_ARGS(sqe, req, error), TP_STRUCT__entry ( __field( void *, ctx ) __field( void *, req ) __field( unsigned long long, user_data ) __field( u8, opcode ) __field( u8, flags ) __field( u8, ioprio ) __field( u64, off ) __field( u64, addr ) __field( u32, len ) __field( u32, op_flags ) __field( u16, buf_index ) __field( u16, personality ) __field( u32, file_index ) __field( u64, pad1 ) __field( u64, addr3 ) __field( int, error ) __string( op_str, io_uring_get_opcode(sqe->opcode) ) ), TP_fast_assign( __entry->ctx = req->ctx; __entry->req = req; __entry->user_data = sqe->user_data; __entry->opcode = sqe->opcode; __entry->flags = sqe->flags; __entry->ioprio = sqe->ioprio; __entry->off = sqe->off; __entry->addr = sqe->addr; __entry->len = sqe->len; __entry->op_flags = sqe->poll32_events; __entry->buf_index = sqe->buf_index; __entry->personality = sqe->personality; __entry->file_index = sqe->file_index; __entry->pad1 = sqe->__pad2[0]; __entry->addr3 = sqe->addr3; __entry->error = error; __assign_str(op_str, io_uring_get_opcode(sqe->opcode)); ), TP_printk("ring %p, req %p, user_data 0x%llx, " "opcode %s, flags 0x%x, prio=%d, off=%llu, addr=%llu, " "len=%u, rw_flags=0x%x, buf_index=%d, " "personality=%d, file_index=%d, pad=0x%llx, addr3=%llx, " "error=%d", __entry->ctx, __entry->req, __entry->user_data, __get_str(op_str), __entry->flags, __entry->ioprio, (unsigned long long)__entry->off, (unsigned long long) __entry->addr, __entry->len, __entry->op_flags, __entry->buf_index, __entry->personality, __entry->file_index, (unsigned long long) __entry->pad1, (unsigned long long) __entry->addr3, __entry->error) ); /* * io_uring_cqe_overflow - a CQE overflowed * * @ctx: pointer to a ring context structure * @user_data: user data associated with the request * @res: CQE result * @cflags: CQE flags * @ocqe: pointer to the overflow cqe (if available) * */ TRACE_EVENT(io_uring_cqe_overflow, TP_PROTO(void *ctx, unsigned long long user_data, s32 res, u32 cflags, void *ocqe), TP_ARGS(ctx, user_data, res, cflags, ocqe), TP_STRUCT__entry ( __field( void *, ctx ) __field( unsigned long long, user_data ) __field( s32, res ) __field( u32, cflags ) __field( void *, ocqe ) ), TP_fast_assign( __entry->ctx = ctx; __entry->user_data = user_data; __entry->res = res; __entry->cflags = cflags; __entry->ocqe = ocqe; ), TP_printk("ring %p, user_data 0x%llx, res %d, cflags 0x%x, " "overflow_cqe %p", __entry->ctx, __entry->user_data, __entry->res, __entry->cflags, __entry->ocqe) ); /* * io_uring_task_work_run - ran task work * * @tctx: pointer to a io_uring_task * @count: how many functions it ran * */ TRACE_EVENT(io_uring_task_work_run, TP_PROTO(void *tctx, unsigned int count), TP_ARGS(tctx, count), TP_STRUCT__entry ( __field( void *, tctx ) __field( unsigned int, count ) ), TP_fast_assign( __entry->tctx = tctx; __entry->count = count; ), TP_printk("tctx %p, count %u", __entry->tctx, __entry->count) ); TRACE_EVENT(io_uring_short_write, TP_PROTO(void *ctx, u64 fpos, u64 wanted, u64 got), TP_ARGS(ctx, fpos, wanted, got), TP_STRUCT__entry( __field(void *, ctx) __field(u64, fpos) __field(u64, wanted) __field(u64, got) ), TP_fast_assign( __entry->ctx = ctx; __entry->fpos = fpos; __entry->wanted = wanted; __entry->got = got; ), TP_printk("ring %p, fpos %lld, wanted %lld, got %lld", __entry->ctx, __entry->fpos, __entry->wanted, __entry->got) ); /* * io_uring_local_work_run - ran ring local task work * * @tctx: pointer to a 
io_uring_ctx * @count: how many functions it ran * @loops: how many loops it ran * */ TRACE_EVENT(io_uring_local_work_run, TP_PROTO(void *ctx, int count, unsigned int loops), TP_ARGS(ctx, count, loops), TP_STRUCT__entry ( __field(void *, ctx ) __field(int, count ) __field(unsigned int, loops ) ), TP_fast_assign( __entry->ctx = ctx; __entry->count = count; __entry->loops = loops; ), TP_printk("ring %p, count %d, loops %u", __entry->ctx, __entry->count, __entry->loops) ); #endif /* _TRACE_IO_URING_H */ /* This part must be outside protection */ #include <trace/define_trace.h>
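/*
 * Illustrative only: each TRACE_EVENT() above is expanded, via the
 * <trace/define_trace.h> include at the end of this header, into a
 * trace_<name>() call that kernel code including this header can invoke; the
 * call is patched to a near no-op while the tracepoint is disabled. A hedged
 * sketch of an emit site for io_uring_create (the wrapper and its argument
 * names are hypothetical):
 */
static inline void demo_report_ring_created(int fd, void *ctx,
					    unsigned int sq_entries,
					    unsigned int cq_entries,
					    unsigned int flags)
{
	/* Matches TP_PROTO(int fd, void *ctx, u32 sq_entries, u32 cq_entries,
	 * u32 flags) declared above.
	 */
	trace_io_uring_create(fd, ctx, sq_entries, cq_entries, flags);
}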
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Definitions for the 'struct ptr_ring' datastructure. * * Author: * Michael S. Tsirkin <mst@redhat.com> * * Copyright (C) 2016 Red Hat, Inc. * * This is a limited-size FIFO maintaining pointers in FIFO order, with * one CPU producing entries and another consuming entries from a FIFO. * * This implementation tries to minimize cache-contention when there is a * single producer and a single consumer CPU.
*/ #ifndef _LINUX_PTR_RING_H #define _LINUX_PTR_RING_H 1 #ifdef __KERNEL__ #include <linux/spinlock.h> #include <linux/cache.h> #include <linux/types.h> #include <linux/compiler.h> #include <linux/slab.h> #include <linux/mm.h> #include <asm/errno.h> #endif struct ptr_ring { int producer ____cacheline_aligned_in_smp; spinlock_t producer_lock; int consumer_head ____cacheline_aligned_in_smp; /* next valid entry */ int consumer_tail; /* next entry to invalidate */ spinlock_t consumer_lock; /* Shared consumer/producer data */ /* Read-only by both the producer and the consumer */ int size ____cacheline_aligned_in_smp; /* max entries in queue */ int batch; /* number of entries to consume in a batch */ void **queue; }; /* Note: callers invoking this in a loop must use a compiler barrier, * for example cpu_relax(). * * NB: this is unlike __ptr_ring_empty in that callers must hold producer_lock: * see e.g. ptr_ring_full. */ static inline bool __ptr_ring_full(struct ptr_ring *r) { return r->queue[r->producer]; } static inline bool ptr_ring_full(struct ptr_ring *r) { bool ret; spin_lock(&r->producer_lock); ret = __ptr_ring_full(r); spin_unlock(&r->producer_lock); return ret; } static inline bool ptr_ring_full_irq(struct ptr_ring *r) { bool ret; spin_lock_irq(&r->producer_lock); ret = __ptr_ring_full(r); spin_unlock_irq(&r->producer_lock); return ret; } static inline bool ptr_ring_full_any(struct ptr_ring *r) { unsigned long flags; bool ret; spin_lock_irqsave(&r->producer_lock, flags); ret = __ptr_ring_full(r); spin_unlock_irqrestore(&r->producer_lock, flags); return ret; } static inline bool ptr_ring_full_bh(struct ptr_ring *r) { bool ret; spin_lock_bh(&r->producer_lock); ret = __ptr_ring_full(r); spin_unlock_bh(&r->producer_lock); return ret; } /* Note: callers invoking this in a loop must use a compiler barrier, * for example cpu_relax(). Callers must hold producer_lock. * Callers are responsible for making sure pointer that is being queued * points to a valid data. */ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) { if (unlikely(!r->size) || r->queue[r->producer]) return -ENOSPC; /* Make sure the pointer we are storing points to a valid data. */ /* Pairs with the dependency ordering in __ptr_ring_consume. */ smp_wmb(); WRITE_ONCE(r->queue[r->producer++], ptr); if (unlikely(r->producer >= r->size)) r->producer = 0; return 0; } /* * Note: resize (below) nests producer lock within consumer lock, so if you * consume in interrupt or BH context, you must disable interrupts/BH when * calling this. 
*/ static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr) { int ret; spin_lock(&r->producer_lock); ret = __ptr_ring_produce(r, ptr); spin_unlock(&r->producer_lock); return ret; } static inline int ptr_ring_produce_irq(struct ptr_ring *r, void *ptr) { int ret; spin_lock_irq(&r->producer_lock); ret = __ptr_ring_produce(r, ptr); spin_unlock_irq(&r->producer_lock); return ret; } static inline int ptr_ring_produce_any(struct ptr_ring *r, void *ptr) { unsigned long flags; int ret; spin_lock_irqsave(&r->producer_lock, flags); ret = __ptr_ring_produce(r, ptr); spin_unlock_irqrestore(&r->producer_lock, flags); return ret; } static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr) { int ret; spin_lock_bh(&r->producer_lock); ret = __ptr_ring_produce(r, ptr); spin_unlock_bh(&r->producer_lock); return ret; } static inline void *__ptr_ring_peek(struct ptr_ring *r) { if (likely(r->size)) return READ_ONCE(r->queue[r->consumer_head]); return NULL; } /* * Test ring empty status without taking any locks. * * NB: This is only safe to call if ring is never resized. * * However, if some other CPU consumes ring entries at the same time, the value * returned is not guaranteed to be correct. * * In this case - to avoid incorrectly detecting the ring * as empty - the CPU consuming the ring entries is responsible * for either consuming all ring entries until the ring is empty, * or synchronizing with some other CPU and causing it to * re-test __ptr_ring_empty and/or consume the ring enteries * after the synchronization point. * * Note: callers invoking this in a loop must use a compiler barrier, * for example cpu_relax(). */ static inline bool __ptr_ring_empty(struct ptr_ring *r) { if (likely(r->size)) return !r->queue[READ_ONCE(r->consumer_head)]; return true; } static inline bool ptr_ring_empty(struct ptr_ring *r) { bool ret; spin_lock(&r->consumer_lock); ret = __ptr_ring_empty(r); spin_unlock(&r->consumer_lock); return ret; } static inline bool ptr_ring_empty_irq(struct ptr_ring *r) { bool ret; spin_lock_irq(&r->consumer_lock); ret = __ptr_ring_empty(r); spin_unlock_irq(&r->consumer_lock); return ret; } static inline bool ptr_ring_empty_any(struct ptr_ring *r) { unsigned long flags; bool ret; spin_lock_irqsave(&r->consumer_lock, flags); ret = __ptr_ring_empty(r); spin_unlock_irqrestore(&r->consumer_lock, flags); return ret; } static inline bool ptr_ring_empty_bh(struct ptr_ring *r) { bool ret; spin_lock_bh(&r->consumer_lock); ret = __ptr_ring_empty(r); spin_unlock_bh(&r->consumer_lock); return ret; } /* Must only be called after __ptr_ring_peek returned !NULL */ static inline void __ptr_ring_discard_one(struct ptr_ring *r) { /* Fundamentally, what we want to do is update consumer * index and zero out the entry so producer can reuse it. * Doing it naively at each consume would be as simple as: * consumer = r->consumer; * r->queue[consumer++] = NULL; * if (unlikely(consumer >= r->size)) * consumer = 0; * r->consumer = consumer; * but that is suboptimal when the ring is full as producer is writing * out new entries in the same cache line. Defer these updates until a * batch of entries has been consumed. */ /* Note: we must keep consumer_head valid at all times for __ptr_ring_empty * to work correctly. */ int consumer_head = r->consumer_head; int head = consumer_head++; /* Once we have processed enough entries invalidate them in * the ring all at once so producer can reuse their space in the ring. 
* We also do this when we reach end of the ring - not mandatory * but helps keep the implementation simple. */ if (unlikely(consumer_head - r->consumer_tail >= r->batch || consumer_head >= r->size)) { /* Zero out entries in the reverse order: this way we touch the * cache line that producer might currently be reading the last; * producer won't make progress and touch other cache lines * besides the first one until we write out all entries. */ while (likely(head >= r->consumer_tail)) r->queue[head--] = NULL; r->consumer_tail = consumer_head; } if (unlikely(consumer_head >= r->size)) { consumer_head = 0; r->consumer_tail = 0; } /* matching READ_ONCE in __ptr_ring_empty for lockless tests */ WRITE_ONCE(r->consumer_head, consumer_head); } static inline void *__ptr_ring_consume(struct ptr_ring *r) { void *ptr; /* The READ_ONCE in __ptr_ring_peek guarantees that anyone * accessing data through the pointer is up to date. Pairs * with smp_wmb in __ptr_ring_produce. */ ptr = __ptr_ring_peek(r); if (ptr) __ptr_ring_discard_one(r); return ptr; } static inline int __ptr_ring_consume_batched(struct ptr_ring *r, void **array, int n) { void *ptr; int i; for (i = 0; i < n; i++) { ptr = __ptr_ring_consume(r); if (!ptr) break; array[i] = ptr; } return i; } /* * Note: resize (below) nests producer lock within consumer lock, so if you * call this in interrupt or BH context, you must disable interrupts/BH when * producing. */ static inline void *ptr_ring_consume(struct ptr_ring *r) { void *ptr; spin_lock(&r->consumer_lock); ptr = __ptr_ring_consume(r); spin_unlock(&r->consumer_lock); return ptr; } static inline void *ptr_ring_consume_irq(struct ptr_ring *r) { void *ptr; spin_lock_irq(&r->consumer_lock); ptr = __ptr_ring_consume(r); spin_unlock_irq(&r->consumer_lock); return ptr; } static inline void *ptr_ring_consume_any(struct ptr_ring *r) { unsigned long flags; void *ptr; spin_lock_irqsave(&r->consumer_lock, flags); ptr = __ptr_ring_consume(r); spin_unlock_irqrestore(&r->consumer_lock, flags); return ptr; } static inline void *ptr_ring_consume_bh(struct ptr_ring *r) { void *ptr; spin_lock_bh(&r->consumer_lock); ptr = __ptr_ring_consume(r); spin_unlock_bh(&r->consumer_lock); return ptr; } static inline int ptr_ring_consume_batched(struct ptr_ring *r, void **array, int n) { int ret; spin_lock(&r->consumer_lock); ret = __ptr_ring_consume_batched(r, array, n); spin_unlock(&r->consumer_lock); return ret; } static inline int ptr_ring_consume_batched_irq(struct ptr_ring *r, void **array, int n) { int ret; spin_lock_irq(&r->consumer_lock); ret = __ptr_ring_consume_batched(r, array, n); spin_unlock_irq(&r->consumer_lock); return ret; } static inline int ptr_ring_consume_batched_any(struct ptr_ring *r, void **array, int n) { unsigned long flags; int ret; spin_lock_irqsave(&r->consumer_lock, flags); ret = __ptr_ring_consume_batched(r, array, n); spin_unlock_irqrestore(&r->consumer_lock, flags); return ret; } static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r, void **array, int n) { int ret; spin_lock_bh(&r->consumer_lock); ret = __ptr_ring_consume_batched(r, array, n); spin_unlock_bh(&r->consumer_lock); return ret; } /* Cast to structure type and call a function without discarding from FIFO. * Function must return a value. * Callers must take consumer_lock. 
*/ #define __PTR_RING_PEEK_CALL(r, f) ((f)(__ptr_ring_peek(r))) #define PTR_RING_PEEK_CALL(r, f) ({ \ typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ \ spin_lock(&(r)->consumer_lock); \ __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ spin_unlock(&(r)->consumer_lock); \ __PTR_RING_PEEK_CALL_v; \ }) #define PTR_RING_PEEK_CALL_IRQ(r, f) ({ \ typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ \ spin_lock_irq(&(r)->consumer_lock); \ __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ spin_unlock_irq(&(r)->consumer_lock); \ __PTR_RING_PEEK_CALL_v; \ }) #define PTR_RING_PEEK_CALL_BH(r, f) ({ \ typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ \ spin_lock_bh(&(r)->consumer_lock); \ __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ spin_unlock_bh(&(r)->consumer_lock); \ __PTR_RING_PEEK_CALL_v; \ }) #define PTR_RING_PEEK_CALL_ANY(r, f) ({ \ typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ unsigned long __PTR_RING_PEEK_CALL_f;\ \ spin_lock_irqsave(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \ __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ spin_unlock_irqrestore(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \ __PTR_RING_PEEK_CALL_v; \ }) /* Not all gfp_t flags (besides GFP_KERNEL) are allowed. See * documentation for vmalloc for which of them are legal. */ static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp) { if (size > KMALLOC_MAX_SIZE / sizeof(void *)) return NULL; return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO); } static inline void __ptr_ring_set_size(struct ptr_ring *r, int size) { r->size = size; r->batch = SMP_CACHE_BYTES * 2 / sizeof(*(r->queue)); /* We need to set batch at least to 1 to make logic * in __ptr_ring_discard_one work correctly. * Batching too much (because ring is small) would cause a lot of * burstiness. Needs tuning, for now disable batching. */ if (r->batch > r->size / 2 || !r->batch) r->batch = 1; } static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp) { r->queue = __ptr_ring_init_queue_alloc(size, gfp); if (!r->queue) return -ENOMEM; __ptr_ring_set_size(r, size); r->producer = r->consumer_head = r->consumer_tail = 0; spin_lock_init(&r->producer_lock); spin_lock_init(&r->consumer_lock); return 0; } /* * Return entries into ring. Destroy entries that don't fit. * * Note: this is expected to be a rare slow path operation. * * Note: producer lock is nested within consumer lock, so if you * resize you must make sure all uses nest correctly. * In particular if you consume ring in interrupt or BH context, you must * disable interrupts/BH when doing so. */ static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n, void (*destroy)(void *)) { unsigned long flags; int head; spin_lock_irqsave(&r->consumer_lock, flags); spin_lock(&r->producer_lock); if (!r->size) goto done; /* * Clean out buffered entries (for simplicity). This way following code * can test entries for NULL and if not assume they are valid. */ head = r->consumer_head - 1; while (likely(head >= r->consumer_tail)) r->queue[head--] = NULL; r->consumer_tail = r->consumer_head; /* * Go over entries in batch, start moving head back and copy entries. * Stop when we run into previously unconsumed entries. */ while (n) { head = r->consumer_head - 1; if (head < 0) head = r->size - 1; if (r->queue[head]) { /* This batch entry will have to be destroyed. 
*/ goto done; } r->queue[head] = batch[--n]; r->consumer_tail = head; /* matching READ_ONCE in __ptr_ring_empty for lockless tests */ WRITE_ONCE(r->consumer_head, head); } done: /* Destroy all entries left in the batch. */ while (n) destroy(batch[--n]); spin_unlock(&r->producer_lock); spin_unlock_irqrestore(&r->consumer_lock, flags); } static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue, int size, gfp_t gfp, void (*destroy)(void *)) { int producer = 0; void **old; void *ptr; while ((ptr = __ptr_ring_consume(r))) if (producer < size) queue[producer++] = ptr; else if (destroy) destroy(ptr); if (producer >= size) producer = 0; __ptr_ring_set_size(r, size); r->producer = producer; r->consumer_head = 0; r->consumer_tail = 0; old = r->queue; r->queue = queue; return old; } /* * Note: producer lock is nested within consumer lock, so if you * resize you must make sure all uses nest correctly. * In particular if you consume ring in interrupt or BH context, you must * disable interrupts/BH when doing so. */ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, void (*destroy)(void *)) { unsigned long flags; void **queue = __ptr_ring_init_queue_alloc(size, gfp); void **old; if (!queue) return -ENOMEM; spin_lock_irqsave(&(r)->consumer_lock, flags); spin_lock(&(r)->producer_lock); old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy); spin_unlock(&(r)->producer_lock); spin_unlock_irqrestore(&(r)->consumer_lock, flags); kvfree(old); return 0; } /* * Note: producer lock is nested within consumer lock, so if you * resize you must make sure all uses nest correctly. * In particular if you consume ring in interrupt or BH context, you must * disable interrupts/BH when doing so. */ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, unsigned int nrings, int size, gfp_t gfp, void (*destroy)(void *)) { unsigned long flags; void ***queues; int i; queues = kmalloc_array(nrings, sizeof(*queues), gfp); if (!queues) goto noqueues; for (i = 0; i < nrings; ++i) { queues[i] = __ptr_ring_init_queue_alloc(size, gfp); if (!queues[i]) goto nomem; } for (i = 0; i < nrings; ++i) { spin_lock_irqsave(&(rings[i])->consumer_lock, flags); spin_lock(&(rings[i])->producer_lock); queues[i] = __ptr_ring_swap_queue(rings[i], queues[i], size, gfp, destroy); spin_unlock(&(rings[i])->producer_lock); spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags); } for (i = 0; i < nrings; ++i) kvfree(queues[i]); kfree(queues); return 0; nomem: while (--i >= 0) kvfree(queues[i]); kfree(queues); noqueues: return -ENOMEM; } static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *)) { void *ptr; if (destroy) while ((ptr = ptr_ring_consume(r))) destroy(ptr); kvfree(r->queue); } #endif /* _LINUX_PTR_RING_H */
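/*
 * Illustrative only: a minimal, hedged sketch of single-producer /
 * single-consumer use of the ptr_ring API defined above, written in kernel
 * style. The demo_* names, the ring size and the process-context/GFP_KERNEL
 * assumptions belong to this example, not to the header.
 */
#include <linux/ptr_ring.h>
#include <linux/slab.h>

static struct ptr_ring demo_ring;

static void demo_ring_free_entry(void *ptr)
{
	kfree(ptr);
}

static int demo_ring_setup(void)
{
	/* 128 zeroed slots; the ring starts out empty. */
	return ptr_ring_init(&demo_ring, 128, GFP_KERNEL);
}

static int demo_ring_send(void *item)
{
	/* item must not be NULL: a NULL slot is how the ring marks "empty".
	 * Returns -ENOSPC when the slot at the producer index is still in use.
	 */
	return ptr_ring_produce(&demo_ring, item);
}

static void *demo_ring_receive(void)
{
	/* NULL means the ring is currently empty. */
	return ptr_ring_consume(&demo_ring);
}

static void demo_ring_teardown(void)
{
	/* Free whatever the consumer never drained, then the queue array. */
	ptr_ring_cleanup(&demo_ring, demo_ring_free_entry);
}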
// SPDX-License-Identifier: GPL-2.0 #ifndef _LINUX_KERNEL_TRACE_H #define _LINUX_KERNEL_TRACE_H #include <linux/fs.h> #include <linux/atomic.h> #include <linux/sched.h> #include <linux/clocksource.h> #include <linux/ring_buffer.h> #include <linux/mmiotrace.h> #include <linux/tracepoint.h> #include <linux/ftrace.h> #include <linux/trace.h> #include <linux/hw_breakpoint.h> #include <linux/trace_seq.h> #include <linux/trace_events.h> #include <linux/compiler.h> #include <linux/glob.h> #include <linux/irq_work.h> #include <linux/workqueue.h> #include <linux/ctype.h> #include <linux/once_lite.h> #include "pid_list.h" #ifdef CONFIG_FTRACE_SYSCALLS #include <asm/unistd.h> /* For NR_syscalls */ #include <asm/syscall.h> /* some archs define it here */ #endif #define TRACE_MODE_WRITE 0640 #define TRACE_MODE_READ 0440 enum trace_type { __TRACE_FIRST_TYPE = 0, TRACE_FN, TRACE_CTX, TRACE_WAKE, TRACE_STACK, TRACE_PRINT, TRACE_BPRINT, TRACE_MMIO_RW, TRACE_MMIO_MAP, TRACE_BRANCH, TRACE_GRAPH_RET, TRACE_GRAPH_ENT, TRACE_USER_STACK, TRACE_BLK, TRACE_BPUTS, TRACE_HWLAT, TRACE_OSNOISE, TRACE_TIMERLAT, TRACE_RAW_DATA, TRACE_FUNC_REPEATS, __TRACE_LAST_TYPE, }; #undef __field #define __field(type, item) type item; #undef __field_fn #define __field_fn(type, item) type item; #undef __field_struct #define __field_struct(type, item)
__field(type, item) #undef __field_desc #define __field_desc(type, container, item) #undef __field_packed #define __field_packed(type, container, item) #undef __array #define __array(type, item, size) type item[size]; /* * For backward compatibility, older user space expects to see the * kernel_stack event with a fixed size caller field. But today the fix * size is ignored by the kernel, and the real structure is dynamic. * Expose to user space: "unsigned long caller[8];" but the real structure * will be "unsigned long caller[] __counted_by(size)" */ #undef __stack_array #define __stack_array(type, item, size, field) type item[] __counted_by(field); #undef __array_desc #define __array_desc(type, container, item, size) #undef __dynamic_array #define __dynamic_array(type, item) type item[]; #undef __rel_dynamic_array #define __rel_dynamic_array(type, item) type item[]; #undef F_STRUCT #define F_STRUCT(args...) args #undef FTRACE_ENTRY #define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \ struct struct_name { \ struct trace_entry ent; \ tstruct \ } #undef FTRACE_ENTRY_DUP #define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk) #undef FTRACE_ENTRY_REG #define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn) \ FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) #undef FTRACE_ENTRY_PACKED #define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print) \ FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed #include "trace_entries.h" /* Use this for memory failure errors */ #define MEM_FAIL(condition, fmt, ...) \ DO_ONCE_LITE_IF(condition, pr_err, "ERROR: " fmt, ##__VA_ARGS__) #define FAULT_STRING "(fault)" #define HIST_STACKTRACE_DEPTH 16 #define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long)) #define HIST_STACKTRACE_SKIP 5 /* * syscalls are special, and need special handling, this is why * they are not included in trace_entries.h */ struct syscall_trace_enter { struct trace_entry ent; int nr; unsigned long args[]; }; struct syscall_trace_exit { struct trace_entry ent; int nr; long ret; }; struct kprobe_trace_entry_head { struct trace_entry ent; unsigned long ip; }; struct eprobe_trace_entry_head { struct trace_entry ent; }; struct kretprobe_trace_entry_head { struct trace_entry ent; unsigned long func; unsigned long ret_ip; }; struct fentry_trace_entry_head { struct trace_entry ent; unsigned long ip; }; struct fexit_trace_entry_head { struct trace_entry ent; unsigned long func; unsigned long ret_ip; }; #define TRACE_BUF_SIZE 1024 struct trace_array; /* * The CPU trace array - it consists of thousands of trace entries * plus some other descriptor data: (for example which task started * the trace, etc.) 
*/ struct trace_array_cpu { atomic_t disabled; void *buffer_page; /* ring buffer spare */ unsigned long entries; unsigned long saved_latency; unsigned long critical_start; unsigned long critical_end; unsigned long critical_sequence; unsigned long nice; unsigned long policy; unsigned long rt_priority; unsigned long skipped_entries; u64 preempt_timestamp; pid_t pid; kuid_t uid; char comm[TASK_COMM_LEN]; #ifdef CONFIG_FUNCTION_TRACER int ftrace_ignore_pid; #endif bool ignore_pid; }; struct tracer; struct trace_option_dentry; struct array_buffer { struct trace_array *tr; struct trace_buffer *buffer; struct trace_array_cpu __percpu *data; u64 time_start; int cpu; }; #define TRACE_FLAGS_MAX_SIZE 32 struct trace_options { struct tracer *tracer; struct trace_option_dentry *topts; }; struct trace_pid_list *trace_pid_list_alloc(void); void trace_pid_list_free(struct trace_pid_list *pid_list); bool trace_pid_list_is_set(struct trace_pid_list *pid_list, unsigned int pid); int trace_pid_list_set(struct trace_pid_list *pid_list, unsigned int pid); int trace_pid_list_clear(struct trace_pid_list *pid_list, unsigned int pid); int trace_pid_list_first(struct trace_pid_list *pid_list, unsigned int *pid); int trace_pid_list_next(struct trace_pid_list *pid_list, unsigned int pid, unsigned int *next); enum { TRACE_PIDS = BIT(0), TRACE_NO_PIDS = BIT(1), }; static inline bool pid_type_enabled(int type, struct trace_pid_list *pid_list, struct trace_pid_list *no_pid_list) { /* Return true if the pid list in type has pids */ return ((type & TRACE_PIDS) && pid_list) || ((type & TRACE_NO_PIDS) && no_pid_list); } static inline bool still_need_pid_events(int type, struct trace_pid_list *pid_list, struct trace_pid_list *no_pid_list) { /* * Turning off what is in @type, return true if the "other" * pid list, still has pids in it. */ return (!(type & TRACE_PIDS) && pid_list) || (!(type & TRACE_NO_PIDS) && no_pid_list); } typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data); /** * struct cond_snapshot - conditional snapshot data and callback * * The cond_snapshot structure encapsulates a callback function and * data associated with the snapshot for a given tracing instance. * * When a snapshot is taken conditionally, by invoking * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is * passed in turn to the cond_snapshot.update() function. That data * can be compared by the update() implementation with the cond_data * contained within the struct cond_snapshot instance associated with * the trace_array. Because the tr->max_lock is held throughout the * update() call, the update() function can directly retrieve the * cond_snapshot and cond_data associated with the per-instance * snapshot associated with the trace_array. * * The cond_snapshot.update() implementation can save data to be * associated with the snapshot if it decides to, and returns 'true' * in that case, or it returns 'false' if the conditional snapshot * shouldn't be taken. * * The cond_snapshot instance is created and associated with the * user-defined cond_data by tracing_cond_snapshot_enable(). * Likewise, the cond_snapshot instance is destroyed and is no longer * associated with the trace instance by * tracing_cond_snapshot_disable(). * * The method below is required. * * @update: When a conditional snapshot is invoked, the update() * callback function is invoked with the tr->max_lock held. 
typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data);

/**
 * struct cond_snapshot - conditional snapshot data and callback
 *
 * The cond_snapshot structure encapsulates a callback function and
 * data associated with the snapshot for a given tracing instance.
 *
 * When a snapshot is taken conditionally, by invoking
 * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is
 * passed in turn to the cond_snapshot.update() function. That data
 * can be compared by the update() implementation with the cond_data
 * contained within the struct cond_snapshot instance associated with
 * the trace_array. Because the tr->max_lock is held throughout the
 * update() call, the update() function can directly retrieve the
 * cond_snapshot and cond_data associated with the per-instance
 * snapshot associated with the trace_array.
 *
 * The cond_snapshot.update() implementation can save data to be
 * associated with the snapshot if it decides to, and returns 'true'
 * in that case, or it returns 'false' if the conditional snapshot
 * shouldn't be taken.
 *
 * The cond_snapshot instance is created and associated with the
 * user-defined cond_data by tracing_cond_snapshot_enable().
 * Likewise, the cond_snapshot instance is destroyed and is no longer
 * associated with the trace instance by
 * tracing_cond_snapshot_disable().
 *
 * The method below is required.
 *
 * @update: When a conditional snapshot is invoked, the update()
 *	callback function is invoked with the tr->max_lock held. The
 *	update() implementation signals whether or not to actually
 *	take the snapshot, by returning 'true' if so, 'false' if no
 *	snapshot should be taken. Because the max_lock is held for
 *	the duration of update(), the implementation is safe to
 *	directly retrieve and save any implementation data it needs
 *	to in association with the snapshot.
 */
struct cond_snapshot {
	void				*cond_data;
	cond_update_fn_t		update;
};
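/*
 * Illustrative sketch (editor's example): a minimal cond_update_fn_t
 * implementation of the kind described above. tr->max_lock is held while
 * it runs. Names such as "my_cond", "my_update" and "read_sensor" are
 * made up purely for illustration.
 *
 *	struct my_cond {
 *		u64 threshold;
 *	};
 *
 *	static bool my_update(struct trace_array *tr, void *cond_data)
 *	{
 *		struct my_cond *cond = cond_data;
 *
 *		return read_sensor() > cond->threshold;
 *	}
 *
 * Returning true tells the core to take the snapshot; returning false
 * skips it.
 */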
/*
 * struct trace_func_repeats - used to keep track of the consecutive
 * (on the same CPU) calls of a single function.
 */
struct trace_func_repeats {
	unsigned long	ip;
	unsigned long	parent_ip;
	unsigned long	count;
	u64		ts_last_call;
};

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well.
 */
struct trace_array {
	struct list_head	list;
	char			*name;
	struct array_buffer	array_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the
	 * array_buffer. When a snapshot is taken, the buffer of the
	 * max_buffer is swapped with the buffer of the array_buffer
	 * and the buffers are reset for the array_buffer so the
	 * tracing can continue.
	 */
	struct array_buffer	max_buffer;
	bool			allocated_snapshot;
	spinlock_t		snapshot_trigger_lock;
	unsigned int		snapshot;
	unsigned long		max_latency;
#ifdef CONFIG_FSNOTIFY
	struct dentry		*d_max_latency;
	struct work_struct	fsnotify_work;
	struct irq_work		fsnotify_irqwork;
#endif
#endif
	struct trace_pid_list	__rcu *filtered_pids;
	struct trace_pid_list	__rcu *filtered_no_pids;
	/*
	 * max_lock is used to protect the swapping of buffers
	 * when taking a max snapshot. The buffers themselves are
	 * protected by per_cpu spinlocks. But the action of the swap
	 * needs its own lock.
	 *
	 * This is defined as an arch_spinlock_t in order to help
	 * with performance when lockdep debugging is enabled.
	 *
	 * It is also used in other places outside of update_max_tr,
	 * so it needs to be defined outside of the
	 * CONFIG_TRACER_MAX_TRACE.
	 */
	arch_spinlock_t		max_lock;
	int			buffer_disabled;
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	int			stop_count;
	int			clock_id;
	int			nr_topts;
	bool			clear_trace;
	int			buffer_percent;
	unsigned int		n_err_log_entries;
	struct tracer		*current_trace;
	unsigned int		trace_flags;
	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
	unsigned int		flags;
	raw_spinlock_t		start_lock;
	const char		*system_names;
	struct list_head	err_log;
	struct dentry		*dir;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct eventfs_inode	*event_dir;
	struct trace_options	*topts;
	struct list_head	systems;
	struct list_head	events;
	struct trace_event_file *trace_marker_file;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
	/* one per_cpu trace_pipe can be opened by only one user */
	cpumask_var_t		pipe_cpumask;
	int			ref;
	int			trace_ref;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops	*ops;
	struct trace_pid_list	__rcu *function_pids;
	struct trace_pid_list	__rcu *function_no_pids;
#ifdef CONFIG_DYNAMIC_FTRACE
	/* All of these are protected by the ftrace_lock */
	struct list_head	func_probes;
	struct list_head	mod_trace;
	struct list_head	mod_notrace;
#endif
	/* function tracing enabled */
	int			function_enabled;
#endif
	int			no_filter_buffering_ref;
	struct list_head	hist_vars;
#ifdef CONFIG_TRACER_SNAPSHOT
	struct cond_snapshot	*cond_snapshot;
#endif
	struct trace_func_repeats	__percpu *last_func_repeats;
	/*
	 * On boot up, the ring buffer is set to the minimum size, so that
	 * we do not waste memory on systems that are not using tracing.
	 */
	bool ring_buffer_expanded;
};

enum {
	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
};

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern int tracing_check_open_get_tr(struct trace_array *tr);
extern struct trace_array *trace_array_find(const char *instance);
extern struct trace_array *trace_array_find_get(const char *instance);

extern u64 tracing_event_time_stamp(struct trace_buffer *buffer,
				    struct ring_buffer_event *rbe);
extern int tracing_set_filter_buffering(struct trace_array *tr, bool set);
extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);

extern bool trace_clock_in_ns(struct trace_array *tr);

/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	if (list_empty(&ftrace_trace_arrays))
		return NULL;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)			\
	if (FTRACE_CMP_TYPE(var, etype)) {			\
		var = (typeof(var))(entry);			\
		WARN_ON(id != 0 && (entry)->type != id);	\
		break;						\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT);	\
		IF_ASSIGN(var, ent, struct osnoise_entry, TRACE_OSNOISE);\
		IF_ASSIGN(var, ent, struct timerlat_entry, TRACE_TIMERLAT);\
		IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH);	\
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		IF_ASSIGN(var, ent, struct func_repeats_entry,		\
			  TRACE_FUNC_REPEATS);				\
		__ftrace_bad_type();					\
	} while (0)
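/*
 * Illustrative sketch (editor's example): how output code typically uses
 * trace_assign_type(). "iter" stands for a trace iterator as seen by the
 * print callbacks; the entry is only accessed through "field" once its
 * type has been verified by the macro above.
 *
 *	struct ftrace_entry *field;
 *	struct trace_entry *entry = iter->ent;
 *
 *	trace_assign_type(field, entry);
 *	trace_seq_printf(&iter->seq, "%ps <-- %ps\n",
 *			 (void *)field->ip, (void *)field->parent_ip);
 */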
/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
	struct tracer		*trace;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

/**
 * struct tracer - a specific tracer and its callbacks to interact with tracefs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_on)
 * @stop: called when tracing is paused (echo 0 > tracing_on)
 * @update_thresh: called when tracing_thresh is updated
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_headers: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char	*name;
	int		(*init)(struct trace_array *tr);
	void