Total coverage: 360220 of 1852376 (20%)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Red Hat, Inc.
*/ #include <linux/cred.h> #include <linux/file.h> #include <linux/mount.h> #include <linux/xattr.h> #include <linux/uio.h> #include <linux/uaccess.h> #include <linux/security.h> #include <linux/fs.h> #include <linux/backing-file.h> #include "overlayfs.h" static char ovl_whatisit(struct inode *inode, struct inode *realinode) { if (realinode != ovl_inode_upper(inode)) return 'l'; if (ovl_has_upperdata(inode)) return 'u'; else return 'm'; } static struct file *ovl_open_realfile(const struct file *file, const struct path *realpath) { struct inode *realinode = d_inode(realpath->dentry); struct inode *inode = file_inode(file); struct mnt_idmap *real_idmap; struct file *realfile; const struct cred *old_cred; int flags = file->f_flags | OVL_OPEN_FLAGS; int acc_mode = ACC_MODE(flags); int err; if (flags & O_APPEND) acc_mode |= MAY_APPEND; old_cred = ovl_override_creds(inode->i_sb); real_idmap = mnt_idmap(realpath->mnt); err = inode_permission(real_idmap, realinode, MAY_OPEN | acc_mode); if (err) { realfile = ERR_PTR(err); } else { if (!inode_owner_or_capable(real_idmap, realinode)) flags &= ~O_NOATIME; realfile = backing_file_open(&file->f_path, flags, realpath, current_cred()); } ovl_revert_creds(old_cred); pr_debug("open(%p[%pD2/%c], 0%o) -> (%p, 0%o)\n", file, file, ovl_whatisit(inode, realinode), file->f_flags, realfile, IS_ERR(realfile) ? 0 : realfile->f_flags); return realfile; } #define OVL_SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT) static int ovl_change_flags(struct file *file, unsigned int flags) { struct inode *inode = file_inode(file); int err; flags &= OVL_SETFL_MASK; if (((flags ^ file->f_flags) & O_APPEND) && IS_APPEND(inode)) return -EPERM; if ((flags & O_DIRECT) && !(file->f_mode & FMODE_CAN_ODIRECT)) return -EINVAL; if (file->f_op->check_flags) { err = file->f_op->check_flags(flags); if (err) return err; } spin_lock(&file->f_lock); file->f_flags = (file->f_flags & ~OVL_SETFL_MASK) | flags; file->f_iocb_flags = iocb_flags(file); spin_unlock(&file->f_lock); return 0; } struct ovl_file { struct file *realfile; struct file *upperfile; }; struct ovl_file *ovl_file_alloc(struct file *realfile) { struct ovl_file *of = kzalloc(sizeof(struct ovl_file), GFP_KERNEL); if (unlikely(!of)) return NULL; of->realfile = realfile; return of; } void ovl_file_free(struct ovl_file *of) { fput(of->realfile); if (of->upperfile) fput(of->upperfile); kfree(of); } static bool ovl_is_real_file(const struct file *realfile, const struct path *realpath) { return file_inode(realfile) == d_inode(realpath->dentry); } static struct file *ovl_real_file_path(const struct file *file, struct path *realpath) { struct ovl_file *of = file->private_data; struct file *realfile = of->realfile; if (WARN_ON_ONCE(!realpath->dentry)) return ERR_PTR(-EIO); /* * If the realfile that we want is not where the data used to be at * open time, either we'd been copied up, or it's an fsync of a * metacopied file. We need the upperfile either way, so see if it * is already opened and if it is not then open and store it. 
*/ if (unlikely(!ovl_is_real_file(realfile, realpath))) { struct file *upperfile = READ_ONCE(of->upperfile); struct file *old; if (!upperfile) { /* Nobody opened upperfile yet */ upperfile = ovl_open_realfile(file, realpath); if (IS_ERR(upperfile)) return upperfile; /* Store the upperfile for later */ old = cmpxchg_release(&of->upperfile, NULL, upperfile); if (old) { /* Someone opened upperfile before us */ fput(upperfile); upperfile = old; } } /* * Stored file must be from the right inode, unless someone's * been corrupting the upper layer. */ if (WARN_ON_ONCE(!ovl_is_real_file(upperfile, realpath))) return ERR_PTR(-EIO); realfile = upperfile; } /* Did the flags change since open? */ if (unlikely((file->f_flags ^ realfile->f_flags) & ~OVL_OPEN_FLAGS)) { int err = ovl_change_flags(realfile, file->f_flags); if (err) return ERR_PTR(err); } return realfile; } static struct file *ovl_real_file(const struct file *file) { struct dentry *dentry = file_dentry(file); struct path realpath; int err; if (d_is_dir(dentry)) { struct file *f = ovl_dir_real_file(file, false); if (WARN_ON_ONCE(!f)) return ERR_PTR(-EIO); return f; } /* lazy lookup and verify of lowerdata */ err = ovl_verify_lowerdata(dentry); if (err) return ERR_PTR(err); ovl_path_realdata(dentry, &realpath); return ovl_real_file_path(file, &realpath); } static int ovl_open(struct inode *inode, struct file *file) { struct dentry *dentry = file_dentry(file); struct file *realfile; struct path realpath; struct ovl_file *of; int err; /* lazy lookup and verify lowerdata */ err = ovl_verify_lowerdata(dentry); if (err) return err; err = ovl_maybe_copy_up(dentry, file->f_flags); if (err) return err; /* No longer need these flags, so don't pass them on to underlying fs */ file->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC); ovl_path_realdata(dentry, &realpath); if (!realpath.dentry) return -EIO; realfile = ovl_open_realfile(file, &realpath); if (IS_ERR(realfile)) return PTR_ERR(realfile); of = ovl_file_alloc(realfile); if (!of) { fput(realfile); return -ENOMEM; } file->private_data = of; return 0; } static int ovl_release(struct inode *inode, struct file *file) { ovl_file_free(file->private_data); return 0; } static loff_t ovl_llseek(struct file *file, loff_t offset, int whence) { struct inode *inode = file_inode(file); struct file *realfile; const struct cred *old_cred; loff_t ret; /* * The two special cases below do not need to involve real fs, * so we can optimizing concurrent callers. */ if (offset == 0) { if (whence == SEEK_CUR) return file->f_pos; if (whence == SEEK_SET) return vfs_setpos(file, 0, 0); } realfile = ovl_real_file(file); if (IS_ERR(realfile)) return PTR_ERR(realfile); /* * Overlay file f_pos is the master copy that is preserved * through copy up and modified on read/write, but only real * fs knows how to SEEK_HOLE/SEEK_DATA and real fs may impose * limitations that are more strict than ->s_maxbytes for specific * files, so we use the real file to perform seeks. 
*/ ovl_inode_lock(inode); realfile->f_pos = file->f_pos; old_cred = ovl_override_creds(inode->i_sb); ret = vfs_llseek(realfile, offset, whence); ovl_revert_creds(old_cred); file->f_pos = realfile->f_pos; ovl_inode_unlock(inode); return ret; } static void ovl_file_modified(struct file *file) { /* Update size/mtime */ ovl_copyattr(file_inode(file)); } static void ovl_file_end_write(struct kiocb *iocb, ssize_t ret) { ovl_file_modified(iocb->ki_filp); } static void ovl_file_accessed(struct file *file) { struct inode *inode, *upperinode; struct timespec64 ctime, uctime; struct timespec64 mtime, umtime; if (file->f_flags & O_NOATIME) return; inode = file_inode(file); upperinode = ovl_inode_upper(inode); if (!upperinode) return; ctime = inode_get_ctime(inode); uctime = inode_get_ctime(upperinode); mtime = inode_get_mtime(inode); umtime = inode_get_mtime(upperinode); if ((!timespec64_equal(&mtime, &umtime)) || !timespec64_equal(&ctime, &uctime)) { inode_set_mtime_to_ts(inode, inode_get_mtime(upperinode)); inode_set_ctime_to_ts(inode, uctime); } touch_atime(&file->f_path); } static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct file *realfile; struct backing_file_ctx ctx = { .cred = ovl_creds(file_inode(file)->i_sb), .accessed = ovl_file_accessed, }; if (!iov_iter_count(iter)) return 0; realfile = ovl_real_file(file); if (IS_ERR(realfile)) return PTR_ERR(realfile); return backing_file_read_iter(realfile, iter, iocb, iocb->ki_flags, &ctx); } static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); struct file *realfile; ssize_t ret; int ifl = iocb->ki_flags; struct backing_file_ctx ctx = { .cred = ovl_creds(inode->i_sb), .end_write = ovl_file_end_write, }; if (!iov_iter_count(iter)) return 0; inode_lock(inode); /* Update mode */ ovl_copyattr(inode); realfile = ovl_real_file(file); ret = PTR_ERR(realfile); if (IS_ERR(realfile)) goto out_unlock; if (!ovl_should_sync(OVL_FS(inode->i_sb))) ifl &= ~(IOCB_DSYNC | IOCB_SYNC); /* * Overlayfs doesn't support deferred completions, don't copy * this property in case it is set by the issuer. */ ifl &= ~IOCB_DIO_CALLER_COMP; ret = backing_file_write_iter(realfile, iter, iocb, ifl, &ctx); out_unlock: inode_unlock(inode); return ret; } static ssize_t ovl_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct file *realfile; ssize_t ret; struct backing_file_ctx ctx = { .cred = ovl_creds(file_inode(in)->i_sb), .accessed = ovl_file_accessed, }; struct kiocb iocb; realfile = ovl_real_file(in); if (IS_ERR(realfile)) return PTR_ERR(realfile); init_sync_kiocb(&iocb, in); iocb.ki_pos = *ppos; ret = backing_file_splice_read(realfile, &iocb, pipe, len, flags, &ctx); *ppos = iocb.ki_pos; return ret; } /* * Calling iter_file_splice_write() directly from overlay's f_op may deadlock * due to lock order inversion between pipe->mutex in iter_file_splice_write() * and file_start_write(realfile) in ovl_write_iter(). * * So do everything ovl_write_iter() does and call iter_file_splice_write() on * the real file. 
*/ static ssize_t ovl_splice_write(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { struct file *realfile; struct inode *inode = file_inode(out); ssize_t ret; struct backing_file_ctx ctx = { .cred = ovl_creds(inode->i_sb), .end_write = ovl_file_end_write, }; struct kiocb iocb; inode_lock(inode); /* Update mode */ ovl_copyattr(inode); realfile = ovl_real_file(out); ret = PTR_ERR(realfile); if (IS_ERR(realfile)) goto out_unlock; init_sync_kiocb(&iocb, out); iocb.ki_pos = *ppos; ret = backing_file_splice_write(pipe, realfile, &iocb, len, flags, &ctx); *ppos = iocb.ki_pos; out_unlock: inode_unlock(inode); return ret; } static int ovl_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct dentry *dentry = file_dentry(file); enum ovl_path_type type; struct path upperpath; struct file *upperfile; const struct cred *old_cred; int ret; ret = ovl_sync_status(OVL_FS(file_inode(file)->i_sb)); if (ret <= 0) return ret; /* Don't sync lower file for fear of receiving EROFS error */ type = ovl_path_type(dentry); if (!OVL_TYPE_UPPER(type) || (datasync && OVL_TYPE_MERGE(type))) return 0; ovl_path_upper(dentry, &upperpath); upperfile = ovl_real_file_path(file, &upperpath); if (IS_ERR(upperfile)) return PTR_ERR(upperfile); old_cred = ovl_override_creds(file_inode(file)->i_sb); ret = vfs_fsync_range(upperfile, start, end, datasync); ovl_revert_creds(old_cred); return ret; } static int ovl_mmap(struct file *file, struct vm_area_struct *vma) { struct ovl_file *of = file->private_data; struct backing_file_ctx ctx = { .cred = ovl_creds(file_inode(file)->i_sb), .accessed = ovl_file_accessed, }; return backing_file_mmap(of->realfile, vma, &ctx); } static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len) { struct inode *inode = file_inode(file); struct file *realfile; const struct cred *old_cred; int ret; inode_lock(inode); /* Update mode */ ovl_copyattr(inode); ret = file_remove_privs(file); if (ret) goto out_unlock; realfile = ovl_real_file(file); ret = PTR_ERR(realfile); if (IS_ERR(realfile)) goto out_unlock; old_cred = ovl_override_creds(file_inode(file)->i_sb); ret = vfs_fallocate(realfile, mode, offset, len); ovl_revert_creds(old_cred); /* Update size */ ovl_file_modified(file); out_unlock: inode_unlock(inode); return ret; } static int ovl_fadvise(struct file *file, loff_t offset, loff_t len, int advice) { struct file *realfile; const struct cred *old_cred; int ret; realfile = ovl_real_file(file); if (IS_ERR(realfile)) return PTR_ERR(realfile); old_cred = ovl_override_creds(file_inode(file)->i_sb); ret = vfs_fadvise(realfile, offset, len, advice); ovl_revert_creds(old_cred); return ret; } enum ovl_copyop { OVL_COPY, OVL_CLONE, OVL_DEDUPE, }; static loff_t ovl_copyfile(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, loff_t len, unsigned int flags, enum ovl_copyop op) { struct inode *inode_out = file_inode(file_out); struct file *realfile_in, *realfile_out; const struct cred *old_cred; loff_t ret; inode_lock(inode_out); if (op != OVL_DEDUPE) { /* Update mode */ ovl_copyattr(inode_out); ret = file_remove_privs(file_out); if (ret) goto out_unlock; } realfile_out = ovl_real_file(file_out); ret = PTR_ERR(realfile_out); if (IS_ERR(realfile_out)) goto out_unlock; realfile_in = ovl_real_file(file_in); ret = PTR_ERR(realfile_in); if (IS_ERR(realfile_in)) goto out_unlock; old_cred = ovl_override_creds(file_inode(file_out)->i_sb); switch (op) { case OVL_COPY: ret = vfs_copy_file_range(realfile_in, pos_in, 
realfile_out, pos_out, len, flags); break; case OVL_CLONE: ret = vfs_clone_file_range(realfile_in, pos_in, realfile_out, pos_out, len, flags); break; case OVL_DEDUPE: ret = vfs_dedupe_file_range_one(realfile_in, pos_in, realfile_out, pos_out, len, flags); break; } ovl_revert_creds(old_cred); /* Update size */ ovl_file_modified(file_out); out_unlock: inode_unlock(inode_out); return ret; } static ssize_t ovl_copy_file_range(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, size_t len, unsigned int flags) { return ovl_copyfile(file_in, pos_in, file_out, pos_out, len, flags, OVL_COPY); } static loff_t ovl_remap_file_range(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, loff_t len, unsigned int remap_flags) { enum ovl_copyop op; if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY)) return -EINVAL; if (remap_flags & REMAP_FILE_DEDUP) op = OVL_DEDUPE; else op = OVL_CLONE; /* * Don't copy up because of a dedupe request, this wouldn't make sense * most of the time (data would be duplicated instead of deduplicated). */ if (op == OVL_DEDUPE && (!ovl_inode_upper(file_inode(file_in)) || !ovl_inode_upper(file_inode(file_out)))) return -EPERM; return ovl_copyfile(file_in, pos_in, file_out, pos_out, len, remap_flags, op); } static int ovl_flush(struct file *file, fl_owner_t id) { struct file *realfile; const struct cred *old_cred; int err = 0; realfile = ovl_real_file(file); if (IS_ERR(realfile)) return PTR_ERR(realfile); if (realfile->f_op->flush) { old_cred = ovl_override_creds(file_inode(file)->i_sb); err = realfile->f_op->flush(realfile, id); ovl_revert_creds(old_cred); } return err; } const struct file_operations ovl_file_operations = { .open = ovl_open, .release = ovl_release, .llseek = ovl_llseek, .read_iter = ovl_read_iter, .write_iter = ovl_write_iter, .fsync = ovl_fsync, .mmap = ovl_mmap, .fallocate = ovl_fallocate, .fadvise = ovl_fadvise, .flush = ovl_flush, .splice_read = ovl_splice_read, .splice_write = ovl_splice_write, .copy_file_range = ovl_copy_file_range, .remap_file_range = ovl_remap_file_range, };
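The lazy caching of of->upperfile in ovl_real_file_path() above follows a common lock-free publish pattern: every racer opens its own file, exactly one pointer wins the compare-and-swap, and the losers drop their copy and adopt the winner's. The userspace sketch below is an illustration only, not kernel code; the struct and function names are made up, and C11 atomics stand in for cmpxchg_release()/fput().

/* --- illustration only, not part of the file above --- */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the real file object cached in struct ovl_file. */
struct backing { int id; };

struct cached_file {
	_Atomic(struct backing *) upper;	/* NULL until first use */
};

static struct backing *open_backing(int id)
{
	struct backing *b = malloc(sizeof(*b));

	b->id = id;
	return b;
}

/*
 * Return the cached upper object, opening and publishing it on first use.
 * If two callers race, exactly one pointer is published; the loser frees
 * its own copy and uses the winner's, mirroring the fput() of the losing
 * file in ovl_real_file_path().
 */
static struct backing *get_upper(struct cached_file *cf, int id)
{
	struct backing *cur = atomic_load_explicit(&cf->upper, memory_order_acquire);
	struct backing *expected = NULL;
	struct backing *mine;

	if (cur)
		return cur;

	mine = open_backing(id);
	if (atomic_compare_exchange_strong_explicit(&cf->upper, &expected, mine,
						    memory_order_acq_rel,
						    memory_order_acquire))
		return mine;		/* we published our copy */

	free(mine);			/* someone beat us to it */
	return expected;		/* CAS wrote the winner into 'expected' */
}

int main(void)
{
	struct cached_file cf = { .upper = NULL };

	printf("first call: id %d\n", get_upper(&cf, 1)->id);
	printf("second call reuses it: id %d\n", get_upper(&cf, 1)->id);
	free(atomic_load(&cf.upper));
	return 0;
}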
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2010: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 * Copyright (C) 2015: Linus Lüssing <linus.luessing@c0d3.blue>
 *
 * Based on the MLD support added to br_multicast.c by YOSHIFUJI Hideaki.
 */

#include <linux/skbuff.h>
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/addrconf.h>
#include <net/ip6_checksum.h>

static int ipv6_mc_check_ip6hdr(struct sk_buff *skb)
{
	const struct ipv6hdr *ip6h;
	unsigned int len;
	unsigned int offset = skb_network_offset(skb) + sizeof(*ip6h);

	if (!pskb_may_pull(skb, offset))
		return -EINVAL;

	ip6h = ipv6_hdr(skb);

	if (ip6h->version != 6)
		return -EINVAL;

	len = offset + ntohs(ip6h->payload_len);
	if (skb->len < len || len <= offset)
		return -EINVAL;

	skb_set_transport_header(skb, offset);

	return 0;
}

static int ipv6_mc_check_exthdrs(struct sk_buff *skb)
{
	const struct ipv6hdr *ip6h;
	int offset;
	u8 nexthdr;
	__be16 frag_off;

	ip6h = ipv6_hdr(skb);

	if (ip6h->nexthdr != IPPROTO_HOPOPTS)
		return -ENOMSG;

	nexthdr = ip6h->nexthdr;
	offset = skb_network_offset(skb) + sizeof(*ip6h);
	offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);

	if (offset < 0)
		return -EINVAL;

	if (nexthdr != IPPROTO_ICMPV6)
		return -ENOMSG;

	skb_set_transport_header(skb, offset);

	return 0;
}

static int ipv6_mc_check_mld_reportv2(struct sk_buff *skb)
{
	unsigned int len = skb_transport_offset(skb);

	len += sizeof(struct mld2_report);

	return ipv6_mc_may_pull(skb, len) ? 0 : -EINVAL;
}

static int ipv6_mc_check_mld_query(struct sk_buff *skb)
{
	unsigned int transport_len = ipv6_transport_len(skb);
	struct mld_msg *mld;
	unsigned int len;

	/* RFC2710+RFC3810 (MLDv1+MLDv2) require link-local source addresses */
	if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL))
		return -EINVAL;

	/* MLDv1? */
	if (transport_len != sizeof(struct mld_msg)) {
		/* or MLDv2? */
		if (transport_len < sizeof(struct mld2_query))
			return -EINVAL;

		len = skb_transport_offset(skb) + sizeof(struct mld2_query);
		if (!ipv6_mc_may_pull(skb, len))
			return -EINVAL;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);

	/* RFC2710+RFC3810 (MLDv1+MLDv2) require the multicast link layer
	 * all-nodes destination address (ff02::1) for general queries
	 */
	if (ipv6_addr_any(&mld->mld_mca) &&
	    !ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
		return -EINVAL;

	return 0;
}

static int ipv6_mc_check_mld_msg(struct sk_buff *skb)
{
	unsigned int len = skb_transport_offset(skb) + sizeof(struct mld_msg);
	struct mld_msg *mld;

	if (!ipv6_mc_may_pull(skb, len))
		return -ENODATA;

	mld = (struct mld_msg *)skb_transport_header(skb);

	switch (mld->mld_type) {
	case ICMPV6_MGM_REDUCTION:
	case ICMPV6_MGM_REPORT:
		return 0;
	case ICMPV6_MLD2_REPORT:
		return ipv6_mc_check_mld_reportv2(skb);
	case ICMPV6_MGM_QUERY:
		return ipv6_mc_check_mld_query(skb);
	default:
		return -ENODATA;
	}
}

static inline __sum16 ipv6_mc_validate_checksum(struct sk_buff *skb)
{
	return skb_checksum_validate(skb, IPPROTO_ICMPV6, ip6_compute_pseudo);
}

static int ipv6_mc_check_icmpv6(struct sk_buff *skb)
{
	unsigned int len = skb_transport_offset(skb) + sizeof(struct icmp6hdr);
	unsigned int transport_len = ipv6_transport_len(skb);
	struct sk_buff *skb_chk;

	if (!ipv6_mc_may_pull(skb, len))
		return -EINVAL;

	skb_chk = skb_checksum_trimmed(skb, transport_len,
				       ipv6_mc_validate_checksum);
	if (!skb_chk)
		return -EINVAL;

	if (skb_chk != skb)
		kfree_skb(skb_chk);

	return 0;
}

/**
 * ipv6_mc_check_mld - checks whether this is a sane MLD packet
 * @skb: the skb to validate
 *
 * Checks whether an IPv6 packet is a valid MLD packet. If so sets
 * skb transport header accordingly and returns zero.
 *
 * -EINVAL: A broken packet was detected, i.e. it violates some internet
 *  standard
 * -ENOMSG: IP header validation succeeded but it is not an ICMPv6 packet
 *  with a hop-by-hop option.
 * -ENODATA: IP+ICMPv6 header with hop-by-hop option validation succeeded
 *  but it is not an MLD packet.
 * -ENOMEM: A memory allocation failure happened.
 *
 * Caller needs to set the skb network header and free any returned skb if it
 * differs from the provided skb.
 */
int ipv6_mc_check_mld(struct sk_buff *skb)
{
	int ret;

	ret = ipv6_mc_check_ip6hdr(skb);
	if (ret < 0)
		return ret;

	ret = ipv6_mc_check_exthdrs(skb);
	if (ret < 0)
		return ret;

	ret = ipv6_mc_check_icmpv6(skb);
	if (ret < 0)
		return ret;

	return ipv6_mc_check_mld_msg(skb);
}
EXPORT_SYMBOL(ipv6_mc_check_mld);
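ipv6_mc_check_ip6hdr() above boils down to two sanity checks on the fixed IPv6 header: the version nibble must be 6, and the payload length advertised by the header must fit inside the buffer that was actually received. Below is a minimal userspace sketch of the same arithmetic on a raw byte buffer; the helper name is hypothetical and there is no sk_buff involved, it only mirrors that one function's checks.

/* --- illustration only, not part of the file above --- */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohs() */

#define IPV6_HDR_LEN 40

/*
 * Return 0 if 'buf' starts with a plausible IPv6 header whose payload
 * length fits in 'len' bytes, -1 otherwise. Mirrors ipv6_mc_check_ip6hdr():
 * the version nibble check and the payload_len vs. skb->len check.
 */
static int check_ip6hdr(const uint8_t *buf, size_t len)
{
	uint16_t payload_len;

	if (len < IPV6_HDR_LEN)		/* pskb_may_pull() equivalent */
		return -1;

	if ((buf[0] >> 4) != 6)		/* version nibble */
		return -1;

	memcpy(&payload_len, buf + 4, sizeof(payload_len));
	payload_len = ntohs(payload_len);

	if (payload_len == 0 || len < (size_t)IPV6_HDR_LEN + payload_len)
		return -1;		/* empty or truncated payload */

	return 0;
}

int main(void)
{
	uint8_t pkt[IPV6_HDR_LEN + 8] = { 0x60 };	/* version 6, rest zeroed */

	pkt[5] = 8;	/* payload_len = 8, network byte order */
	printf("valid: %d\n", check_ip6hdr(pkt, sizeof(pkt)));
	printf("truncated: %d\n", check_ip6hdr(pkt, IPV6_HDR_LEN + 4));
	return 0;
}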
// SPDX-License-Identifier: GPL-2.0

#include "messages.h"
#include "tree-mod-log.h"
#include "disk-io.h"
#include "fs.h"
#include "accessors.h"
#include "tree-checker.h"

struct tree_mod_root {
	u64 logical;
	u8 level;
};

struct tree_mod_elem {
	struct rb_node node;
	u64 logical;
	u64 seq;
	enum btrfs_mod_log_op op;

	/*
	 * This is used for BTRFS_MOD_LOG_KEY_* and BTRFS_MOD_LOG_MOVE_KEYS
	 * operations.
	 */
	int slot;

	/* This is used for BTRFS_MOD_LOG_KEY* and BTRFS_MOD_LOG_ROOT_REPLACE. */
	u64 generation;

	/* Those are used for op == BTRFS_MOD_LOG_KEY_{REPLACE,REMOVE}. */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* This is used for op == BTRFS_MOD_LOG_MOVE_KEYS. */
	struct {
		int dst_slot;
		int nr_items;
	} move;

	/* This is used for op == BTRFS_MOD_LOG_ROOT_REPLACE. */
	struct tree_mod_root old_root;
};

/*
 * Pull a new tree mod seq number for our operation.
 */
static u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
{
	return atomic64_inc_return(&fs_info->tree_mod_seq);
}

/*
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set. So when a caller expects
 * to record tree modifications, it should ensure to set elem->seq to zero
 * before calling btrfs_get_tree_mod_seq.
 * Returns a fresh, unused tree log modification sequence number, even if no new
 * blocker was added.
 */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct btrfs_seq_list *elem)
{
	write_lock(&fs_info->tree_mod_log_lock);
	if (!elem->seq) {
		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
		set_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags);
	}
	write_unlock(&fs_info->tree_mod_log_lock);

	return elem->seq;
}

void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct tree_mod_elem *tm;
	u64 min_seq = BTRFS_SEQ_LAST;
	u64 seq_putting = elem->seq;

	if (!seq_putting)
		return;

	write_lock(&fs_info->tree_mod_log_lock);
	list_del(&elem->list);
	elem->seq = 0;

	if (list_empty(&fs_info->tree_mod_seq_list)) {
		clear_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags);
	} else {
		struct btrfs_seq_list *first;

		first = list_first_entry(&fs_info->tree_mod_seq_list,
					 struct btrfs_seq_list, list);
		if (seq_putting > first->seq) {
			/*
			 * Blocker with lower sequence number exists, we cannot
			 * remove anything from the log.
*/ write_unlock(&fs_info->tree_mod_log_lock); return; } min_seq = first->seq; } /* * Anything that's lower than the lowest existing (read: blocked) * sequence number can be removed from the tree. */ tm_root = &fs_info->tree_mod_log; for (node = rb_first(tm_root); node; node = next) { next = rb_next(node); tm = rb_entry(node, struct tree_mod_elem, node); if (tm->seq >= min_seq) continue; rb_erase(node, tm_root); kfree(tm); } write_unlock(&fs_info->tree_mod_log_lock); } /* * Key order of the log: * node/leaf start address -> sequence * * The 'start address' is the logical address of the *new* root node for root * replace operations, or the logical address of the affected block for all * other operations. */ static noinline int tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm) { struct rb_root *tm_root; struct rb_node **new; struct rb_node *parent = NULL; struct tree_mod_elem *cur; lockdep_assert_held_write(&fs_info->tree_mod_log_lock); tm->seq = btrfs_inc_tree_mod_seq(fs_info); tm_root = &fs_info->tree_mod_log; new = &tm_root->rb_node; while (*new) { cur = rb_entry(*new, struct tree_mod_elem, node); parent = *new; if (cur->logical < tm->logical) new = &((*new)->rb_left); else if (cur->logical > tm->logical) new = &((*new)->rb_right); else if (cur->seq < tm->seq) new = &((*new)->rb_left); else if (cur->seq > tm->seq) new = &((*new)->rb_right); else return -EEXIST; } rb_link_node(&tm->node, parent, new); rb_insert_color(&tm->node, tm_root); return 0; } /* * Determines if logging can be omitted. Returns true if it can. Otherwise, it * returns false with the tree_mod_log_lock acquired. The caller must hold * this until all tree mod log insertions are recorded in the rb tree and then * write unlock fs_info::tree_mod_log_lock. */ static bool tree_mod_dont_log(struct btrfs_fs_info *fs_info, const struct extent_buffer *eb) { if (!test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags)) return true; if (eb && btrfs_header_level(eb) == 0) return true; write_lock(&fs_info->tree_mod_log_lock); if (list_empty(&(fs_info)->tree_mod_seq_list)) { write_unlock(&fs_info->tree_mod_log_lock); return true; } return false; } /* Similar to tree_mod_dont_log, but doesn't acquire any locks. */ static bool tree_mod_need_log(const struct btrfs_fs_info *fs_info, const struct extent_buffer *eb) { if (!test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags)) return false; if (eb && btrfs_header_level(eb) == 0) return false; return true; } static struct tree_mod_elem *alloc_tree_mod_elem(const struct extent_buffer *eb, int slot, enum btrfs_mod_log_op op) { struct tree_mod_elem *tm; tm = kzalloc(sizeof(*tm), GFP_NOFS); if (!tm) return NULL; tm->logical = eb->start; if (op != BTRFS_MOD_LOG_KEY_ADD) { btrfs_node_key(eb, &tm->key, slot); tm->blockptr = btrfs_node_blockptr(eb, slot); } tm->op = op; tm->slot = slot; tm->generation = btrfs_node_ptr_generation(eb, slot); RB_CLEAR_NODE(&tm->node); return tm; } int btrfs_tree_mod_log_insert_key(const struct extent_buffer *eb, int slot, enum btrfs_mod_log_op op) { struct tree_mod_elem *tm; int ret = 0; if (!tree_mod_need_log(eb->fs_info, eb)) return 0; tm = alloc_tree_mod_elem(eb, slot, op); if (!tm) ret = -ENOMEM; if (tree_mod_dont_log(eb->fs_info, eb)) { kfree(tm); /* * Don't error if we failed to allocate memory because we don't * need to log. */ return 0; } else if (ret != 0) { /* * We previously failed to allocate memory and we need to log, * so we have to fail. 
*/ goto out_unlock; } ret = tree_mod_log_insert(eb->fs_info, tm); out_unlock: write_unlock(&eb->fs_info->tree_mod_log_lock); if (ret) kfree(tm); return ret; } static struct tree_mod_elem *tree_mod_log_alloc_move(const struct extent_buffer *eb, int dst_slot, int src_slot, int nr_items) { struct tree_mod_elem *tm; tm = kzalloc(sizeof(*tm), GFP_NOFS); if (!tm) return ERR_PTR(-ENOMEM); tm->logical = eb->start; tm->slot = src_slot; tm->move.dst_slot = dst_slot; tm->move.nr_items = nr_items; tm->op = BTRFS_MOD_LOG_MOVE_KEYS; RB_CLEAR_NODE(&tm->node); return tm; } int btrfs_tree_mod_log_insert_move(const struct extent_buffer *eb, int dst_slot, int src_slot, int nr_items) { struct tree_mod_elem *tm = NULL; struct tree_mod_elem **tm_list = NULL; int ret = 0; int i; bool locked = false; if (!tree_mod_need_log(eb->fs_info, eb)) return 0; tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), GFP_NOFS); if (!tm_list) { ret = -ENOMEM; goto lock; } tm = tree_mod_log_alloc_move(eb, dst_slot, src_slot, nr_items); if (IS_ERR(tm)) { ret = PTR_ERR(tm); tm = NULL; goto lock; } for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) { tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot, BTRFS_MOD_LOG_KEY_REMOVE_WHILE_MOVING); if (!tm_list[i]) { ret = -ENOMEM; goto lock; } } lock: if (tree_mod_dont_log(eb->fs_info, eb)) { /* * Don't error if we failed to allocate memory because we don't * need to log. */ ret = 0; goto free_tms; } locked = true; /* * We previously failed to allocate memory and we need to log, so we * have to fail. */ if (ret != 0) goto free_tms; /* * When we override something during the move, we log these removals. * This can only happen when we move towards the beginning of the * buffer, i.e. dst_slot < src_slot. */ for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) { ret = tree_mod_log_insert(eb->fs_info, tm_list[i]); if (ret) goto free_tms; } ret = tree_mod_log_insert(eb->fs_info, tm); if (ret) goto free_tms; write_unlock(&eb->fs_info->tree_mod_log_lock); kfree(tm_list); return 0; free_tms: if (tm_list) { for (i = 0; i < nr_items; i++) { if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node)) rb_erase(&tm_list[i]->node, &eb->fs_info->tree_mod_log); kfree(tm_list[i]); } } if (locked) write_unlock(&eb->fs_info->tree_mod_log_lock); kfree(tm_list); kfree(tm); return ret; } static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct tree_mod_elem **tm_list, int nritems) { int i, j; int ret; for (i = nritems - 1; i >= 0; i--) { ret = tree_mod_log_insert(fs_info, tm_list[i]); if (ret) { for (j = nritems - 1; j > i; j--) rb_erase(&tm_list[j]->node, &fs_info->tree_mod_log); return ret; } } return 0; } int btrfs_tree_mod_log_insert_root(struct extent_buffer *old_root, struct extent_buffer *new_root, bool log_removal) { struct btrfs_fs_info *fs_info = old_root->fs_info; struct tree_mod_elem *tm = NULL; struct tree_mod_elem **tm_list = NULL; int nritems = 0; int ret = 0; int i; if (!tree_mod_need_log(fs_info, NULL)) return 0; if (log_removal && btrfs_header_level(old_root) > 0) { nritems = btrfs_header_nritems(old_root); tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS); if (!tm_list) { ret = -ENOMEM; goto lock; } for (i = 0; i < nritems; i++) { tm_list[i] = alloc_tree_mod_elem(old_root, i, BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING); if (!tm_list[i]) { ret = -ENOMEM; goto lock; } } } tm = kzalloc(sizeof(*tm), GFP_NOFS); if (!tm) { ret = -ENOMEM; goto lock; } tm->logical = new_root->start; tm->old_root.logical = old_root->start; tm->old_root.level = 
btrfs_header_level(old_root); tm->generation = btrfs_header_generation(old_root); tm->op = BTRFS_MOD_LOG_ROOT_REPLACE; lock: if (tree_mod_dont_log(fs_info, NULL)) { /* * Don't error if we failed to allocate memory because we don't * need to log. */ ret = 0; goto free_tms; } else if (ret != 0) { /* * We previously failed to allocate memory and we need to log, * so we have to fail. */ goto out_unlock; } if (tm_list) ret = tree_mod_log_free_eb(fs_info, tm_list, nritems); if (!ret) ret = tree_mod_log_insert(fs_info, tm); out_unlock: write_unlock(&fs_info->tree_mod_log_lock); if (ret) goto free_tms; kfree(tm_list); return ret; free_tms: if (tm_list) { for (i = 0; i < nritems; i++) kfree(tm_list[i]); kfree(tm_list); } kfree(tm); return ret; } static struct tree_mod_elem *__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq, bool smallest) { struct rb_root *tm_root; struct rb_node *node; struct tree_mod_elem *cur = NULL; struct tree_mod_elem *found = NULL; read_lock(&fs_info->tree_mod_log_lock); tm_root = &fs_info->tree_mod_log; node = tm_root->rb_node; while (node) { cur = rb_entry(node, struct tree_mod_elem, node); if (cur->logical < start) { node = node->rb_left; } else if (cur->logical > start) { node = node->rb_right; } else if (cur->seq < min_seq) { node = node->rb_left; } else if (!smallest) { /* We want the node with the highest seq */ if (found) BUG_ON(found->seq > cur->seq); found = cur; node = node->rb_left; } else if (cur->seq > min_seq) { /* We want the node with the smallest seq */ if (found) BUG_ON(found->seq < cur->seq); found = cur; node = node->rb_right; } else { found = cur; break; } } read_unlock(&fs_info->tree_mod_log_lock); return found; } /* * This returns the element from the log with the smallest time sequence * value that's in the log (the oldest log item). Any element with a time * sequence lower than min_seq will be ignored. */ static struct tree_mod_elem *tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq) { return __tree_mod_log_search(fs_info, start, min_seq, true); } /* * This returns the element from the log with the largest time sequence * value that's in the log (the most recent log item). Any element with * a time sequence lower than min_seq will be ignored. 
*/ static struct tree_mod_elem *tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq) { return __tree_mod_log_search(fs_info, start, min_seq, false); } int btrfs_tree_mod_log_eb_copy(struct extent_buffer *dst, const struct extent_buffer *src, unsigned long dst_offset, unsigned long src_offset, int nr_items) { struct btrfs_fs_info *fs_info = dst->fs_info; int ret = 0; struct tree_mod_elem **tm_list = NULL; struct tree_mod_elem **tm_list_add = NULL; struct tree_mod_elem **tm_list_rem = NULL; int i; bool locked = false; struct tree_mod_elem *dst_move_tm = NULL; struct tree_mod_elem *src_move_tm = NULL; u32 dst_move_nr_items = btrfs_header_nritems(dst) - dst_offset; u32 src_move_nr_items = btrfs_header_nritems(src) - (src_offset + nr_items); if (!tree_mod_need_log(fs_info, NULL)) return 0; if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0) return 0; tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *), GFP_NOFS); if (!tm_list) { ret = -ENOMEM; goto lock; } if (dst_move_nr_items) { dst_move_tm = tree_mod_log_alloc_move(dst, dst_offset + nr_items, dst_offset, dst_move_nr_items); if (IS_ERR(dst_move_tm)) { ret = PTR_ERR(dst_move_tm); dst_move_tm = NULL; goto lock; } } if (src_move_nr_items) { src_move_tm = tree_mod_log_alloc_move(src, src_offset, src_offset + nr_items, src_move_nr_items); if (IS_ERR(src_move_tm)) { ret = PTR_ERR(src_move_tm); src_move_tm = NULL; goto lock; } } tm_list_add = tm_list; tm_list_rem = tm_list + nr_items; for (i = 0; i < nr_items; i++) { tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset, BTRFS_MOD_LOG_KEY_REMOVE); if (!tm_list_rem[i]) { ret = -ENOMEM; goto lock; } tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset, BTRFS_MOD_LOG_KEY_ADD); if (!tm_list_add[i]) { ret = -ENOMEM; goto lock; } } lock: if (tree_mod_dont_log(fs_info, NULL)) { /* * Don't error if we failed to allocate memory because we don't * need to log. */ ret = 0; goto free_tms; } locked = true; /* * We previously failed to allocate memory and we need to log, so we * have to fail. 
*/ if (ret != 0) goto free_tms; if (dst_move_tm) { ret = tree_mod_log_insert(fs_info, dst_move_tm); if (ret) goto free_tms; } for (i = 0; i < nr_items; i++) { ret = tree_mod_log_insert(fs_info, tm_list_rem[i]); if (ret) goto free_tms; ret = tree_mod_log_insert(fs_info, tm_list_add[i]); if (ret) goto free_tms; } if (src_move_tm) { ret = tree_mod_log_insert(fs_info, src_move_tm); if (ret) goto free_tms; } write_unlock(&fs_info->tree_mod_log_lock); kfree(tm_list); return 0; free_tms: if (dst_move_tm && !RB_EMPTY_NODE(&dst_move_tm->node)) rb_erase(&dst_move_tm->node, &fs_info->tree_mod_log); kfree(dst_move_tm); if (src_move_tm && !RB_EMPTY_NODE(&src_move_tm->node)) rb_erase(&src_move_tm->node, &fs_info->tree_mod_log); kfree(src_move_tm); if (tm_list) { for (i = 0; i < nr_items * 2; i++) { if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node)) rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log); kfree(tm_list[i]); } } if (locked) write_unlock(&fs_info->tree_mod_log_lock); kfree(tm_list); return ret; } int btrfs_tree_mod_log_free_eb(struct extent_buffer *eb) { struct tree_mod_elem **tm_list = NULL; int nritems = 0; int i; int ret = 0; if (!tree_mod_need_log(eb->fs_info, eb)) return 0; nritems = btrfs_header_nritems(eb); tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS); if (!tm_list) { ret = -ENOMEM; goto lock; } for (i = 0; i < nritems; i++) { tm_list[i] = alloc_tree_mod_elem(eb, i, BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING); if (!tm_list[i]) { ret = -ENOMEM; goto lock; } } lock: if (tree_mod_dont_log(eb->fs_info, eb)) { /* * Don't error if we failed to allocate memory because we don't * need to log. */ ret = 0; goto free_tms; } else if (ret != 0) { /* * We previously failed to allocate memory and we need to log, * so we have to fail. */ goto out_unlock; } ret = tree_mod_log_free_eb(eb->fs_info, tm_list, nritems); out_unlock: write_unlock(&eb->fs_info->tree_mod_log_lock); if (ret) goto free_tms; kfree(tm_list); return 0; free_tms: if (tm_list) { for (i = 0; i < nritems; i++) kfree(tm_list[i]); kfree(tm_list); } return ret; } /* * Returns the logical address of the oldest predecessor of the given root. * Entries older than time_seq are ignored. */ static struct tree_mod_elem *tree_mod_log_oldest_root(struct extent_buffer *eb_root, u64 time_seq) { struct tree_mod_elem *tm; struct tree_mod_elem *found = NULL; u64 root_logical = eb_root->start; bool looped = false; if (!time_seq) return NULL; /* * The very last operation that's logged for a root is the replacement * operation (if it is replaced at all). This has the logical address * of the *new* root, making it the very first operation that's logged * for this root. */ while (1) { tm = tree_mod_log_search_oldest(eb_root->fs_info, root_logical, time_seq); if (!looped && !tm) return NULL; /* * If there are no tree operation for the oldest root, we simply * return it. This should only happen if that (old) root is at * level 0. */ if (!tm) break; /* * If there's an operation that's not a root replacement, we * found the oldest version of our root. Normally, we'll find a * BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here. */ if (tm->op != BTRFS_MOD_LOG_ROOT_REPLACE) break; found = tm; root_logical = tm->old_root.logical; looped = true; } /* If there's no old root to return, return what we found instead */ if (!found) found = tm; return found; } /* * tm is a pointer to the first operation to rewind within eb. Then, all * previous operations will be rewound (until we reach something older than * time_seq). 
*/ static void tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, u64 time_seq, struct tree_mod_elem *first_tm) { u32 n; struct rb_node *next; struct tree_mod_elem *tm = first_tm; unsigned long o_dst; unsigned long o_src; unsigned long p_size = sizeof(struct btrfs_key_ptr); /* * max_slot tracks the maximum valid slot of the rewind eb at every * step of the rewind. This is in contrast with 'n' which eventually * matches the number of items, but can be wrong during moves or if * removes overlap on already valid slots (which is probably separately * a bug). We do this to validate the offsets of memmoves for rewinding * moves and detect invalid memmoves. * * Since a rewind eb can start empty, max_slot is a signed integer with * a special meaning for -1, which is that no slot is valid to move out * of. Any other negative value is invalid. */ int max_slot; int move_src_end_slot; int move_dst_end_slot; n = btrfs_header_nritems(eb); max_slot = n - 1; read_lock(&fs_info->tree_mod_log_lock); while (tm && tm->seq >= time_seq) { ASSERT(max_slot >= -1); /* * All the operations are recorded with the operator used for * the modification. As we're going backwards, we do the * opposite of each operation here. */ switch (tm->op) { case BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING: BUG_ON(tm->slot < n); fallthrough; case BTRFS_MOD_LOG_KEY_REMOVE_WHILE_MOVING: case BTRFS_MOD_LOG_KEY_REMOVE: btrfs_set_node_key(eb, &tm->key, tm->slot); btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr); btrfs_set_node_ptr_generation(eb, tm->slot, tm->generation); n++; if (tm->slot > max_slot) max_slot = tm->slot; break; case BTRFS_MOD_LOG_KEY_REPLACE: BUG_ON(tm->slot >= n); btrfs_set_node_key(eb, &tm->key, tm->slot); btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr); btrfs_set_node_ptr_generation(eb, tm->slot, tm->generation); break; case BTRFS_MOD_LOG_KEY_ADD: /* * It is possible we could have already removed keys * behind the known max slot, so this will be an * overestimate. In practice, the copy operation * inserts them in increasing order, and overestimating * just means we miss some warnings, so it's OK. It * isn't worth carefully tracking the full array of * valid slots to check against when moving. */ if (tm->slot == max_slot) max_slot--; /* if a move operation is needed it's in the log */ n--; break; case BTRFS_MOD_LOG_MOVE_KEYS: ASSERT(tm->move.nr_items > 0); move_src_end_slot = tm->move.dst_slot + tm->move.nr_items - 1; move_dst_end_slot = tm->slot + tm->move.nr_items - 1; o_dst = btrfs_node_key_ptr_offset(eb, tm->slot); o_src = btrfs_node_key_ptr_offset(eb, tm->move.dst_slot); if (WARN_ON(move_src_end_slot > max_slot || tm->move.nr_items <= 0)) { btrfs_warn(fs_info, "move from invalid tree mod log slot eb %llu slot %d dst_slot %d nr_items %d seq %llu n %u max_slot %d", eb->start, tm->slot, tm->move.dst_slot, tm->move.nr_items, tm->seq, n, max_slot); } memmove_extent_buffer(eb, o_dst, o_src, tm->move.nr_items * p_size); max_slot = move_dst_end_slot; break; case BTRFS_MOD_LOG_ROOT_REPLACE: /* * This operation is special. For roots, this must be * handled explicitly before rewinding. * For non-roots, this operation may exist if the node * was a root: root A -> child B; then A gets empty and * B is promoted to the new root. In the mod log, we'll * have a root-replace operation for B, a tree block * that is no root. We simply ignore that operation. 
*/ break; } next = rb_next(&tm->node); if (!next) break; tm = rb_entry(next, struct tree_mod_elem, node); if (tm->logical != first_tm->logical) break; } read_unlock(&fs_info->tree_mod_log_lock); btrfs_set_header_nritems(eb, n); } /* * Called with eb read locked. If the buffer cannot be rewound, the same buffer * is returned. If rewind operations happen, a fresh buffer is returned. The * returned buffer is always read-locked. If the returned buffer is not the * input buffer, the lock on the input buffer is released and the input buffer * is freed (its refcount is decremented). */ struct extent_buffer *btrfs_tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, u64 time_seq) { struct extent_buffer *eb_rewin; struct tree_mod_elem *tm; if (!time_seq) return eb; if (btrfs_header_level(eb) == 0) return eb; tm = tree_mod_log_search(fs_info, eb->start, time_seq); if (!tm) return eb; if (tm->op == BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING) { BUG_ON(tm->slot != 0); eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start); if (!eb_rewin) { btrfs_tree_read_unlock(eb); free_extent_buffer(eb); return NULL; } btrfs_set_header_bytenr(eb_rewin, eb->start); btrfs_set_header_backref_rev(eb_rewin, btrfs_header_backref_rev(eb)); btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb)); btrfs_set_header_level(eb_rewin, btrfs_header_level(eb)); } else { eb_rewin = btrfs_clone_extent_buffer(eb); if (!eb_rewin) { btrfs_tree_read_unlock(eb); free_extent_buffer(eb); return NULL; } } btrfs_tree_read_unlock(eb); free_extent_buffer(eb); btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb_rewin), eb_rewin, btrfs_header_level(eb_rewin)); btrfs_tree_read_lock(eb_rewin); tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm); WARN_ON(btrfs_header_nritems(eb_rewin) > BTRFS_NODEPTRS_PER_BLOCK(fs_info)); return eb_rewin; } /* * Rewind the state of @root's root node to the given @time_seq value. * If there are no changes, the current root->root_node is returned. If anything * changed in between, there's a fresh buffer allocated on which the rewind * operations are done. In any case, the returned buffer is read locked. * Returns NULL on error (with no locks held). 
*/ struct extent_buffer *btrfs_get_old_root(struct btrfs_root *root, u64 time_seq) { struct btrfs_fs_info *fs_info = root->fs_info; struct tree_mod_elem *tm; struct extent_buffer *eb = NULL; struct extent_buffer *eb_root; u64 eb_root_owner = 0; struct extent_buffer *old; struct tree_mod_root *old_root = NULL; u64 old_generation = 0; u64 logical; int level; eb_root = btrfs_read_lock_root_node(root); tm = tree_mod_log_oldest_root(eb_root, time_seq); if (!tm) return eb_root; if (tm->op == BTRFS_MOD_LOG_ROOT_REPLACE) { old_root = &tm->old_root; old_generation = tm->generation; logical = old_root->logical; level = old_root->level; } else { logical = eb_root->start; level = btrfs_header_level(eb_root); } tm = tree_mod_log_search(fs_info, logical, time_seq); if (old_root && tm && tm->op != BTRFS_MOD_LOG_KEY_REMOVE_WHILE_FREEING) { struct btrfs_tree_parent_check check = { 0 }; btrfs_tree_read_unlock(eb_root); free_extent_buffer(eb_root); check.level = level; check.owner_root = btrfs_root_id(root); old = read_tree_block(fs_info, logical, &check); if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) { if (!IS_ERR(old)) free_extent_buffer(old); btrfs_warn(fs_info, "failed to read tree block %llu from get_old_root", logical); } else { struct tree_mod_elem *tm2; btrfs_tree_read_lock(old); eb = btrfs_clone_extent_buffer(old); /* * After the lookup for the most recent tree mod operation * above and before we locked and cloned the extent buffer * 'old', a new tree mod log operation may have been added. * So lookup for a more recent one to make sure the number * of mod log operations we replay is consistent with the * number of items we have in the cloned extent buffer, * otherwise we can hit a BUG_ON when rewinding the extent * buffer. */ tm2 = tree_mod_log_search(fs_info, logical, time_seq); btrfs_tree_read_unlock(old); free_extent_buffer(old); ASSERT(tm2); ASSERT(tm2 == tm || tm2->seq > tm->seq); if (!tm2 || tm2->seq < tm->seq) { free_extent_buffer(eb); return NULL; } tm = tm2; } } else if (old_root) { eb_root_owner = btrfs_header_owner(eb_root); btrfs_tree_read_unlock(eb_root); free_extent_buffer(eb_root); eb = alloc_dummy_extent_buffer(fs_info, logical); } else { eb = btrfs_clone_extent_buffer(eb_root); btrfs_tree_read_unlock(eb_root); free_extent_buffer(eb_root); } if (!eb) return NULL; if (old_root) { btrfs_set_header_bytenr(eb, eb->start); btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV); btrfs_set_header_owner(eb, eb_root_owner); btrfs_set_header_level(eb, old_root->level); btrfs_set_header_generation(eb, old_generation); } btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb, btrfs_header_level(eb)); btrfs_tree_read_lock(eb); if (tm) tree_mod_log_rewind(fs_info, eb, time_seq, tm); else WARN_ON(btrfs_header_level(eb) != 0); WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(fs_info)); return eb; } int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq) { struct tree_mod_elem *tm; int level; struct extent_buffer *eb_root = btrfs_root_node(root); tm = tree_mod_log_oldest_root(eb_root, time_seq); if (tm && tm->op == BTRFS_MOD_LOG_ROOT_REPLACE) level = tm->old_root.level; else level = btrfs_header_level(eb_root); free_extent_buffer(eb_root); return level; } /* * Return the lowest sequence number in the tree modification log. * * Return the sequence number of the oldest tree modification log user, which * corresponds to the lowest sequence number of all existing users. If there are * no users it returns 0. 
*/ u64 btrfs_tree_mod_log_lowest_seq(struct btrfs_fs_info *fs_info) { u64 ret = 0; read_lock(&fs_info->tree_mod_log_lock); if (!list_empty(&fs_info->tree_mod_seq_list)) { struct btrfs_seq_list *elem; elem = list_first_entry(&fs_info->tree_mod_seq_list, struct btrfs_seq_list, list); ret = elem->seq; } read_unlock(&fs_info->tree_mod_log_lock); return ret; }
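The lifetime rule implemented by btrfs_put_tree_mod_seq() is the part worth internalizing: a log element may only be freed once its sequence number is lower than the sequence held by every remaining blocker, i.e. lower than the minimum over tree_mod_seq_list. The toy below is plain userspace C with made-up names, arrays in place of the rb-tree and list, and no locking; it only demonstrates that pruning rule.

/* --- illustration only, not part of the file above --- */
#include <stdint.h>
#include <stdio.h>

#define MAX 16

/* Active blockers (readers that may still need old log entries). 0 = unused. */
static uint64_t blockers[MAX];
/* Logged sequence numbers. 0 = slot free. */
static uint64_t log_seqs[MAX];

static uint64_t lowest_blocker(void)
{
	uint64_t min = UINT64_MAX;

	for (int i = 0; i < MAX; i++)
		if (blockers[i] && blockers[i] < min)
			min = blockers[i];
	return min;	/* UINT64_MAX when no blockers remain */
}

/* Drop every log entry whose seq is below the lowest active blocker. */
static void prune_log(void)
{
	uint64_t min = lowest_blocker();

	for (int i = 0; i < MAX; i++)
		if (log_seqs[i] && log_seqs[i] < min)
			log_seqs[i] = 0;
}

static int count_log(void)
{
	int n = 0;

	for (int i = 0; i < MAX; i++)
		if (log_seqs[i])
			n++;
	return n;
}

int main(void)
{
	blockers[0] = 3;			/* reader registered at seq 3 */
	for (int i = 0; i < 6; i++)
		log_seqs[i] = i + 1;		/* log entries with seq 1..6 */

	prune_log();
	printf("entries left with blocker at 3: %d\n", count_log());	/* 4 */

	blockers[0] = 0;			/* reader done, as after btrfs_put_tree_mod_seq() */
	prune_log();
	printf("entries left with no blockers: %d\n", count_log());	/* 0 */
	return 0;
}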
/*
 * linux/fs/hfs/dir.c
 *
 * Copyright (C) 1995-1997  Paul H. Hargrove
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 * This file may be distributed under the terms of the GNU General Public License.
 *
 * This file contains directory-related functions independent of which
 * scheme is being used to represent forks.
 *
 * Based on the minix file system code, (C) 1991, 1992 by Linus Torvalds
 */

#include "hfs_fs.h"
#include "btree.h"

/*
 * hfs_lookup()
 */
static struct dentry *hfs_lookup(struct inode *dir, struct dentry *dentry,
				 unsigned int flags)
{
	hfs_cat_rec rec;
	struct hfs_find_data fd;
	struct inode *inode = NULL;
	int res;

	res = hfs_find_init(HFS_SB(dir->i_sb)->cat_tree, &fd);
	if (res)
		return ERR_PTR(res);
	hfs_cat_build_key(dir->i_sb, fd.search_key, dir->i_ino, &dentry->d_name);
	res = hfs_brec_read(&fd, &rec, sizeof(rec));
	if (res) {
		if (res != -ENOENT)
			inode = ERR_PTR(res);
	} else {
		inode = hfs_iget(dir->i_sb, &fd.search_key->cat, &rec);
		if (!inode)
			inode = ERR_PTR(-EACCES);
	}
	hfs_find_exit(&fd);
	return d_splice_alias(inode, dentry);
}

/*
 * hfs_readdir
 */
static int hfs_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	int len, err;
	char strbuf[HFS_MAX_NAMELEN];
	union hfs_cat_rec entry;
	struct hfs_find_data fd;
	struct hfs_readdir_data *rd;
	u16 type;

	if (ctx->pos >= inode->i_size)
		return 0;

	err = hfs_find_init(HFS_SB(sb)->cat_tree, &fd);
	if (err)
		return err;
	hfs_cat_build_key(sb, fd.search_key, inode->i_ino, NULL);
	err = hfs_brec_find(&fd);
	if (err)
		goto out;

	if (ctx->pos == 0) {
		/* This is completely artificial... */
*/ if (!dir_emit_dot(file, ctx)) goto out; ctx->pos = 1; } if (ctx->pos == 1) { if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) { err = -EIO; goto out; } hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); if (entry.type != HFS_CDR_THD) { pr_err("bad catalog folder thread\n"); err = -EIO; goto out; } //if (fd.entrylength < HFS_MIN_THREAD_SZ) { // pr_err("truncated catalog thread\n"); // err = -EIO; // goto out; //} if (!dir_emit(ctx, "..", 2, be32_to_cpu(entry.thread.ParID), DT_DIR)) goto out; ctx->pos = 2; } if (ctx->pos >= inode->i_size) goto out; err = hfs_brec_goto(&fd, ctx->pos - 1); if (err) goto out; for (;;) { if (be32_to_cpu(fd.key->cat.ParID) != inode->i_ino) { pr_err("walked past end of dir\n"); err = -EIO; goto out; } if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) { err = -EIO; goto out; } hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); type = entry.type; len = hfs_mac2asc(sb, strbuf, &fd.key->cat.CName); if (type == HFS_CDR_DIR) { if (fd.entrylength < sizeof(struct hfs_cat_dir)) { pr_err("small dir entry\n"); err = -EIO; goto out; } if (!dir_emit(ctx, strbuf, len, be32_to_cpu(entry.dir.DirID), DT_DIR)) break; } else if (type == HFS_CDR_FIL) { if (fd.entrylength < sizeof(struct hfs_cat_file)) { pr_err("small file entry\n"); err = -EIO; goto out; } if (!dir_emit(ctx, strbuf, len, be32_to_cpu(entry.file.FlNum), DT_REG)) break; } else { pr_err("bad catalog entry type %d\n", type); err = -EIO; goto out; } ctx->pos++; if (ctx->pos >= inode->i_size) goto out; err = hfs_brec_goto(&fd, 1); if (err) goto out; } rd = file->private_data; if (!rd) { rd = kmalloc(sizeof(struct hfs_readdir_data), GFP_KERNEL); if (!rd) { err = -ENOMEM; goto out; } file->private_data = rd; rd->file = file; spin_lock(&HFS_I(inode)->open_dir_lock); list_add(&rd->list, &HFS_I(inode)->open_dir_list); spin_unlock(&HFS_I(inode)->open_dir_lock); } /* * Can be done after the list insertion; exclusion with * hfs_delete_cat() is provided by directory lock. */ memcpy(&rd->key, &fd.key->cat, sizeof(struct hfs_cat_key)); out: hfs_find_exit(&fd); return err; } static int hfs_dir_release(struct inode *inode, struct file *file) { struct hfs_readdir_data *rd = file->private_data; if (rd) { spin_lock(&HFS_I(inode)->open_dir_lock); list_del(&rd->list); spin_unlock(&HFS_I(inode)->open_dir_lock); kfree(rd); } return 0; } /* * hfs_create() * * This is the create() entry in the inode_operations structure for * regular HFS directories. The purpose is to create a new file in * a directory and return a corresponding inode, given the inode for * the directory and the name (and its length) of the new file. */ static int hfs_create(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { struct inode *inode; int res; inode = hfs_new_inode(dir, &dentry->d_name, mode); if (!inode) return -ENOMEM; res = hfs_cat_create(inode->i_ino, dir, &dentry->d_name, inode); if (res) { clear_nlink(inode); hfs_delete_inode(inode); iput(inode); return res; } d_instantiate(dentry, inode); mark_inode_dirty(inode); return 0; } /* * hfs_mkdir() * * This is the mkdir() entry in the inode_operations structure for * regular HFS directories. The purpose is to create a new directory * in a directory, given the inode for the parent directory and the * name (and its length) of the new directory. 
*/ static struct dentry *hfs_mkdir(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode) { struct inode *inode; int res; inode = hfs_new_inode(dir, &dentry->d_name, S_IFDIR | mode); if (!inode) return ERR_PTR(-ENOMEM); res = hfs_cat_create(inode->i_ino, dir, &dentry->d_name, inode); if (res) { clear_nlink(inode); hfs_delete_inode(inode); iput(inode); return ERR_PTR(res); } d_instantiate(dentry, inode); mark_inode_dirty(inode); return NULL; } /* * hfs_remove() * * This serves as both unlink() and rmdir() in the inode_operations * structure for regular HFS directories. The purpose is to delete * an existing child, given the inode for the parent directory and * the name (and its length) of the existing directory. * * HFS does not have hardlinks, so both rmdir and unlink set the * link count to 0. The only difference is the emptiness check. */ static int hfs_remove(struct inode *dir, struct dentry *dentry) { struct inode *inode = d_inode(dentry); int res; if (S_ISDIR(inode->i_mode) && inode->i_size != 2) return -ENOTEMPTY; res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name); if (res) return res; clear_nlink(inode); inode_set_ctime_current(inode); hfs_delete_inode(inode); mark_inode_dirty(inode); return 0; } /* * hfs_rename() * * This is the rename() entry in the inode_operations structure for * regular HFS directories. The purpose is to rename an existing * file or directory, given the inode for the current directory and * the name (and its length) of the existing file/directory and the * inode for the new directory and the name (and its length) of the * new file/directory. * XXX: how do you handle must_be dir? */ static int hfs_rename(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { int res; if (flags & ~RENAME_NOREPLACE) return -EINVAL; /* Unlink destination if it already exists */ if (d_really_is_positive(new_dentry)) { res = hfs_remove(new_dir, new_dentry); if (res) return res; } res = hfs_cat_move(d_inode(old_dentry)->i_ino, old_dir, &old_dentry->d_name, new_dir, &new_dentry->d_name); if (!res) hfs_cat_build_key(old_dir->i_sb, (btree_key *)&HFS_I(d_inode(old_dentry))->cat_key, new_dir->i_ino, &new_dentry->d_name); return res; } const struct file_operations hfs_dir_operations = { .read = generic_read_dir, .iterate_shared = hfs_readdir, .llseek = generic_file_llseek, .release = hfs_dir_release, }; const struct inode_operations hfs_dir_inode_operations = { .create = hfs_create, .lookup = hfs_lookup, .unlink = hfs_remove, .mkdir = hfs_mkdir, .rmdir = hfs_remove, .rename = hfs_rename, .setattr = hfs_inode_setattr, };
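hfs_readdir() above follows the generic ->iterate_shared contract: emit each entry through dir_emit() and advance ctx->pos only for entries the caller accepted. A minimal, hypothetical sketch of that contract, assuming a filesystem with a single hard-coded entry (example_readdir() and the inode number 42 are made up; dir_emit_dots() is the stock VFS helper for the "." and ".." entries):
static int example_readdir(struct file *file, struct dir_context *ctx)
{
        /* Positions 0 and 1 are "." and ".."; dir_emit_dots() emits both. */
        if (!dir_emit_dots(file, ctx))
                return 0;

        if (ctx->pos == 2) {
                /* dir_emit() returns false once the user buffer is full. */
                if (!dir_emit(ctx, "hello", 5, 42, DT_REG))
                        return 0;
                ctx->pos++;
        }
        return 0;
}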
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_RT_H #define _LINUX_SCHED_RT_H #include <linux/sched.h> struct task_struct; static inline bool rt_prio(int prio) { return unlikely(prio < MAX_RT_PRIO && prio >= MAX_DL_PRIO); } static inline bool rt_or_dl_prio(int prio) { return unlikely(prio < MAX_RT_PRIO); } /* * Returns true if a task has a priority that belongs to RT class. PI-boosted * tasks will return true. Use rt_policy() to ignore PI-boosted tasks. */ static inline bool rt_task(struct task_struct *p) { return rt_prio(p->prio); } /* * Returns true if a task has a priority that belongs to RT or DL classes. * PI-boosted tasks will return true. Use rt_or_dl_task_policy() to ignore * PI-boosted tasks. */ static inline bool rt_or_dl_task(struct task_struct *p) { return rt_or_dl_prio(p->prio); } /* * Returns true if a task has a policy that belongs to RT or DL classes. * PI-boosted tasks will return false. */ static inline bool rt_or_dl_task_policy(struct task_struct *tsk) { int policy = tsk->policy; if (policy == SCHED_FIFO || policy == SCHED_RR) return true; if (policy == SCHED_DEADLINE) return true; return false; } #ifdef CONFIG_RT_MUTEXES extern void rt_mutex_pre_schedule(void); extern void rt_mutex_schedule(void); extern void rt_mutex_post_schedule(void); /* * Must hold either p->pi_lock or task_rq(p)->lock. */ static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *p) { return p->pi_top_task; } extern void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task); extern void rt_mutex_adjust_pi(struct task_struct *p); #else static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task) { return NULL; } # define rt_mutex_adjust_pi(p) do { } while (0) #endif extern void normalize_rt_tasks(void); /* * default timeslice is 100 msecs (used only for SCHED_RR tasks). * Timeslices get refilled after they expire. */ #define RR_TIMESLICE (100 * HZ / 1000) #endif /* _LINUX_SCHED_RT_H */
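A minimal sketch of how these predicates are meant to be used by callers that must not delay real-time work; example_pick_timeout() is hypothetical and uses only names defined in the header above.
static unsigned long example_pick_timeout(struct task_struct *p)
{
        /*
         * rt_or_dl_task() is true for RT and deadline tasks, including
         * PI-boosted ones; give them the short path and let everything
         * else wait a full round-robin timeslice.
         */
        if (rt_or_dl_task(p))
                return 1;
        return RR_TIMESLICE;
}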
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Linux ethernet bridge * * Authors: * Lennert Buytenhek <buytenh@gnu.org> */ #ifndef _BR_PRIVATE_STP_H #define _BR_PRIVATE_STP_H #define BPDU_TYPE_CONFIG 0 #define BPDU_TYPE_TCN 0x80 /* IEEE 802.1D-1998 timer values */ #define BR_MIN_HELLO_TIME (1*HZ) #define BR_MAX_HELLO_TIME (10*HZ) #define BR_MIN_FORWARD_DELAY (2*HZ) #define BR_MAX_FORWARD_DELAY (30*HZ) #define BR_MIN_MAX_AGE (6*HZ) #define BR_MAX_MAX_AGE (40*HZ) #define BR_MIN_PATH_COST 1 #define BR_MAX_PATH_COST 65535 struct br_config_bpdu { unsigned int topology_change:1; unsigned int topology_change_ack:1; bridge_id root; int root_path_cost; bridge_id bridge_id; port_id port_id; int message_age; int max_age; int hello_time; int forward_delay; }; /* called under bridge lock */ static inline int br_is_designated_port(const struct net_bridge_port *p) { return !memcmp(&p->designated_bridge, &p->br->bridge_id, 8) && (p->designated_port == p->port_id); } /* br_stp.c */ void br_become_root_bridge(struct net_bridge *br); void br_config_bpdu_generation(struct net_bridge *); void br_configuration_update(struct net_bridge *); void br_port_state_selection(struct net_bridge *); void br_received_config_bpdu(struct net_bridge_port *p, const struct br_config_bpdu *bpdu); void br_received_tcn_bpdu(struct net_bridge_port *p); void br_transmit_config(struct net_bridge_port *p); void br_transmit_tcn(struct net_bridge *br); void br_topology_change_detection(struct net_bridge *br); void __br_set_topology_change(struct net_bridge *br, unsigned char val); /* br_stp_bpdu.c */ void br_send_config_bpdu(struct net_bridge_port *, struct br_config_bpdu *); void br_send_tcn_bpdu(struct net_bridge_port *); #endif
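A small sketch of how the IEEE 802.1D bounds above are typically used to reject out-of-range STP parameters; example_validate_hello_time() is hypothetical (the real bridge code performs the equivalent range check when the hello time is configured).
static int example_validate_hello_time(unsigned long t)
{
        /* t is in jiffies, like the BR_*_HELLO_TIME limits above. */
        if (t < BR_MIN_HELLO_TIME || t > BR_MAX_HELLO_TIME)
                return -ERANGE;
        return 0;
}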
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_RCULIST_NULLS_H #define _LINUX_RCULIST_NULLS_H #ifdef __KERNEL__ /* * RCU-protected list version */ #include <linux/list_nulls.h> #include <linux/rcupdate.h> /** * hlist_nulls_del_init_rcu - deletes entry from hash list with re-initialization * @n: the element to delete from the hash list. * * Note: hlist_nulls_unhashed() on the node return true after this. It is * useful for RCU based read lockfree traversal if the writer side * must know if the list entry is still hashed or already unhashed. * * In particular, it means that we can not poison the forward pointers * that may still be used for walking the hash list and we can only * zero the pprev pointer so list_unhashed() will return true after * this. * * The caller must take whatever precautions are necessary (such as * holding appropriate locks) to avoid racing with another * list-mutation primitive, such as hlist_nulls_add_head_rcu() or * hlist_nulls_del_rcu(), running on this same list. However, it is * perfectly legal to run concurrently with the _rcu list-traversal * primitives, such as hlist_nulls_for_each_entry_rcu(). */ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n) { if (!hlist_nulls_unhashed(n)) { __hlist_nulls_del(n); WRITE_ONCE(n->pprev, NULL); } } /** * hlist_nulls_first_rcu - returns the first element of the hash list. * @head: the head of the list. */ #define hlist_nulls_first_rcu(head) \ (*((struct hlist_nulls_node __rcu __force **)&(head)->first)) /** * hlist_nulls_next_rcu - returns the element of the list after @node. * @node: element of the list. */ #define hlist_nulls_next_rcu(node) \ (*((struct hlist_nulls_node __rcu __force **)&(node)->next)) /** * hlist_nulls_del_rcu - deletes entry from hash list without re-initialization * @n: the element to delete from the hash list. * * Note: hlist_nulls_unhashed() on entry does not return true after this, * the entry is in an undefined state. It is useful for RCU based * lockfree traversal. * * In particular, it means that we can not poison the forward * pointers that may still be used for walking the hash list. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as hlist_nulls_add_head_rcu() * or hlist_nulls_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * hlist_nulls_for_each_entry(). */ static inline void hlist_nulls_del_rcu(struct hlist_nulls_node *n) { __hlist_nulls_del(n); WRITE_ONCE(n->pprev, LIST_POISON2); } /** * hlist_nulls_add_head_rcu * @n: the element to add to the hash list. * @h: the list to add to. * * Description: * Adds the specified element to the specified hlist_nulls, * while permitting racing traversals.
* * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as hlist_nulls_add_head_rcu() * or hlist_nulls_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency * problems on Alpha CPUs. Regardless of the type of CPU, the * list-traversal primitive must be guarded by rcu_read_lock(). */ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n, struct hlist_nulls_head *h) { struct hlist_nulls_node *first = h->first; WRITE_ONCE(n->next, first); WRITE_ONCE(n->pprev, &h->first); rcu_assign_pointer(hlist_nulls_first_rcu(h), n); if (!is_a_nulls(first)) WRITE_ONCE(first->pprev, &n->next); } /** * hlist_nulls_add_tail_rcu * @n: the element to add to the hash list. * @h: the list to add to. * * Description: * Adds the specified element to the specified hlist_nulls, * while permitting racing traversals. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as hlist_nulls_add_head_rcu() * or hlist_nulls_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency * problems on Alpha CPUs. Regardless of the type of CPU, the * list-traversal primitive must be guarded by rcu_read_lock(). */ static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n, struct hlist_nulls_head *h) { struct hlist_nulls_node *i, *last = NULL; /* Note: write side code, so rcu accessors are not needed. */ for (i = h->first; !is_a_nulls(i); i = i->next) last = i; if (last) { WRITE_ONCE(n->next, last->next); n->pprev = &last->next; rcu_assign_pointer(hlist_nulls_next_rcu(last), n); } else { hlist_nulls_add_head_rcu(n, h); } } /* after that hlist_nulls_del will work */ static inline void hlist_nulls_add_fake(struct hlist_nulls_node *n) { n->pprev = &n->next; n->next = (struct hlist_nulls_node *)NULLS_MARKER(NULL); } /** * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type * @tpos: the type * to use as a loop cursor. * @pos: the &struct hlist_nulls_node to use as a loop cursor. * @head: the head of the list. * @member: the name of the hlist_nulls_node within the struct. * * The barrier() is needed to make sure compiler doesn't cache first element [1], * as this loop can be restarted [2] * [1] Documentation/memory-barriers.txt around line 1533 * [2] Documentation/RCU/rculist_nulls.rst around line 146 */ #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ for (({barrier();}), \ pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \ (!is_a_nulls(pos)) && \ ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \ pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos))) /** * hlist_nulls_for_each_entry_safe - * iterate over list of given type safe against removal of list entry * @tpos: the type * to use as a loop cursor. * @pos: the &struct hlist_nulls_node to use as a loop cursor. * @head: the head of the list. * @member: the name of the hlist_nulls_node within the struct. 
*/ #define hlist_nulls_for_each_entry_safe(tpos, pos, head, member) \ for (({barrier();}), \ pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \ (!is_a_nulls(pos)) && \ ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); \ pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)); 1; });) #endif #endif
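A hedged sketch of the lockless lookup pattern these primitives exist for, modelled on Documentation/RCU/rculist_nulls.rst: the nulls value stored at the end of each chain identifies the bucket, so a reader that ends up on the wrong chain (because the object it was traversing was freed and reused) restarts the lookup. struct my_obj, my_lookup() and the assumption that each bucket was initialized with its slot index as the nulls value are illustrative only.
struct my_obj {
        int key;
        struct hlist_nulls_node node;
};

static struct my_obj *my_lookup(struct hlist_nulls_head *head, unsigned int slot, int key)
{
        struct my_obj *obj;
        struct hlist_nulls_node *pos;

begin:
        rcu_read_lock();
        hlist_nulls_for_each_entry_rcu(obj, pos, head, node) {
                if (obj->key == key) {
                        /* A real user would take a reference here before unlocking. */
                        rcu_read_unlock();
                        return obj;
                }
        }
        /*
         * The traversal ended on a nulls marker.  If it is not the one
         * belonging to the bucket we started from, the object we were
         * walking moved to another chain; restart the lookup.
         */
        if (get_nulls_value(pos) != slot) {
                rcu_read_unlock();
                goto begin;
        }
        rcu_read_unlock();
        return NULL;
}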
/* * kernel/cpuset.c * * Processor and Memory placement constraints for sets of tasks. * * Copyright (C) 2003 BULL SA. * Copyright (C) 2004-2007 Silicon Graphics, Inc. * Copyright (C) 2006 Google, Inc * * Portions derived from Patrick Mochel's sysfs code. * sysfs is Copyright (c) 2001-3 Patrick Mochel * * 2003-10-10 Written by Simon Derr. * 2003-10-22 Updates by Stephen Hemminger. * 2004 May-July Rework by Paul Jackson.
* 2006 Rework by Paul Menage to use generic cgroups * 2008 Rework of the scheduler domains and CPU hotplug handling * by Max Krasnyansky * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of the Linux * distribution for more details. */ #include "cpuset-internal.h" #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/mempolicy.h> #include <linux/mm.h> #include <linux/memory.h> #include <linux/export.h> #include <linux/rcupdate.h> #include <linux/sched.h> #include <linux/sched/deadline.h> #include <linux/sched/mm.h> #include <linux/sched/task.h> #include <linux/security.h> #include <linux/oom.h> #include <linux/sched/isolation.h> #include <linux/wait.h> #include <linux/workqueue.h> DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key); DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key); /* * There could be abnormal cpuset configurations for cpu or memory * node binding, add this key to provide a quick low-cost judgment * of the situation. */ DEFINE_STATIC_KEY_FALSE(cpusets_insane_config_key); static const char * const perr_strings[] = { [PERR_INVCPUS] = "Invalid cpu list in cpuset.cpus.exclusive", [PERR_INVPARENT] = "Parent is an invalid partition root", [PERR_NOTPART] = "Parent is not a partition root", [PERR_NOTEXCL] = "Cpu list in cpuset.cpus not exclusive", [PERR_NOCPUS] = "Parent unable to distribute cpu downstream", [PERR_HOTPLUG] = "No cpu available due to hotplug", [PERR_CPUSEMPTY] = "cpuset.cpus and cpuset.cpus.exclusive are empty", [PERR_HKEEPING] = "partition config conflicts with housekeeping setup", [PERR_ACCESS] = "Enable partition not permitted", [PERR_REMOTE] = "Have remote partition underneath", }; /* * For local partitions, update to subpartitions_cpus & isolated_cpus is done * in update_parent_effective_cpumask(). For remote partitions, it is done in * the remote_partition_*() and remote_cpus_update() helpers. */ /* * Exclusive CPUs distributed out to local or remote sub-partitions of * top_cpuset */ static cpumask_var_t subpartitions_cpus; /* * Exclusive CPUs in isolated partitions */ static cpumask_var_t isolated_cpus; /* * Housekeeping (HK_TYPE_DOMAIN) CPUs at boot */ static cpumask_var_t boot_hk_cpus; static bool have_boot_isolcpus; /* List of remote partition root children */ static struct list_head remote_children; /* * A flag to force sched domain rebuild at the end of an operation. * It can be set in * - update_partition_sd_lb() * - update_cpumasks_hier() * - cpuset_update_flag() * - cpuset_hotplug_update_tasks() * - cpuset_handle_hotplug() * * Protected by cpuset_mutex (with cpus_read_lock held) or cpus_write_lock. * * Note that update_relax_domain_level() in cpuset-v1.c can still call * rebuild_sched_domains_locked() directly without using this flag. */ static bool force_sd_rebuild; /* * Partition root states: * * 0 - member (not a partition root) * 1 - partition root * 2 - partition root without load balancing (isolated) * -1 - invalid partition root * -2 - invalid isolated partition root * * There are 2 types of partitions - local or remote. Local partitions are * those whose parents are partition root themselves. Setting of * cpuset.cpus.exclusive are optional in setting up local partitions. * Remote partitions are those whose parents are not partition roots. Passing * down exclusive CPUs by setting cpuset.cpus.exclusive along its ancestor * nodes are mandatory in creating a remote partition. 
* * For simplicity, a local partition can be created under a local or remote * partition but a remote partition cannot have any partition root in its * ancestor chain except the cgroup root. */ #define PRS_MEMBER 0 #define PRS_ROOT 1 #define PRS_ISOLATED 2 #define PRS_INVALID_ROOT -1 #define PRS_INVALID_ISOLATED -2 static inline bool is_prs_invalid(int prs_state) { return prs_state < 0; } /* * Temporary cpumasks for working with partitions that are passed among * functions to avoid memory allocation in inner functions. */ struct tmpmasks { cpumask_var_t addmask, delmask; /* For partition root */ cpumask_var_t new_cpus; /* For update_cpumasks_hier() */ }; void inc_dl_tasks_cs(struct task_struct *p) { struct cpuset *cs = task_cs(p); cs->nr_deadline_tasks++; } void dec_dl_tasks_cs(struct task_struct *p) { struct cpuset *cs = task_cs(p); cs->nr_deadline_tasks--; } static inline int is_partition_valid(const struct cpuset *cs) { return cs->partition_root_state > 0; } static inline int is_partition_invalid(const struct cpuset *cs) { return cs->partition_root_state < 0; } /* * Callers should hold callback_lock to modify partition_root_state. */ static inline void make_partition_invalid(struct cpuset *cs) { if (cs->partition_root_state > 0) cs->partition_root_state = -cs->partition_root_state; } /* * Send notification event of whenever partition_root_state changes. */ static inline void notify_partition_change(struct cpuset *cs, int old_prs) { if (old_prs == cs->partition_root_state) return; cgroup_file_notify(&cs->partition_file); /* Reset prs_err if not invalid */ if (is_partition_valid(cs)) WRITE_ONCE(cs->prs_err, PERR_NONE); } static struct cpuset top_cpuset = { .flags = BIT(CS_ONLINE) | BIT(CS_CPU_EXCLUSIVE) | BIT(CS_MEM_EXCLUSIVE) | BIT(CS_SCHED_LOAD_BALANCE), .partition_root_state = PRS_ROOT, .relax_domain_level = -1, .remote_sibling = LIST_HEAD_INIT(top_cpuset.remote_sibling), }; /* * There are two global locks guarding cpuset structures - cpuset_mutex and * callback_lock. The cpuset code uses only cpuset_mutex. Other kernel * subsystems can use cpuset_lock()/cpuset_unlock() to prevent change to cpuset * structures. Note that cpuset_mutex needs to be a mutex as it is used in * paths that rely on priority inheritance (e.g. scheduler - on RT) for * correctness. * * A task must hold both locks to modify cpusets. If a task holds * cpuset_mutex, it blocks others, ensuring that it is the only task able to * also acquire callback_lock and be able to modify cpusets. It can perform * various checks on the cpuset structure first, knowing nothing will change. * It can also allocate memory while just holding cpuset_mutex. While it is * performing these checks, various callback routines can briefly acquire * callback_lock to query cpusets. Once it is ready to make the changes, it * takes callback_lock, blocking everyone else. * * Calls to the kernel memory allocator can not be made while holding * callback_lock, as that would risk double tripping on callback_lock * from one of the callbacks into the cpuset code from within * __alloc_pages(). * * If a task is only holding callback_lock, then it has read-only * access to cpusets. * * Now, the task_struct fields mems_allowed and mempolicy may be changed * by other task, we use alloc_lock in the task_struct fields to protect * them. * * The cpuset_common_seq_show() handlers only hold callback_lock across * small pieces of code, such as when reading out possibly multi-word * cpumasks and nodemasks. 
*/ static DEFINE_MUTEX(cpuset_mutex); void cpuset_lock(void) { mutex_lock(&cpuset_mutex); } void cpuset_unlock(void) { mutex_unlock(&cpuset_mutex); } static DEFINE_SPINLOCK(callback_lock); void cpuset_callback_lock_irq(void) { spin_lock_irq(&callback_lock); } void cpuset_callback_unlock_irq(void) { spin_unlock_irq(&callback_lock); } static struct workqueue_struct *cpuset_migrate_mm_wq; static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq); static inline void check_insane_mems_config(nodemask_t *nodes) { if (!cpusets_insane_config() && movable_only_nodes(nodes)) { static_branch_enable(&cpusets_insane_config_key); pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n" "Cpuset allocations might fail even with a lot of memory available.\n", nodemask_pr_args(nodes)); } } /* * decrease cs->attach_in_progress. * wake_up cpuset_attach_wq if cs->attach_in_progress==0. */ static inline void dec_attach_in_progress_locked(struct cpuset *cs) { lockdep_assert_held(&cpuset_mutex); cs->attach_in_progress--; if (!cs->attach_in_progress) wake_up(&cpuset_attach_wq); } static inline void dec_attach_in_progress(struct cpuset *cs) { mutex_lock(&cpuset_mutex); dec_attach_in_progress_locked(cs); mutex_unlock(&cpuset_mutex); } static inline bool cpuset_v2(void) { return !IS_ENABLED(CONFIG_CPUSETS_V1) || cgroup_subsys_on_dfl(cpuset_cgrp_subsys); } /* * Cgroup v2 behavior is used on the "cpus" and "mems" control files when * on default hierarchy or when the cpuset_v2_mode flag is set by mounting * the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option. * With v2 behavior, "cpus" and "mems" are always what the users have * requested and won't be changed by hotplug events. Only the effective * cpus or mems will be affected. */ static inline bool is_in_v2_mode(void) { return cpuset_v2() || (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE); } /** * partition_is_populated - check if partition has tasks * @cs: partition root to be checked * @excluded_child: a child cpuset to be excluded in task checking * Return: true if there are tasks, false otherwise * * It is assumed that @cs is a valid partition root. @excluded_child should * be non-NULL when this cpuset is going to become a partition itself. */ static inline bool partition_is_populated(struct cpuset *cs, struct cpuset *excluded_child) { struct cgroup_subsys_state *css; struct cpuset *child; if (cs->css.cgroup->nr_populated_csets) return true; if (!excluded_child && !cs->nr_subparts) return cgroup_is_populated(cs->css.cgroup); rcu_read_lock(); cpuset_for_each_child(child, css, cs) { if (child == excluded_child) continue; if (is_partition_valid(child)) continue; if (cgroup_is_populated(child->css.cgroup)) { rcu_read_unlock(); return true; } } rcu_read_unlock(); return false; } /* * Return in pmask the portion of a task's cpusets's cpus_allowed that * are online and are capable of running the task. If none are found, * walk up the cpuset hierarchy until we find one that does have some * appropriate cpus. * * One way or another, we guarantee to return some non-empty subset * of cpu_online_mask. * * Call with callback_lock or cpuset_mutex held. 
*/ static void guarantee_online_cpus(struct task_struct *tsk, struct cpumask *pmask) { const struct cpumask *possible_mask = task_cpu_possible_mask(tsk); struct cpuset *cs; if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_online_mask))) cpumask_copy(pmask, cpu_online_mask); rcu_read_lock(); cs = task_cs(tsk); while (!cpumask_intersects(cs->effective_cpus, pmask)) cs = parent_cs(cs); cpumask_and(pmask, pmask, cs->effective_cpus); rcu_read_unlock(); } /* * Return in *pmask the portion of a cpusets's mems_allowed that * are online, with memory. If none are online with memory, walk * up the cpuset hierarchy until we find one that does have some * online mems. The top cpuset always has some mems online. * * One way or another, we guarantee to return some non-empty subset * of node_states[N_MEMORY]. * * Call with callback_lock or cpuset_mutex held. */ static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) { while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY])) cs = parent_cs(cs); nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]); } /** * alloc_cpumasks - allocate three cpumasks for cpuset * @cs: the cpuset that have cpumasks to be allocated. * @tmp: the tmpmasks structure pointer * Return: 0 if successful, -ENOMEM otherwise. * * Only one of the two input arguments should be non-NULL. */ static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) { cpumask_var_t *pmask1, *pmask2, *pmask3, *pmask4; if (cs) { pmask1 = &cs->cpus_allowed; pmask2 = &cs->effective_cpus; pmask3 = &cs->effective_xcpus; pmask4 = &cs->exclusive_cpus; } else { pmask1 = &tmp->new_cpus; pmask2 = &tmp->addmask; pmask3 = &tmp->delmask; pmask4 = NULL; } if (!zalloc_cpumask_var(pmask1, GFP_KERNEL)) return -ENOMEM; if (!zalloc_cpumask_var(pmask2, GFP_KERNEL)) goto free_one; if (!zalloc_cpumask_var(pmask3, GFP_KERNEL)) goto free_two; if (pmask4 && !zalloc_cpumask_var(pmask4, GFP_KERNEL)) goto free_three; return 0; free_three: free_cpumask_var(*pmask3); free_two: free_cpumask_var(*pmask2); free_one: free_cpumask_var(*pmask1); return -ENOMEM; } /** * free_cpumasks - free cpumasks in a tmpmasks structure * @cs: the cpuset that have cpumasks to be free. * @tmp: the tmpmasks structure pointer */ static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp) { if (cs) { free_cpumask_var(cs->cpus_allowed); free_cpumask_var(cs->effective_cpus); free_cpumask_var(cs->effective_xcpus); free_cpumask_var(cs->exclusive_cpus); } if (tmp) { free_cpumask_var(tmp->new_cpus); free_cpumask_var(tmp->addmask); free_cpumask_var(tmp->delmask); } } /** * alloc_trial_cpuset - allocate a trial cpuset * @cs: the cpuset that the trial cpuset duplicates */ static struct cpuset *alloc_trial_cpuset(struct cpuset *cs) { struct cpuset *trial; trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL); if (!trial) return NULL; if (alloc_cpumasks(trial, NULL)) { kfree(trial); return NULL; } cpumask_copy(trial->cpus_allowed, cs->cpus_allowed); cpumask_copy(trial->effective_cpus, cs->effective_cpus); cpumask_copy(trial->effective_xcpus, cs->effective_xcpus); cpumask_copy(trial->exclusive_cpus, cs->exclusive_cpus); return trial; } /** * free_cpuset - free the cpuset * @cs: the cpuset to be freed */ static inline void free_cpuset(struct cpuset *cs) { free_cpumasks(cs, NULL); kfree(cs); } /* Return user specified exclusive CPUs */ static inline struct cpumask *user_xcpus(struct cpuset *cs) { return cpumask_empty(cs->exclusive_cpus) ? 
cs->cpus_allowed : cs->exclusive_cpus; } static inline bool xcpus_empty(struct cpuset *cs) { return cpumask_empty(cs->cpus_allowed) && cpumask_empty(cs->exclusive_cpus); } /* * cpusets_are_exclusive() - check if two cpusets are exclusive * * Return true if exclusive, false if not */ static inline bool cpusets_are_exclusive(struct cpuset *cs1, struct cpuset *cs2) { struct cpumask *xcpus1 = user_xcpus(cs1); struct cpumask *xcpus2 = user_xcpus(cs2); if (cpumask_intersects(xcpus1, xcpus2)) return false; return true; } /* * validate_change() - Used to validate that any proposed cpuset change * follows the structural rules for cpusets. * * If we replaced the flag and mask values of the current cpuset * (cur) with those values in the trial cpuset (trial), would * our various subset and exclusive rules still be valid? Presumes * cpuset_mutex held. * * 'cur' is the address of an actual, in-use cpuset. Operations * such as list traversal that depend on the actual address of the * cpuset in the list must use cur below, not trial. * * 'trial' is the address of bulk structure copy of cur, with * perhaps one or more of the fields cpus_allowed, mems_allowed, * or flags changed to new, trial values. * * Return 0 if valid, -errno if not. */ static int validate_change(struct cpuset *cur, struct cpuset *trial) { struct cgroup_subsys_state *css; struct cpuset *c, *par; int ret = 0; rcu_read_lock(); if (!is_in_v2_mode()) ret = cpuset1_validate_change(cur, trial); if (ret) goto out; /* Remaining checks don't apply to root cpuset */ if (cur == &top_cpuset) goto out; par = parent_cs(cur); /* * Cpusets with tasks - existing or newly being attached - can't * be changed to have empty cpus_allowed or mems_allowed. */ ret = -ENOSPC; if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) { if (!cpumask_empty(cur->cpus_allowed) && cpumask_empty(trial->cpus_allowed)) goto out; if (!nodes_empty(cur->mems_allowed) && nodes_empty(trial->mems_allowed)) goto out; } /* * We can't shrink if we won't have enough room for SCHED_DEADLINE * tasks. This check is not done when scheduling is disabled as the * users should know what they are doing. * * For v1, effective_cpus == cpus_allowed & user_xcpus() returns * cpus_allowed. * * For v2, is_cpu_exclusive() & is_sched_load_balance() are true only * for non-isolated partition root. At this point, the target * effective_cpus isn't computed yet. user_xcpus() is the best * approximation. * * TBD: May need to precompute the real effective_cpus here in case * incorrect scheduling of SCHED_DEADLINE tasks in a partition * becomes an issue. */ ret = -EBUSY; if (is_cpu_exclusive(cur) && is_sched_load_balance(cur) && !cpuset_cpumask_can_shrink(cur->effective_cpus, user_xcpus(trial))) goto out; /* * If either I or some sibling (!= me) is exclusive, we can't * overlap. exclusive_cpus cannot overlap with each other if set. */ ret = -EINVAL; cpuset_for_each_child(c, css, par) { bool txset, cxset; /* Are exclusive_cpus set? */ if (c == cur) continue; txset = !cpumask_empty(trial->exclusive_cpus); cxset = !cpumask_empty(c->exclusive_cpus); if (is_cpu_exclusive(trial) || is_cpu_exclusive(c) || (txset && cxset)) { if (!cpusets_are_exclusive(trial, c)) goto out; } else if (txset || cxset) { struct cpumask *xcpus, *acpus; /* * When just one of the exclusive_cpus's is set, * cpus_allowed of the other cpuset, if set, cannot be * a subset of it or none of those CPUs will be * available if these exclusive CPUs are activated. 
*/ if (txset) { xcpus = trial->exclusive_cpus; acpus = c->cpus_allowed; } else { xcpus = c->exclusive_cpus; acpus = trial->cpus_allowed; } if (!cpumask_empty(acpus) && cpumask_subset(acpus, xcpus)) goto out; } if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) && nodes_intersects(trial->mems_allowed, c->mems_allowed)) goto out; } ret = 0; out: rcu_read_unlock(); return ret; } #ifdef CONFIG_SMP /* * Helper routine for generate_sched_domains(). * Do cpusets a, b have overlapping effective cpus_allowed masks? */ static int cpusets_overlap(struct cpuset *a, struct cpuset *b) { return cpumask_intersects(a->effective_cpus, b->effective_cpus); } static void update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c) { if (dattr->relax_domain_level < c->relax_domain_level) dattr->relax_domain_level = c->relax_domain_level; return; } static void update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *root_cs) { struct cpuset *cp; struct cgroup_subsys_state *pos_css; rcu_read_lock(); cpuset_for_each_descendant_pre(cp, pos_css, root_cs) { /* skip the whole subtree if @cp doesn't have any CPU */ if (cpumask_empty(cp->cpus_allowed)) { pos_css = css_rightmost_descendant(pos_css); continue; } if (is_sched_load_balance(cp)) update_domain_attr(dattr, cp); } rcu_read_unlock(); } /* Must be called with cpuset_mutex held. */ static inline int nr_cpusets(void) { /* jump label reference count + the top-level cpuset */ return static_key_count(&cpusets_enabled_key.key) + 1; } /* * generate_sched_domains() * * This function builds a partial partition of the systems CPUs * A 'partial partition' is a set of non-overlapping subsets whose * union is a subset of that set. * The output of this function needs to be passed to kernel/sched/core.c * partition_sched_domains() routine, which will rebuild the scheduler's * load balancing domains (sched domains) as specified by that partial * partition. * * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst * for a background explanation of this. * * Does not return errors, on the theory that the callers of this * routine would rather not worry about failures to rebuild sched * domains when operating in the severe memory shortage situations * that could cause allocation failures below. * * Must be called with cpuset_mutex held. * * The three key local variables below are: * cp - cpuset pointer, used (together with pos_css) to perform a * top-down scan of all cpusets. For our purposes, rebuilding * the schedulers sched domains, we can ignore !is_sched_load_ * balance cpusets. * csa - (for CpuSet Array) Array of pointers to all the cpusets * that need to be load balanced, for convenient iterative * access by the subsequent code that finds the best partition, * i.e the set of domains (subsets) of CPUs such that the * cpus_allowed of every cpuset marked is_sched_load_balance * is a subset of one of these domains, while there are as * many such domains as possible, each as small as possible. * doms - Conversion of 'csa' to an array of cpumasks, for passing to * the kernel/sched/core.c routine partition_sched_domains() in a * convenient format, that can be easily compared to the prior * value to determine what partition elements (sched domains) * were changed (added or removed.) 
* * Finding the best partition (set of domains): * The double nested loops below over i, j scan over the load * balanced cpusets (using the array of cpuset pointers in csa[]) * looking for pairs of cpusets that have overlapping cpus_allowed * and merging them using a union-find algorithm. * * The union of the cpus_allowed masks from the set of all cpusets * having the same root then form the one element of the partition * (one sched domain) to be passed to partition_sched_domains(). * */ static int generate_sched_domains(cpumask_var_t **domains, struct sched_domain_attr **attributes) { struct cpuset *cp; /* top-down scan of cpusets */ struct cpuset **csa; /* array of all cpuset ptrs */ int csn; /* how many cpuset ptrs in csa so far */ int i, j; /* indices for partition finding loops */ cpumask_var_t *doms; /* resulting partition; i.e. sched domains */ struct sched_domain_attr *dattr; /* attributes for custom domains */ int ndoms = 0; /* number of sched domains in result */ int nslot; /* next empty doms[] struct cpumask slot */ struct cgroup_subsys_state *pos_css; bool root_load_balance = is_sched_load_balance(&top_cpuset); bool cgrpv2 = cpuset_v2(); int nslot_update; doms = NULL; dattr = NULL; csa = NULL; /* Special case for the 99% of systems with one, full, sched domain */ if (root_load_balance && cpumask_empty(subpartitions_cpus)) { single_root_domain: ndoms = 1; doms = alloc_sched_domains(ndoms); if (!doms) goto done; dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL); if (dattr) { *dattr = SD_ATTR_INIT; update_domain_attr_tree(dattr, &top_cpuset); } cpumask_and(doms[0], top_cpuset.effective_cpus, housekeeping_cpumask(HK_TYPE_DOMAIN)); goto done; } csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL); if (!csa) goto done; csn = 0; rcu_read_lock(); if (root_load_balance) csa[csn++] = &top_cpuset; cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) { if (cp == &top_cpuset) continue; if (cgrpv2) goto v2; /* * v1: * Continue traversing beyond @cp iff @cp has some CPUs and * isn't load balancing. The former is obvious. The * latter: All child cpusets contain a subset of the * parent's cpus, so just skip them, and then we call * update_domain_attr_tree() to calc relax_domain_level of * the corresponding sched domain. */ if (!cpumask_empty(cp->cpus_allowed) && !(is_sched_load_balance(cp) && cpumask_intersects(cp->cpus_allowed, housekeeping_cpumask(HK_TYPE_DOMAIN)))) continue; if (is_sched_load_balance(cp) && !cpumask_empty(cp->effective_cpus)) csa[csn++] = cp; /* skip @cp's subtree */ pos_css = css_rightmost_descendant(pos_css); continue; v2: /* * Only valid partition roots that are not isolated and with * non-empty effective_cpus will be saved into csn[]. */ if ((cp->partition_root_state == PRS_ROOT) && !cpumask_empty(cp->effective_cpus)) csa[csn++] = cp; /* * Skip @cp's subtree if not a partition root and has no * exclusive CPUs to be granted to child cpusets. */ if (!is_partition_valid(cp) && cpumask_empty(cp->exclusive_cpus)) pos_css = css_rightmost_descendant(pos_css); } rcu_read_unlock(); /* * If there are only isolated partitions underneath the cgroup root, * we can optimize out unneeded sched domains scanning. */ if (root_load_balance && (csn == 1)) goto single_root_domain; for (i = 0; i < csn; i++) uf_node_init(&csa[i]->node); /* Merge overlapping cpusets */ for (i = 0; i < csn; i++) { for (j = i + 1; j < csn; j++) { if (cpusets_overlap(csa[i], csa[j])) { /* * Cgroup v2 shouldn't pass down overlapping * partition root cpusets. 
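 *
 * e.g. on v1, where load-balanced cpusets may overlap, cpusets with
 * effective CPUs "0-3", "2-5" and "4-7" all overlap through the
 * middle one, so uf_union() collapses them into a single union-find
 * root and they end up covered by one sched domain spanning "0-7".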
*/ WARN_ON_ONCE(cgrpv2); uf_union(&csa[i]->node, &csa[j]->node); } } } /* Count the total number of domains */ for (i = 0; i < csn; i++) { if (uf_find(&csa[i]->node) == &csa[i]->node) ndoms++; } /* * Now we know how many domains to create. * Convert <csn, csa> to <ndoms, doms> and populate cpu masks. */ doms = alloc_sched_domains(ndoms); if (!doms) goto done; /* * The rest of the code, including the scheduler, can deal with * dattr==NULL case. No need to abort if alloc fails. */ dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr), GFP_KERNEL); /* * Cgroup v2 doesn't support domain attributes, just set all of them * to SD_ATTR_INIT. Also non-isolating partition root CPUs are a * subset of HK_TYPE_DOMAIN housekeeping CPUs. */ if (cgrpv2) { for (i = 0; i < ndoms; i++) { /* * The top cpuset may contain some boot time isolated * CPUs that need to be excluded from the sched domain. */ if (csa[i] == &top_cpuset) cpumask_and(doms[i], csa[i]->effective_cpus, housekeeping_cpumask(HK_TYPE_DOMAIN)); else cpumask_copy(doms[i], csa[i]->effective_cpus); if (dattr) dattr[i] = SD_ATTR_INIT; } goto done; } for (nslot = 0, i = 0; i < csn; i++) { nslot_update = 0; for (j = i; j < csn; j++) { if (uf_find(&csa[j]->node) == &csa[i]->node) { struct cpumask *dp = doms[nslot]; if (i == j) { nslot_update = 1; cpumask_clear(dp); if (dattr) *(dattr + nslot) = SD_ATTR_INIT; } cpumask_or(dp, dp, csa[j]->effective_cpus); cpumask_and(dp, dp, housekeeping_cpumask(HK_TYPE_DOMAIN)); if (dattr) update_domain_attr_tree(dattr + nslot, csa[j]); } } if (nslot_update) nslot++; } BUG_ON(nslot != ndoms); done: kfree(csa); /* * Fallback to the default domain if kmalloc() failed. * See comments in partition_sched_domains(). */ if (doms == NULL) ndoms = 1; *domains = doms; *attributes = dattr; return ndoms; } static void dl_update_tasks_root_domain(struct cpuset *cs) { struct css_task_iter it; struct task_struct *task; if (cs->nr_deadline_tasks == 0) return; css_task_iter_start(&cs->css, 0, &it); while ((task = css_task_iter_next(&it))) dl_add_task_root_domain(task); css_task_iter_end(&it); } void dl_rebuild_rd_accounting(void) { struct cpuset *cs = NULL; struct cgroup_subsys_state *pos_css; int cpu; u64 cookie = ++dl_cookie; lockdep_assert_held(&cpuset_mutex); lockdep_assert_cpus_held(); lockdep_assert_held(&sched_domains_mutex); rcu_read_lock(); for_each_possible_cpu(cpu) { if (dl_bw_visited(cpu, cookie)) continue; dl_clear_root_domain_cpu(cpu); } cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { if (cpumask_empty(cs->effective_cpus)) { pos_css = css_rightmost_descendant(pos_css); continue; } css_get(&cs->css); rcu_read_unlock(); dl_update_tasks_root_domain(cs); rcu_read_lock(); css_put(&cs->css); } rcu_read_unlock(); } /* * Rebuild scheduler domains. * * If the flag 'sched_load_balance' of any cpuset with non-empty * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset * which has that flag enabled, or if any cpuset with a non-empty * 'cpus' is removed, then call this routine to rebuild the * scheduler's dynamic sched domains. * * Call with cpuset_mutex held. Takes cpus_read_lock(). */ void rebuild_sched_domains_locked(void) { struct cgroup_subsys_state *pos_css; struct sched_domain_attr *attr; cpumask_var_t *doms; struct cpuset *cs; int ndoms; lockdep_assert_cpus_held(); lockdep_assert_held(&cpuset_mutex); force_sd_rebuild = false; /* * If we have raced with CPU hotplug, return early to avoid * passing doms with offlined cpu to partition_sched_domains(). 
* Anyways, cpuset_handle_hotplug() will rebuild sched domains. * * With no CPUs in any subpartitions, top_cpuset's effective CPUs * should be the same as the active CPUs, so checking only top_cpuset * is enough to detect racing CPU offlines. */ if (cpumask_empty(subpartitions_cpus) && !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask)) return; /* * With subpartition CPUs, however, the effective CPUs of a partition * root should be only a subset of the active CPUs. Since a CPU in any * partition root could be offlined, all must be checked. */ if (!cpumask_empty(subpartitions_cpus)) { rcu_read_lock(); cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { if (!is_partition_valid(cs)) { pos_css = css_rightmost_descendant(pos_css); continue; } if (!cpumask_subset(cs->effective_cpus, cpu_active_mask)) { rcu_read_unlock(); return; } } rcu_read_unlock(); } /* Generate domain masks and attrs */ ndoms = generate_sched_domains(&doms, &attr); /* Have scheduler rebuild the domains */ partition_sched_domains(ndoms, doms, attr); } #else /* !CONFIG_SMP */ void rebuild_sched_domains_locked(void) { } #endif /* CONFIG_SMP */ static void rebuild_sched_domains_cpuslocked(void) { mutex_lock(&cpuset_mutex); rebuild_sched_domains_locked(); mutex_unlock(&cpuset_mutex); } void rebuild_sched_domains(void) { cpus_read_lock(); rebuild_sched_domains_cpuslocked(); cpus_read_unlock(); } void cpuset_reset_sched_domains(void) { mutex_lock(&cpuset_mutex); partition_sched_domains(1, NULL, NULL); mutex_unlock(&cpuset_mutex); } /** * cpuset_update_tasks_cpumask - Update the cpumasks of tasks in the cpuset. * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed * @new_cpus: the temp variable for the new effective_cpus mask * * Iterate through each task of @cs updating its cpus_allowed to the * effective cpuset's. As this function is called with cpuset_mutex held, * cpuset membership stays stable. * * For top_cpuset, task_cpu_possible_mask() is used instead of effective_cpus * to make sure all offline CPUs are also included as hotplug code won't * update cpumasks for tasks in top_cpuset. * * As task_cpu_possible_mask() can be task dependent in arm64, we have to * do cpu masking per task instead of doing it once for all. */ void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus) { struct css_task_iter it; struct task_struct *task; bool top_cs = cs == &top_cpuset; css_task_iter_start(&cs->css, 0, &it); while ((task = css_task_iter_next(&it))) { const struct cpumask *possible_mask = task_cpu_possible_mask(task); if (top_cs) { /* * PF_NO_SETAFFINITY tasks are ignored. * All per cpu kthreads should have PF_NO_SETAFFINITY * flag set, see kthread_set_per_cpu(). */ if (task->flags & PF_NO_SETAFFINITY) continue; cpumask_andnot(new_cpus, possible_mask, subpartitions_cpus); } else { cpumask_and(new_cpus, possible_mask, cs->effective_cpus); } set_cpus_allowed_ptr(task, new_cpus); } css_task_iter_end(&it); } /** * compute_effective_cpumask - Compute the effective cpumask of the cpuset * @new_cpus: the temp variable for the new effective_cpus mask * @cs: the cpuset the need to recompute the new effective_cpus mask * @parent: the parent cpuset * * The result is valid only if the given cpuset isn't a partition root. 
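 *
 * The computation is simply cs->cpus_allowed & parent->effective_cpus;
 * e.g. cpus_allowed "0-7" under a parent whose effective_cpus is
 * "2-5" yields "2-5".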
*/ static void compute_effective_cpumask(struct cpumask *new_cpus, struct cpuset *cs, struct cpuset *parent) { cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus); } /* * Commands for update_parent_effective_cpumask */ enum partition_cmd { partcmd_enable, /* Enable partition root */ partcmd_enablei, /* Enable isolated partition root */ partcmd_disable, /* Disable partition root */ partcmd_update, /* Update parent's effective_cpus */ partcmd_invalidate, /* Make partition invalid */ }; static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, struct tmpmasks *tmp); /* * Update partition exclusive flag * * Return: 0 if successful, an error code otherwise */ static int update_partition_exclusive_flag(struct cpuset *cs, int new_prs) { bool exclusive = (new_prs > PRS_MEMBER); if (exclusive && !is_cpu_exclusive(cs)) { if (cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 1)) return PERR_NOTEXCL; } else if (!exclusive && is_cpu_exclusive(cs)) { /* Turning off CS_CPU_EXCLUSIVE will not return error */ cpuset_update_flag(CS_CPU_EXCLUSIVE, cs, 0); } return 0; } /* * Update partition load balance flag and/or rebuild sched domain * * Changing load balance flag will automatically call * rebuild_sched_domains_locked(). * This function is for cgroup v2 only. */ static void update_partition_sd_lb(struct cpuset *cs, int old_prs) { int new_prs = cs->partition_root_state; bool rebuild_domains = (new_prs > 0) || (old_prs > 0); bool new_lb; /* * If cs is not a valid partition root, the load balance state * will follow its parent. */ if (new_prs > 0) { new_lb = (new_prs != PRS_ISOLATED); } else { new_lb = is_sched_load_balance(parent_cs(cs)); } if (new_lb != !!is_sched_load_balance(cs)) { rebuild_domains = true; if (new_lb) set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); else clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); } if (rebuild_domains) cpuset_force_rebuild(); } /* * tasks_nocpu_error - Return true if tasks will have no effective_cpus */ static bool tasks_nocpu_error(struct cpuset *parent, struct cpuset *cs, struct cpumask *xcpus) { /* * A populated partition (cs or parent) can't have empty effective_cpus */ return (cpumask_subset(parent->effective_cpus, xcpus) && partition_is_populated(parent, cs)) || (!cpumask_intersects(xcpus, cpu_active_mask) && partition_is_populated(cs, NULL)); } static void reset_partition_data(struct cpuset *cs) { struct cpuset *parent = parent_cs(cs); if (!cpuset_v2()) return; lockdep_assert_held(&callback_lock); cs->nr_subparts = 0; if (cpumask_empty(cs->exclusive_cpus)) { cpumask_clear(cs->effective_xcpus); if (is_cpu_exclusive(cs)) clear_bit(CS_CPU_EXCLUSIVE, &cs->flags); } if (!cpumask_and(cs->effective_cpus, parent->effective_cpus, cs->cpus_allowed)) cpumask_copy(cs->effective_cpus, parent->effective_cpus); } /* * isolated_cpus_update - Update the isolated_cpus mask * @old_prs: old partition_root_state * @new_prs: new partition_root_state * @xcpus: exclusive CPUs with state change */ static void isolated_cpus_update(int old_prs, int new_prs, struct cpumask *xcpus) { WARN_ON_ONCE(old_prs == new_prs); if (new_prs == PRS_ISOLATED) cpumask_or(isolated_cpus, isolated_cpus, xcpus); else cpumask_andnot(isolated_cpus, isolated_cpus, xcpus); } /* * partition_xcpus_add - Add new exclusive CPUs to partition * @new_prs: new partition_root_state * @parent: parent cpuset * @xcpus: exclusive CPUs to be added * Return: true if isolated_cpus modified, false otherwise * * Remote partition if parent == NULL */ static bool partition_xcpus_add(int new_prs, struct cpuset 
*parent, struct cpumask *xcpus) { bool isolcpus_updated; WARN_ON_ONCE(new_prs < 0); lockdep_assert_held(&callback_lock); if (!parent) parent = &top_cpuset; if (parent == &top_cpuset) cpumask_or(subpartitions_cpus, subpartitions_cpus, xcpus); isolcpus_updated = (new_prs != parent->partition_root_state); if (isolcpus_updated) isolated_cpus_update(parent->partition_root_state, new_prs, xcpus); cpumask_andnot(parent->effective_cpus, parent->effective_cpus, xcpus); return isolcpus_updated; } /* * partition_xcpus_del - Remove exclusive CPUs from partition * @old_prs: old partition_root_state * @parent: parent cpuset * @xcpus: exclusive CPUs to be removed * Return: true if isolated_cpus modified, false otherwise * * Remote partition if parent == NULL */ static bool partition_xcpus_del(int old_prs, struct cpuset *parent, struct cpumask *xcpus) { bool isolcpus_updated; WARN_ON_ONCE(old_prs < 0); lockdep_assert_held(&callback_lock); if (!parent) parent = &top_cpuset; if (parent == &top_cpuset) cpumask_andnot(subpartitions_cpus, subpartitions_cpus, xcpus); isolcpus_updated = (old_prs != parent->partition_root_state); if (isolcpus_updated) isolated_cpus_update(old_prs, parent->partition_root_state, xcpus); cpumask_and(xcpus, xcpus, cpu_active_mask); cpumask_or(parent->effective_cpus, parent->effective_cpus, xcpus); return isolcpus_updated; } static void update_unbound_workqueue_cpumask(bool isolcpus_updated) { int ret; lockdep_assert_cpus_held(); if (!isolcpus_updated) return; ret = workqueue_unbound_exclude_cpumask(isolated_cpus); WARN_ON_ONCE(ret < 0); } /** * cpuset_cpu_is_isolated - Check if the given CPU is isolated * @cpu: the CPU number to be checked * Return: true if CPU is used in an isolated partition, false otherwise */ bool cpuset_cpu_is_isolated(int cpu) { return cpumask_test_cpu(cpu, isolated_cpus); } EXPORT_SYMBOL_GPL(cpuset_cpu_is_isolated); /* * compute_effective_exclusive_cpumask - compute effective exclusive CPUs * @cs: cpuset * @xcpus: effective exclusive CPUs value to be set * @real_cs: the real cpuset (can be NULL) * Return: 0 if there is no sibling conflict, > 0 otherwise * * If exclusive_cpus isn't explicitly set or a real_cs is provided, we have to * scan the sibling cpusets and exclude their exclusive_cpus or effective_xcpus * as well. The provision of real_cs means that a cpumask is being changed and * the given cs is a trial one. 
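 *
 * For example (illustrative): starting from
 * user_xcpus(cs) & parent->effective_xcpus == "0-7", a sibling with
 * exclusive_cpus "0-1" and another with effective_xcpus "6-7" leave
 * "2-5" in @xcpus and make the function return 2 (two conflicts).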
*/ static int compute_effective_exclusive_cpumask(struct cpuset *cs, struct cpumask *xcpus, struct cpuset *real_cs) { struct cgroup_subsys_state *css; struct cpuset *parent = parent_cs(cs); struct cpuset *sibling; int retval = 0; if (!xcpus) xcpus = cs->effective_xcpus; cpumask_and(xcpus, user_xcpus(cs), parent->effective_xcpus); if (!real_cs) { if (!cpumask_empty(cs->exclusive_cpus)) return 0; } else { cs = real_cs; } /* * Exclude exclusive CPUs from siblings */ rcu_read_lock(); cpuset_for_each_child(sibling, css, parent) { if (sibling == cs) continue; if (!cpumask_empty(sibling->exclusive_cpus) && cpumask_intersects(xcpus, sibling->exclusive_cpus)) { cpumask_andnot(xcpus, xcpus, sibling->exclusive_cpus); retval++; continue; } if (!cpumask_empty(sibling->effective_xcpus) && cpumask_intersects(xcpus, sibling->effective_xcpus)) { cpumask_andnot(xcpus, xcpus, sibling->effective_xcpus); retval++; } } rcu_read_unlock(); return retval; } static inline bool is_remote_partition(struct cpuset *cs) { return !list_empty(&cs->remote_sibling); } static inline bool is_local_partition(struct cpuset *cs) { return is_partition_valid(cs) && !is_remote_partition(cs); } /* * remote_partition_enable - Enable current cpuset as a remote partition root * @cs: the cpuset to update * @new_prs: new partition_root_state * @tmp: temporary masks * Return: 0 if successful, errcode if error * * Enable the current cpuset to become a remote partition root taking CPUs * directly from the top cpuset. cpuset_mutex must be held by the caller. */ static int remote_partition_enable(struct cpuset *cs, int new_prs, struct tmpmasks *tmp) { bool isolcpus_updated; /* * The user must have sysadmin privilege. */ if (!capable(CAP_SYS_ADMIN)) return PERR_ACCESS; /* * The requested exclusive_cpus must not be allocated to other * partitions and it can't use up all the root's effective_cpus. * * Note that if there is any local partition root above it or * remote partition root underneath it, its exclusive_cpus must * have overlapped with subpartitions_cpus. */ compute_effective_exclusive_cpumask(cs, tmp->new_cpus, NULL); if (cpumask_empty(tmp->new_cpus) || cpumask_intersects(tmp->new_cpus, subpartitions_cpus) || cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus)) return PERR_INVCPUS; spin_lock_irq(&callback_lock); isolcpus_updated = partition_xcpus_add(new_prs, NULL, tmp->new_cpus); list_add(&cs->remote_sibling, &remote_children); cpumask_copy(cs->effective_xcpus, tmp->new_cpus); spin_unlock_irq(&callback_lock); update_unbound_workqueue_cpumask(isolcpus_updated); cpuset_force_rebuild(); cs->prs_err = 0; /* * Propagate changes in top_cpuset's effective_cpus down the hierarchy. */ cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus); update_sibling_cpumasks(&top_cpuset, NULL, tmp); return 0; } /* * remote_partition_disable - Remove current cpuset from remote partition list * @cs: the cpuset to update * @tmp: temporary masks * * The effective_cpus is also updated. * * cpuset_mutex must be held by the caller. 
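 *
 * Called, for example, when a remote partition is switched back to
 * "member" or when it has to be invalidated because its effective
 * exclusive CPUs can no longer be satisfied.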
*/ static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp) { bool isolcpus_updated; WARN_ON_ONCE(!is_remote_partition(cs)); WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus)); spin_lock_irq(&callback_lock); list_del_init(&cs->remote_sibling); isolcpus_updated = partition_xcpus_del(cs->partition_root_state, NULL, cs->effective_xcpus); if (cs->prs_err) cs->partition_root_state = -cs->partition_root_state; else cs->partition_root_state = PRS_MEMBER; /* effective_xcpus may need to be changed */ compute_effective_exclusive_cpumask(cs, NULL, NULL); reset_partition_data(cs); spin_unlock_irq(&callback_lock); update_unbound_workqueue_cpumask(isolcpus_updated); cpuset_force_rebuild(); /* * Propagate changes in top_cpuset's effective_cpus down the hierarchy. */ cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus); update_sibling_cpumasks(&top_cpuset, NULL, tmp); } /* * remote_cpus_update - cpus_exclusive change of remote partition * @cs: the cpuset to be updated * @xcpus: the new exclusive_cpus mask, if non-NULL * @excpus: the new effective_xcpus mask * @tmp: temporary masks * * top_cpuset and subpartitions_cpus will be updated or partition can be * invalidated. */ static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus, struct cpumask *excpus, struct tmpmasks *tmp) { bool adding, deleting; int prs = cs->partition_root_state; int isolcpus_updated = 0; if (WARN_ON_ONCE(!is_remote_partition(cs))) return; WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus)); if (cpumask_empty(excpus)) { cs->prs_err = PERR_CPUSEMPTY; goto invalidate; } adding = cpumask_andnot(tmp->addmask, excpus, cs->effective_xcpus); deleting = cpumask_andnot(tmp->delmask, cs->effective_xcpus, excpus); /* * Additions of remote CPUs is only allowed if those CPUs are * not allocated to other partitions and there are effective_cpus * left in the top cpuset. */ if (adding) { if (!capable(CAP_SYS_ADMIN)) cs->prs_err = PERR_ACCESS; else if (cpumask_intersects(tmp->addmask, subpartitions_cpus) || cpumask_subset(top_cpuset.effective_cpus, tmp->addmask)) cs->prs_err = PERR_NOCPUS; if (cs->prs_err) goto invalidate; } spin_lock_irq(&callback_lock); if (adding) isolcpus_updated += partition_xcpus_add(prs, NULL, tmp->addmask); if (deleting) isolcpus_updated += partition_xcpus_del(prs, NULL, tmp->delmask); /* * Need to update effective_xcpus and exclusive_cpus now as * update_sibling_cpumasks() below may iterate back to the same cs. */ cpumask_copy(cs->effective_xcpus, excpus); if (xcpus) cpumask_copy(cs->exclusive_cpus, xcpus); spin_unlock_irq(&callback_lock); update_unbound_workqueue_cpumask(isolcpus_updated); if (adding || deleting) cpuset_force_rebuild(); /* * Propagate changes in top_cpuset's effective_cpus down the hierarchy. */ cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus); update_sibling_cpumasks(&top_cpuset, NULL, tmp); return; invalidate: remote_partition_disable(cs, tmp); } /* * prstate_housekeeping_conflict - check for partition & housekeeping conflicts * @prstate: partition root state to be checked * @new_cpus: cpu mask * Return: true if there is conflict, false otherwise * * CPUs outside of boot_hk_cpus, if defined, can only be used in an * isolated partition. 
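 *
 * e.g. on a 12-CPU system booted with "isolcpus=8-11", boot_hk_cpus
 * is "0-7"; a regular partition root asking for CPUs 8-9 conflicts,
 * while an isolated partition root does not.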
*/ static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus) { if (!have_boot_isolcpus) return false; if ((prstate != PRS_ISOLATED) && !cpumask_subset(new_cpus, boot_hk_cpus)) return true; return false; } /** * update_parent_effective_cpumask - update effective_cpus mask of parent cpuset * @cs: The cpuset that requests change in partition root state * @cmd: Partition root state change command * @newmask: Optional new cpumask for partcmd_update * @tmp: Temporary addmask and delmask * Return: 0 or a partition root state error code * * For partcmd_enable*, the cpuset is being transformed from a non-partition * root to a partition root. The effective_xcpus (cpus_allowed if * effective_xcpus not set) mask of the given cpuset will be taken away from * parent's effective_cpus. The function will return 0 if all the CPUs listed * in effective_xcpus can be granted or an error code will be returned. * * For partcmd_disable, the cpuset is being transformed from a partition * root back to a non-partition root. Any CPUs in effective_xcpus will be * given back to parent's effective_cpus. 0 will always be returned. * * For partcmd_update, if the optional newmask is specified, the cpu list is * to be changed from effective_xcpus to newmask. Otherwise, effective_xcpus is * assumed to remain the same. The cpuset should either be a valid or invalid * partition root. The partition root state may change from valid to invalid * or vice versa. An error code will be returned if transitioning from * invalid to valid violates the exclusivity rule. * * For partcmd_invalidate, the current partition will be made invalid. * * The partcmd_enable* and partcmd_disable commands are used by * update_prstate(). An error code may be returned and the caller will check * for error. * * The partcmd_update command is used by update_cpumasks_hier() with newmask * NULL and update_cpumask() with newmask set. The partcmd_invalidate is used * by update_cpumask() with NULL newmask. In both cases, the callers won't * check for error and so partition_root_state and prs_err will be updated * directly. */ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd, struct cpumask *newmask, struct tmpmasks *tmp) { struct cpuset *parent = parent_cs(cs); int adding; /* Adding cpus to parent's effective_cpus */ int deleting; /* Deleting cpus from parent's effective_cpus */ int old_prs, new_prs; int part_error = PERR_NONE; /* Partition error? */ int subparts_delta = 0; int isolcpus_updated = 0; struct cpumask *xcpus = user_xcpus(cs); bool nocpu; lockdep_assert_held(&cpuset_mutex); WARN_ON_ONCE(is_remote_partition(cs)); /* * new_prs will only be changed for the partcmd_update and * partcmd_invalidate commands. */ adding = deleting = false; old_prs = new_prs = cs->partition_root_state; if (cmd == partcmd_invalidate) { if (is_prs_invalid(old_prs)) return 0; /* * Make the current partition invalid. */ if (is_partition_valid(parent)) adding = cpumask_and(tmp->addmask, xcpus, parent->effective_xcpus); if (old_prs > 0) { new_prs = -old_prs; subparts_delta--; } goto write_error; } /* * The parent must be a partition root. * The new cpumask, if present, or the current cpus_allowed must * not be empty. */ if (!is_partition_valid(parent)) { return is_partition_invalid(parent) ? 
PERR_INVPARENT : PERR_NOTPART; } if (!newmask && xcpus_empty(cs)) return PERR_CPUSEMPTY; nocpu = tasks_nocpu_error(parent, cs, xcpus); if ((cmd == partcmd_enable) || (cmd == partcmd_enablei)) { /* * Need to call compute_effective_exclusive_cpumask() in case * exclusive_cpus not set. Sibling conflict should only happen * if exclusive_cpus isn't set. */ xcpus = tmp->new_cpus; if (compute_effective_exclusive_cpumask(cs, xcpus, NULL)) WARN_ON_ONCE(!cpumask_empty(cs->exclusive_cpus)); /* * Enabling partition root is not allowed if its * effective_xcpus is empty. */ if (cpumask_empty(xcpus)) return PERR_INVCPUS; if (prstate_housekeeping_conflict(new_prs, xcpus)) return PERR_HKEEPING; /* * A parent can be left with no CPU as long as there is no * task directly associated with the parent partition. */ if (nocpu) return PERR_NOCPUS; deleting = cpumask_and(tmp->delmask, xcpus, parent->effective_xcpus); if (deleting) subparts_delta++; new_prs = (cmd == partcmd_enable) ? PRS_ROOT : PRS_ISOLATED; } else if (cmd == partcmd_disable) { /* * May need to add cpus back to parent's effective_cpus * (and maybe removed from subpartitions_cpus/isolated_cpus) * for valid partition root. xcpus may contain CPUs that * shouldn't be removed from the two global cpumasks. */ if (is_partition_valid(cs)) { cpumask_copy(tmp->addmask, cs->effective_xcpus); adding = true; subparts_delta--; } new_prs = PRS_MEMBER; } else if (newmask) { /* * Empty cpumask is not allowed */ if (cpumask_empty(newmask)) { part_error = PERR_CPUSEMPTY; goto write_error; } /* Check newmask again, whether cpus are available for parent/cs */ nocpu |= tasks_nocpu_error(parent, cs, newmask); /* * partcmd_update with newmask: * * Compute add/delete mask to/from effective_cpus * * For valid partition: * addmask = exclusive_cpus & ~newmask * & parent->effective_xcpus * delmask = newmask & ~exclusive_cpus * & parent->effective_xcpus * * For invalid partition: * delmask = newmask & parent->effective_xcpus */ if (is_prs_invalid(old_prs)) { adding = false; deleting = cpumask_and(tmp->delmask, newmask, parent->effective_xcpus); } else { cpumask_andnot(tmp->addmask, xcpus, newmask); adding = cpumask_and(tmp->addmask, tmp->addmask, parent->effective_xcpus); cpumask_andnot(tmp->delmask, newmask, xcpus); deleting = cpumask_and(tmp->delmask, tmp->delmask, parent->effective_xcpus); } /* * Make partition invalid if parent's effective_cpus could * become empty and there are tasks in the parent. */ if (nocpu && (!adding || !cpumask_intersects(tmp->addmask, cpu_active_mask))) { part_error = PERR_NOCPUS; deleting = false; adding = cpumask_and(tmp->addmask, xcpus, parent->effective_xcpus); } } else { /* * partcmd_update w/o newmask * * delmask = effective_xcpus & parent->effective_cpus * * This can be called from: * 1) update_cpumasks_hier() * 2) cpuset_hotplug_update_tasks() * * Check to see if it can be transitioned from valid to * invalid partition or vice versa. * * A partition error happens when parent has tasks and all * its effective CPUs will have to be distributed out. */ WARN_ON_ONCE(!is_partition_valid(parent)); if (nocpu) { part_error = PERR_NOCPUS; if (is_partition_valid(cs)) adding = cpumask_and(tmp->addmask, xcpus, parent->effective_xcpus); } else if (is_partition_invalid(cs) && cpumask_subset(xcpus, parent->effective_xcpus)) { struct cgroup_subsys_state *css; struct cpuset *child; bool exclusive = true; /* * Convert invalid partition to valid has to * pass the cpu exclusivity test. 
*/ rcu_read_lock(); cpuset_for_each_child(child, css, parent) { if (child == cs) continue; if (!cpusets_are_exclusive(cs, child)) { exclusive = false; break; } } rcu_read_unlock(); if (exclusive) deleting = cpumask_and(tmp->delmask, xcpus, parent->effective_cpus); else part_error = PERR_NOTEXCL; } } write_error: if (part_error) WRITE_ONCE(cs->prs_err, part_error); if (cmd == partcmd_update) { /* * Check for possible transition between valid and invalid * partition root. */ switch (cs->partition_root_state) { case PRS_ROOT: case PRS_ISOLATED: if (part_error) { new_prs = -old_prs; subparts_delta--; } break; case PRS_INVALID_ROOT: case PRS_INVALID_ISOLATED: if (!part_error) { new_prs = -old_prs; subparts_delta++; } break; } } if (!adding && !deleting && (new_prs == old_prs)) return 0; /* * Transitioning between invalid to valid or vice versa may require * changing CS_CPU_EXCLUSIVE. In the case of partcmd_update, * validate_change() has already been successfully called and * CPU lists in cs haven't been updated yet. So defer it to later. */ if ((old_prs != new_prs) && (cmd != partcmd_update)) { int err = update_partition_exclusive_flag(cs, new_prs); if (err) return err; } /* * Change the parent's effective_cpus & effective_xcpus (top cpuset * only). * * Newly added CPUs will be removed from effective_cpus and * newly deleted ones will be added back to effective_cpus. */ spin_lock_irq(&callback_lock); if (old_prs != new_prs) { cs->partition_root_state = new_prs; if (new_prs <= 0) cs->nr_subparts = 0; } /* * Adding to parent's effective_cpus means deletion CPUs from cs * and vice versa. */ if (adding) isolcpus_updated += partition_xcpus_del(old_prs, parent, tmp->addmask); if (deleting) isolcpus_updated += partition_xcpus_add(new_prs, parent, tmp->delmask); if (is_partition_valid(parent)) { parent->nr_subparts += subparts_delta; WARN_ON_ONCE(parent->nr_subparts < 0); } spin_unlock_irq(&callback_lock); update_unbound_workqueue_cpumask(isolcpus_updated); if ((old_prs != new_prs) && (cmd == partcmd_update)) update_partition_exclusive_flag(cs, new_prs); if (adding || deleting) { cpuset_update_tasks_cpumask(parent, tmp->addmask); update_sibling_cpumasks(parent, cs, tmp); } /* * For partcmd_update without newmask, it is being called from * cpuset_handle_hotplug(). Update the load balance flag and * scheduling domain accordingly. */ if ((cmd == partcmd_update) && !newmask) update_partition_sd_lb(cs, old_prs); notify_partition_change(cs, old_prs); return 0; } /** * compute_partition_effective_cpumask - compute effective_cpus for partition * @cs: partition root cpuset * @new_ecpus: previously computed effective_cpus to be updated * * Compute the effective_cpus of a partition root by scanning effective_xcpus * of child partition roots and excluding their effective_xcpus. * * This has the side effect of invalidating valid child partition roots, * if necessary. Since it is called from either cpuset_hotplug_update_tasks() * or update_cpumasks_hier() where parent and children are modified * successively, we don't need to call update_parent_effective_cpumask() * and the child's effective_cpus will be updated in later iterations. * * Note that rcu_read_lock() is assumed to be held. 
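 *
 * For example (illustrative): a partition root whose effective
 * exclusive CPUs are "0-7" and which has two valid child partitions
 * owning "0-1" and "2-3" ends up with effective_cpus "4-7", provided
 * neither child has to be invalidated by the rules above.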
*/ static void compute_partition_effective_cpumask(struct cpuset *cs, struct cpumask *new_ecpus) { struct cgroup_subsys_state *css; struct cpuset *child; bool populated = partition_is_populated(cs, NULL); /* * Check child partition roots to see if they should be * invalidated when * 1) child effective_xcpus not a subset of new * excluisve_cpus * 2) All the effective_cpus will be used up and cp * has tasks */ compute_effective_exclusive_cpumask(cs, new_ecpus, NULL); cpumask_and(new_ecpus, new_ecpus, cpu_active_mask); rcu_read_lock(); cpuset_for_each_child(child, css, cs) { if (!is_partition_valid(child)) continue; /* * There shouldn't be a remote partition underneath another * partition root. */ WARN_ON_ONCE(is_remote_partition(child)); child->prs_err = 0; if (!cpumask_subset(child->effective_xcpus, cs->effective_xcpus)) child->prs_err = PERR_INVCPUS; else if (populated && cpumask_subset(new_ecpus, child->effective_xcpus)) child->prs_err = PERR_NOCPUS; if (child->prs_err) { int old_prs = child->partition_root_state; /* * Invalidate child partition */ spin_lock_irq(&callback_lock); make_partition_invalid(child); cs->nr_subparts--; child->nr_subparts = 0; spin_unlock_irq(&callback_lock); notify_partition_change(child, old_prs); continue; } cpumask_andnot(new_ecpus, new_ecpus, child->effective_xcpus); } rcu_read_unlock(); } /* * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree * @cs: the cpuset to consider * @tmp: temp variables for calculating effective_cpus & partition setup * @force: don't skip any descendant cpusets if set * * When configured cpumask is changed, the effective cpumasks of this cpuset * and all its descendants need to be updated. * * On legacy hierarchy, effective_cpus will be the same with cpu_allowed. * * Called with cpuset_mutex held */ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, bool force) { struct cpuset *cp; struct cgroup_subsys_state *pos_css; bool need_rebuild_sched_domains = false; int old_prs, new_prs; rcu_read_lock(); cpuset_for_each_descendant_pre(cp, pos_css, cs) { struct cpuset *parent = parent_cs(cp); bool remote = is_remote_partition(cp); bool update_parent = false; old_prs = new_prs = cp->partition_root_state; /* * For child remote partition root (!= cs), we need to call * remote_cpus_update() if effective_xcpus will be changed. * Otherwise, we can skip the whole subtree. * * remote_cpus_update() will reuse tmp->new_cpus only after * its value is being processed. */ if (remote && (cp != cs)) { compute_effective_exclusive_cpumask(cp, tmp->new_cpus, NULL); if (cpumask_equal(cp->effective_xcpus, tmp->new_cpus)) { pos_css = css_rightmost_descendant(pos_css); continue; } rcu_read_unlock(); remote_cpus_update(cp, NULL, tmp->new_cpus, tmp); rcu_read_lock(); /* Remote partition may be invalidated */ new_prs = cp->partition_root_state; remote = (new_prs == old_prs); } if (remote || (is_partition_valid(parent) && is_partition_valid(cp))) compute_partition_effective_cpumask(cp, tmp->new_cpus); else compute_effective_cpumask(tmp->new_cpus, cp, parent); if (remote) goto get_css; /* Ready to update cpuset data */ /* * A partition with no effective_cpus is allowed as long as * there is no task associated with it. Call * update_parent_effective_cpumask() to check it. 
*/ if (is_partition_valid(cp) && cpumask_empty(tmp->new_cpus)) { update_parent = true; goto update_parent_effective; } /* * If it becomes empty, inherit the effective mask of the * parent, which is guaranteed to have some CPUs unless * it is a partition root that has explicitly distributed * out all its CPUs. */ if (is_in_v2_mode() && !remote && cpumask_empty(tmp->new_cpus)) cpumask_copy(tmp->new_cpus, parent->effective_cpus); /* * Skip the whole subtree if * 1) the cpumask remains the same, * 2) has no partition root state, * 3) force flag not set, and * 4) for v2 load balance state same as its parent. */ if (!cp->partition_root_state && !force && cpumask_equal(tmp->new_cpus, cp->effective_cpus) && (!cpuset_v2() || (is_sched_load_balance(parent) == is_sched_load_balance(cp)))) { pos_css = css_rightmost_descendant(pos_css); continue; } update_parent_effective: /* * update_parent_effective_cpumask() should have been called * for cs already in update_cpumask(). We should also call * cpuset_update_tasks_cpumask() again for tasks in the parent * cpuset if the parent's effective_cpus changes. */ if ((cp != cs) && old_prs) { switch (parent->partition_root_state) { case PRS_ROOT: case PRS_ISOLATED: update_parent = true; break; default: /* * When parent is not a partition root or is * invalid, child partition roots become * invalid too. */ if (is_partition_valid(cp)) new_prs = -cp->partition_root_state; WRITE_ONCE(cp->prs_err, is_partition_invalid(parent) ? PERR_INVPARENT : PERR_NOTPART); break; } } get_css: if (!css_tryget_online(&cp->css)) continue; rcu_read_unlock(); if (update_parent) { update_parent_effective_cpumask(cp, partcmd_update, NULL, tmp); /* * The cpuset partition_root_state may become * invalid. Capture it. */ new_prs = cp->partition_root_state; } spin_lock_irq(&callback_lock); cpumask_copy(cp->effective_cpus, tmp->new_cpus); cp->partition_root_state = new_prs; if (!cpumask_empty(cp->exclusive_cpus) && (cp != cs)) compute_effective_exclusive_cpumask(cp, NULL, NULL); /* * Make sure effective_xcpus is properly set for a valid * partition root. */ if ((new_prs > 0) && cpumask_empty(cp->exclusive_cpus)) cpumask_and(cp->effective_xcpus, cp->cpus_allowed, parent->effective_xcpus); else if (new_prs < 0) reset_partition_data(cp); spin_unlock_irq(&callback_lock); notify_partition_change(cp, old_prs); WARN_ON(!is_in_v2_mode() && !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); cpuset_update_tasks_cpumask(cp, cp->effective_cpus); /* * On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE * from parent if current cpuset isn't a valid partition root * and their load balance states differ. */ if (cpuset_v2() && !is_partition_valid(cp) && (is_sched_load_balance(parent) != is_sched_load_balance(cp))) { if (is_sched_load_balance(parent)) set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags); else clear_bit(CS_SCHED_LOAD_BALANCE, &cp->flags); } /* * On legacy hierarchy, if the effective cpumask of any non- * empty cpuset is changed, we need to rebuild sched domains. * On default hierarchy, the cpuset needs to be a partition * root as well. 
*/ if (!cpumask_empty(cp->cpus_allowed) && is_sched_load_balance(cp) && (!cpuset_v2() || is_partition_valid(cp))) need_rebuild_sched_domains = true; rcu_read_lock(); css_put(&cp->css); } rcu_read_unlock(); if (need_rebuild_sched_domains) cpuset_force_rebuild(); } /** * update_sibling_cpumasks - Update siblings cpumasks * @parent: Parent cpuset * @cs: Current cpuset * @tmp: Temp variables */ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, struct tmpmasks *tmp) { struct cpuset *sibling; struct cgroup_subsys_state *pos_css; lockdep_assert_held(&cpuset_mutex); /* * Check all its siblings and call update_cpumasks_hier() * if their effective_cpus will need to be changed. * * It is possible a change in parent's effective_cpus * due to a change in a child partition's effective_xcpus will impact * its siblings even if they do not inherit parent's effective_cpus * directly. * * The update_cpumasks_hier() function may sleep. So we have to * release the RCU read lock before calling it. */ rcu_read_lock(); cpuset_for_each_child(sibling, pos_css, parent) { if (sibling == cs) continue; if (!is_partition_valid(sibling)) { compute_effective_cpumask(tmp->new_cpus, sibling, parent); if (cpumask_equal(tmp->new_cpus, sibling->effective_cpus)) continue; } else if (is_remote_partition(sibling)) { /* * Change in a sibling cpuset won't affect a remote * partition root. */ continue; } if (!css_tryget_online(&sibling->css)) continue; rcu_read_unlock(); update_cpumasks_hier(sibling, tmp, false); rcu_read_lock(); css_put(&sibling->css); } rcu_read_unlock(); } /** * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it * @cs: the cpuset to consider * @trialcs: trial cpuset * @buf: buffer of cpu numbers written to this cpuset */ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, const char *buf) { int retval; struct tmpmasks tmp; struct cpuset *parent = parent_cs(cs); bool invalidate = false; bool force = false; int old_prs = cs->partition_root_state; /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */ if (cs == &top_cpuset) return -EACCES; /* * An empty cpus_allowed is ok only if the cpuset has no tasks. * Since cpulist_parse() fails on an empty mask, we special case * that parsing. The validate_change() call ensures that cpusets * with tasks have cpus. */ if (!*buf) { cpumask_clear(trialcs->cpus_allowed); if (cpumask_empty(trialcs->exclusive_cpus)) cpumask_clear(trialcs->effective_xcpus); } else { retval = cpulist_parse(buf, trialcs->cpus_allowed); if (retval < 0) return retval; if (!cpumask_subset(trialcs->cpus_allowed, top_cpuset.cpus_allowed)) return -EINVAL; /* * When exclusive_cpus isn't explicitly set, it is constrained * by cpus_allowed and parent's effective_xcpus. Otherwise, * trialcs->effective_xcpus is used as a temporary cpumask * for checking validity of the partition root. 
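 *
 * e.g. a write of "2-5" to cpuset.cpus arrives here in @buf and has
 * already been parsed into trialcs->cpus_allowed by cpulist_parse()
 * above.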
*/ trialcs->partition_root_state = PRS_MEMBER; if (!cpumask_empty(trialcs->exclusive_cpus) || is_partition_valid(cs)) compute_effective_exclusive_cpumask(trialcs, NULL, cs); } /* Nothing to do if the cpus didn't change */ if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed)) return 0; if (alloc_cpumasks(NULL, &tmp)) return -ENOMEM; if (old_prs) { if (is_partition_valid(cs) && cpumask_empty(trialcs->effective_xcpus)) { invalidate = true; cs->prs_err = PERR_INVCPUS; } else if (prstate_housekeeping_conflict(old_prs, trialcs->effective_xcpus)) { invalidate = true; cs->prs_err = PERR_HKEEPING; } else if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus)) { invalidate = true; cs->prs_err = PERR_NOCPUS; } } /* * Check all the descendants in update_cpumasks_hier() if * effective_xcpus is to be changed. */ force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus); retval = validate_change(cs, trialcs); if ((retval == -EINVAL) && cpuset_v2()) { struct cgroup_subsys_state *css; struct cpuset *cp; /* * The -EINVAL error code indicates that partition sibling * CPU exclusivity rule has been violated. We still allow * the cpumask change to proceed while invalidating the * partition. However, any conflicting sibling partitions * have to be marked as invalid too. */ invalidate = true; rcu_read_lock(); cpuset_for_each_child(cp, css, parent) { struct cpumask *xcpus = user_xcpus(trialcs); if (is_partition_valid(cp) && cpumask_intersects(xcpus, cp->effective_xcpus)) { rcu_read_unlock(); update_parent_effective_cpumask(cp, partcmd_invalidate, NULL, &tmp); rcu_read_lock(); } } rcu_read_unlock(); retval = 0; } if (retval < 0) goto out_free; if (is_partition_valid(cs) || (is_partition_invalid(cs) && !invalidate)) { struct cpumask *xcpus = trialcs->effective_xcpus; if (cpumask_empty(xcpus) && is_partition_invalid(cs)) xcpus = trialcs->cpus_allowed; /* * Call remote_cpus_update() to handle valid remote partition */ if (is_remote_partition(cs)) remote_cpus_update(cs, NULL, xcpus, &tmp); else if (invalidate) update_parent_effective_cpumask(cs, partcmd_invalidate, NULL, &tmp); else update_parent_effective_cpumask(cs, partcmd_update, xcpus, &tmp); } spin_lock_irq(&callback_lock); cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus); if ((old_prs > 0) && !is_partition_valid(cs)) reset_partition_data(cs); spin_unlock_irq(&callback_lock); /* effective_cpus/effective_xcpus will be updated here */ update_cpumasks_hier(cs, &tmp, force); /* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */ if (cs->partition_root_state) update_partition_sd_lb(cs, old_prs); out_free: free_cpumasks(NULL, &tmp); return retval; } /** * update_exclusive_cpumask - update the exclusive_cpus mask of a cpuset * @cs: the cpuset to consider * @trialcs: trial cpuset * @buf: buffer of cpu numbers written to this cpuset * * The tasks' cpumask will be updated if cs is a valid partition root. 
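 *
 * This handles writes to the cgroup v2 "cpuset.cpus.exclusive" file,
 * e.g. "echo 4-7 > cpuset.cpus.exclusive".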
*/ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs, const char *buf) { int retval; struct tmpmasks tmp; struct cpuset *parent = parent_cs(cs); bool invalidate = false; bool force = false; int old_prs = cs->partition_root_state; if (!*buf) { cpumask_clear(trialcs->exclusive_cpus); cpumask_clear(trialcs->effective_xcpus); } else { retval = cpulist_parse(buf, trialcs->exclusive_cpus); if (retval < 0) return retval; } /* Nothing to do if the CPUs didn't change */ if (cpumask_equal(cs->exclusive_cpus, trialcs->exclusive_cpus)) return 0; if (*buf) { trialcs->partition_root_state = PRS_MEMBER; /* * Reject the change if there is exclusive CPUs conflict with * the siblings. */ if (compute_effective_exclusive_cpumask(trialcs, NULL, cs)) return -EINVAL; } /* * Check all the descendants in update_cpumasks_hier() if * effective_xcpus is to be changed. */ force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus); retval = validate_change(cs, trialcs); if (retval) return retval; if (alloc_cpumasks(NULL, &tmp)) return -ENOMEM; if (old_prs) { if (cpumask_empty(trialcs->effective_xcpus)) { invalidate = true; cs->prs_err = PERR_INVCPUS; } else if (prstate_housekeeping_conflict(old_prs, trialcs->effective_xcpus)) { invalidate = true; cs->prs_err = PERR_HKEEPING; } else if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus)) { invalidate = true; cs->prs_err = PERR_NOCPUS; } if (is_remote_partition(cs)) { if (invalidate) remote_partition_disable(cs, &tmp); else remote_cpus_update(cs, trialcs->exclusive_cpus, trialcs->effective_xcpus, &tmp); } else if (invalidate) { update_parent_effective_cpumask(cs, partcmd_invalidate, NULL, &tmp); } else { update_parent_effective_cpumask(cs, partcmd_update, trialcs->effective_xcpus, &tmp); } } spin_lock_irq(&callback_lock); cpumask_copy(cs->exclusive_cpus, trialcs->exclusive_cpus); cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus); if ((old_prs > 0) && !is_partition_valid(cs)) reset_partition_data(cs); spin_unlock_irq(&callback_lock); /* * Call update_cpumasks_hier() to update effective_cpus/effective_xcpus * of the subtree when it is a valid partition root or effective_xcpus * is updated. */ if (is_partition_valid(cs) || force) update_cpumasks_hier(cs, &tmp, force); /* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */ if (cs->partition_root_state) update_partition_sd_lb(cs, old_prs); free_cpumasks(NULL, &tmp); return 0; } /* * Migrate memory region from one set of nodes to another. This is * performed asynchronously as it can be called from process migration path * holding locks involved in process management. All mm migrations are * performed in the queued order and can be waited for by flushing * cpuset_migrate_mm_wq. 
*/ struct cpuset_migrate_mm_work { struct work_struct work; struct mm_struct *mm; nodemask_t from; nodemask_t to; }; static void cpuset_migrate_mm_workfn(struct work_struct *work) { struct cpuset_migrate_mm_work *mwork = container_of(work, struct cpuset_migrate_mm_work, work); /* on a wq worker, no need to worry about %current's mems_allowed */ do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL); mmput(mwork->mm); kfree(mwork); } static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, const nodemask_t *to) { struct cpuset_migrate_mm_work *mwork; if (nodes_equal(*from, *to)) { mmput(mm); return; } mwork = kzalloc(sizeof(*mwork), GFP_KERNEL); if (mwork) { mwork->mm = mm; mwork->from = *from; mwork->to = *to; INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn); queue_work(cpuset_migrate_mm_wq, &mwork->work); } else { mmput(mm); } } static void cpuset_post_attach(void) { flush_workqueue(cpuset_migrate_mm_wq); } /* * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy * @tsk: the task to change * @newmems: new nodes that the task will be set * * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed * and rebind an eventual tasks' mempolicy. If the task is allocating in * parallel, it might temporarily see an empty intersection, which results in * a seqlock check and retry before OOM or allocation failure. */ static void cpuset_change_task_nodemask(struct task_struct *tsk, nodemask_t *newmems) { task_lock(tsk); local_irq_disable(); write_seqcount_begin(&tsk->mems_allowed_seq); nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems); mpol_rebind_task(tsk, newmems); tsk->mems_allowed = *newmems; write_seqcount_end(&tsk->mems_allowed_seq); local_irq_enable(); task_unlock(tsk); } static void *cpuset_being_rebound; /** * cpuset_update_tasks_nodemask - Update the nodemasks of tasks in the cpuset. * @cs: the cpuset in which each task's mems_allowed mask needs to be changed * * Iterate through each task of @cs updating its mems_allowed to the * effective cpuset's. As this function is called with cpuset_mutex held, * cpuset membership stays stable. */ void cpuset_update_tasks_nodemask(struct cpuset *cs) { static nodemask_t newmems; /* protected by cpuset_mutex */ struct css_task_iter it; struct task_struct *task; cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ guarantee_online_mems(cs, &newmems); /* * The mpol_rebind_mm() call takes mmap_lock, which we couldn't * take while holding tasklist_lock. Forks can happen - the * mpol_dup() cpuset_being_rebound check will catch such forks, * and rebind their vma mempolicies too. Because we still hold * the global cpuset_mutex, we know that no other rebind effort * will be contending for the global variable cpuset_being_rebound. * It's ok if we rebind the same mm twice; mpol_rebind_mm() * is idempotent. Also migrate pages in each mm to new nodes. */ css_task_iter_start(&cs->css, 0, &it); while ((task = css_task_iter_next(&it))) { struct mm_struct *mm; bool migrate; cpuset_change_task_nodemask(task, &newmems); mm = get_task_mm(task); if (!mm) continue; migrate = is_memory_migrate(cs); mpol_rebind_mm(mm, &cs->mems_allowed); if (migrate) cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); else mmput(mm); } css_task_iter_end(&it); /* * All the tasks' nodemasks have been updated, update * cs->old_mems_allowed. */ cs->old_mems_allowed = newmems; /* We're done rebinding vmas to this cpuset's new mems_allowed. 
*/ cpuset_being_rebound = NULL; } /* * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree * @cs: the cpuset to consider * @new_mems: a temp variable for calculating new effective_mems * * When configured nodemask is changed, the effective nodemasks of this cpuset * and all its descendants need to be updated. * * On legacy hierarchy, effective_mems will be the same with mems_allowed. * * Called with cpuset_mutex held */ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) { struct cpuset *cp; struct cgroup_subsys_state *pos_css; rcu_read_lock(); cpuset_for_each_descendant_pre(cp, pos_css, cs) { struct cpuset *parent = parent_cs(cp); nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems); /* * If it becomes empty, inherit the effective mask of the * parent, which is guaranteed to have some MEMs. */ if (is_in_v2_mode() && nodes_empty(*new_mems)) *new_mems = parent->effective_mems; /* Skip the whole subtree if the nodemask remains the same. */ if (nodes_equal(*new_mems, cp->effective_mems)) { pos_css = css_rightmost_descendant(pos_css); continue; } if (!css_tryget_online(&cp->css)) continue; rcu_read_unlock(); spin_lock_irq(&callback_lock); cp->effective_mems = *new_mems; spin_unlock_irq(&callback_lock); WARN_ON(!is_in_v2_mode() && !nodes_equal(cp->mems_allowed, cp->effective_mems)); cpuset_update_tasks_nodemask(cp); rcu_read_lock(); css_put(&cp->css); } rcu_read_unlock(); } /* * Handle user request to change the 'mems' memory placement * of a cpuset. Needs to validate the request, update the * cpusets mems_allowed, and for each task in the cpuset, * update mems_allowed and rebind task's mempolicy and any vma * mempolicies and if the cpuset is marked 'memory_migrate', * migrate the tasks pages to the new memory. * * Call with cpuset_mutex held. May take callback_lock during call. * Will take tasklist_lock, scan tasklist for tasks in cpuset cs, * lock each such tasks mm->mmap_lock, scan its vma's and rebind * their mempolicies to the cpusets new mems_allowed. */ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, const char *buf) { int retval; /* * top_cpuset.mems_allowed tracks node_stats[N_MEMORY]; * it's read-only */ if (cs == &top_cpuset) { retval = -EACCES; goto done; } /* * An empty mems_allowed is ok iff there are no tasks in the cpuset. * Since nodelist_parse() fails on an empty mask, we special case * that parsing. The validate_change() call ensures that cpusets * with tasks have memory. 
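 *
 * e.g. "echo 0-1 > cpuset.mems" reaches this point with @buf set to
 * "0-1", which nodelist_parse() below turns into the trial nodemask.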
*/ if (!*buf) { nodes_clear(trialcs->mems_allowed); } else { retval = nodelist_parse(buf, trialcs->mems_allowed); if (retval < 0) goto done; if (!nodes_subset(trialcs->mems_allowed, top_cpuset.mems_allowed)) { retval = -EINVAL; goto done; } } if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) { retval = 0; /* Too easy - nothing to do */ goto done; } retval = validate_change(cs, trialcs); if (retval < 0) goto done; check_insane_mems_config(&trialcs->mems_allowed); spin_lock_irq(&callback_lock); cs->mems_allowed = trialcs->mems_allowed; spin_unlock_irq(&callback_lock); /* use trialcs->mems_allowed as a temp variable */ update_nodemasks_hier(cs, &trialcs->mems_allowed); done: return retval; } bool current_cpuset_is_being_rebound(void) { bool ret; rcu_read_lock(); ret = task_cs(current) == cpuset_being_rebound; rcu_read_unlock(); return ret; } /* * cpuset_update_flag - read a 0 or a 1 in a file and update associated flag * bit: the bit to update (see cpuset_flagbits_t) * cs: the cpuset to update * turning_on: whether the flag is being set or cleared * * Call with cpuset_mutex held. */ int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs, int turning_on) { struct cpuset *trialcs; int balance_flag_changed; int spread_flag_changed; int err; trialcs = alloc_trial_cpuset(cs); if (!trialcs) return -ENOMEM; if (turning_on) set_bit(bit, &trialcs->flags); else clear_bit(bit, &trialcs->flags); err = validate_change(cs, trialcs); if (err < 0) goto out; balance_flag_changed = (is_sched_load_balance(cs) != is_sched_load_balance(trialcs)); spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) || (is_spread_page(cs) != is_spread_page(trialcs))); spin_lock_irq(&callback_lock); cs->flags = trialcs->flags; spin_unlock_irq(&callback_lock); if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) { if (cpuset_v2()) cpuset_force_rebuild(); else rebuild_sched_domains_locked(); } if (spread_flag_changed) cpuset1_update_tasks_flags(cs); out: free_cpuset(trialcs); return err; } /** * update_prstate - update partition_root_state * @cs: the cpuset to update * @new_prs: new partition root state * Return: 0 if successful, != 0 if error * * Call with cpuset_mutex held. */ static int update_prstate(struct cpuset *cs, int new_prs) { int err = PERR_NONE, old_prs = cs->partition_root_state; struct cpuset *parent = parent_cs(cs); struct tmpmasks tmpmask; bool isolcpus_updated = false; if (old_prs == new_prs) return 0; /* * Treat a previously invalid partition root as if it is a "member". */ if (new_prs && is_prs_invalid(old_prs)) old_prs = PRS_MEMBER; if (alloc_cpumasks(NULL, &tmpmask)) return -ENOMEM; err = update_partition_exclusive_flag(cs, new_prs); if (err) goto out; if (!old_prs) { /* * cpus_allowed and exclusive_cpus cannot be both empty. */ if (xcpus_empty(cs)) { err = PERR_CPUSEMPTY; goto out; } /* * We don't support the creation of a new local partition with * a remote partition underneath it. This unsupported * setting can happen only if parent is the top_cpuset because * a remote partition cannot be created underneath an existing * local or remote partition. */ if ((parent == &top_cpuset) && cpumask_intersects(cs->exclusive_cpus, subpartitions_cpus)) { err = PERR_REMOTE; goto out; } /* * If parent is valid partition, enable local partiion. * Otherwise, enable a remote partition. */ if (is_partition_valid(parent)) { enum partition_cmd cmd = (new_prs == PRS_ROOT) ? 
partcmd_enable : partcmd_enablei; err = update_parent_effective_cpumask(cs, cmd, NULL, &tmpmask); } else { err = remote_partition_enable(cs, new_prs, &tmpmask); } } else if (old_prs && new_prs) { /* * A change in load balance state only, no change in cpumasks. * Need to update isolated_cpus. */ isolcpus_updated = true; } else { /* * Switching back to member is always allowed even if it * disables child partitions. */ if (is_remote_partition(cs)) remote_partition_disable(cs, &tmpmask); else update_parent_effective_cpumask(cs, partcmd_disable, NULL, &tmpmask); /* * Invalidation of child partitions will be done in * update_cpumasks_hier(). */ } out: /* * Make partition invalid & disable CS_CPU_EXCLUSIVE if an error * happens. */ if (err) { new_prs = -new_prs; update_partition_exclusive_flag(cs, new_prs); } spin_lock_irq(&callback_lock); cs->partition_root_state = new_prs; WRITE_ONCE(cs->prs_err, err); if (!is_partition_valid(cs)) reset_partition_data(cs); else if (isolcpus_updated) isolated_cpus_update(old_prs, new_prs, cs->effective_xcpus); spin_unlock_irq(&callback_lock); update_unbound_workqueue_cpumask(isolcpus_updated); /* Force update if switching back to member & update effective_xcpus */ update_cpumasks_hier(cs, &tmpmask, !new_prs); /* A newly created partition must have effective_xcpus set */ WARN_ON_ONCE(!old_prs && (new_prs > 0) && cpumask_empty(cs->effective_xcpus)); /* Update sched domains and load balance flag */ update_partition_sd_lb(cs, old_prs); notify_partition_change(cs, old_prs); if (force_sd_rebuild) rebuild_sched_domains_locked(); free_cpumasks(NULL, &tmpmask); return 0; } static struct cpuset *cpuset_attach_old_cs; /* * Check to see if a cpuset can accept a new task * For v1, cpus_allowed and mems_allowed can't be empty. * For v2, effective_cpus can't be empty. * Note that in v1, effective_cpus = cpus_allowed. */ static int cpuset_can_attach_check(struct cpuset *cs) { if (cpumask_empty(cs->effective_cpus) || (!is_in_v2_mode() && nodes_empty(cs->mems_allowed))) return -ENOSPC; return 0; } static void reset_migrate_dl_data(struct cpuset *cs) { cs->nr_migrate_dl_tasks = 0; cs->sum_migrate_dl_bw = 0; } /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */ static int cpuset_can_attach(struct cgroup_taskset *tset) { struct cgroup_subsys_state *css; struct cpuset *cs, *oldcs; struct task_struct *task; bool cpus_updated, mems_updated; int ret; /* used later by cpuset_attach() */ cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css)); oldcs = cpuset_attach_old_cs; cs = css_cs(css); mutex_lock(&cpuset_mutex); /* Check to see if task is allowed in the cpuset */ ret = cpuset_can_attach_check(cs); if (ret) goto out_unlock; cpus_updated = !cpumask_equal(cs->effective_cpus, oldcs->effective_cpus); mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems); cgroup_taskset_for_each(task, css, tset) { ret = task_can_attach(task); if (ret) goto out_unlock; /* * Skip rights over task check in v2 when nothing changes, * migration permission derives from hierarchy ownership in * cgroup_procs_write_permission()). 
*/ if (!cpuset_v2() || (cpus_updated || mems_updated)) { ret = security_task_setscheduler(task); if (ret) goto out_unlock; } if (dl_task(task)) { cs->nr_migrate_dl_tasks++; cs->sum_migrate_dl_bw += task->dl.dl_bw; } } if (!cs->nr_migrate_dl_tasks) goto out_success; if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) { int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus); if (unlikely(cpu >= nr_cpu_ids)) { reset_migrate_dl_data(cs); ret = -EINVAL; goto out_unlock; } ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw); if (ret) { reset_migrate_dl_data(cs); goto out_unlock; } } out_success: /* * Mark attach is in progress. This makes validate_change() fail * changes which zero cpus/mems_allowed. */ cs->attach_in_progress++; out_unlock: mutex_unlock(&cpuset_mutex); return ret; } static void cpuset_cancel_attach(struct cgroup_taskset *tset) { struct cgroup_subsys_state *css; struct cpuset *cs; cgroup_taskset_first(tset, &css); cs = css_cs(css); mutex_lock(&cpuset_mutex); dec_attach_in_progress_locked(cs); if (cs->nr_migrate_dl_tasks) { int cpu = cpumask_any(cs->effective_cpus); dl_bw_free(cpu, cs->sum_migrate_dl_bw); reset_migrate_dl_data(cs); } mutex_unlock(&cpuset_mutex); } /* * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach_task() * but we can't allocate it dynamically there. Define it global and * allocate from cpuset_init(). */ static cpumask_var_t cpus_attach; static nodemask_t cpuset_attach_nodemask_to; static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task) { lockdep_assert_held(&cpuset_mutex); if (cs != &top_cpuset) guarantee_online_cpus(task, cpus_attach); else cpumask_andnot(cpus_attach, task_cpu_possible_mask(task), subpartitions_cpus); /* * can_attach beforehand should guarantee that this doesn't * fail. TODO: have a better way to handle failure here */ WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach)); cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to); cpuset1_update_task_spread_flags(cs, task); } static void cpuset_attach(struct cgroup_taskset *tset) { struct task_struct *task; struct task_struct *leader; struct cgroup_subsys_state *css; struct cpuset *cs; struct cpuset *oldcs = cpuset_attach_old_cs; bool cpus_updated, mems_updated; cgroup_taskset_first(tset, &css); cs = css_cs(css); lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */ mutex_lock(&cpuset_mutex); cpus_updated = !cpumask_equal(cs->effective_cpus, oldcs->effective_cpus); mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems); /* * In the default hierarchy, enabling cpuset in the child cgroups * will trigger a number of cpuset_attach() calls with no change * in effective cpus and mems. In that case, we can optimize out * by skipping the task iteration and update. */ if (cpuset_v2() && !cpus_updated && !mems_updated) { cpuset_attach_nodemask_to = cs->effective_mems; goto out; } guarantee_online_mems(cs, &cpuset_attach_nodemask_to); cgroup_taskset_for_each(task, css, tset) cpuset_attach_task(cs, task); /* * Change mm for all threadgroup leaders. This is expensive and may * sleep and should be moved outside migration path proper. Skip it * if there is no change in effective_mems and CS_MEMORY_MIGRATE is * not set. 
*/ cpuset_attach_nodemask_to = cs->effective_mems; if (!is_memory_migrate(cs) && !mems_updated) goto out; cgroup_taskset_for_each_leader(leader, css, tset) { struct mm_struct *mm = get_task_mm(leader); if (mm) { mpol_rebind_mm(mm, &cpuset_attach_nodemask_to); /* * old_mems_allowed is the same with mems_allowed * here, except if this task is being moved * automatically due to hotplug. In that case * @mems_allowed has been updated and is empty, so * @old_mems_allowed is the right nodesets that we * migrate mm from. */ if (is_memory_migrate(cs)) cpuset_migrate_mm(mm, &oldcs->old_mems_allowed, &cpuset_attach_nodemask_to); else mmput(mm); } } out: cs->old_mems_allowed = cpuset_attach_nodemask_to; if (cs->nr_migrate_dl_tasks) { cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks; oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks; reset_migrate_dl_data(cs); } dec_attach_in_progress_locked(cs); mutex_unlock(&cpuset_mutex); } /* * Common handling for a write to a "cpus" or "mems" file. */ ssize_t cpuset_write_resmask(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { struct cpuset *cs = css_cs(of_css(of)); struct cpuset *trialcs; int retval = -ENODEV; buf = strstrip(buf); cpus_read_lock(); mutex_lock(&cpuset_mutex); if (!is_cpuset_online(cs)) goto out_unlock; trialcs = alloc_trial_cpuset(cs); if (!trialcs) { retval = -ENOMEM; goto out_unlock; } switch (of_cft(of)->private) { case FILE_CPULIST: retval = update_cpumask(cs, trialcs, buf); break; case FILE_EXCLUSIVE_CPULIST: retval = update_exclusive_cpumask(cs, trialcs, buf); break; case FILE_MEMLIST: retval = update_nodemask(cs, trialcs, buf); break; default: retval = -EINVAL; break; } free_cpuset(trialcs); if (force_sd_rebuild) rebuild_sched_domains_locked(); out_unlock: mutex_unlock(&cpuset_mutex); cpus_read_unlock(); flush_workqueue(cpuset_migrate_mm_wq); return retval ?: nbytes; } /* * These ascii lists should be read in a single call, by using a user * buffer large enough to hold the entire map. If read in smaller * chunks, there is no guarantee of atomicity. Since the display format * used, list of ranges of sequential numbers, is variable length, * and since these maps can change value dynamically, one could read * gibberish by doing partial reads while a list was changing. 
*/ int cpuset_common_seq_show(struct seq_file *sf, void *v) { struct cpuset *cs = css_cs(seq_css(sf)); cpuset_filetype_t type = seq_cft(sf)->private; int ret = 0; spin_lock_irq(&callback_lock); switch (type) { case FILE_CPULIST: seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed)); break; case FILE_MEMLIST: seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed)); break; case FILE_EFFECTIVE_CPULIST: seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus)); break; case FILE_EFFECTIVE_MEMLIST: seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems)); break; case FILE_EXCLUSIVE_CPULIST: seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->exclusive_cpus)); break; case FILE_EFFECTIVE_XCPULIST: seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_xcpus)); break; case FILE_SUBPARTS_CPULIST: seq_printf(sf, "%*pbl\n", cpumask_pr_args(subpartitions_cpus)); break; case FILE_ISOLATED_CPULIST: seq_printf(sf, "%*pbl\n", cpumask_pr_args(isolated_cpus)); break; default: ret = -EINVAL; } spin_unlock_irq(&callback_lock); return ret; } static int cpuset_partition_show(struct seq_file *seq, void *v) { struct cpuset *cs = css_cs(seq_css(seq)); const char *err, *type = NULL; switch (cs->partition_root_state) { case PRS_ROOT: seq_puts(seq, "root\n"); break; case PRS_ISOLATED: seq_puts(seq, "isolated\n"); break; case PRS_MEMBER: seq_puts(seq, "member\n"); break; case PRS_INVALID_ROOT: type = "root"; fallthrough; case PRS_INVALID_ISOLATED: if (!type) type = "isolated"; err = perr_strings[READ_ONCE(cs->prs_err)]; if (err) seq_printf(seq, "%s invalid (%s)\n", type, err); else seq_printf(seq, "%s invalid\n", type); break; } return 0; } static ssize_t cpuset_partition_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { struct cpuset *cs = css_cs(of_css(of)); int val; int retval = -ENODEV; buf = strstrip(buf); if (!strcmp(buf, "root")) val = PRS_ROOT; else if (!strcmp(buf, "member")) val = PRS_MEMBER; else if (!strcmp(buf, "isolated")) val = PRS_ISOLATED; else return -EINVAL; css_get(&cs->css); cpus_read_lock(); mutex_lock(&cpuset_mutex); if (is_cpuset_online(cs)) retval = update_prstate(cs, val); mutex_unlock(&cpuset_mutex); cpus_read_unlock(); css_put(&cs->css); return retval ?: nbytes; } /* * This is currently a minimal set for the default hierarchy. It can be * expanded later on by migrating more features and control files from v1. 
*/ static struct cftype dfl_files[] = { { .name = "cpus", .seq_show = cpuset_common_seq_show, .write = cpuset_write_resmask, .max_write_len = (100U + 6 * NR_CPUS), .private = FILE_CPULIST, .flags = CFTYPE_NOT_ON_ROOT, }, { .name = "mems", .seq_show = cpuset_common_seq_show, .write = cpuset_write_resmask, .max_write_len = (100U + 6 * MAX_NUMNODES), .private = FILE_MEMLIST, .flags = CFTYPE_NOT_ON_ROOT, }, { .name = "cpus.effective", .seq_show = cpuset_common_seq_show, .private = FILE_EFFECTIVE_CPULIST, }, { .name = "mems.effective", .seq_show = cpuset_common_seq_show, .private = FILE_EFFECTIVE_MEMLIST, }, { .name = "cpus.partition", .seq_show = cpuset_partition_show, .write = cpuset_partition_write, .private = FILE_PARTITION_ROOT, .flags = CFTYPE_NOT_ON_ROOT, .file_offset = offsetof(struct cpuset, partition_file), }, { .name = "cpus.exclusive", .seq_show = cpuset_common_seq_show, .write = cpuset_write_resmask, .max_write_len = (100U + 6 * NR_CPUS), .private = FILE_EXCLUSIVE_CPULIST, .flags = CFTYPE_NOT_ON_ROOT, }, { .name = "cpus.exclusive.effective", .seq_show = cpuset_common_seq_show, .private = FILE_EFFECTIVE_XCPULIST, .flags = CFTYPE_NOT_ON_ROOT, }, { .name = "cpus.subpartitions", .seq_show = cpuset_common_seq_show, .private = FILE_SUBPARTS_CPULIST, .flags = CFTYPE_ONLY_ON_ROOT | CFTYPE_DEBUG, }, { .name = "cpus.isolated", .seq_show = cpuset_common_seq_show, .private = FILE_ISOLATED_CPULIST, .flags = CFTYPE_ONLY_ON_ROOT, }, { } /* terminate */ }; /** * cpuset_css_alloc - Allocate a cpuset css * @parent_css: Parent css of the control group that the new cpuset will be * part of * Return: cpuset css on success, -ENOMEM on failure. * * Allocate and initialize a new cpuset css, for non-NULL @parent_css, return * top cpuset css otherwise. */ static struct cgroup_subsys_state * cpuset_css_alloc(struct cgroup_subsys_state *parent_css) { struct cpuset *cs; if (!parent_css) return &top_cpuset.css; cs = kzalloc(sizeof(*cs), GFP_KERNEL); if (!cs) return ERR_PTR(-ENOMEM); if (alloc_cpumasks(cs, NULL)) { kfree(cs); return ERR_PTR(-ENOMEM); } __set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); fmeter_init(&cs->fmeter); cs->relax_domain_level = -1; INIT_LIST_HEAD(&cs->remote_sibling); /* Set CS_MEMORY_MIGRATE for default hierarchy */ if (cpuset_v2()) __set_bit(CS_MEMORY_MIGRATE, &cs->flags); return &cs->css; } static int cpuset_css_online(struct cgroup_subsys_state *css) { struct cpuset *cs = css_cs(css); struct cpuset *parent = parent_cs(cs); struct cpuset *tmp_cs; struct cgroup_subsys_state *pos_css; if (!parent) return 0; cpus_read_lock(); mutex_lock(&cpuset_mutex); set_bit(CS_ONLINE, &cs->flags); if (is_spread_page(parent)) set_bit(CS_SPREAD_PAGE, &cs->flags); if (is_spread_slab(parent)) set_bit(CS_SPREAD_SLAB, &cs->flags); /* * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated */ if (cpuset_v2() && !is_sched_load_balance(parent)) clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); cpuset_inc(); spin_lock_irq(&callback_lock); if (is_in_v2_mode()) { cpumask_copy(cs->effective_cpus, parent->effective_cpus); cs->effective_mems = parent->effective_mems; } spin_unlock_irq(&callback_lock); if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) goto out_unlock; /* * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is * set. This flag handling is implemented in cgroup core for * historical reasons - the flag may be specified during mount. 
* * Currently, if any sibling cpusets have exclusive cpus or mem, we * refuse to clone the configuration - thereby refusing the task to * be entered, and as a result refusing the sys_unshare() or * clone() which initiated it. If this becomes a problem for some * users who wish to allow that scenario, then this could be * changed to grant parent->cpus_allowed-sibling_cpus_exclusive * (and likewise for mems) to the new cgroup. */ rcu_read_lock(); cpuset_for_each_child(tmp_cs, pos_css, parent) { if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) { rcu_read_unlock(); goto out_unlock; } } rcu_read_unlock(); spin_lock_irq(&callback_lock); cs->mems_allowed = parent->mems_allowed; cs->effective_mems = parent->mems_allowed; cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); cpumask_copy(cs->effective_cpus, parent->cpus_allowed); spin_unlock_irq(&callback_lock); out_unlock: mutex_unlock(&cpuset_mutex); cpus_read_unlock(); return 0; } /* * If the cpuset being removed has its flag 'sched_load_balance' * enabled, then simulate turning sched_load_balance off, which * will call rebuild_sched_domains_locked(). That is not needed * in the default hierarchy where only changes in partition * will cause repartitioning. * * If the cpuset has the 'sched.partition' flag enabled, simulate * turning 'sched.partition" off. */ static void cpuset_css_offline(struct cgroup_subsys_state *css) { struct cpuset *cs = css_cs(css); cpus_read_lock(); mutex_lock(&cpuset_mutex); if (!cpuset_v2() && is_sched_load_balance(cs)) cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); cpuset_dec(); clear_bit(CS_ONLINE, &cs->flags); mutex_unlock(&cpuset_mutex); cpus_read_unlock(); } static void cpuset_css_killed(struct cgroup_subsys_state *css) { struct cpuset *cs = css_cs(css); cpus_read_lock(); mutex_lock(&cpuset_mutex); /* Reset valid partition back to member */ if (is_partition_valid(cs)) update_prstate(cs, PRS_MEMBER); mutex_unlock(&cpuset_mutex); cpus_read_unlock(); } static void cpuset_css_free(struct cgroup_subsys_state *css) { struct cpuset *cs = css_cs(css); free_cpuset(cs); } static void cpuset_bind(struct cgroup_subsys_state *root_css) { mutex_lock(&cpuset_mutex); spin_lock_irq(&callback_lock); if (is_in_v2_mode()) { cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask); cpumask_copy(top_cpuset.effective_xcpus, cpu_possible_mask); top_cpuset.mems_allowed = node_possible_map; } else { cpumask_copy(top_cpuset.cpus_allowed, top_cpuset.effective_cpus); top_cpuset.mems_allowed = top_cpuset.effective_mems; } spin_unlock_irq(&callback_lock); mutex_unlock(&cpuset_mutex); } /* * In case the child is cloned into a cpuset different from its parent, * additional checks are done to see if the move is allowed. */ static int cpuset_can_fork(struct task_struct *task, struct css_set *cset) { struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]); bool same_cs; int ret; rcu_read_lock(); same_cs = (cs == task_cs(current)); rcu_read_unlock(); if (same_cs) return 0; lockdep_assert_held(&cgroup_mutex); mutex_lock(&cpuset_mutex); /* Check to see if task is allowed in the cpuset */ ret = cpuset_can_attach_check(cs); if (ret) goto out_unlock; ret = task_can_attach(task); if (ret) goto out_unlock; ret = security_task_setscheduler(task); if (ret) goto out_unlock; /* * Mark attach is in progress. This makes validate_change() fail * changes which zero cpus/mems_allowed. 
*/ cs->attach_in_progress++; out_unlock: mutex_unlock(&cpuset_mutex); return ret; } static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset) { struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]); bool same_cs; rcu_read_lock(); same_cs = (cs == task_cs(current)); rcu_read_unlock(); if (same_cs) return; dec_attach_in_progress(cs); } /* * Make sure the new task conform to the current state of its parent, * which could have been changed by cpuset just after it inherits the * state from the parent and before it sits on the cgroup's task list. */ static void cpuset_fork(struct task_struct *task) { struct cpuset *cs; bool same_cs; rcu_read_lock(); cs = task_cs(task); same_cs = (cs == task_cs(current)); rcu_read_unlock(); if (same_cs) { if (cs == &top_cpuset) return; set_cpus_allowed_ptr(task, current->cpus_ptr); task->mems_allowed = current->mems_allowed; return; } /* CLONE_INTO_CGROUP */ mutex_lock(&cpuset_mutex); guarantee_online_mems(cs, &cpuset_attach_nodemask_to); cpuset_attach_task(cs, task); dec_attach_in_progress_locked(cs); mutex_unlock(&cpuset_mutex); } struct cgroup_subsys cpuset_cgrp_subsys = { .css_alloc = cpuset_css_alloc, .css_online = cpuset_css_online, .css_offline = cpuset_css_offline, .css_killed = cpuset_css_killed, .css_free = cpuset_css_free, .can_attach = cpuset_can_attach, .cancel_attach = cpuset_cancel_attach, .attach = cpuset_attach, .post_attach = cpuset_post_attach, .bind = cpuset_bind, .can_fork = cpuset_can_fork, .cancel_fork = cpuset_cancel_fork, .fork = cpuset_fork, #ifdef CONFIG_CPUSETS_V1 .legacy_cftypes = cpuset1_files, #endif .dfl_cftypes = dfl_files, .early_init = true, .threaded = true, }; /** * cpuset_init - initialize cpusets at system boot * * Description: Initialize top_cpuset **/ int __init cpuset_init(void) { BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL)); BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL)); BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_xcpus, GFP_KERNEL)); BUG_ON(!alloc_cpumask_var(&top_cpuset.exclusive_cpus, GFP_KERNEL)); BUG_ON(!zalloc_cpumask_var(&subpartitions_cpus, GFP_KERNEL)); BUG_ON(!zalloc_cpumask_var(&isolated_cpus, GFP_KERNEL)); cpumask_setall(top_cpuset.cpus_allowed); nodes_setall(top_cpuset.mems_allowed); cpumask_setall(top_cpuset.effective_cpus); cpumask_setall(top_cpuset.effective_xcpus); cpumask_setall(top_cpuset.exclusive_cpus); nodes_setall(top_cpuset.effective_mems); fmeter_init(&top_cpuset.fmeter); INIT_LIST_HEAD(&remote_children); BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL)); have_boot_isolcpus = housekeeping_enabled(HK_TYPE_DOMAIN); if (have_boot_isolcpus) { BUG_ON(!alloc_cpumask_var(&boot_hk_cpus, GFP_KERNEL)); cpumask_copy(boot_hk_cpus, housekeeping_cpumask(HK_TYPE_DOMAIN)); cpumask_andnot(isolated_cpus, cpu_possible_mask, boot_hk_cpus); } return 0; } static void hotplug_update_tasks(struct cpuset *cs, struct cpumask *new_cpus, nodemask_t *new_mems, bool cpus_updated, bool mems_updated) { /* A partition root is allowed to have empty effective cpus */ if (cpumask_empty(new_cpus) && !is_partition_valid(cs)) cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus); if (nodes_empty(*new_mems)) *new_mems = parent_cs(cs)->effective_mems; spin_lock_irq(&callback_lock); cpumask_copy(cs->effective_cpus, new_cpus); cs->effective_mems = *new_mems; spin_unlock_irq(&callback_lock); if (cpus_updated) cpuset_update_tasks_cpumask(cs, new_cpus); if (mems_updated) cpuset_update_tasks_nodemask(cs); } void cpuset_force_rebuild(void) { force_sd_rebuild = true; } /** 
* cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug * @cs: cpuset in interest * @tmp: the tmpmasks structure pointer * * Compare @cs's cpu and mem masks against top_cpuset and if some have gone * offline, update @cs accordingly. If @cs ends up with no CPU or memory, * all its tasks are moved to the nearest ancestor with both resources. */ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) { static cpumask_t new_cpus; static nodemask_t new_mems; bool cpus_updated; bool mems_updated; bool remote; int partcmd = -1; struct cpuset *parent; retry: wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); mutex_lock(&cpuset_mutex); /* * We have raced with task attaching. We wait until attaching * is finished, so we won't attach a task to an empty cpuset. */ if (cs->attach_in_progress) { mutex_unlock(&cpuset_mutex); goto retry; } parent = parent_cs(cs); compute_effective_cpumask(&new_cpus, cs, parent); nodes_and(new_mems, cs->mems_allowed, parent->effective_mems); if (!tmp || !cs->partition_root_state) goto update_tasks; /* * Compute effective_cpus for valid partition root, may invalidate * child partition roots if necessary. */ remote = is_remote_partition(cs); if (remote || (is_partition_valid(cs) && is_partition_valid(parent))) compute_partition_effective_cpumask(cs, &new_cpus); if (remote && cpumask_empty(&new_cpus) && partition_is_populated(cs, NULL)) { cs->prs_err = PERR_HOTPLUG; remote_partition_disable(cs, tmp); compute_effective_cpumask(&new_cpus, cs, parent); remote = false; } /* * Force the partition to become invalid if either one of * the following conditions hold: * 1) empty effective cpus but not valid empty partition. * 2) parent is invalid or doesn't grant any cpus to child * partitions. */ if (is_local_partition(cs) && (!is_partition_valid(parent) || tasks_nocpu_error(parent, cs, &new_cpus))) partcmd = partcmd_invalidate; /* * On the other hand, an invalid partition root may be transitioned * back to a regular one. */ else if (is_partition_valid(parent) && is_partition_invalid(cs)) partcmd = partcmd_update; if (partcmd >= 0) { update_parent_effective_cpumask(cs, partcmd, NULL, tmp); if ((partcmd == partcmd_invalidate) || is_partition_valid(cs)) { compute_partition_effective_cpumask(cs, &new_cpus); cpuset_force_rebuild(); } } update_tasks: cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus); mems_updated = !nodes_equal(new_mems, cs->effective_mems); if (!cpus_updated && !mems_updated) goto unlock; /* Hotplug doesn't affect this cpuset */ if (mems_updated) check_insane_mems_config(&new_mems); if (is_in_v2_mode()) hotplug_update_tasks(cs, &new_cpus, &new_mems, cpus_updated, mems_updated); else cpuset1_hotplug_update_tasks(cs, &new_cpus, &new_mems, cpus_updated, mems_updated); unlock: mutex_unlock(&cpuset_mutex); } /** * cpuset_handle_hotplug - handle CPU/memory hot{,un}plug for a cpuset * * This function is called after either CPU or memory configuration has * changed and updates cpuset accordingly. The top_cpuset is always * synchronized to cpu_active_mask and N_MEMORY, which is necessary in * order to make cpusets transparent (of no affect) on systems that are * actively using CPU hotplug but making no active use of cpusets. * * Non-root cpusets are only affected by offlining. If any CPUs or memory * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on * all descendants. * * Note that CPU offlining during suspend is ignored. We don't modify * cpusets across suspend/resume cycles at all. 
* * CPU / memory hotplug is handled synchronously. */ static void cpuset_handle_hotplug(void) { static cpumask_t new_cpus; static nodemask_t new_mems; bool cpus_updated, mems_updated; bool on_dfl = is_in_v2_mode(); struct tmpmasks tmp, *ptmp = NULL; if (on_dfl && !alloc_cpumasks(NULL, &tmp)) ptmp = &tmp; lockdep_assert_cpus_held(); mutex_lock(&cpuset_mutex); /* fetch the available cpus/mems and find out which changed how */ cpumask_copy(&new_cpus, cpu_active_mask); new_mems = node_states[N_MEMORY]; /* * If subpartitions_cpus is populated, it is likely that the check * below will produce a false positive on cpus_updated when the cpu * list isn't changed. It is extra work, but it is better to be safe. */ cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus) || !cpumask_empty(subpartitions_cpus); mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems); /* For v1, synchronize cpus_allowed to cpu_active_mask */ if (cpus_updated) { cpuset_force_rebuild(); spin_lock_irq(&callback_lock); if (!on_dfl) cpumask_copy(top_cpuset.cpus_allowed, &new_cpus); /* * Make sure that CPUs allocated to child partitions * do not show up in effective_cpus. If no CPU is left, * we clear the subpartitions_cpus & let the child partitions * fight for the CPUs again. */ if (!cpumask_empty(subpartitions_cpus)) { if (cpumask_subset(&new_cpus, subpartitions_cpus)) { top_cpuset.nr_subparts = 0; cpumask_clear(subpartitions_cpus); } else { cpumask_andnot(&new_cpus, &new_cpus, subpartitions_cpus); } } cpumask_copy(top_cpuset.effective_cpus, &new_cpus); spin_unlock_irq(&callback_lock); /* we don't mess with cpumasks of tasks in top_cpuset */ } /* synchronize mems_allowed to N_MEMORY */ if (mems_updated) { spin_lock_irq(&callback_lock); if (!on_dfl) top_cpuset.mems_allowed = new_mems; top_cpuset.effective_mems = new_mems; spin_unlock_irq(&callback_lock); cpuset_update_tasks_nodemask(&top_cpuset); } mutex_unlock(&cpuset_mutex); /* if cpus or mems changed, we need to propagate to descendants */ if (cpus_updated || mems_updated) { struct cpuset *cs; struct cgroup_subsys_state *pos_css; rcu_read_lock(); cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) { if (cs == &top_cpuset || !css_tryget_online(&cs->css)) continue; rcu_read_unlock(); cpuset_hotplug_update_tasks(cs, ptmp); rcu_read_lock(); css_put(&cs->css); } rcu_read_unlock(); } /* rebuild sched domains if necessary */ if (force_sd_rebuild) rebuild_sched_domains_cpuslocked(); free_cpumasks(NULL, ptmp); } void cpuset_update_active_cpus(void) { /* * We're inside cpu hotplug critical region which usually nests * inside cgroup synchronization. Bounce actual hotplug processing * to a work item to avoid reverse locking order. */ cpuset_handle_hotplug(); } /* * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY]. * Call this routine anytime after node_states[N_MEMORY] changes. * See cpuset_update_active_cpus() for CPU hotplug handling. */ static int cpuset_track_online_nodes(struct notifier_block *self, unsigned long action, void *arg) { cpuset_handle_hotplug(); return NOTIFY_OK; } /** * cpuset_init_smp - initialize cpus_allowed * * Description: Finish top cpuset after cpu, node maps are initialized */ void __init cpuset_init_smp(void) { /* * cpus_allowd/mems_allowed set to v2 values in the initial * cpuset_bind() call will be reset to v1 values in another * cpuset_bind() call when v1 cpuset is mounted. 
*/ top_cpuset.old_mems_allowed = top_cpuset.mems_allowed; cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask); top_cpuset.effective_mems = node_states[N_MEMORY]; hotplug_memory_notifier(cpuset_track_online_nodes, CPUSET_CALLBACK_PRI); cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0); BUG_ON(!cpuset_migrate_mm_wq); } /** * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset. * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. * @pmask: pointer to struct cpumask variable to receive cpus_allowed set. * * Description: Returns the cpumask_var_t cpus_allowed of the cpuset * attached to the specified @tsk. Guaranteed to return some non-empty * subset of cpu_online_mask, even if this means going outside the * tasks cpuset, except when the task is in the top cpuset. **/ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) { unsigned long flags; struct cpuset *cs; spin_lock_irqsave(&callback_lock, flags); rcu_read_lock(); cs = task_cs(tsk); if (cs != &top_cpuset) guarantee_online_cpus(tsk, pmask); /* * Tasks in the top cpuset won't get update to their cpumasks * when a hotplug online/offline event happens. So we include all * offline cpus in the allowed cpu list. */ if ((cs == &top_cpuset) || cpumask_empty(pmask)) { const struct cpumask *possible_mask = task_cpu_possible_mask(tsk); /* * We first exclude cpus allocated to partitions. If there is no * allowable online cpu left, we fall back to all possible cpus. */ cpumask_andnot(pmask, possible_mask, subpartitions_cpus); if (!cpumask_intersects(pmask, cpu_online_mask)) cpumask_copy(pmask, possible_mask); } rcu_read_unlock(); spin_unlock_irqrestore(&callback_lock, flags); } /** * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe. * @tsk: pointer to task_struct with which the scheduler is struggling * * Description: In the case that the scheduler cannot find an allowed cpu in * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy * mode however, this value is the same as task_cs(tsk)->effective_cpus, * which will not contain a sane cpumask during cases such as cpu hotplugging. * This is the absolute last resort for the scheduler and it is only used if * _every_ other avenue has been traveled. * * Returns true if the affinity of @tsk was changed, false otherwise. **/ bool cpuset_cpus_allowed_fallback(struct task_struct *tsk) { const struct cpumask *possible_mask = task_cpu_possible_mask(tsk); const struct cpumask *cs_mask; bool changed = false; rcu_read_lock(); cs_mask = task_cs(tsk)->cpus_allowed; if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) { do_set_cpus_allowed(tsk, cs_mask); changed = true; } rcu_read_unlock(); /* * We own tsk->cpus_allowed, nobody can change it under us. * * But we used cs && cs->cpus_allowed lockless and thus can * race with cgroup_attach_task() or update_cpumask() and get * the wrong tsk->cpus_allowed. However, both cases imply the * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr() * which takes task_rq_lock(). * * If we are called after it dropped the lock we must see all * changes in tsk_cs()->cpus_allowed. Otherwise we can temporary * set any mask even if it is not right from task_cs() pov, * the pending set_cpus_allowed_ptr() will fix things. * * select_fallback_rq() will fix things ups and set cpu_possible_mask * if required. 
*/ return changed; } void __init cpuset_init_current_mems_allowed(void) { nodes_setall(current->mems_allowed); } /** * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset. * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed. * * Description: Returns the nodemask_t mems_allowed of the cpuset * attached to the specified @tsk. Guaranteed to return some non-empty * subset of node_states[N_MEMORY], even if this means going outside the * tasks cpuset. **/ nodemask_t cpuset_mems_allowed(struct task_struct *tsk) { nodemask_t mask; unsigned long flags; spin_lock_irqsave(&callback_lock, flags); rcu_read_lock(); guarantee_online_mems(task_cs(tsk), &mask); rcu_read_unlock(); spin_unlock_irqrestore(&callback_lock, flags); return mask; } /** * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed * @nodemask: the nodemask to be checked * * Are any of the nodes in the nodemask allowed in current->mems_allowed? */ int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) { return nodes_intersects(*nodemask, current->mems_allowed); } /* * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or * mem_hardwall ancestor to the specified cpuset. Call holding * callback_lock. If no ancestor is mem_exclusive or mem_hardwall * (an unusual configuration), then returns the root cpuset. */ static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) { while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs)) cs = parent_cs(cs); return cs; } /* * cpuset_node_allowed - Can we allocate on a memory node? * @node: is this an allowed node? * @gfp_mask: memory allocation flags * * If we're in interrupt, yes, we can always allocate. If @node is set in * current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this * node is set in the nearest hardwalled cpuset ancestor to current's cpuset, * yes. If current has access to memory reserves as an oom victim, yes. * Otherwise, no. * * GFP_USER allocations are marked with the __GFP_HARDWALL bit, * and do not allow allocations outside the current tasks cpuset * unless the task has been OOM killed. * GFP_KERNEL allocations are not so marked, so can escape to the * nearest enclosing hardwalled ancestor cpuset. * * Scanning up parent cpusets requires callback_lock. The * __alloc_pages() routine only calls here with __GFP_HARDWALL bit * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the * current tasks mems_allowed came up empty on the first pass over * the zonelist. So only GFP_KERNEL allocations, if all nodes in the * cpuset are short of memory, might require taking the callback_lock. * * The first call here from mm/page_alloc:get_page_from_freelist() * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets, * so no allocation on a node outside the cpuset is allowed (unless * in interrupt, of course). * * The second pass through get_page_from_freelist() doesn't even call * here for GFP_ATOMIC calls. For those calls, the __alloc_pages() * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set * in alloc_flags. That logic and the checks below have the combined * affect that: * in_interrupt - any node ok (current task context irrelevant) * GFP_ATOMIC - any node ok * tsk_is_oom_victim - any node ok * GFP_KERNEL - any node in enclosing hardwalled cpuset ok * GFP_USER - only nodes in current tasks mems allowed ok. 
*/ bool cpuset_node_allowed(int node, gfp_t gfp_mask) { struct cpuset *cs; /* current cpuset ancestors */ bool allowed; /* is allocation in zone z allowed? */ unsigned long flags; if (in_interrupt()) return true; if (node_isset(node, current->mems_allowed)) return true; /* * Allow tasks that have access to memory reserves because they have * been OOM killed to get memory anywhere. */ if (unlikely(tsk_is_oom_victim(current))) return true; if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */ return false; if (current->flags & PF_EXITING) /* Let dying task have memory */ return true; /* Not hardwall and node outside mems_allowed: scan up cpusets */ spin_lock_irqsave(&callback_lock, flags); rcu_read_lock(); cs = nearest_hardwall_ancestor(task_cs(current)); allowed = node_isset(node, cs->mems_allowed); rcu_read_unlock(); spin_unlock_irqrestore(&callback_lock, flags); return allowed; } /** * cpuset_spread_node() - On which node to begin search for a page * @rotor: round robin rotor * * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for * tasks in a cpuset with is_spread_page or is_spread_slab set), * and if the memory allocation used cpuset_mem_spread_node() * to determine on which node to start looking, as it will for * certain page cache or slab cache pages such as used for file * system buffers and inode caches, then instead of starting on the * local node to look for a free page, rather spread the starting * node around the tasks mems_allowed nodes. * * We don't have to worry about the returned node being offline * because "it can't happen", and even if it did, it would be ok. * * The routines calling guarantee_online_mems() are careful to * only set nodes in task->mems_allowed that are online. So it * should not be possible for the following code to return an * offline node. But if it did, that would be ok, as this routine * is not returning the node where the allocation must be, only * the node where the search should start. The zonelist passed to * __alloc_pages() will include all nodes. If the slab allocator * is passed an offline node, it will fall back to the local node. * See kmem_cache_alloc_node(). */ static int cpuset_spread_node(int *rotor) { return *rotor = next_node_in(*rotor, current->mems_allowed); } /** * cpuset_mem_spread_node() - On which node to begin search for a file page */ int cpuset_mem_spread_node(void) { if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE) current->cpuset_mem_spread_rotor = node_random(&current->mems_allowed); return cpuset_spread_node(&current->cpuset_mem_spread_rotor); } /** * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's? * @tsk1: pointer to task_struct of some task. * @tsk2: pointer to task_struct of some other task. * * Description: Return true if @tsk1's mems_allowed intersects the * mems_allowed of @tsk2. Used by the OOM killer to determine if * one of the task's memory usage might impact the memory available * to the other. **/ int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, const struct task_struct *tsk2) { return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed); } /** * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed * * Description: Prints current's name, cpuset name, and cached copy of its * mems_allowed to the kernel log. 
*/ void cpuset_print_current_mems_allowed(void) { struct cgroup *cgrp; rcu_read_lock(); cgrp = task_cs(current)->css.cgroup; pr_cont(",cpuset="); pr_cont_cgroup_name(cgrp); pr_cont(",mems_allowed=%*pbl", nodemask_pr_args(&current->mems_allowed)); rcu_read_unlock(); } /* Display task mems_allowed in /proc/<pid>/status file. */ void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task) { seq_printf(m, "Mems_allowed:\t%*pb\n", nodemask_pr_args(&task->mems_allowed)); seq_printf(m, "Mems_allowed_list:\t%*pbl\n", nodemask_pr_args(&task->mems_allowed)); }
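The cpuset control files registered in dfl_files[] above are what user space actually touches. The following is a minimal, hedged userspace sketch (not part of the kernel sources in this report) that drives cpuset_write_resmask() and cpuset_partition_write() through the cgroup v2 filesystem. The mount point /sys/fs/cgroup and the child group name "cpuset_demo" are assumptions; the program needs root, the cpuset controller enabled in the parent's cgroup.subtree_control, and the partition request may legitimately come back as "isolated invalid (...)" via cpuset_partition_show() if the chosen CPU is not exclusive.

/* Hedged userspace demo of the cgroup v2 cpuset interface; paths and the
 * group name are assumptions, not taken from the sources above. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

static int write_file(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return -1;
	}
	if (write(fd, val, strlen(val)) < 0) {
		perror(path);
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

static void show(const char *path)
{
	char buf[256];
	ssize_t n;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("%s: %s", path, buf);
	}
	close(fd);
}

int main(void)
{
	const char *dir = "/sys/fs/cgroup/cpuset_demo";

	/* cpuset_css_alloc()/cpuset_css_online() run when the directory appears. */
	if (mkdir(dir, 0755) && errno != EEXIST) {
		perror(dir);
		return 1;
	}

	/* These writes land in cpuset_write_resmask(). */
	write_file("/sys/fs/cgroup/cpuset_demo/cpuset.cpus", "1");
	write_file("/sys/fs/cgroup/cpuset_demo/cpuset.mems", "0");

	/* This write lands in cpuset_partition_write() -> update_prstate(). */
	write_file("/sys/fs/cgroup/cpuset_demo/cpuset.cpus.partition", "isolated");

	/* cpuset_partition_show() reports "isolated" or "isolated invalid (...)". */
	show("/sys/fs/cgroup/cpuset_demo/cpuset.cpus.partition");
	show("/sys/fs/cgroup/cpuset_demo/cpuset.cpus.effective");

	rmdir(dir);
	return 0;
}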
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _NET_GSO_H
#define _NET_GSO_H

#include <linux/skbuff.h>

/* Keeps track of mac header offset relative to skb->head.
 * It is useful for TSO of Tunneling protocol. e.g. GRE.
 * For non-tunnel skb it points to skb_mac_header() and for
 * tunnel skb it points to outer mac header.
 * Keeps track of level of encapsulation of network headers.
 */
struct skb_gso_cb {
	union {
		int	mac_offset;
		int	data_offset;
	};
	int	encap_level;
	__wsum	csum;
	__u16	csum_start;
};

#define SKB_GSO_CB_OFFSET	32
#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET))

static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
{
	return (skb_mac_header(inner_skb) - inner_skb->head) -
		SKB_GSO_CB(inner_skb)->mac_offset;
}

static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
{
	int new_headroom, headroom;
	int ret;

	headroom = skb_headroom(skb);
	ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
	if (ret)
		return ret;

	new_headroom = skb_headroom(skb);
	SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
	return 0;
}

static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
{
	/* Do not update partial checksums if remote checksum is enabled. */
	if (skb->remcsum_offload)
		return;

	SKB_GSO_CB(skb)->csum = res;
	SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
}

/* Compute the checksum for a gso segment. First compute the checksum value
 * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
 * then add in skb->csum (checksum from csum_start to end of packet).
 * skb->csum and csum_start are then updated to reflect the checksum of the
 * resultant packet starting from the transport header. The resultant checksum
 * is in the res argument (i.e. normally zero or ~ of checksum of a pseudo
 * header).
 */
static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
{
	unsigned char *csum_start = skb_transport_header(skb);
	int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
	__wsum partial = SKB_GSO_CB(skb)->csum;

	SKB_GSO_CB(skb)->csum = res;
	SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;

	return csum_fold(csum_partial(csum_start, plen, partial));
}

struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path);

static inline struct sk_buff *skb_gso_segment(struct sk_buff *skb,
					      netdev_features_t features)
{
	return __skb_gso_segment(skb, features, true);
}

struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
				    netdev_features_t features, __be16 type);

struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features);

bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);

bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);

static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
					int pulled_hlen, u16 mac_offset,
					int mac_len)
{
	skb->protocol = protocol;
	skb->encapsulation = 1;
	skb_push(skb, pulled_hlen);
	skb_reset_transport_header(skb);
	skb->mac_header = mac_offset;
	skb->network_header = skb->mac_header + mac_len;
	skb->mac_len = mac_len;
}

#endif /* _NET_GSO_H */
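gso_make_checksum() above leans on the kernel's ones'-complement helpers csum_partial() and csum_fold(). The sketch below is a hedged, self-contained userspace model of that arithmetic: csum_add_buf() and csum_fold16() are made-up stand-ins, not the kernel's optimized routines, and byte-order handling is simplified. It folds a running 32-bit accumulator into the final inverted 16-bit checksum, starting from a previously computed partial sum just as gso_make_checksum() starts from SKB_GSO_CB(skb)->csum.

/* Simplified userspace model of csum_partial()/csum_fold(); helper names
 * are invented for illustration. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Ones'-complement style accumulation of a byte buffer, 16 bits at a time. */
static uint32_t csum_add_buf(uint32_t sum, const uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)buf[i] << 8 | buf[i + 1];
	if (len & 1)
		sum += (uint32_t)buf[len - 1] << 8;
	return sum;
}

/* Fold the 32-bit accumulator to 16 bits and complement it (like csum_fold). */
static uint16_t csum_fold16(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* "partial" plays the role of SKB_GSO_CB(skb)->csum: the checksum of
	 * the bytes that follow csum_start.  "hdr" stands for the bytes between
	 * the transport header and csum_start. */
	const uint8_t hdr[] = { 0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46 };
	const uint8_t payload[] = { 0xde, 0xad, 0xbe, 0xef };
	uint32_t partial = csum_add_buf(0, payload, sizeof(payload));
	uint16_t folded = csum_fold16(csum_add_buf(partial, hdr, sizeof(hdr)));

	printf("folded checksum: 0x%04x\n", (unsigned int)folded);
	return 0;
}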
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2023 Western Digital Corporation or its affiliates.
*/ #include <linux/btrfs_tree.h> #include "ctree.h" #include "fs.h" #include "accessors.h" #include "transaction.h" #include "disk-io.h" #include "raid-stripe-tree.h" #include "volumes.h" #include "print-tree.h" static int btrfs_partially_delete_raid_extent(struct btrfs_trans_handle *trans, struct btrfs_path *path, const struct btrfs_key *oldkey, u64 newlen, u64 frontpad) { struct btrfs_root *stripe_root = trans->fs_info->stripe_root; struct btrfs_stripe_extent *extent, *newitem; struct extent_buffer *leaf; int slot; size_t item_size; struct btrfs_key newkey = { .objectid = oldkey->objectid + frontpad, .type = BTRFS_RAID_STRIPE_KEY, .offset = newlen, }; int ret; ASSERT(newlen > 0); ASSERT(oldkey->type == BTRFS_RAID_STRIPE_KEY); leaf = path->nodes[0]; slot = path->slots[0]; item_size = btrfs_item_size(leaf, slot); newitem = kzalloc(item_size, GFP_NOFS); if (!newitem) return -ENOMEM; extent = btrfs_item_ptr(leaf, slot, struct btrfs_stripe_extent); for (int i = 0; i < btrfs_num_raid_stripes(item_size); i++) { struct btrfs_raid_stride *stride = &extent->strides[i]; u64 phys; phys = btrfs_raid_stride_physical(leaf, stride) + frontpad; btrfs_set_stack_raid_stride_physical(&newitem->strides[i], phys); } ret = btrfs_del_item(trans, stripe_root, path); if (ret) goto out; btrfs_release_path(path); ret = btrfs_insert_item(trans, stripe_root, &newkey, newitem, item_size); out: kfree(newitem); return ret; } int btrfs_delete_raid_extent(struct btrfs_trans_handle *trans, u64 start, u64 length) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_root *stripe_root = fs_info->stripe_root; struct btrfs_path *path; struct btrfs_key key; struct extent_buffer *leaf; u64 found_start; u64 found_end; u64 end = start + length; int slot; int ret; if (!btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE) || !stripe_root) return 0; if (!btrfs_is_testing(fs_info)) { struct btrfs_chunk_map *map; bool use_rst; map = btrfs_find_chunk_map(fs_info, start, length); if (!map) return -EINVAL; use_rst = btrfs_need_stripe_tree_update(fs_info, map->type); btrfs_free_chunk_map(map); if (!use_rst) return 0; } path = btrfs_alloc_path(); if (!path) return -ENOMEM; while (1) { key.objectid = start; key.type = BTRFS_RAID_STRIPE_KEY; key.offset = 0; ret = btrfs_search_slot(trans, stripe_root, &key, path, -1, 1); if (ret < 0) break; if (path->slots[0] == btrfs_header_nritems(path->nodes[0])) path->slots[0]--; leaf = path->nodes[0]; slot = path->slots[0]; btrfs_item_key_to_cpu(leaf, &key, slot); found_start = key.objectid; found_end = found_start + key.offset; ret = 0; /* * The stripe extent starts before the range we want to delete, * but the range spans more than one stripe extent: * * |--- RAID Stripe Extent ---||--- RAID Stripe Extent ---| * |--- keep ---|--- drop ---| * * This means we have to get the previous item, truncate its * length and then restart the search. */ if (found_start > start) { if (slot == 0) { ret = btrfs_previous_item(stripe_root, path, start, BTRFS_RAID_STRIPE_KEY); if (ret) { if (ret > 0) ret = -ENOENT; break; } } else { path->slots[0]--; } leaf = path->nodes[0]; slot = path->slots[0]; btrfs_item_key_to_cpu(leaf, &key, slot); found_start = key.objectid; found_end = found_start + key.offset; ASSERT(found_start <= start); } if (key.type != BTRFS_RAID_STRIPE_KEY) break; /* That stripe ends before we start, we're done. 
*/ if (found_end <= start) break; trace_btrfs_raid_extent_delete(fs_info, start, end, found_start, found_end); /* * The stripe extent starts before the range we want to delete * and ends after the range we want to delete, i.e. we're * punching a hole in the stripe extent: * * |--- RAID Stripe Extent ---| * | keep |--- drop ---| keep | * * This means we need to a) truncate the existing item and b) * create a second item for the remaining range. */ if (found_start < start && found_end > end) { size_t item_size; u64 diff_start = start - found_start; u64 diff_end = found_end - end; struct btrfs_stripe_extent *extent; struct btrfs_key newkey = { .objectid = end, .type = BTRFS_RAID_STRIPE_KEY, .offset = diff_end, }; /* The "right" item. */ ret = btrfs_duplicate_item(trans, stripe_root, path, &newkey); if (ret) break; item_size = btrfs_item_size(leaf, path->slots[0]); extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_stripe_extent); for (int i = 0; i < btrfs_num_raid_stripes(item_size); i++) { struct btrfs_raid_stride *stride = &extent->strides[i]; u64 phys; phys = btrfs_raid_stride_physical(leaf, stride); phys += diff_start + length; btrfs_set_raid_stride_physical(leaf, stride, phys); } /* The "left" item. */ path->slots[0]--; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); btrfs_partially_delete_raid_extent(trans, path, &key, diff_start, 0); break; } /* * The stripe extent starts before the range we want to delete: * * |--- RAID Stripe Extent ---| * |--- keep ---|--- drop ---| * * This means we have to duplicate the tree item, truncate the * length to the new size and then re-insert the item. */ if (found_start < start) { u64 diff_start = start - found_start; btrfs_partially_delete_raid_extent(trans, path, &key, diff_start, 0); start += (key.offset - diff_start); length -= (key.offset - diff_start); if (length == 0) break; btrfs_release_path(path); continue; } /* * The stripe extent ends after the range we want to delete: * * |--- RAID Stripe Extent ---| * |--- drop ---|--- keep ---| * * This means we have to duplicate the tree item, truncate the * length to the new size and then re-insert the item. */ if (found_end > end) { u64 diff_end = found_end - end; btrfs_partially_delete_raid_extent(trans, path, &key, key.offset - length, length); ASSERT(key.offset - diff_end == length); break; } /* Finally we can delete the whole item, no more special cases. */ ret = btrfs_del_item(trans, stripe_root, path); if (ret) break; start += key.offset; length -= key.offset; if (length == 0) break; btrfs_release_path(path); } btrfs_free_path(path); return ret; } static int update_raid_extent_item(struct btrfs_trans_handle *trans, struct btrfs_key *key, struct btrfs_stripe_extent *stripe_extent, const size_t item_size) { struct btrfs_path *path; struct extent_buffer *leaf; int ret; int slot; path = btrfs_alloc_path(); if (!path) return -ENOMEM; ret = btrfs_search_slot(trans, trans->fs_info->stripe_root, key, path, 0, 1); if (ret) return (ret == 1 ? 
ret : -EINVAL); leaf = path->nodes[0]; slot = path->slots[0]; write_extent_buffer(leaf, stripe_extent, btrfs_item_ptr_offset(leaf, slot), item_size); btrfs_free_path(path); return ret; } EXPORT_FOR_TESTS int btrfs_insert_one_raid_extent(struct btrfs_trans_handle *trans, struct btrfs_io_context *bioc) { struct btrfs_fs_info *fs_info = trans->fs_info; struct btrfs_key stripe_key; struct btrfs_root *stripe_root = fs_info->stripe_root; const int num_stripes = btrfs_bg_type_to_factor(bioc->map_type); struct btrfs_stripe_extent *stripe_extent; const size_t item_size = struct_size(stripe_extent, strides, num_stripes); int ret; stripe_extent = kzalloc(item_size, GFP_NOFS); if (!stripe_extent) { btrfs_abort_transaction(trans, -ENOMEM); btrfs_end_transaction(trans); return -ENOMEM; } trace_btrfs_insert_one_raid_extent(fs_info, bioc->logical, bioc->size, num_stripes); for (int i = 0; i < num_stripes; i++) { u64 devid = bioc->stripes[i].dev->devid; u64 physical = bioc->stripes[i].physical; struct btrfs_raid_stride *raid_stride = &stripe_extent->strides[i]; btrfs_set_stack_raid_stride_devid(raid_stride, devid); btrfs_set_stack_raid_stride_physical(raid_stride, physical); } stripe_key.objectid = bioc->logical; stripe_key.type = BTRFS_RAID_STRIPE_KEY; stripe_key.offset = bioc->size; ret = btrfs_insert_item(trans, stripe_root, &stripe_key, stripe_extent, item_size); if (ret == -EEXIST) ret = update_raid_extent_item(trans, &stripe_key, stripe_extent, item_size); if (ret) btrfs_abort_transaction(trans, ret); kfree(stripe_extent); return ret; } int btrfs_insert_raid_extent(struct btrfs_trans_handle *trans, struct btrfs_ordered_extent *ordered_extent) { struct btrfs_io_context *bioc; int ret; if (!btrfs_fs_incompat(trans->fs_info, RAID_STRIPE_TREE)) return 0; list_for_each_entry(bioc, &ordered_extent->bioc_list, rst_ordered_entry) { ret = btrfs_insert_one_raid_extent(trans, bioc); if (ret) return ret; } while (!list_empty(&ordered_extent->bioc_list)) { bioc = list_first_entry(&ordered_extent->bioc_list, typeof(*bioc), rst_ordered_entry); list_del(&bioc->rst_ordered_entry); btrfs_put_bioc(bioc); } return 0; } int btrfs_get_raid_extent_offset(struct btrfs_fs_info *fs_info, u64 logical, u64 *length, u64 map_type, u32 stripe_index, struct btrfs_io_stripe *stripe) { struct btrfs_root *stripe_root = fs_info->stripe_root; struct btrfs_stripe_extent *stripe_extent; struct btrfs_key stripe_key; struct btrfs_key found_key; struct btrfs_path *path; struct extent_buffer *leaf; const u64 end = logical + *length; int num_stripes; u64 offset; u64 found_logical; u64 found_length; u64 found_end; int slot; int ret; stripe_key.objectid = logical; stripe_key.type = BTRFS_RAID_STRIPE_KEY; stripe_key.offset = 0; path = btrfs_alloc_path(); if (!path) return -ENOMEM; if (stripe->rst_search_commit_root) { path->skip_locking = 1; path->search_commit_root = 1; } ret = btrfs_search_slot(NULL, stripe_root, &stripe_key, path, 0, 0); if (ret < 0) goto free_path; if (ret) { if (path->slots[0] != 0) path->slots[0]--; } while (1) { leaf = path->nodes[0]; slot = path->slots[0]; btrfs_item_key_to_cpu(leaf, &found_key, slot); found_logical = found_key.objectid; found_length = found_key.offset; found_end = found_logical + found_length; if (found_logical > end) { ret = -ENODATA; goto out; } if (in_range(logical, found_logical, found_length)) break; ret = btrfs_next_item(stripe_root, path); if (ret) goto out; } offset = logical - found_logical; /* * If we have a logically contiguous, but physically non-continuous * range, we need to split the bio. 
Record the length after which we * must split the bio. */ if (end > found_end) *length -= end - found_end; num_stripes = btrfs_num_raid_stripes(btrfs_item_size(leaf, slot)); stripe_extent = btrfs_item_ptr(leaf, slot, struct btrfs_stripe_extent); for (int i = 0; i < num_stripes; i++) { struct btrfs_raid_stride *stride = &stripe_extent->strides[i]; u64 devid = btrfs_raid_stride_devid(leaf, stride); u64 physical = btrfs_raid_stride_physical(leaf, stride); if (devid != stripe->dev->devid) continue; if ((map_type & BTRFS_BLOCK_GROUP_DUP) && stripe_index != i) continue; stripe->physical = physical + offset; trace_btrfs_get_raid_extent_offset(fs_info, logical, *length, stripe->physical, devid); ret = 0; goto free_path; } /* If we're here, we haven't found the requested devid in the stripe. */ ret = -ENODATA; out: if (ret > 0) ret = -ENODATA; if (ret && ret != -EIO && !stripe->rst_search_commit_root) { btrfs_debug(fs_info, "cannot find raid-stripe for logical [%llu, %llu] devid %llu, profile %s", logical, logical + *length, stripe->dev->devid, btrfs_bg_type_to_raid_name(map_type)); } free_path: btrfs_free_path(path); return ret; }
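btrfs_delete_raid_extent() above distinguishes three overlap shapes (keep the left part, keep the right part, or punch a hole), and btrfs_partially_delete_raid_extent() re-keys the survivor by shifting its objectid by frontpad and bumping each stride's physical address. The snippet below is a toy arithmetic model of that case analysis, not btrfs code: struct remnant, split_extent() and the sample numbers are invented for illustration only.

/* Toy model of the stripe-extent deletion arithmetic; names are hypothetical. */
#include <stdint.h>
#include <stdio.h>

struct remnant {
	uint64_t objectid;	/* key.objectid of the surviving item */
	uint64_t offset;	/* key.offset, i.e. its length */
	uint64_t frontpad;	/* added to each stride's physical address */
};

/* Compute what survives of [found_start, found_start + len) when
 * [start, start + dlen) is deleted.  Returns a bitmask: bit 0 means the
 * left remnant is valid, bit 1 means the right remnant is valid. */
static int split_extent(uint64_t found_start, uint64_t len,
			uint64_t start, uint64_t dlen,
			struct remnant *left, struct remnant *right)
{
	uint64_t found_end = found_start + len;
	uint64_t end = start + dlen;
	int mask = 0;

	/* "left keep": |--- keep ---|--- drop ---| */
	if (found_start < start) {
		left->objectid = found_start;
		left->offset = start - found_start;
		left->frontpad = 0;
		mask |= 1;
	}
	/* "right keep": |--- drop ---|--- keep ---| (also the hole-punch case) */
	if (found_end > end) {
		right->objectid = end;
		right->offset = found_end - end;
		right->frontpad = end - found_start;
		mask |= 2;
	}
	return mask;
}

int main(void)
{
	struct remnant l, r;
	/* Punch a 4 KiB hole in the middle of a 16 KiB stripe extent. */
	int mask = split_extent(1 << 20, 16384, (1 << 20) + 4096, 4096, &l, &r);

	if (mask & 1)
		printf("left : objectid=%llu len=%llu\n",
		       (unsigned long long)l.objectid,
		       (unsigned long long)l.offset);
	if (mask & 2)
		printf("right: objectid=%llu len=%llu frontpad=%llu\n",
		       (unsigned long long)r.objectid,
		       (unsigned long long)r.offset,
		       (unsigned long long)r.frontpad);
	return 0;
}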
1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 
2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 
2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 // SPDX-License-Identifier: GPL-2.0 /* * Implementation of the diskquota system for the LINUX operating system. QUOTA * is implemented using the BSD system call interface as the means of * communication with the user level. This file contains the generic routines * called by the different filesystems on allocation of an inode or block. * These routines take care of the administration needed to have a consistent * diskquota tracking system. The ideas of both user and group quotas are based * on the Melbourne quota system as used on BSD derived systems. The internal * implementation is based on one of the several variants of the LINUX * inode-subsystem with added complexity of the diskquota system. * * Author: Marco van Wieringen <mvw@planets.elm.net> * * Fixes: Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96 * * Revised list management to avoid races * -- Bill Hawes, <whawes@star.net>, 9/98 * * Fixed races in dquot_transfer(), dqget() and dquot_alloc_...(). * As the consequence the locking was moved from dquot_decr_...(), * dquot_incr_...() to calling functions. * invalidate_dquots() now writes modified dquots. * Serialized quota_off() and quota_on() for mount point. * Fixed a few bugs in grow_dquots(). * Fixed deadlock in write_dquot() - we no longer account quotas on * quota files * remove_dquot_ref() moved to inode.c - it now traverses through inodes * add_dquot_ref() restarts after blocking * Added check for bogus uid and fixed check for group in quotactl. * Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99 * * Used struct list_head instead of own list struct * Invalidation of referenced dquots is no longer possible * Improved free_dquots list management * Quota and i_blocks are now updated in one place to avoid races * Warnings are now delayed so we won't block in critical section * Write updated not to require dquot lock * Jan Kara, <jack@suse.cz>, 9/2000 * * Added dynamic quota structure allocation * Jan Kara <jack@suse.cz> 12/2000 * * Rewritten quota interface. Implemented new quota format and * formats registering. * Jan Kara, <jack@suse.cz>, 2001,2002 * * New SMP locking. 
* Jan Kara, <jack@suse.cz>, 10/2002 * * Added journalled quota support, fix lock inversion problems * Jan Kara, <jack@suse.cz>, 2003,2004 * * (C) Copyright 1994 - 1997 Marco van Wieringen */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/mount.h> #include <linux/mm.h> #include <linux/time.h> #include <linux/types.h> #include <linux/string.h> #include <linux/fcntl.h> #include <linux/stat.h> #include <linux/tty.h> #include <linux/file.h> #include <linux/slab.h> #include <linux/sysctl.h> #include <linux/init.h> #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/security.h> #include <linux/sched.h> #include <linux/cred.h> #include <linux/kmod.h> #include <linux/namei.h> #include <linux/capability.h> #include <linux/quotaops.h> #include <linux/blkdev.h> #include <linux/sched/mm.h> #include <linux/uaccess.h> /* * There are five quota SMP locks: * * dq_list_lock protects all lists with quotas and quota formats. * * dquot->dq_dqb_lock protects data from dq_dqb * * inode->i_lock protects inode->i_blocks, i_bytes and also guards * consistency of dquot->dq_dqb with inode->i_blocks, i_bytes so that * dquot_transfer() can stabilize amount it transfers * * dq_data_lock protects mem_dqinfo structures and modifications of dquot * pointers in the inode * * dq_state_lock protects modifications of quota state (on quotaon and * quotaoff) and readers who care about latest values take it as well. * * The spinlock ordering is hence: * dq_data_lock > dq_list_lock > i_lock > dquot->dq_dqb_lock, * dq_list_lock > dq_state_lock * * Note that some things (eg. sb pointer, type, id) doesn't change during * the life of the dquot structure and so needn't to be protected by a lock * * Operation accessing dquots via inode pointers are protected by dquot_srcu. * Operation of reading pointer needs srcu_read_lock(&dquot_srcu), and * synchronize_srcu(&dquot_srcu) is called after clearing pointers from * inode and before dropping dquot references to avoid use of dquots after * they are freed. dq_data_lock is used to serialize the pointer setting and * clearing operations. * Special care needs to be taken about S_NOQUOTA inode flag (marking that * inode is a quota file). Functions adding pointers from inode to dquots have * to check this flag under dq_data_lock and then (if S_NOQUOTA is not set) they * have to do all pointer modifications before dropping dq_data_lock. This makes * sure they cannot race with quotaon which first sets S_NOQUOTA flag and * then drops all pointers to dquots from an inode. * * Each dquot has its dq_lock mutex. Dquot is locked when it is being read to * memory (or space for it is being allocated) on the first dqget(), when it is * being written out, and when it is being released on the last dqput(). The * allocation and release operations are serialized by the dq_lock and by * checking the use count in dquot_release(). * * Lock ordering (including related VFS locks) is the following: * s_umount > i_mutex > journal_lock > dquot->dq_lock > dqio_sem */ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock); static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock); __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock); EXPORT_SYMBOL(dq_data_lock); DEFINE_STATIC_SRCU(dquot_srcu); static DECLARE_WAIT_QUEUE_HEAD(dquot_ref_wq); void __quota_error(struct super_block *sb, const char *func, const char *fmt, ...) 
{ if (printk_ratelimit()) { va_list args; struct va_format vaf; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk(KERN_ERR "Quota error (device %s): %s: %pV\n", sb->s_id, func, &vaf); va_end(args); } } EXPORT_SYMBOL(__quota_error); #if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING) static char *quotatypes[] = INITQFNAMES; #endif static struct quota_format_type *quota_formats; /* List of registered formats */ static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES; /* SLAB cache for dquot structures */ static struct kmem_cache *dquot_cachep; void register_quota_format(struct quota_format_type *fmt) { spin_lock(&dq_list_lock); fmt->qf_next = quota_formats; quota_formats = fmt; spin_unlock(&dq_list_lock); } EXPORT_SYMBOL(register_quota_format); void unregister_quota_format(struct quota_format_type *fmt) { struct quota_format_type **actqf; spin_lock(&dq_list_lock); for (actqf = &quota_formats; *actqf && *actqf != fmt; actqf = &(*actqf)->qf_next) ; if (*actqf) *actqf = (*actqf)->qf_next; spin_unlock(&dq_list_lock); } EXPORT_SYMBOL(unregister_quota_format); static struct quota_format_type *find_quota_format(int id) { struct quota_format_type *actqf; spin_lock(&dq_list_lock); for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next) ; if (!actqf || !try_module_get(actqf->qf_owner)) { int qm; spin_unlock(&dq_list_lock); for (qm = 0; module_names[qm].qm_fmt_id && module_names[qm].qm_fmt_id != id; qm++) ; if (!module_names[qm].qm_fmt_id || request_module(module_names[qm].qm_mod_name)) return NULL; spin_lock(&dq_list_lock); for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next) ; if (actqf && !try_module_get(actqf->qf_owner)) actqf = NULL; } spin_unlock(&dq_list_lock); return actqf; } static void put_quota_format(struct quota_format_type *fmt) { module_put(fmt->qf_owner); } /* * Dquot List Management: * The quota code uses five lists for dquot management: the inuse_list, * releasing_dquots, free_dquots, dqi_dirty_list, and dquot_hash[] array. * A single dquot structure may be on some of those lists, depending on * its current state. * * All dquots are placed to the end of inuse_list when first created, and this * list is used for invalidate operation, which must look at every dquot. * * When the last reference of a dquot is dropped, the dquot is added to * releasing_dquots. We'll then queue work item which will call * synchronize_srcu() and after that perform the final cleanup of all the * dquots on the list. Each cleaned up dquot is moved to free_dquots list. * Both releasing_dquots and free_dquots use the dq_free list_head in the dquot * struct. * * Unused and cleaned up dquots are in the free_dquots list and this list is * searched whenever we need an available dquot. Dquots are removed from the * list as soon as they are used again and dqstats.free_dquots gives the number * of dquots on the list. When dquot is invalidated it's completely released * from memory. * * Dirty dquots are added to the dqi_dirty_list of quota_info when mark * dirtied, and this list is searched when writing dirty dquots back to * quota file. Note that some filesystems do dirty dquot tracking on their * own (e.g. in a journal) and thus don't use dqi_dirty_list. * * Dquots with a specific identity (device, type and id) are placed on * one of the dquot_hash[] hash chains. The provides an efficient search * mechanism to locate a specific dquot. 
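/*
 * Illustrative sketch (not part of the original file): how a quota format
 * driver typically plugs into register_quota_format()/unregister_quota_format()
 * above.  The ops table is left empty as a placeholder and the example_*
 * names are made up; QFMT_VFS_OLD is just one of the registered format ids.
 */
static const struct quota_format_ops example_format_ops;

static struct quota_format_type example_format = {
        .qf_fmt_id      = QFMT_VFS_OLD,         /* any format id works here */
        .qf_ops         = &example_format_ops,
        .qf_owner       = THIS_MODULE,
};

static int __init example_format_init(void)
{
        /* Adds the format to the quota_formats list under dq_list_lock. */
        register_quota_format(&example_format);
        return 0;
}

static void __exit example_format_exit(void)
{
        unregister_quota_format(&example_format);
}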
*/ static LIST_HEAD(inuse_list); static LIST_HEAD(free_dquots); static LIST_HEAD(releasing_dquots); static unsigned int dq_hash_bits, dq_hash_mask; static struct hlist_head *dquot_hash; struct dqstats dqstats; EXPORT_SYMBOL(dqstats); static qsize_t inode_get_rsv_space(struct inode *inode); static qsize_t __inode_get_rsv_space(struct inode *inode); static int __dquot_initialize(struct inode *inode, int type); static void quota_release_workfn(struct work_struct *work); static DECLARE_DELAYED_WORK(quota_release_work, quota_release_workfn); static inline unsigned int hashfn(const struct super_block *sb, struct kqid qid) { unsigned int id = from_kqid(&init_user_ns, qid); int type = qid.type; unsigned long tmp; tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type); return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask; } /* * Following list functions expect dq_list_lock to be held */ static inline void insert_dquot_hash(struct dquot *dquot) { struct hlist_head *head; head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id); hlist_add_head(&dquot->dq_hash, head); } static inline void remove_dquot_hash(struct dquot *dquot) { hlist_del_init(&dquot->dq_hash); } static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb, struct kqid qid) { struct dquot *dquot; hlist_for_each_entry(dquot, dquot_hash+hashent, dq_hash) if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid)) return dquot; return NULL; } /* Add a dquot to the tail of the free list */ static inline void put_dquot_last(struct dquot *dquot) { list_add_tail(&dquot->dq_free, &free_dquots); dqstats_inc(DQST_FREE_DQUOTS); } static inline void put_releasing_dquots(struct dquot *dquot) { list_add_tail(&dquot->dq_free, &releasing_dquots); set_bit(DQ_RELEASING_B, &dquot->dq_flags); } static inline void remove_free_dquot(struct dquot *dquot) { if (list_empty(&dquot->dq_free)) return; list_del_init(&dquot->dq_free); if (!test_bit(DQ_RELEASING_B, &dquot->dq_flags)) dqstats_dec(DQST_FREE_DQUOTS); else clear_bit(DQ_RELEASING_B, &dquot->dq_flags); } static inline void put_inuse(struct dquot *dquot) { /* We add to the back of inuse list so we don't have to restart * when traversing this list and we block */ list_add_tail(&dquot->dq_inuse, &inuse_list); dqstats_inc(DQST_ALLOC_DQUOTS); } static inline void remove_inuse(struct dquot *dquot) { dqstats_dec(DQST_ALLOC_DQUOTS); list_del(&dquot->dq_inuse); } /* * End of list functions needing dq_list_lock */ static void wait_on_dquot(struct dquot *dquot) { mutex_lock(&dquot->dq_lock); mutex_unlock(&dquot->dq_lock); } static inline int dquot_active(struct dquot *dquot) { return test_bit(DQ_ACTIVE_B, &dquot->dq_flags); } static inline int dquot_dirty(struct dquot *dquot) { return test_bit(DQ_MOD_B, &dquot->dq_flags); } static inline int mark_dquot_dirty(struct dquot *dquot) { return dquot->dq_sb->dq_op->mark_dirty(dquot); } /* Mark dquot dirty in atomic manner, and return it's old dirty flag state */ int dquot_mark_dquot_dirty(struct dquot *dquot) { int ret = 1; if (!dquot_active(dquot)) return 0; if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY) return test_and_set_bit(DQ_MOD_B, &dquot->dq_flags); /* If quota is dirty already, we don't have to acquire dq_list_lock */ if (dquot_dirty(dquot)) return 1; spin_lock(&dq_list_lock); if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) { list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)-> info[dquot->dq_id.type].dqi_dirty_list); ret = 0; } spin_unlock(&dq_list_lock); return ret; } EXPORT_SYMBOL(dquot_mark_dquot_dirty); /* Dirtify all the 
dquots - this can block when journalling */ static inline int mark_all_dquot_dirty(struct dquot __rcu * const *dquots) { int ret, err, cnt; struct dquot *dquot; ret = err = 0; for (cnt = 0; cnt < MAXQUOTAS; cnt++) { dquot = srcu_dereference(dquots[cnt], &dquot_srcu); if (dquot) /* Even in case of error we have to continue */ ret = mark_dquot_dirty(dquot); if (!err && ret < 0) err = ret; } return err; } static inline void dqput_all(struct dquot **dquot) { unsigned int cnt; for (cnt = 0; cnt < MAXQUOTAS; cnt++) dqput(dquot[cnt]); } static inline int clear_dquot_dirty(struct dquot *dquot) { if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY) return test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags); spin_lock(&dq_list_lock); if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) { spin_unlock(&dq_list_lock); return 0; } list_del_init(&dquot->dq_dirty); spin_unlock(&dq_list_lock); return 1; } void mark_info_dirty(struct super_block *sb, int type) { spin_lock(&dq_data_lock); sb_dqopt(sb)->info[type].dqi_flags |= DQF_INFO_DIRTY; spin_unlock(&dq_data_lock); } EXPORT_SYMBOL(mark_info_dirty); /* * Read dquot from disk and alloc space for it */ int dquot_acquire(struct dquot *dquot) { int ret = 0, ret2 = 0; unsigned int memalloc; struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); mutex_lock(&dquot->dq_lock); memalloc = memalloc_nofs_save(); if (!test_bit(DQ_READ_B, &dquot->dq_flags)) { ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot); if (ret < 0) goto out_iolock; } /* Make sure flags update is visible after dquot has been filled */ smp_mb__before_atomic(); set_bit(DQ_READ_B, &dquot->dq_flags); /* Instantiate dquot if needed */ if (!dquot_active(dquot) && !dquot->dq_off) { ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot); /* Write the info if needed */ if (info_dirty(&dqopt->info[dquot->dq_id.type])) { ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info( dquot->dq_sb, dquot->dq_id.type); } if (ret < 0) goto out_iolock; if (ret2 < 0) { ret = ret2; goto out_iolock; } } /* * Make sure flags update is visible after on-disk struct has been * allocated. Paired with smp_rmb() in dqget(). 
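/*
 * Illustrative sketch (not part of the original file): the dquot_srcu
 * read-side pattern described in the locking comment at the top of this file
 * and used by mark_all_dquot_dirty() above.  Readers never take dq_data_lock;
 * they only pin the per-inode dquot pointers with srcu_read_lock(&dquot_srcu).
 * The helper name and callback below are made up.
 */
static void example_walk_inode_dquots(struct dquot __rcu **dquots,
                                      void (*fn)(struct dquot *))
{
        struct dquot *dquot;
        int cnt, idx;

        idx = srcu_read_lock(&dquot_srcu);
        for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
                dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
                if (dquot)
                        fn(dquot);      /* cannot be freed until the unlock */
        }
        srcu_read_unlock(&dquot_srcu, idx);
}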
*/ smp_mb__before_atomic(); set_bit(DQ_ACTIVE_B, &dquot->dq_flags); out_iolock: memalloc_nofs_restore(memalloc); mutex_unlock(&dquot->dq_lock); return ret; } EXPORT_SYMBOL(dquot_acquire); /* * Write dquot to disk */ int dquot_commit(struct dquot *dquot) { int ret = 0; unsigned int memalloc; struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); mutex_lock(&dquot->dq_lock); memalloc = memalloc_nofs_save(); if (!clear_dquot_dirty(dquot)) goto out_lock; /* Inactive dquot can be only if there was error during read/init * => we have better not writing it */ if (dquot_active(dquot)) ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot); else ret = -EIO; out_lock: memalloc_nofs_restore(memalloc); mutex_unlock(&dquot->dq_lock); return ret; } EXPORT_SYMBOL(dquot_commit); /* * Release dquot */ int dquot_release(struct dquot *dquot) { int ret = 0, ret2 = 0; unsigned int memalloc; struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); mutex_lock(&dquot->dq_lock); memalloc = memalloc_nofs_save(); /* Check whether we are not racing with some other dqget() */ if (dquot_is_busy(dquot)) goto out_dqlock; if (dqopt->ops[dquot->dq_id.type]->release_dqblk) { ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot); /* Write the info */ if (info_dirty(&dqopt->info[dquot->dq_id.type])) { ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info( dquot->dq_sb, dquot->dq_id.type); } if (ret >= 0) ret = ret2; } clear_bit(DQ_ACTIVE_B, &dquot->dq_flags); out_dqlock: memalloc_nofs_restore(memalloc); mutex_unlock(&dquot->dq_lock); return ret; } EXPORT_SYMBOL(dquot_release); void dquot_destroy(struct dquot *dquot) { kmem_cache_free(dquot_cachep, dquot); } EXPORT_SYMBOL(dquot_destroy); static inline void do_destroy_dquot(struct dquot *dquot) { dquot->dq_sb->dq_op->destroy_dquot(dquot); } /* Invalidate all dquots on the list. Note that this function is called after * quota is disabled and pointers from inodes removed so there cannot be new * quota users. There can still be some users of quotas due to inodes being * just deleted or pruned by prune_icache() (those are not attached to any * list) or parallel quotactl call. We have to wait for such users. */ static void invalidate_dquots(struct super_block *sb, int type) { struct dquot *dquot, *tmp; restart: flush_delayed_work(&quota_release_work); spin_lock(&dq_list_lock); list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) { if (dquot->dq_sb != sb) continue; if (dquot->dq_id.type != type) continue; /* Wait for dquot users */ if (atomic_read(&dquot->dq_count)) { atomic_inc(&dquot->dq_count); spin_unlock(&dq_list_lock); /* * Once dqput() wakes us up, we know it's time to free * the dquot. * IMPORTANT: we rely on the fact that there is always * at most one process waiting for dquot to free. * Otherwise dq_count would be > 1 and we would never * wake up. */ wait_event(dquot_ref_wq, atomic_read(&dquot->dq_count) == 1); dqput(dquot); /* At this moment dquot() need not exist (it could be * reclaimed by prune_dqcache(). Hence we must * restart. */ goto restart; } /* * The last user already dropped its reference but dquot didn't * get fully cleaned up yet. Restart the scan which flushes the * work cleaning up released dquots. 
*/ if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) { spin_unlock(&dq_list_lock); goto restart; } /* * Quota now has no users and it has been written on last * dqput() */ remove_dquot_hash(dquot); remove_free_dquot(dquot); remove_inuse(dquot); do_destroy_dquot(dquot); } spin_unlock(&dq_list_lock); } /* Call callback for every active dquot on given filesystem */ int dquot_scan_active(struct super_block *sb, int (*fn)(struct dquot *dquot, unsigned long priv), unsigned long priv) { struct dquot *dquot, *old_dquot = NULL; int ret = 0; WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount)); spin_lock(&dq_list_lock); list_for_each_entry(dquot, &inuse_list, dq_inuse) { if (!dquot_active(dquot)) continue; if (dquot->dq_sb != sb) continue; /* Now we have active dquot so we can just increase use count */ atomic_inc(&dquot->dq_count); spin_unlock(&dq_list_lock); dqput(old_dquot); old_dquot = dquot; /* * ->release_dquot() can be racing with us. Our reference * protects us from new calls to it so just wait for any * outstanding call and recheck the DQ_ACTIVE_B after that. */ wait_on_dquot(dquot); if (dquot_active(dquot)) { ret = fn(dquot, priv); if (ret < 0) goto out; } spin_lock(&dq_list_lock); /* We are safe to continue now because our dquot could not * be moved out of the inuse list while we hold the reference */ } spin_unlock(&dq_list_lock); out: dqput(old_dquot); return ret; } EXPORT_SYMBOL(dquot_scan_active); static inline int dquot_write_dquot(struct dquot *dquot) { int ret = dquot->dq_sb->dq_op->write_dquot(dquot); if (ret < 0) { quota_error(dquot->dq_sb, "Can't write quota structure " "(error %d). Quota may get out of sync!", ret); /* Clear dirty bit anyway to avoid infinite loop. */ clear_dquot_dirty(dquot); } return ret; } /* Write all dquot structures to quota files */ int dquot_writeback_dquots(struct super_block *sb, int type) { struct list_head dirty; struct dquot *dquot; struct quota_info *dqopt = sb_dqopt(sb); int cnt; int err, ret = 0; WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount)); flush_delayed_work(&quota_release_work); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (type != -1 && cnt != type) continue; if (!sb_has_quota_active(sb, cnt)) continue; spin_lock(&dq_list_lock); /* Move list away to avoid livelock. 
*/ list_replace_init(&dqopt->info[cnt].dqi_dirty_list, &dirty); while (!list_empty(&dirty)) { dquot = list_first_entry(&dirty, struct dquot, dq_dirty); WARN_ON(!dquot_active(dquot)); /* If the dquot is releasing we should not touch it */ if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) { spin_unlock(&dq_list_lock); flush_delayed_work(&quota_release_work); spin_lock(&dq_list_lock); continue; } /* Now we have active dquot from which someone is * holding reference so we can safely just increase * use count */ dqgrab(dquot); spin_unlock(&dq_list_lock); err = dquot_write_dquot(dquot); if (err && !ret) ret = err; dqput(dquot); spin_lock(&dq_list_lock); } spin_unlock(&dq_list_lock); } for (cnt = 0; cnt < MAXQUOTAS; cnt++) if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt) && info_dirty(&dqopt->info[cnt])) sb->dq_op->write_info(sb, cnt); dqstats_inc(DQST_SYNCS); return ret; } EXPORT_SYMBOL(dquot_writeback_dquots); /* Write all dquot structures to disk and make them visible from userspace */ int dquot_quota_sync(struct super_block *sb, int type) { struct quota_info *dqopt = sb_dqopt(sb); int cnt; int ret; ret = dquot_writeback_dquots(sb, type); if (ret) return ret; if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) return 0; /* This is not very clever (and fast) but currently I don't know about * any other simple way of getting quota data to disk and we must get * them there for userspace to be visible... */ if (sb->s_op->sync_fs) { ret = sb->s_op->sync_fs(sb, 1); if (ret) return ret; } ret = sync_blockdev(sb->s_bdev); if (ret) return ret; /* * Now when everything is written we can discard the pagecache so * that userspace sees the changes. */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (type != -1 && cnt != type) continue; if (!sb_has_quota_active(sb, cnt)) continue; inode_lock(dqopt->files[cnt]); truncate_inode_pages(&dqopt->files[cnt]->i_data, 0); inode_unlock(dqopt->files[cnt]); } return 0; } EXPORT_SYMBOL(dquot_quota_sync); static unsigned long dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) { struct dquot *dquot; unsigned long freed = 0; spin_lock(&dq_list_lock); while (!list_empty(&free_dquots) && sc->nr_to_scan) { dquot = list_first_entry(&free_dquots, struct dquot, dq_free); remove_dquot_hash(dquot); remove_free_dquot(dquot); remove_inuse(dquot); do_destroy_dquot(dquot); sc->nr_to_scan--; freed++; } spin_unlock(&dq_list_lock); return freed; } static unsigned long dqcache_shrink_count(struct shrinker *shrink, struct shrink_control *sc) { return vfs_pressure_ratio( percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS])); } /* * Safely release dquot and put reference to dquot. */ static void quota_release_workfn(struct work_struct *work) { struct dquot *dquot; struct list_head rls_head; spin_lock(&dq_list_lock); /* Exchange the list head to avoid livelock. */ list_replace_init(&releasing_dquots, &rls_head); spin_unlock(&dq_list_lock); synchronize_srcu(&dquot_srcu); restart: spin_lock(&dq_list_lock); while (!list_empty(&rls_head)) { dquot = list_first_entry(&rls_head, struct dquot, dq_free); WARN_ON_ONCE(atomic_read(&dquot->dq_count)); /* * Note that DQ_RELEASING_B protects us from racing with * invalidate_dquots() calls so we are safe to work with the * dquot even after we drop dq_list_lock. 
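/*
 * Illustrative sketch (not part of the original file): dquot_quota_sync() and
 * dquot_writeback_dquots() above are normally reached through a filesystem's
 * quotactl_ops.  The table below mirrors what simple filesystems wire up;
 * example_fill_super() is a made-up hook name, and the dquot_* helpers other
 * than dquot_quota_sync() are defined later in this file (not shown in this
 * excerpt).
 */
static const struct quotactl_ops example_quotactl_ops = {
        .quota_on       = dquot_quota_on,
        .quota_off      = dquot_quota_off,
        .quota_sync     = dquot_quota_sync,
        .get_dqblk      = dquot_get_dqblk,
        .set_dqblk      = dquot_set_dqblk,
};

static void example_fill_super(struct super_block *sb)
{
        sb->s_qcop = &example_quotactl_ops;     /* quotactl(2) entry points */
        sb->dq_op = &dquot_operations;          /* generic dquot operations */
}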
*/ if (dquot_dirty(dquot)) { spin_unlock(&dq_list_lock); /* Commit dquot before releasing */ dquot_write_dquot(dquot); goto restart; } if (dquot_active(dquot)) { spin_unlock(&dq_list_lock); dquot->dq_sb->dq_op->release_dquot(dquot); goto restart; } /* Dquot is inactive and clean, now move it to free list */ remove_free_dquot(dquot); put_dquot_last(dquot); } spin_unlock(&dq_list_lock); } /* * Put reference to dquot */ void dqput(struct dquot *dquot) { if (!dquot) return; #ifdef CONFIG_QUOTA_DEBUG if (!atomic_read(&dquot->dq_count)) { quota_error(dquot->dq_sb, "trying to free free dquot of %s %d", quotatypes[dquot->dq_id.type], from_kqid(&init_user_ns, dquot->dq_id)); BUG(); } #endif dqstats_inc(DQST_DROPS); spin_lock(&dq_list_lock); if (atomic_read(&dquot->dq_count) > 1) { /* We have more than one user... nothing to do */ atomic_dec(&dquot->dq_count); /* Releasing dquot during quotaoff phase? */ if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) && atomic_read(&dquot->dq_count) == 1) wake_up(&dquot_ref_wq); spin_unlock(&dq_list_lock); return; } /* Need to release dquot? */ WARN_ON_ONCE(!list_empty(&dquot->dq_free)); put_releasing_dquots(dquot); atomic_dec(&dquot->dq_count); spin_unlock(&dq_list_lock); queue_delayed_work(system_unbound_wq, &quota_release_work, 1); } EXPORT_SYMBOL(dqput); struct dquot *dquot_alloc(struct super_block *sb, int type) { return kmem_cache_zalloc(dquot_cachep, GFP_NOFS); } EXPORT_SYMBOL(dquot_alloc); static struct dquot *get_empty_dquot(struct super_block *sb, int type) { struct dquot *dquot; dquot = sb->dq_op->alloc_dquot(sb, type); if(!dquot) return NULL; mutex_init(&dquot->dq_lock); INIT_LIST_HEAD(&dquot->dq_free); INIT_LIST_HEAD(&dquot->dq_inuse); INIT_HLIST_NODE(&dquot->dq_hash); INIT_LIST_HEAD(&dquot->dq_dirty); dquot->dq_sb = sb; dquot->dq_id = make_kqid_invalid(type); atomic_set(&dquot->dq_count, 1); spin_lock_init(&dquot->dq_dqb_lock); return dquot; } /* * Get reference to dquot * * Locking is slightly tricky here. We are guarded from parallel quotaoff() * destroying our dquot by: * a) checking for quota flags under dq_list_lock and * b) getting a reference to dquot before we release dq_list_lock */ struct dquot *dqget(struct super_block *sb, struct kqid qid) { unsigned int hashent = hashfn(sb, qid); struct dquot *dquot, *empty = NULL; if (!qid_has_mapping(sb->s_user_ns, qid)) return ERR_PTR(-EINVAL); if (!sb_has_quota_active(sb, qid.type)) return ERR_PTR(-ESRCH); we_slept: spin_lock(&dq_list_lock); spin_lock(&dq_state_lock); if (!sb_has_quota_active(sb, qid.type)) { spin_unlock(&dq_state_lock); spin_unlock(&dq_list_lock); dquot = ERR_PTR(-ESRCH); goto out; } spin_unlock(&dq_state_lock); dquot = find_dquot(hashent, sb, qid); if (!dquot) { if (!empty) { spin_unlock(&dq_list_lock); empty = get_empty_dquot(sb, qid.type); if (!empty) schedule(); /* Try to wait for a moment... 
*/ goto we_slept; } dquot = empty; empty = NULL; dquot->dq_id = qid; /* all dquots go on the inuse_list */ put_inuse(dquot); /* hash it first so it can be found */ insert_dquot_hash(dquot); spin_unlock(&dq_list_lock); dqstats_inc(DQST_LOOKUPS); } else { if (!atomic_read(&dquot->dq_count)) remove_free_dquot(dquot); atomic_inc(&dquot->dq_count); spin_unlock(&dq_list_lock); dqstats_inc(DQST_CACHE_HITS); dqstats_inc(DQST_LOOKUPS); } /* Wait for dq_lock - after this we know that either dquot_release() is * already finished or it will be canceled due to dq_count > 0 test */ wait_on_dquot(dquot); /* Read the dquot / allocate space in quota file */ if (!dquot_active(dquot)) { int err; err = sb->dq_op->acquire_dquot(dquot); if (err < 0) { dqput(dquot); dquot = ERR_PTR(err); goto out; } } /* * Make sure following reads see filled structure - paired with * smp_mb__before_atomic() in dquot_acquire(). */ smp_rmb(); /* Has somebody invalidated entry under us? */ WARN_ON_ONCE(hlist_unhashed(&dquot->dq_hash)); out: if (empty) do_destroy_dquot(empty); return dquot; } EXPORT_SYMBOL(dqget); static inline struct dquot __rcu **i_dquot(struct inode *inode) { return inode->i_sb->s_op->get_dquots(inode); } static int dqinit_needed(struct inode *inode, int type) { struct dquot __rcu * const *dquots; int cnt; if (IS_NOQUOTA(inode)) return 0; dquots = i_dquot(inode); if (type != -1) return !dquots[type]; for (cnt = 0; cnt < MAXQUOTAS; cnt++) if (!dquots[cnt]) return 1; return 0; } /* This routine is guarded by s_umount semaphore */ static int add_dquot_ref(struct super_block *sb, int type) { struct inode *inode, *old_inode = NULL; #ifdef CONFIG_QUOTA_DEBUG int reserved = 0; #endif int err = 0; spin_lock(&sb->s_inode_list_lock); list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { spin_lock(&inode->i_lock); if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) || !atomic_read(&inode->i_writecount) || !dqinit_needed(inode, type)) { spin_unlock(&inode->i_lock); continue; } __iget(inode); spin_unlock(&inode->i_lock); spin_unlock(&sb->s_inode_list_lock); #ifdef CONFIG_QUOTA_DEBUG if (unlikely(inode_get_rsv_space(inode) > 0)) reserved = 1; #endif iput(old_inode); err = __dquot_initialize(inode, type); if (err) { iput(inode); goto out; } /* * We hold a reference to 'inode' so it couldn't have been * removed from s_inodes list while we dropped the * s_inode_list_lock. We cannot iput the inode now as we can be * holding the last reference and we cannot iput it under * s_inode_list_lock. So we keep the reference and iput it * later. */ old_inode = inode; cond_resched(); spin_lock(&sb->s_inode_list_lock); } spin_unlock(&sb->s_inode_list_lock); iput(old_inode); out: #ifdef CONFIG_QUOTA_DEBUG if (reserved) { quota_error(sb, "Writes happened before quota was turned on " "thus quota information is probably inconsistent. " "Please run quotacheck(8)"); } #endif return err; } static void remove_dquot_ref(struct super_block *sb, int type) { struct inode *inode; #ifdef CONFIG_QUOTA_DEBUG int reserved = 0; #endif spin_lock(&sb->s_inode_list_lock); list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { /* * We have to scan also I_NEW inodes because they can already * have quota pointer initialized. Luckily, we need to touch * only quota pointers and these have separate locking * (dq_data_lock). 
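/*
 * Illustrative sketch (not part of the original file): the reference
 * discipline dqget()/dqput() above expect from callers.  dqget() may sleep
 * and returns -ESRCH when quota is not active for the id's type; the function
 * name below is made up.
 */
static int example_touch_user_quota(struct super_block *sb, kuid_t uid)
{
        struct dquot *dquot;

        dquot = dqget(sb, make_kqid_uid(uid));
        if (IS_ERR(dquot))
                return PTR_ERR(dquot) == -ESRCH ? 0 : PTR_ERR(dquot);

        /* ... inspect or update dquot->dq_dqb under dquot->dq_dqb_lock ... */

        dqput(dquot);   /* the final put defers cleanup to quota_release_work */
        return 0;
}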
*/ spin_lock(&dq_data_lock); if (!IS_NOQUOTA(inode)) { struct dquot __rcu **dquots = i_dquot(inode); struct dquot *dquot = srcu_dereference_check( dquots[type], &dquot_srcu, lockdep_is_held(&dq_data_lock)); #ifdef CONFIG_QUOTA_DEBUG if (unlikely(inode_get_rsv_space(inode) > 0)) reserved = 1; #endif rcu_assign_pointer(dquots[type], NULL); if (dquot) dqput(dquot); } spin_unlock(&dq_data_lock); } spin_unlock(&sb->s_inode_list_lock); #ifdef CONFIG_QUOTA_DEBUG if (reserved) { printk(KERN_WARNING "VFS (%s): Writes happened after quota" " was disabled thus quota information is probably " "inconsistent. Please run quotacheck(8).\n", sb->s_id); } #endif } /* Gather all references from inodes and drop them */ static void drop_dquot_ref(struct super_block *sb, int type) { if (sb->dq_op) remove_dquot_ref(sb, type); } static inline void dquot_free_reserved_space(struct dquot *dquot, qsize_t number) { if (dquot->dq_dqb.dqb_rsvspace >= number) dquot->dq_dqb.dqb_rsvspace -= number; else { WARN_ON_ONCE(1); dquot->dq_dqb.dqb_rsvspace = 0; } if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <= dquot->dq_dqb.dqb_bsoftlimit) dquot->dq_dqb.dqb_btime = (time64_t) 0; clear_bit(DQ_BLKS_B, &dquot->dq_flags); } static void dquot_decr_inodes(struct dquot *dquot, qsize_t number) { if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || dquot->dq_dqb.dqb_curinodes >= number) dquot->dq_dqb.dqb_curinodes -= number; else dquot->dq_dqb.dqb_curinodes = 0; if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit) dquot->dq_dqb.dqb_itime = (time64_t) 0; clear_bit(DQ_INODES_B, &dquot->dq_flags); } static void dquot_decr_space(struct dquot *dquot, qsize_t number) { if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE || dquot->dq_dqb.dqb_curspace >= number) dquot->dq_dqb.dqb_curspace -= number; else dquot->dq_dqb.dqb_curspace = 0; if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <= dquot->dq_dqb.dqb_bsoftlimit) dquot->dq_dqb.dqb_btime = (time64_t) 0; clear_bit(DQ_BLKS_B, &dquot->dq_flags); } struct dquot_warn { struct super_block *w_sb; struct kqid w_dq_id; short w_type; }; static int warning_issued(struct dquot *dquot, const int warntype) { int flag = (warntype == QUOTA_NL_BHARDWARN || warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B : ((warntype == QUOTA_NL_IHARDWARN || warntype == QUOTA_NL_ISOFTLONGWARN) ? 
DQ_INODES_B : 0); if (!flag) return 0; return test_and_set_bit(flag, &dquot->dq_flags); } #ifdef CONFIG_PRINT_QUOTA_WARNING static int flag_print_warnings = 1; static int need_print_warning(struct dquot_warn *warn) { if (!flag_print_warnings) return 0; switch (warn->w_dq_id.type) { case USRQUOTA: return uid_eq(current_fsuid(), warn->w_dq_id.uid); case GRPQUOTA: return in_group_p(warn->w_dq_id.gid); case PRJQUOTA: return 1; } return 0; } /* Print warning to user which exceeded quota */ static void print_warning(struct dquot_warn *warn) { char *msg = NULL; struct tty_struct *tty; int warntype = warn->w_type; if (warntype == QUOTA_NL_IHARDBELOW || warntype == QUOTA_NL_ISOFTBELOW || warntype == QUOTA_NL_BHARDBELOW || warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(warn)) return; tty = get_current_tty(); if (!tty) return; tty_write_message(tty, warn->w_sb->s_id); if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN) tty_write_message(tty, ": warning, "); else tty_write_message(tty, ": write failed, "); tty_write_message(tty, quotatypes[warn->w_dq_id.type]); switch (warntype) { case QUOTA_NL_IHARDWARN: msg = " file limit reached.\r\n"; break; case QUOTA_NL_ISOFTLONGWARN: msg = " file quota exceeded too long.\r\n"; break; case QUOTA_NL_ISOFTWARN: msg = " file quota exceeded.\r\n"; break; case QUOTA_NL_BHARDWARN: msg = " block limit reached.\r\n"; break; case QUOTA_NL_BSOFTLONGWARN: msg = " block quota exceeded too long.\r\n"; break; case QUOTA_NL_BSOFTWARN: msg = " block quota exceeded.\r\n"; break; } tty_write_message(tty, msg); tty_kref_put(tty); } #endif static void prepare_warning(struct dquot_warn *warn, struct dquot *dquot, int warntype) { if (warning_issued(dquot, warntype)) return; warn->w_type = warntype; warn->w_sb = dquot->dq_sb; warn->w_dq_id = dquot->dq_id; } /* * Write warnings to the console and send warning messages over netlink. * * Note that this function can call into tty and networking code. 
*/ static void flush_warnings(struct dquot_warn *warn) { int i; for (i = 0; i < MAXQUOTAS; i++) { if (warn[i].w_type == QUOTA_NL_NOWARN) continue; #ifdef CONFIG_PRINT_QUOTA_WARNING print_warning(&warn[i]); #endif quota_send_warning(warn[i].w_dq_id, warn[i].w_sb->s_dev, warn[i].w_type); } } static int ignore_hardlimit(struct dquot *dquot) { struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type]; return capable(CAP_SYS_RESOURCE) && (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || !(info->dqi_flags & DQF_ROOT_SQUASH)); } static int dquot_add_inodes(struct dquot *dquot, qsize_t inodes, struct dquot_warn *warn) { qsize_t newinodes; int ret = 0; spin_lock(&dquot->dq_dqb_lock); newinodes = dquot->dq_dqb.dqb_curinodes + inodes; if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) || test_bit(DQ_FAKE_B, &dquot->dq_flags)) goto add; if (dquot->dq_dqb.dqb_ihardlimit && newinodes > dquot->dq_dqb.dqb_ihardlimit && !ignore_hardlimit(dquot)) { prepare_warning(warn, dquot, QUOTA_NL_IHARDWARN); ret = -EDQUOT; goto out; } if (dquot->dq_dqb.dqb_isoftlimit && newinodes > dquot->dq_dqb.dqb_isoftlimit && dquot->dq_dqb.dqb_itime && ktime_get_real_seconds() >= dquot->dq_dqb.dqb_itime && !ignore_hardlimit(dquot)) { prepare_warning(warn, dquot, QUOTA_NL_ISOFTLONGWARN); ret = -EDQUOT; goto out; } if (dquot->dq_dqb.dqb_isoftlimit && newinodes > dquot->dq_dqb.dqb_isoftlimit && dquot->dq_dqb.dqb_itime == 0) { prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN); dquot->dq_dqb.dqb_itime = ktime_get_real_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace; } add: dquot->dq_dqb.dqb_curinodes = newinodes; out: spin_unlock(&dquot->dq_dqb_lock); return ret; } static int dquot_add_space(struct dquot *dquot, qsize_t space, qsize_t rsv_space, unsigned int flags, struct dquot_warn *warn) { qsize_t tspace; struct super_block *sb = dquot->dq_sb; int ret = 0; spin_lock(&dquot->dq_dqb_lock); if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) || test_bit(DQ_FAKE_B, &dquot->dq_flags)) goto finish; tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace + space + rsv_space; if (dquot->dq_dqb.dqb_bhardlimit && tspace > dquot->dq_dqb.dqb_bhardlimit && !ignore_hardlimit(dquot)) { if (flags & DQUOT_SPACE_WARN) prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN); ret = -EDQUOT; goto finish; } if (dquot->dq_dqb.dqb_bsoftlimit && tspace > dquot->dq_dqb.dqb_bsoftlimit && dquot->dq_dqb.dqb_btime && ktime_get_real_seconds() >= dquot->dq_dqb.dqb_btime && !ignore_hardlimit(dquot)) { if (flags & DQUOT_SPACE_WARN) prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN); ret = -EDQUOT; goto finish; } if (dquot->dq_dqb.dqb_bsoftlimit && tspace > dquot->dq_dqb.dqb_bsoftlimit && dquot->dq_dqb.dqb_btime == 0) { if (flags & DQUOT_SPACE_WARN) { prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN); dquot->dq_dqb.dqb_btime = ktime_get_real_seconds() + sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace; } else { /* * We don't allow preallocation to exceed softlimit so exceeding will * be always printed */ ret = -EDQUOT; goto finish; } } finish: /* * We have to be careful and go through warning generation & grace time * setting even if DQUOT_SPACE_NOFAIL is set. That's why we check it * only here... 
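/*
 * Illustrative sketch (not part of the original file): the warning protocol
 * used around dquot_add_inodes()/dquot_add_space() above.  Warnings are only
 * recorded (prepare_warning()) while spinlocks are held, and flush_warnings()
 * is called only after all locks and the SRCU read section have been dropped,
 * because it may call into tty and netlink code.  The function name below is
 * made up.
 */
static int example_charge_one_inode(struct dquot *dquot)
{
        struct dquot_warn warn[MAXQUOTAS];
        int cnt, ret;

        for (cnt = 0; cnt < MAXQUOTAS; cnt++)
                warn[cnt].w_type = QUOTA_NL_NOWARN;

        /* Locked phase: dquot_add_inodes() takes dq_dqb_lock itself. */
        ret = dquot_add_inodes(dquot, 1, &warn[dquot->dq_id.type]);

        /* Unlocked phase: emit whatever was prepared above. */
        flush_warnings(warn);
        return ret;
}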
*/ if (flags & DQUOT_SPACE_NOFAIL) ret = 0; if (!ret) { dquot->dq_dqb.dqb_rsvspace += rsv_space; dquot->dq_dqb.dqb_curspace += space; } spin_unlock(&dquot->dq_dqb_lock); return ret; } static int info_idq_free(struct dquot *dquot, qsize_t inodes) { qsize_t newinodes; if (test_bit(DQ_FAKE_B, &dquot->dq_flags) || dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit || !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type)) return QUOTA_NL_NOWARN; newinodes = dquot->dq_dqb.dqb_curinodes - inodes; if (newinodes <= dquot->dq_dqb.dqb_isoftlimit) return QUOTA_NL_ISOFTBELOW; if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit && newinodes < dquot->dq_dqb.dqb_ihardlimit) return QUOTA_NL_IHARDBELOW; return QUOTA_NL_NOWARN; } static int info_bdq_free(struct dquot *dquot, qsize_t space) { qsize_t tspace; tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace; if (test_bit(DQ_FAKE_B, &dquot->dq_flags) || tspace <= dquot->dq_dqb.dqb_bsoftlimit) return QUOTA_NL_NOWARN; if (tspace - space <= dquot->dq_dqb.dqb_bsoftlimit) return QUOTA_NL_BSOFTBELOW; if (tspace >= dquot->dq_dqb.dqb_bhardlimit && tspace - space < dquot->dq_dqb.dqb_bhardlimit) return QUOTA_NL_BHARDBELOW; return QUOTA_NL_NOWARN; } static int inode_quota_active(const struct inode *inode) { struct super_block *sb = inode->i_sb; if (IS_NOQUOTA(inode)) return 0; return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb); } /* * Initialize quota pointers in inode * * It is better to call this function outside of any transaction as it * might need a lot of space in journal for dquot structure allocation. */ static int __dquot_initialize(struct inode *inode, int type) { int cnt, init_needed = 0; struct dquot __rcu **dquots; struct dquot *got[MAXQUOTAS] = {}; struct super_block *sb = inode->i_sb; qsize_t rsv; int ret = 0; if (!inode_quota_active(inode)) return 0; dquots = i_dquot(inode); /* First get references to structures we might need. */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { struct kqid qid; kprojid_t projid; int rc; struct dquot *dquot; if (type != -1 && cnt != type) continue; /* * The i_dquot should have been initialized in most cases, * we check it without locking here to avoid unnecessary * dqget()/dqput() calls. */ if (dquots[cnt]) continue; if (!sb_has_quota_active(sb, cnt)) continue; init_needed = 1; switch (cnt) { case USRQUOTA: qid = make_kqid_uid(inode->i_uid); break; case GRPQUOTA: qid = make_kqid_gid(inode->i_gid); break; case PRJQUOTA: rc = inode->i_sb->dq_op->get_projid(inode, &projid); if (rc) continue; qid = make_kqid_projid(projid); break; } dquot = dqget(sb, qid); if (IS_ERR(dquot)) { /* We raced with somebody turning quotas off... 
*/ if (PTR_ERR(dquot) != -ESRCH) { ret = PTR_ERR(dquot); goto out_put; } dquot = NULL; } got[cnt] = dquot; } /* All required i_dquot has been initialized */ if (!init_needed) return 0; spin_lock(&dq_data_lock); if (IS_NOQUOTA(inode)) goto out_lock; for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (type != -1 && cnt != type) continue; /* Avoid races with quotaoff() */ if (!sb_has_quota_active(sb, cnt)) continue; /* We could race with quotaon or dqget() could have failed */ if (!got[cnt]) continue; if (!dquots[cnt]) { rcu_assign_pointer(dquots[cnt], got[cnt]); got[cnt] = NULL; /* * Make quota reservation system happy if someone * did a write before quota was turned on */ rsv = inode_get_rsv_space(inode); if (unlikely(rsv)) { struct dquot *dquot = srcu_dereference_check( dquots[cnt], &dquot_srcu, lockdep_is_held(&dq_data_lock)); spin_lock(&inode->i_lock); /* Get reservation again under proper lock */ rsv = __inode_get_rsv_space(inode); spin_lock(&dquot->dq_dqb_lock); dquot->dq_dqb.dqb_rsvspace += rsv; spin_unlock(&dquot->dq_dqb_lock); spin_unlock(&inode->i_lock); } } } out_lock: spin_unlock(&dq_data_lock); out_put: /* Drop unused references */ dqput_all(got); return ret; } int dquot_initialize(struct inode *inode) { return __dquot_initialize(inode, -1); } EXPORT_SYMBOL(dquot_initialize); bool dquot_initialize_needed(struct inode *inode) { struct dquot __rcu **dquots; int i; if (!inode_quota_active(inode)) return false; dquots = i_dquot(inode); for (i = 0; i < MAXQUOTAS; i++) if (!dquots[i] && sb_has_quota_active(inode->i_sb, i)) return true; return false; } EXPORT_SYMBOL(dquot_initialize_needed); /* * Release all quotas referenced by inode. * * This function only be called on inode free or converting * a file to quota file, no other users for the i_dquot in * both cases, so we needn't call synchronize_srcu() after * clearing i_dquot. */ static void __dquot_drop(struct inode *inode) { int cnt; struct dquot __rcu **dquots = i_dquot(inode); struct dquot *put[MAXQUOTAS]; spin_lock(&dq_data_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { put[cnt] = srcu_dereference_check(dquots[cnt], &dquot_srcu, lockdep_is_held(&dq_data_lock)); rcu_assign_pointer(dquots[cnt], NULL); } spin_unlock(&dq_data_lock); dqput_all(put); } void dquot_drop(struct inode *inode) { struct dquot __rcu * const *dquots; int cnt; if (IS_NOQUOTA(inode)) return; /* * Test before calling to rule out calls from proc and such * where we are not allowed to block. Note that this is * actually reliable test even without the lock - the caller * must assure that nobody can come after the DQUOT_DROP and * add quota pointers back anyway. */ dquots = i_dquot(inode); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (dquots[cnt]) break; } if (cnt < MAXQUOTAS) __dquot_drop(inode); } EXPORT_SYMBOL(dquot_drop); /* * inode_reserved_space is managed internally by quota, and protected by * i_lock similar to i_blocks+i_bytes. 
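/*
 * Illustrative sketch (not part of the original file): the calling order the
 * __dquot_initialize() comment above asks for.  A filesystem attaches dquots
 * with dquot_initialize() before starting a journal transaction and only
 * charges the new inode inside it.  The function and the fs_*_txn() markers
 * are made up.
 */
static int example_create_inode(struct inode *inode)
{
        int err;

        err = dquot_initialize(inode);  /* may read/allocate dquots, can sleep */
        if (err)
                return err;

        /* ... fs_start_txn(inode->i_sb) ... */
        err = dquot_alloc_inode(inode); /* charge the inode to its dquots */
        /* ... fs_stop_txn(inode->i_sb) ... */

        return err;
}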
*/ static qsize_t *inode_reserved_space(struct inode * inode) { /* Filesystem must explicitly define it's own method in order to use * quota reservation interface */ BUG_ON(!inode->i_sb->dq_op->get_reserved_space); return inode->i_sb->dq_op->get_reserved_space(inode); } static qsize_t __inode_get_rsv_space(struct inode *inode) { if (!inode->i_sb->dq_op->get_reserved_space) return 0; return *inode_reserved_space(inode); } static qsize_t inode_get_rsv_space(struct inode *inode) { qsize_t ret; if (!inode->i_sb->dq_op->get_reserved_space) return 0; spin_lock(&inode->i_lock); ret = __inode_get_rsv_space(inode); spin_unlock(&inode->i_lock); return ret; } /* * This functions updates i_blocks+i_bytes fields and quota information * (together with appropriate checks). * * NOTE: We absolutely rely on the fact that caller dirties the inode * (usually helpers in quotaops.h care about this) and holds a handle for * the current transaction so that dquot write and inode write go into the * same transaction. */ /* * This operation can block, but only after everything is updated */ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags) { int cnt, ret = 0, index; struct dquot_warn warn[MAXQUOTAS]; int reserve = flags & DQUOT_SPACE_RESERVE; struct dquot __rcu **dquots; struct dquot *dquot; if (!inode_quota_active(inode)) { if (reserve) { spin_lock(&inode->i_lock); *inode_reserved_space(inode) += number; spin_unlock(&inode->i_lock); } else { inode_add_bytes(inode, number); } goto out; } for (cnt = 0; cnt < MAXQUOTAS; cnt++) warn[cnt].w_type = QUOTA_NL_NOWARN; dquots = i_dquot(inode); index = srcu_read_lock(&dquot_srcu); spin_lock(&inode->i_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { dquot = srcu_dereference(dquots[cnt], &dquot_srcu); if (!dquot) continue; if (reserve) { ret = dquot_add_space(dquot, 0, number, flags, &warn[cnt]); } else { ret = dquot_add_space(dquot, number, 0, flags, &warn[cnt]); } if (ret) { /* Back out changes we already did */ for (cnt--; cnt >= 0; cnt--) { dquot = srcu_dereference(dquots[cnt], &dquot_srcu); if (!dquot) continue; spin_lock(&dquot->dq_dqb_lock); if (reserve) dquot_free_reserved_space(dquot, number); else dquot_decr_space(dquot, number); spin_unlock(&dquot->dq_dqb_lock); } spin_unlock(&inode->i_lock); goto out_flush_warn; } } if (reserve) *inode_reserved_space(inode) += number; else __inode_add_bytes(inode, number); spin_unlock(&inode->i_lock); if (reserve) goto out_flush_warn; ret = mark_all_dquot_dirty(dquots); out_flush_warn: srcu_read_unlock(&dquot_srcu, index); flush_warnings(warn); out: return ret; } EXPORT_SYMBOL(__dquot_alloc_space); /* * This operation can block, but only after everything is updated */ int dquot_alloc_inode(struct inode *inode) { int cnt, ret = 0, index; struct dquot_warn warn[MAXQUOTAS]; struct dquot __rcu * const *dquots; struct dquot *dquot; if (!inode_quota_active(inode)) return 0; for (cnt = 0; cnt < MAXQUOTAS; cnt++) warn[cnt].w_type = QUOTA_NL_NOWARN; dquots = i_dquot(inode); index = srcu_read_lock(&dquot_srcu); spin_lock(&inode->i_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { dquot = srcu_dereference(dquots[cnt], &dquot_srcu); if (!dquot) continue; ret = dquot_add_inodes(dquot, 1, &warn[cnt]); if (ret) { for (cnt--; cnt >= 0; cnt--) { dquot = srcu_dereference(dquots[cnt], &dquot_srcu); if (!dquot) continue; /* Back out changes we already did */ spin_lock(&dquot->dq_dqb_lock); dquot_decr_inodes(dquot, 1); spin_unlock(&dquot->dq_dqb_lock); } goto warn_put_all; } } warn_put_all: spin_unlock(&inode->i_lock); if (ret == 0) ret 
= mark_all_dquot_dirty(dquots); srcu_read_unlock(&dquot_srcu, index); flush_warnings(warn); return ret; } EXPORT_SYMBOL(dquot_alloc_inode); /* * Convert in-memory reserved quotas to real consumed quotas */ void dquot_claim_space_nodirty(struct inode *inode, qsize_t number) { struct dquot __rcu **dquots; struct dquot *dquot; int cnt, index; if (!inode_quota_active(inode)) { spin_lock(&inode->i_lock); *inode_reserved_space(inode) -= number; __inode_add_bytes(inode, number); spin_unlock(&inode->i_lock); return; } dquots = i_dquot(inode); index = srcu_read_lock(&dquot_srcu); spin_lock(&inode->i_lock); /* Claim reserved quotas to allocated quotas */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { dquot = srcu_dereference(dquots[cnt], &dquot_srcu); if (dquot) { spin_lock(&dquot->dq_dqb_lock); if (WARN_ON_ONCE(dquot->dq_dqb.dqb_rsvspace < number)) number = dquot->dq_dqb.dqb_rsvspace; dquot->dq_dqb.dqb_curspace += number; dquot->dq_dqb.dqb_rsvspace -= number; spin_unlock(&dquot->dq_dqb_lock); } } /* Update inode bytes */ *inode_reserved_space(inode) -= number; __inode_add_bytes(inode, number); spin_unlock(&inode->i_lock); mark_all_dquot_dirty(dquots); srcu_read_unlock(&dquot_srcu, index); } EXPORT_SYMBOL(dquot_claim_space_nodirty); /* * Convert allocated space back to in-memory reserved quotas */ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number) { struct dquot __rcu **dquots; struct dquot *dquot; int cnt, index; if (!inode_quota_active(inode)) { spin_lock(&inode->i_lock); *inode_reserved_space(inode) += number; __inode_sub_bytes(inode, number); spin_unlock(&inode->i_lock); return; } dquots = i_dquot(inode); index = srcu_read_lock(&dquot_srcu); spin_lock(&inode->i_lock); /* Claim reserved quotas to allocated quotas */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { dquot = srcu_dereference(dquots[cnt], &dquot_srcu); if (dquot) { spin_lock(&dquot->dq_dqb_lock); if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number)) number = dquot->dq_dqb.dqb_curspace; dquot->dq_dqb.dqb_rsvspace += number; dquot->dq_dqb.dqb_curspace -= number; spin_unlock(&dquot->dq_dqb_lock); } } /* Update inode bytes */ *inode_reserved_space(inode) += number; __inode_sub_bytes(inode, number); spin_unlock(&inode->i_lock); mark_all_dquot_dirty(dquots); srcu_read_unlock(&dquot_srcu, index); } EXPORT_SYMBOL(dquot_reclaim_space_nodirty); /* * This operation can block, but only after everything is updated */ void __dquot_free_space(struct inode *inode, qsize_t number, int flags) { unsigned int cnt; struct dquot_warn warn[MAXQUOTAS]; struct dquot __rcu **dquots; struct dquot *dquot; int reserve = flags & DQUOT_SPACE_RESERVE, index; if (!inode_quota_active(inode)) { if (reserve) { spin_lock(&inode->i_lock); *inode_reserved_space(inode) -= number; spin_unlock(&inode->i_lock); } else { inode_sub_bytes(inode, number); } return; } dquots = i_dquot(inode); index = srcu_read_lock(&dquot_srcu); spin_lock(&inode->i_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { int wtype; warn[cnt].w_type = QUOTA_NL_NOWARN; dquot = srcu_dereference(dquots[cnt], &dquot_srcu); if (!dquot) continue; spin_lock(&dquot->dq_dqb_lock); wtype = info_bdq_free(dquot, number); if (wtype != QUOTA_NL_NOWARN) prepare_warning(&warn[cnt], dquot, wtype); if (reserve) dquot_free_reserved_space(dquot, number); else dquot_decr_space(dquot, number); spin_unlock(&dquot->dq_dqb_lock); } if (reserve) *inode_reserved_space(inode) -= number; else __inode_sub_bytes(inode, number); spin_unlock(&inode->i_lock); if (reserve) goto out_unlock; mark_all_dquot_dirty(dquots); out_unlock: 
srcu_read_unlock(&dquot_srcu, index); flush_warnings(warn); } EXPORT_SYMBOL(__dquot_free_space); /* * This operation can block, but only after everything is updated */ void dquot_free_inode(struct inode *inode) { unsigned int cnt; struct dquot_warn warn[MAXQUOTAS]; struct dquot __rcu * const *dquots; struct dquot *dquot; int index; if (!inode_quota_active(inode)) return; dquots = i_dquot(inode); index = srcu_read_lock(&dquot_srcu); spin_lock(&inode->i_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { int wtype; warn[cnt].w_type = QUOTA_NL_NOWARN; dquot = srcu_dereference(dquots[cnt], &dquot_srcu); if (!dquot) continue; spin_lock(&dquot->dq_dqb_lock); wtype = info_idq_free(dquot, 1); if (wtype != QUOTA_NL_NOWARN) prepare_warning(&warn[cnt], dquot, wtype); dquot_decr_inodes(dquot, 1); spin_unlock(&dquot->dq_dqb_lock); } spin_unlock(&inode->i_lock); mark_all_dquot_dirty(dquots); srcu_read_unlock(&dquot_srcu, index); flush_warnings(warn); } EXPORT_SYMBOL(dquot_free_inode); /* * Transfer the number of inode and blocks from one diskquota to an other. * On success, dquot references in transfer_to are consumed and references * to original dquots that need to be released are placed there. On failure, * references are kept untouched. * * This operation can block, but only after everything is updated * A transaction must be started when entering this function. * * We are holding reference on transfer_from & transfer_to, no need to * protect them by srcu_read_lock(). */ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to) { qsize_t cur_space; qsize_t rsv_space = 0; qsize_t inode_usage = 1; struct dquot __rcu **dquots; struct dquot *transfer_from[MAXQUOTAS] = {}; int cnt, index, ret = 0, err; char is_valid[MAXQUOTAS] = {}; struct dquot_warn warn_to[MAXQUOTAS]; struct dquot_warn warn_from_inodes[MAXQUOTAS]; struct dquot_warn warn_from_space[MAXQUOTAS]; if (IS_NOQUOTA(inode)) return 0; if (inode->i_sb->dq_op->get_inode_usage) { ret = inode->i_sb->dq_op->get_inode_usage(inode, &inode_usage); if (ret) return ret; } /* Initialize the arrays */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { warn_to[cnt].w_type = QUOTA_NL_NOWARN; warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN; warn_from_space[cnt].w_type = QUOTA_NL_NOWARN; } spin_lock(&dq_data_lock); spin_lock(&inode->i_lock); if (IS_NOQUOTA(inode)) { /* File without quota accounting? */ spin_unlock(&inode->i_lock); spin_unlock(&dq_data_lock); return 0; } cur_space = __inode_get_bytes(inode); rsv_space = __inode_get_rsv_space(inode); dquots = i_dquot(inode); /* * Build the transfer_from list, check limits, and update usage in * the target structures. */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { /* * Skip changes for same uid or gid or for turned off quota-type. 
*/ if (!transfer_to[cnt]) continue; /* Avoid races with quotaoff() */ if (!sb_has_quota_active(inode->i_sb, cnt)) continue; is_valid[cnt] = 1; transfer_from[cnt] = srcu_dereference_check(dquots[cnt], &dquot_srcu, lockdep_is_held(&dq_data_lock)); ret = dquot_add_inodes(transfer_to[cnt], inode_usage, &warn_to[cnt]); if (ret) goto over_quota; ret = dquot_add_space(transfer_to[cnt], cur_space, rsv_space, DQUOT_SPACE_WARN, &warn_to[cnt]); if (ret) { spin_lock(&transfer_to[cnt]->dq_dqb_lock); dquot_decr_inodes(transfer_to[cnt], inode_usage); spin_unlock(&transfer_to[cnt]->dq_dqb_lock); goto over_quota; } } /* Decrease usage for source structures and update quota pointers */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (!is_valid[cnt]) continue; /* Due to IO error we might not have transfer_from[] structure */ if (transfer_from[cnt]) { int wtype; spin_lock(&transfer_from[cnt]->dq_dqb_lock); wtype = info_idq_free(transfer_from[cnt], inode_usage); if (wtype != QUOTA_NL_NOWARN) prepare_warning(&warn_from_inodes[cnt], transfer_from[cnt], wtype); wtype = info_bdq_free(transfer_from[cnt], cur_space + rsv_space); if (wtype != QUOTA_NL_NOWARN) prepare_warning(&warn_from_space[cnt], transfer_from[cnt], wtype); dquot_decr_inodes(transfer_from[cnt], inode_usage); dquot_decr_space(transfer_from[cnt], cur_space); dquot_free_reserved_space(transfer_from[cnt], rsv_space); spin_unlock(&transfer_from[cnt]->dq_dqb_lock); } rcu_assign_pointer(dquots[cnt], transfer_to[cnt]); } spin_unlock(&inode->i_lock); spin_unlock(&dq_data_lock); /* * These arrays are local and we hold dquot references so we don't need * the srcu protection but still take dquot_srcu to avoid warning in * mark_all_dquot_dirty(). */ index = srcu_read_lock(&dquot_srcu); err = mark_all_dquot_dirty((struct dquot __rcu **)transfer_from); if (err < 0) ret = err; err = mark_all_dquot_dirty((struct dquot __rcu **)transfer_to); if (err < 0) ret = err; srcu_read_unlock(&dquot_srcu, index); flush_warnings(warn_to); flush_warnings(warn_from_inodes); flush_warnings(warn_from_space); /* Pass back references to put */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) if (is_valid[cnt]) transfer_to[cnt] = transfer_from[cnt]; return ret; over_quota: /* Back out changes we already did */ for (cnt--; cnt >= 0; cnt--) { if (!is_valid[cnt]) continue; spin_lock(&transfer_to[cnt]->dq_dqb_lock); dquot_decr_inodes(transfer_to[cnt], inode_usage); dquot_decr_space(transfer_to[cnt], cur_space); dquot_free_reserved_space(transfer_to[cnt], rsv_space); spin_unlock(&transfer_to[cnt]->dq_dqb_lock); } spin_unlock(&inode->i_lock); spin_unlock(&dq_data_lock); flush_warnings(warn_to); return ret; } EXPORT_SYMBOL(__dquot_transfer); /* Wrapper for transferring ownership of an inode for uid/gid only * Called from FSXXX_setattr() */ int dquot_transfer(struct mnt_idmap *idmap, struct inode *inode, struct iattr *iattr) { struct dquot *transfer_to[MAXQUOTAS] = {}; struct dquot *dquot; struct super_block *sb = inode->i_sb; int ret; if (!inode_quota_active(inode)) return 0; if (i_uid_needs_update(idmap, iattr, inode)) { kuid_t kuid = from_vfsuid(idmap, i_user_ns(inode), iattr->ia_vfsuid); dquot = dqget(sb, make_kqid_uid(kuid)); if (IS_ERR(dquot)) { if (PTR_ERR(dquot) != -ESRCH) { ret = PTR_ERR(dquot); goto out_put; } dquot = NULL; } transfer_to[USRQUOTA] = dquot; } if (i_gid_needs_update(idmap, iattr, inode)) { kgid_t kgid = from_vfsgid(idmap, i_user_ns(inode), iattr->ia_vfsgid); dquot = dqget(sb, make_kqid_gid(kgid)); if (IS_ERR(dquot)) { if (PTR_ERR(dquot) != -ESRCH) { ret = PTR_ERR(dquot); goto out_put; 
} dquot = NULL; } transfer_to[GRPQUOTA] = dquot; } ret = __dquot_transfer(inode, transfer_to); out_put: dqput_all(transfer_to); return ret; } EXPORT_SYMBOL(dquot_transfer); /* * Write info of quota file to disk */ int dquot_commit_info(struct super_block *sb, int type) { struct quota_info *dqopt = sb_dqopt(sb); return dqopt->ops[type]->write_file_info(sb, type); } EXPORT_SYMBOL(dquot_commit_info); int dquot_get_next_id(struct super_block *sb, struct kqid *qid) { struct quota_info *dqopt = sb_dqopt(sb); if (!sb_has_quota_active(sb, qid->type)) return -ESRCH; if (!dqopt->ops[qid->type]->get_next_id) return -ENOSYS; return dqopt->ops[qid->type]->get_next_id(sb, qid); } EXPORT_SYMBOL(dquot_get_next_id); /* * Definitions of diskquota operations. */ const struct dquot_operations dquot_operations = { .write_dquot = dquot_commit, .acquire_dquot = dquot_acquire, .release_dquot = dquot_release, .mark_dirty = dquot_mark_dquot_dirty, .write_info = dquot_commit_info, .alloc_dquot = dquot_alloc, .destroy_dquot = dquot_destroy, .get_next_id = dquot_get_next_id, }; EXPORT_SYMBOL(dquot_operations); /* * Generic helper for ->open on filesystems supporting disk quotas. */ int dquot_file_open(struct inode *inode, struct file *file) { int error; error = generic_file_open(inode, file); if (!error && (file->f_mode & FMODE_WRITE)) error = dquot_initialize(inode); return error; } EXPORT_SYMBOL(dquot_file_open); static void vfs_cleanup_quota_inode(struct super_block *sb, int type) { struct quota_info *dqopt = sb_dqopt(sb); struct inode *inode = dqopt->files[type]; if (!inode) return; if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { inode_lock(inode); inode->i_flags &= ~S_NOQUOTA; inode_unlock(inode); } dqopt->files[type] = NULL; iput(inode); } /* * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount) */ int dquot_disable(struct super_block *sb, int type, unsigned int flags) { int cnt; struct quota_info *dqopt = sb_dqopt(sb); rwsem_assert_held_write(&sb->s_umount); /* Cannot turn off usage accounting without turning off limits, or * suspend quotas and simultaneously turn quotas off. */ if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED)) || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED | DQUOT_USAGE_ENABLED))) return -EINVAL; /* * Skip everything if there's nothing to do. We have to do this because * sometimes we are called when fill_super() failed and calling * sync_fs() in such cases does no good. */ if (!sb_any_quota_loaded(sb)) return 0; for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (type != -1 && cnt != type) continue; if (!sb_has_quota_loaded(sb, cnt)) continue; if (flags & DQUOT_SUSPENDED) { spin_lock(&dq_state_lock); dqopt->flags |= dquot_state_flag(DQUOT_SUSPENDED, cnt); spin_unlock(&dq_state_lock); } else { spin_lock(&dq_state_lock); dqopt->flags &= ~dquot_state_flag(flags, cnt); /* Turning off suspended quotas? */ if (!sb_has_quota_loaded(sb, cnt) && sb_has_quota_suspended(sb, cnt)) { dqopt->flags &= ~dquot_state_flag( DQUOT_SUSPENDED, cnt); spin_unlock(&dq_state_lock); vfs_cleanup_quota_inode(sb, cnt); continue; } spin_unlock(&dq_state_lock); } /* We still have to keep quota loaded? */ if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED)) continue; /* Note: these are blocking operations */ drop_dquot_ref(sb, cnt); invalidate_dquots(sb, cnt); /* * Now all dquots should be invalidated, all writes done so we * should be only users of the info. No locks needed. 
*/ if (info_dirty(&dqopt->info[cnt])) sb->dq_op->write_info(sb, cnt); if (dqopt->ops[cnt]->free_file_info) dqopt->ops[cnt]->free_file_info(sb, cnt); put_quota_format(dqopt->info[cnt].dqi_format); dqopt->info[cnt].dqi_flags = 0; dqopt->info[cnt].dqi_igrace = 0; dqopt->info[cnt].dqi_bgrace = 0; dqopt->ops[cnt] = NULL; } /* Skip syncing and setting flags if quota files are hidden */ if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) goto put_inodes; /* Sync the superblock so that buffers with quota data are written to * disk (and so userspace sees correct data afterwards). */ if (sb->s_op->sync_fs) sb->s_op->sync_fs(sb, 1); sync_blockdev(sb->s_bdev); /* Now the quota files are just ordinary files and we can set the * inode flags back. Moreover we discard the pagecache so that * userspace sees the writes we did bypassing the pagecache. We * must also discard the blockdev buffers so that we see the * changes done by userspace on the next quotaon() */ for (cnt = 0; cnt < MAXQUOTAS; cnt++) if (!sb_has_quota_loaded(sb, cnt) && dqopt->files[cnt]) { inode_lock(dqopt->files[cnt]); truncate_inode_pages(&dqopt->files[cnt]->i_data, 0); inode_unlock(dqopt->files[cnt]); } if (sb->s_bdev) invalidate_bdev(sb->s_bdev); put_inodes: /* We are done when suspending quotas */ if (flags & DQUOT_SUSPENDED) return 0; for (cnt = 0; cnt < MAXQUOTAS; cnt++) if (!sb_has_quota_loaded(sb, cnt)) vfs_cleanup_quota_inode(sb, cnt); return 0; } EXPORT_SYMBOL(dquot_disable); int dquot_quota_off(struct super_block *sb, int type) { return dquot_disable(sb, type, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); } EXPORT_SYMBOL(dquot_quota_off); /* * Turn quotas on on a device */ static int vfs_setup_quota_inode(struct inode *inode, int type) { struct super_block *sb = inode->i_sb; struct quota_info *dqopt = sb_dqopt(sb); if (is_bad_inode(inode)) return -EUCLEAN; if (!S_ISREG(inode->i_mode)) return -EACCES; if (IS_RDONLY(inode)) return -EROFS; if (sb_has_quota_loaded(sb, type)) return -EBUSY; /* * Quota files should never be encrypted. They should be thought of as * filesystem metadata, not user data. New-style internal quota files * cannot be encrypted by users anyway, but old-style external quota * files could potentially be incorrectly created in an encrypted * directory, hence this explicit check. Some reasons why encrypted * quota files don't work include: (1) some filesystems that support * encryption don't handle it in their quota_read and quota_write, and * (2) cleaning up encrypted quota files at unmount would need special * consideration, as quota files are cleaned up later than user files. */ if (IS_ENCRYPTED(inode)) return -EINVAL; dqopt->files[type] = igrab(inode); if (!dqopt->files[type]) return -EIO; if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { /* We don't want quota and atime on quota files (deadlocks * possible) Also nobody should write to the file - we use * special IO operations which ignore the immutable bit. */ inode_lock(inode); inode->i_flags |= S_NOQUOTA; inode_unlock(inode); /* * When S_NOQUOTA is set, remove dquot references as no more * references can be added */ __dquot_drop(inode); } return 0; } int dquot_load_quota_sb(struct super_block *sb, int type, int format_id, unsigned int flags) { struct quota_format_type *fmt; struct quota_info *dqopt = sb_dqopt(sb); int error; lockdep_assert_held_write(&sb->s_umount); /* Just unsuspend quotas? 
*/ if (WARN_ON_ONCE(flags & DQUOT_SUSPENDED)) return -EINVAL; fmt = find_quota_format(format_id); if (!fmt) return -ESRCH; if (!sb->dq_op || !sb->s_qcop || (type == PRJQUOTA && sb->dq_op->get_projid == NULL)) { error = -EINVAL; goto out_fmt; } /* Filesystems outside of init_user_ns not yet supported */ if (sb->s_user_ns != &init_user_ns) { error = -EINVAL; goto out_fmt; } /* Usage always has to be set... */ if (!(flags & DQUOT_USAGE_ENABLED)) { error = -EINVAL; goto out_fmt; } if (sb_has_quota_loaded(sb, type)) { error = -EBUSY; goto out_fmt; } if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { /* As we bypass the pagecache we must now flush all the * dirty data and invalidate caches so that kernel sees * changes from userspace. It is not enough to just flush * the quota file since if blocksize < pagesize, invalidation * of the cache could fail because of other unrelated dirty * data */ sync_filesystem(sb); invalidate_bdev(sb->s_bdev); } error = -EINVAL; if (!fmt->qf_ops->check_quota_file(sb, type)) goto out_fmt; dqopt->ops[type] = fmt->qf_ops; dqopt->info[type].dqi_format = fmt; dqopt->info[type].dqi_fmt_id = format_id; INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list); error = dqopt->ops[type]->read_file_info(sb, type); if (error < 0) goto out_fmt; if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) { spin_lock(&dq_data_lock); dqopt->info[type].dqi_flags |= DQF_SYS_FILE; spin_unlock(&dq_data_lock); } spin_lock(&dq_state_lock); dqopt->flags |= dquot_state_flag(flags, type); spin_unlock(&dq_state_lock); error = add_dquot_ref(sb, type); if (error) dquot_disable(sb, type, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); return error; out_fmt: put_quota_format(fmt); return error; } EXPORT_SYMBOL(dquot_load_quota_sb); /* * More powerful function for turning on quotas on given quota inode allowing * setting of individual quota flags */ int dquot_load_quota_inode(struct inode *inode, int type, int format_id, unsigned int flags) { int err; err = vfs_setup_quota_inode(inode, type); if (err < 0) return err; err = dquot_load_quota_sb(inode->i_sb, type, format_id, flags); if (err < 0) vfs_cleanup_quota_inode(inode->i_sb, type); return err; } EXPORT_SYMBOL(dquot_load_quota_inode); /* Reenable quotas on remount RW */ int dquot_resume(struct super_block *sb, int type) { struct quota_info *dqopt = sb_dqopt(sb); int ret = 0, cnt; unsigned int flags; rwsem_assert_held_write(&sb->s_umount); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (type != -1 && cnt != type) continue; if (!sb_has_quota_suspended(sb, cnt)) continue; spin_lock(&dq_state_lock); flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED, cnt); dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt); spin_unlock(&dq_state_lock); flags = dquot_generic_flag(flags, cnt); ret = dquot_load_quota_sb(sb, cnt, dqopt->info[cnt].dqi_fmt_id, flags); if (ret < 0) vfs_cleanup_quota_inode(sb, cnt); } return ret; } EXPORT_SYMBOL(dquot_resume); int dquot_quota_on(struct super_block *sb, int type, int format_id, const struct path *path) { int error = security_quota_on(path->dentry); if (error) return error; /* Quota file not on the same filesystem? */ if (path->dentry->d_sb != sb) error = -EXDEV; else error = dquot_load_quota_inode(d_inode(path->dentry), type, format_id, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); return error; } EXPORT_SYMBOL(dquot_quota_on); /* * This function is used when filesystem needs to initialize quotas * during mount time. 
*/ int dquot_quota_on_mount(struct super_block *sb, char *qf_name, int format_id, int type) { struct dentry *dentry; int error; dentry = lookup_positive_unlocked(qf_name, sb->s_root, strlen(qf_name)); if (IS_ERR(dentry)) return PTR_ERR(dentry); error = security_quota_on(dentry); if (!error) error = dquot_load_quota_inode(d_inode(dentry), type, format_id, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); dput(dentry); return error; } EXPORT_SYMBOL(dquot_quota_on_mount); static int dquot_quota_enable(struct super_block *sb, unsigned int flags) { int ret; int type; struct quota_info *dqopt = sb_dqopt(sb); if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) return -ENOSYS; /* Accounting cannot be turned on while fs is mounted */ flags &= ~(FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT); if (!flags) return -EINVAL; for (type = 0; type < MAXQUOTAS; type++) { if (!(flags & qtype_enforce_flag(type))) continue; /* Can't enforce without accounting */ if (!sb_has_quota_usage_enabled(sb, type)) { ret = -EINVAL; goto out_err; } if (sb_has_quota_limits_enabled(sb, type)) { /* compatible with XFS */ ret = -EEXIST; goto out_err; } spin_lock(&dq_state_lock); dqopt->flags |= dquot_state_flag(DQUOT_LIMITS_ENABLED, type); spin_unlock(&dq_state_lock); } return 0; out_err: /* Backout enforcement enablement we already did */ for (type--; type >= 0; type--) { if (flags & qtype_enforce_flag(type)) dquot_disable(sb, type, DQUOT_LIMITS_ENABLED); } return ret; } static int dquot_quota_disable(struct super_block *sb, unsigned int flags) { int ret; int type; struct quota_info *dqopt = sb_dqopt(sb); if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) return -ENOSYS; /* * We don't support turning off accounting via quotactl. In principle * quota infrastructure can do this but filesystems don't expect * userspace to be able to do it. */ if (flags & (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT)) return -EOPNOTSUPP; /* Filter out limits not enabled */ for (type = 0; type < MAXQUOTAS; type++) if (!sb_has_quota_limits_enabled(sb, type)) flags &= ~qtype_enforce_flag(type); /* Nothing left? 
*/ if (!flags) return -EEXIST; for (type = 0; type < MAXQUOTAS; type++) { if (flags & qtype_enforce_flag(type)) { ret = dquot_disable(sb, type, DQUOT_LIMITS_ENABLED); if (ret < 0) goto out_err; } } return 0; out_err: /* Backout enforcement disabling we already did */ for (type--; type >= 0; type--) { if (flags & qtype_enforce_flag(type)) { spin_lock(&dq_state_lock); dqopt->flags |= dquot_state_flag(DQUOT_LIMITS_ENABLED, type); spin_unlock(&dq_state_lock); } } return ret; } /* Generic routine for getting common part of quota structure */ static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di) { struct mem_dqblk *dm = &dquot->dq_dqb; memset(di, 0, sizeof(*di)); spin_lock(&dquot->dq_dqb_lock); di->d_spc_hardlimit = dm->dqb_bhardlimit; di->d_spc_softlimit = dm->dqb_bsoftlimit; di->d_ino_hardlimit = dm->dqb_ihardlimit; di->d_ino_softlimit = dm->dqb_isoftlimit; di->d_space = dm->dqb_curspace + dm->dqb_rsvspace; di->d_ino_count = dm->dqb_curinodes; di->d_spc_timer = dm->dqb_btime; di->d_ino_timer = dm->dqb_itime; spin_unlock(&dquot->dq_dqb_lock); } int dquot_get_dqblk(struct super_block *sb, struct kqid qid, struct qc_dqblk *di) { struct dquot *dquot; dquot = dqget(sb, qid); if (IS_ERR(dquot)) return PTR_ERR(dquot); do_get_dqblk(dquot, di); dqput(dquot); return 0; } EXPORT_SYMBOL(dquot_get_dqblk); int dquot_get_next_dqblk(struct super_block *sb, struct kqid *qid, struct qc_dqblk *di) { struct dquot *dquot; int err; if (!sb->dq_op->get_next_id) return -ENOSYS; err = sb->dq_op->get_next_id(sb, qid); if (err < 0) return err; dquot = dqget(sb, *qid); if (IS_ERR(dquot)) return PTR_ERR(dquot); do_get_dqblk(dquot, di); dqput(dquot); return 0; } EXPORT_SYMBOL(dquot_get_next_dqblk); #define VFS_QC_MASK \ (QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \ QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \ QC_SPC_TIMER | QC_INO_TIMER) /* Generic routine for setting common part of quota structure */ static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di) { struct mem_dqblk *dm = &dquot->dq_dqb; int check_blim = 0, check_ilim = 0; struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type]; int ret; if (di->d_fieldmask & ~VFS_QC_MASK) return -EINVAL; if (((di->d_fieldmask & QC_SPC_SOFT) && di->d_spc_softlimit > dqi->dqi_max_spc_limit) || ((di->d_fieldmask & QC_SPC_HARD) && di->d_spc_hardlimit > dqi->dqi_max_spc_limit) || ((di->d_fieldmask & QC_INO_SOFT) && (di->d_ino_softlimit > dqi->dqi_max_ino_limit)) || ((di->d_fieldmask & QC_INO_HARD) && (di->d_ino_hardlimit > dqi->dqi_max_ino_limit))) return -ERANGE; spin_lock(&dquot->dq_dqb_lock); if (di->d_fieldmask & QC_SPACE) { dm->dqb_curspace = di->d_space - dm->dqb_rsvspace; check_blim = 1; set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags); } if (di->d_fieldmask & QC_SPC_SOFT) dm->dqb_bsoftlimit = di->d_spc_softlimit; if (di->d_fieldmask & QC_SPC_HARD) dm->dqb_bhardlimit = di->d_spc_hardlimit; if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) { check_blim = 1; set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags); } if (di->d_fieldmask & QC_INO_COUNT) { dm->dqb_curinodes = di->d_ino_count; check_ilim = 1; set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags); } if (di->d_fieldmask & QC_INO_SOFT) dm->dqb_isoftlimit = di->d_ino_softlimit; if (di->d_fieldmask & QC_INO_HARD) dm->dqb_ihardlimit = di->d_ino_hardlimit; if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) { check_ilim = 1; set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags); } if (di->d_fieldmask & QC_SPC_TIMER) { dm->dqb_btime = di->d_spc_timer; check_blim = 1; 
set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags); } if (di->d_fieldmask & QC_INO_TIMER) { dm->dqb_itime = di->d_ino_timer; check_ilim = 1; set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags); } if (check_blim) { if (!dm->dqb_bsoftlimit || dm->dqb_curspace + dm->dqb_rsvspace <= dm->dqb_bsoftlimit) { dm->dqb_btime = 0; clear_bit(DQ_BLKS_B, &dquot->dq_flags); } else if (!(di->d_fieldmask & QC_SPC_TIMER)) /* Set grace only if user hasn't provided his own... */ dm->dqb_btime = ktime_get_real_seconds() + dqi->dqi_bgrace; } if (check_ilim) { if (!dm->dqb_isoftlimit || dm->dqb_curinodes <= dm->dqb_isoftlimit) { dm->dqb_itime = 0; clear_bit(DQ_INODES_B, &dquot->dq_flags); } else if (!(di->d_fieldmask & QC_INO_TIMER)) /* Set grace only if user hasn't provided his own... */ dm->dqb_itime = ktime_get_real_seconds() + dqi->dqi_igrace; } if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || dm->dqb_isoftlimit) clear_bit(DQ_FAKE_B, &dquot->dq_flags); else set_bit(DQ_FAKE_B, &dquot->dq_flags); spin_unlock(&dquot->dq_dqb_lock); ret = mark_dquot_dirty(dquot); if (ret < 0) return ret; return 0; } int dquot_set_dqblk(struct super_block *sb, struct kqid qid, struct qc_dqblk *di) { struct dquot *dquot; int rc; dquot = dqget(sb, qid); if (IS_ERR(dquot)) { rc = PTR_ERR(dquot); goto out; } rc = do_set_dqblk(dquot, di); dqput(dquot); out: return rc; } EXPORT_SYMBOL(dquot_set_dqblk); /* Generic routine for getting common part of quota file information */ int dquot_get_state(struct super_block *sb, struct qc_state *state) { struct mem_dqinfo *mi; struct qc_type_state *tstate; struct quota_info *dqopt = sb_dqopt(sb); int type; memset(state, 0, sizeof(*state)); for (type = 0; type < MAXQUOTAS; type++) { if (!sb_has_quota_active(sb, type)) continue; tstate = state->s_state + type; mi = sb_dqopt(sb)->info + type; tstate->flags = QCI_ACCT_ENABLED; spin_lock(&dq_data_lock); if (mi->dqi_flags & DQF_SYS_FILE) tstate->flags |= QCI_SYSFILE; if (mi->dqi_flags & DQF_ROOT_SQUASH) tstate->flags |= QCI_ROOT_SQUASH; if (sb_has_quota_limits_enabled(sb, type)) tstate->flags |= QCI_LIMITS_ENFORCED; tstate->spc_timelimit = mi->dqi_bgrace; tstate->ino_timelimit = mi->dqi_igrace; if (dqopt->files[type]) { tstate->ino = dqopt->files[type]->i_ino; tstate->blocks = dqopt->files[type]->i_blocks; } tstate->nextents = 1; /* We don't know... 
*/ spin_unlock(&dq_data_lock); } return 0; } EXPORT_SYMBOL(dquot_get_state); /* Generic routine for setting common part of quota file information */ int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii) { struct mem_dqinfo *mi; if ((ii->i_fieldmask & QC_WARNS_MASK) || (ii->i_fieldmask & QC_RT_SPC_TIMER)) return -EINVAL; if (!sb_has_quota_active(sb, type)) return -ESRCH; mi = sb_dqopt(sb)->info + type; if (ii->i_fieldmask & QC_FLAGS) { if ((ii->i_flags & QCI_ROOT_SQUASH && mi->dqi_format->qf_fmt_id != QFMT_VFS_OLD)) return -EINVAL; } spin_lock(&dq_data_lock); if (ii->i_fieldmask & QC_SPC_TIMER) mi->dqi_bgrace = ii->i_spc_timelimit; if (ii->i_fieldmask & QC_INO_TIMER) mi->dqi_igrace = ii->i_ino_timelimit; if (ii->i_fieldmask & QC_FLAGS) { if (ii->i_flags & QCI_ROOT_SQUASH) mi->dqi_flags |= DQF_ROOT_SQUASH; else mi->dqi_flags &= ~DQF_ROOT_SQUASH; } spin_unlock(&dq_data_lock); mark_info_dirty(sb, type); /* Force write to disk */ return sb->dq_op->write_info(sb, type); } EXPORT_SYMBOL(dquot_set_dqinfo); const struct quotactl_ops dquot_quotactl_sysfile_ops = { .quota_enable = dquot_quota_enable, .quota_disable = dquot_quota_disable, .quota_sync = dquot_quota_sync, .get_state = dquot_get_state, .set_info = dquot_set_dqinfo, .get_dqblk = dquot_get_dqblk, .get_nextdqblk = dquot_get_next_dqblk, .set_dqblk = dquot_set_dqblk }; EXPORT_SYMBOL(dquot_quotactl_sysfile_ops); static int do_proc_dqstats(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { unsigned int type = (unsigned long *)table->data - dqstats.stat; s64 value = percpu_counter_sum(&dqstats.counter[type]); /* Filter negative values for non-monotonic counters */ if (value < 0 && (type == DQST_ALLOC_DQUOTS || type == DQST_FREE_DQUOTS)) value = 0; /* Update global table */ dqstats.stat[type] = value; return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); } static const struct ctl_table fs_dqstats_table[] = { { .procname = "lookups", .data = &dqstats.stat[DQST_LOOKUPS], .maxlen = sizeof(unsigned long), .mode = 0444, .proc_handler = do_proc_dqstats, }, { .procname = "drops", .data = &dqstats.stat[DQST_DROPS], .maxlen = sizeof(unsigned long), .mode = 0444, .proc_handler = do_proc_dqstats, }, { .procname = "reads", .data = &dqstats.stat[DQST_READS], .maxlen = sizeof(unsigned long), .mode = 0444, .proc_handler = do_proc_dqstats, }, { .procname = "writes", .data = &dqstats.stat[DQST_WRITES], .maxlen = sizeof(unsigned long), .mode = 0444, .proc_handler = do_proc_dqstats, }, { .procname = "cache_hits", .data = &dqstats.stat[DQST_CACHE_HITS], .maxlen = sizeof(unsigned long), .mode = 0444, .proc_handler = do_proc_dqstats, }, { .procname = "allocated_dquots", .data = &dqstats.stat[DQST_ALLOC_DQUOTS], .maxlen = sizeof(unsigned long), .mode = 0444, .proc_handler = do_proc_dqstats, }, { .procname = "free_dquots", .data = &dqstats.stat[DQST_FREE_DQUOTS], .maxlen = sizeof(unsigned long), .mode = 0444, .proc_handler = do_proc_dqstats, }, { .procname = "syncs", .data = &dqstats.stat[DQST_SYNCS], .maxlen = sizeof(unsigned long), .mode = 0444, .proc_handler = do_proc_dqstats, }, #ifdef CONFIG_PRINT_QUOTA_WARNING { .procname = "warnings", .data = &flag_print_warnings, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif }; static int __init dquot_init(void) { int i, ret; unsigned long nr_hash, order; struct shrinker *dqcache_shrinker; printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__); register_sysctl_init("fs/quota", fs_dqstats_table); dquot_cachep = 
kmem_cache_create("dquot", sizeof(struct dquot), sizeof(unsigned long) * 4, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| SLAB_PANIC), NULL); order = 0; dquot_hash = (struct hlist_head *)__get_free_pages(GFP_KERNEL, order); if (!dquot_hash) panic("Cannot create dquot hash table"); ret = percpu_counter_init_many(dqstats.counter, 0, GFP_KERNEL, _DQST_DQSTAT_LAST); if (ret) panic("Cannot create dquot stat counters"); /* Find power-of-two hlist_heads which can fit into allocation */ nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head); dq_hash_bits = ilog2(nr_hash); nr_hash = 1UL << dq_hash_bits; dq_hash_mask = nr_hash - 1; for (i = 0; i < nr_hash; i++) INIT_HLIST_HEAD(dquot_hash + i); pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld," " %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order)); dqcache_shrinker = shrinker_alloc(0, "dquota-cache"); if (!dqcache_shrinker) panic("Cannot allocate dquot shrinker"); dqcache_shrinker->count_objects = dqcache_shrink_count; dqcache_shrinker->scan_objects = dqcache_shrink_scan; shrinker_register(dqcache_shrinker); return 0; } fs_initcall(dquot_init);
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2006-2007 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_FILESTREAM_H__
#define __XFS_FILESTREAM_H__

struct xfs_mount;
struct xfs_inode;
struct xfs_bmalloca;
struct xfs_alloc_arg;

int xfs_filestream_mount(struct xfs_mount *mp);
void xfs_filestream_unmount(struct xfs_mount *mp);
void xfs_filestream_deassociate(struct xfs_inode *ip);
int xfs_filestream_select_ag(struct xfs_bmalloca *ap,
		struct xfs_alloc_arg *args, xfs_extlen_t *blen);

static inline int
xfs_inode_is_filestream(
	struct xfs_inode	*ip)
{
	return xfs_has_filestreams(ip->i_mount) ||
		(ip->i_diflags & XFS_DIFLAG_FILESTREAM);
}

#endif /* __XFS_FILESTREAM_H__ */
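As a usage note, here is a rough sketch, under my own assumptions about the call sites, of how this interface is exercised over a mount's lifetime: per-mount state is set up at mount, torn down at unmount, and an inode's stream association is dropped once the inode is no longer in use. The example_* wrapper names are hypothetical; only the declarations from the header above are real, and the sketch assumes the usual XFS build environment.

/* Illustrative wrappers only; the example_* names are not XFS functions. */
static int example_mount_path(struct xfs_mount *mp)
{
	/* Set up the per-mount filestreams tracking state. */
	return xfs_filestream_mount(mp);
}

static void example_unmount_path(struct xfs_mount *mp)
{
	/* Tear the tracking state down again. */
	xfs_filestream_unmount(mp);
}

static void example_inode_teardown(struct xfs_inode *ip)
{
	/*
	 * Drop the inode's association with its allocation-group stream
	 * once the inode goes away, so the AG can be handed to another
	 * stream.
	 */
	if (xfs_inode_is_filestream(ip))
		xfs_filestream_deassociate(ip);
}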
4 2 4 5 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 /* * linux/fs/nls/nls_cp775.c * * Charset cp775 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x0106, 0x00fc, 0x00e9, 0x0101, 0x00e4, 0x0123, 0x00e5, 0x0107, 0x0142, 0x0113, 0x0156, 0x0157, 0x012b, 0x0179, 0x00c4, 0x00c5, /* 0x90*/ 0x00c9, 0x00e6, 0x00c6, 0x014d, 0x00f6, 0x0122, 0x00a2, 0x015a, 0x015b, 0x00d6, 0x00dc, 0x00f8, 0x00a3, 0x00d8, 0x00d7, 0x00a4, /* 0xa0*/ 0x0100, 0x012a, 0x00f3, 0x017b, 0x017c, 0x017a, 0x201d, 0x00a6, 0x00a9, 0x00ae, 0x00ac, 0x00bd, 0x00bc, 0x0141, 0x00ab, 0x00bb, /* 0xb0*/ 0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x0104, 0x010c, 0x0118, 0x0116, 0x2563, 0x2551, 0x2557, 0x255d, 0x012e, 0x0160, 0x2510, /* 0xc0*/ 0x2514, 0x2534, 0x252c, 0x251c, 0x2500, 0x253c, 0x0172, 0x016a, 0x255a, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256c, 0x017d, /* 0xd0*/ 0x0105, 0x010d, 0x0119, 0x0117, 0x012f, 0x0161, 0x0173, 0x016b, 0x017e, 0x2518, 0x250c, 0x2588, 0x2584, 0x258c, 0x2590, 0x2580, /* 0xe0*/ 0x00d3, 0x00df, 0x014c, 
0x0143, 0x00f5, 0x00d5, 0x00b5, 0x0144, 0x0136, 0x0137, 0x013b, 0x013c, 0x0146, 0x0112, 0x0145, 0x2019, /* 0xf0*/ 0x00ad, 0x00b1, 0x201c, 0x00be, 0x00b6, 0x00a7, 0x00f7, 0x201e, 0x00b0, 0x2219, 0x00b7, 0x00b9, 0x00b3, 0x00b2, 0x25a0, 0x00a0, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xff, 0x00, 0x96, 0x9c, 0x9f, 0x00, 0xa7, 0xf5, /* 0xa0-0xa7 */ 0x00, 0xa8, 0x00, 0xae, 0xaa, 0xf0, 0xa9, 0x00, /* 0xa8-0xaf */ 0xf8, 0xf1, 0xfd, 0xfc, 0x00, 0xe6, 0xf4, 0xfa, /* 0xb0-0xb7 */ 0x00, 0xfb, 0x00, 0xaf, 0xac, 0xab, 0xf3, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x8e, 0x8f, 0x92, 0x00, /* 0xc0-0xc7 */ 0x00, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0xe0, 0x00, 0xe5, 0x99, 0x9e, /* 0xd0-0xd7 */ 0x9d, 0x00, 0x00, 0x00, 0x9a, 0x00, 0x00, 0xe1, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0x84, 0x86, 0x91, 0x00, /* 0xe0-0xe7 */ 0x00, 0x82, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0xa2, 0x00, 0xe4, 0x94, 0xf6, /* 0xf0-0xf7 */ 0x9b, 0x00, 0x00, 0x00, 0x81, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page01[256] = { 0xa0, 0x83, 0x00, 0x00, 0xb5, 0xd0, 0x80, 0x87, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0xb6, 0xd1, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0xed, 0x89, 0x00, 0x00, 0xb8, 0xd3, /* 0x10-0x17 */ 0xb7, 0xd2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x95, 0x85, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0xa1, 0x8c, 0x00, 0x00, 0xbd, 0xd4, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe8, 0xe9, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0xea, 0xeb, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0xad, 0x88, 0xe3, 0xe7, 0xee, 0xec, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0xe2, 0x93, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8a, 0x8b, /* 0x50-0x57 */ 0x00, 0x00, 0x97, 0x98, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0xbe, 0xd5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0xc7, 0xd7, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0xc6, 0xd6, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x8d, 0xa5, 0xa3, 0xa4, 0xcf, 0xd8, 0x00, /* 0x78-0x7f */ }; static const unsigned char page20[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0xef, 0x00, 0x00, 0xf2, 0xa6, 0xf7, 0x00, /* 0x18-0x1f */ }; static const unsigned char page22[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0xf9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ }; static const unsigned char page25[256] = { 0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0xcd, 0xba, 0x00, 0x00, 0xc9, 0x00, 0x00, 0xbb, /* 0x50-0x57 */ 0x00, 0x00, 0xc8, 0x00, 0x00, 0xbc, 0x00, 0x00, /* 0x58-0x5f */ 0xcc, 0x00, 0x00, 0xb9, 0x00, 0x00, 0xcb, 0x00, /* 0x60-0x67 */ 0x00, 0xca, 0x00, 0x00, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0xdb, 0x00, 0x00, 0x00, 0xdd, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0xde, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ }; static const unsigned char *const page_uni2charset[256] = { page00, page01, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page20, NULL, page22, NULL, NULL, page25, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x87, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8b, 0x8b, 0x8c, 0xa5, 0x84, 0x86, /* 0x88-0x8f */ 0x82, 0x91, 0x91, 0x93, 0x94, 0x85, 0x96, 0x98, /* 0x90-0x97 */ 0x98, 0x94, 0x81, 0x9b, 0x9c, 0x9b, 0x9e, 0x9f, /* 0x98-0x9f */ 0x83, 0x8c, 0xa2, 0xa4, 0xa4, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 
*/ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0x88, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xd0, 0xd1, 0xd2, /* 0xb0-0xb7 */ 0xd3, 0xb9, 0xba, 0xbb, 0xbc, 0xd4, 0xd5, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xd6, 0xd7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xd8, /* 0xc8-0xcf */ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0xa2, 0xe1, 0x93, 0xe7, 0xe4, 0xe4, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xe9, 0xe9, 0xeb, 0xeb, 0xec, 0x89, 0xec, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x9a, 0x90, 0xa0, 0x8e, 0x95, 0x8f, 0x80, /* 0x80-0x87 */ 0xad, 0xed, 0x8a, 0x8a, 0xa1, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x92, 0x92, 0xe2, 0x99, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x97, 0x99, 0x9a, 0x9d, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xa1, 0xe0, 0xa3, 0xa3, 0x8d, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xb5, 0xb6, 0xb7, 0xb8, 0xbd, 0xbe, 0xc6, 0xc7, /* 0xd0-0xd7 */ 0xcf, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe5, 0xe5, 0x00, 0xe3, /* 0xe0-0xe7 */ 0xe8, 0xe8, 0xea, 0xea, 0xee, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "cp775", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, }; static int __init init_nls_cp775(void) { return 
register_nls(&table);
}

static void __exit exit_nls_cp775(void)
{
	unregister_nls(&table);
}

module_init(init_nls_cp775)
module_exit(exit_nls_cp775)

MODULE_DESCRIPTION("NLS Codepage 775 (Baltic Rim)");
MODULE_LICENSE("Dual BSD/GPL");
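For context on how such a table is consumed, the sketch below shows a round trip through the cp775 mapping using the in-kernel NLS API as I understand it (load_nls()/unload_nls() plus the table's char2uni()/uni2char() hooks). example_cp775_roundtrip() is a hypothetical caller, and the 0x96 <-> U+00A2 pair is taken from the charset2uni[] and page00[] tables above.

/* Illustrative only: example_cp775_roundtrip() is not part of the module. */
#include <linux/errno.h>
#include <linux/nls.h>

static int example_cp775_roundtrip(void)
{
	const unsigned char cent_cp775 = 0x96;	/* cp775 byte for the cent sign */
	struct nls_table *nls;
	unsigned char out;
	wchar_t uni;
	int ret = 0;

	nls = load_nls("cp775");		/* finds the table registered above */
	if (!nls)
		return -EINVAL;

	/* charset2uni[0x96] == 0x00a2, so char2uni() yields U+00A2 ... */
	if (nls->char2uni(&cent_cp775, 1, &uni) < 0 || uni != 0x00a2)
		ret = -EIO;

	/* ... and page00[0xa2] == 0x96 maps it back through uni2char(). */
	if (!ret && (nls->uni2char(0x00a2, &out, 1) < 0 || out != cent_cp775))
		ret = -EIO;

	unload_nls(nls);
	return ret;
}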
34 3 15 4 17 31 207 207 225 179 26 26 225 68 217 225 67 68 2 4 27 61 5 61 4 538 504 77 296 453 437 353 35 63 69 5 45 43 24 33 60 260 245 2 24 9 20 8 10 129 128 36 36 30 3 27 2 1 1 1 1 1 1 1 1 1 1 146 114 7 140 2 114 32 121 28 14 14 14 1 2 2 9 5 2 2 1 2 2 2 1 2 19 19 19 19 19 19 5 8 11 2 8 9 9 9 9 9 17 37 21 17 35 35 12 34 35 4 4 21 6 15 1 11 1 10 1 9 128 35 2 3 125 114 2 24 73 5 2 66 15 58 23 21 16 71 28 13 69 67 6 3 2 32 12 26 26 7 25 8 26 1 5 26 26 45 45 23 44 13 43 39 22 14 1 1 7 1 12 1 1 1 4 5 9 13 13 1 9 1 2 1 3 1 1 8 1 7 2 1 5 6 2 1 1 1 1 1 3 2 1 1 2 2 2 2 2 2 2 18 18 1 1 1 1 14 6 8 1 7 7 2 5 1 4 7 7 7 18 21 21 21 1 2 16 3 18 18 11 12 3 3 18 1 3 17 18 18 45 2 6 39 39 5 5 30 30 26 1 1 1 27 28 22 2 23 5 17 1 5 4 4 42 1 2 9 19 16 1 2 39 41 1 1 39 1 208 2 4 15 1 180 44 177 44 8 201 167 40 17 42 197 14 14 4 2 3 11 1 190 13 37 201 194 1 9 9 1 1 5 4 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 
701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 
1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 
// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 */

#include <linux/fiemap.h>
#include <linux/fs.h>
#include <linux/minmax.h>
#include <linux/vmalloc.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"
#ifdef CONFIG_NTFS3_LZX_XPRESS
#include "lib/lib.h"
#endif

static struct mft_inode *ni_ins_mi(struct ntfs_inode *ni, struct rb_root *tree,
                                   CLST ino, struct rb_node *ins)
{
        struct rb_node **p = &tree->rb_node;
        struct rb_node *pr = NULL;

        while (*p) {
                struct mft_inode *mi;

                pr = *p;
                mi = rb_entry(pr, struct mft_inode, node);
                if (mi->rno > ino)
                        p = &pr->rb_left;
                else if (mi->rno < ino)
                        p = &pr->rb_right;
                else
                        return mi;
        }

        if (!ins)
                return NULL;

        rb_link_node(ins, pr, p);
        rb_insert_color(ins, tree);
        return rb_entry(ins, struct mft_inode, node);
}

/*
 * ni_find_mi - Find mft_inode by record number.
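Illustrative sketch: ni_ins_mi() above serves both as a lookup (ins == NULL, used by ni_find_mi) and as an insert (ins != NULL, used by ni_add_mi) over the per-inode rb-tree keyed by MFT record number. Below is a minimal userspace model of the same find-or-insert contract, using a plain unbalanced binary search tree instead of the kernel rb-tree; names and types are illustrative only.

#include <stdint.h>
#include <stdlib.h>

struct rec {
        uint64_t rno;                   // key: MFT record number
        struct rec *left, *right;
};

// Return the node with key rno. If absent and ins != NULL, link ins at the
// found slot and return it; if absent and ins == NULL, report a miss.
static struct rec *find_or_insert(struct rec **root, uint64_t rno,
                                  struct rec *ins)
{
        struct rec **p = root;

        while (*p) {
                if ((*p)->rno > rno)
                        p = &(*p)->left;
                else if ((*p)->rno < rno)
                        p = &(*p)->right;
                else
                        return *p;      // already present
        }
        if (!ins)
                return NULL;            // pure lookup miss
        ins->rno = rno;
        ins->left = ins->right = NULL;
        *p = ins;                       // link new node at the found slot
        return ins;
}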
*/ static struct mft_inode *ni_find_mi(struct ntfs_inode *ni, CLST rno) { return ni_ins_mi(ni, &ni->mi_tree, rno, NULL); } /* * ni_add_mi - Add new mft_inode into ntfs_inode. */ static void ni_add_mi(struct ntfs_inode *ni, struct mft_inode *mi) { ni_ins_mi(ni, &ni->mi_tree, mi->rno, &mi->node); } /* * ni_remove_mi - Remove mft_inode from ntfs_inode. */ void ni_remove_mi(struct ntfs_inode *ni, struct mft_inode *mi) { rb_erase(&mi->node, &ni->mi_tree); } /* * ni_std - Return: Pointer into std_info from primary record. */ struct ATTR_STD_INFO *ni_std(struct ntfs_inode *ni) { const struct ATTRIB *attr; attr = mi_find_attr(ni, &ni->mi, NULL, ATTR_STD, NULL, 0, NULL); return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO)) : NULL; } /* * ni_std5 * * Return: Pointer into std_info from primary record. */ struct ATTR_STD_INFO5 *ni_std5(struct ntfs_inode *ni) { const struct ATTRIB *attr; attr = mi_find_attr(ni, &ni->mi, NULL, ATTR_STD, NULL, 0, NULL); return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO5)) : NULL; } /* * ni_clear - Clear resources allocated by ntfs_inode. */ void ni_clear(struct ntfs_inode *ni) { struct rb_node *node; if (!ni->vfs_inode.i_nlink && ni->mi.mrec && is_rec_inuse(ni->mi.mrec) && !(ni->mi.sbi->flags & NTFS_FLAGS_LOG_REPLAYING)) ni_delete_all(ni); al_destroy(ni); for (node = rb_first(&ni->mi_tree); node;) { struct rb_node *next = rb_next(node); struct mft_inode *mi = rb_entry(node, struct mft_inode, node); rb_erase(node, &ni->mi_tree); mi_put(mi); node = next; } /* Bad inode always has mode == S_IFREG. */ if (ni->ni_flags & NI_FLAG_DIR) indx_clear(&ni->dir); else { run_close(&ni->file.run); #ifdef CONFIG_NTFS3_LZX_XPRESS if (ni->file.offs_folio) { /* On-demand allocated page for offsets. */ folio_put(ni->file.offs_folio); ni->file.offs_folio = NULL; } #endif } mi_clear(&ni->mi); } /* * ni_load_mi_ex - Find mft_inode by record number. */ int ni_load_mi_ex(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi) { int err; struct mft_inode *r; r = ni_find_mi(ni, rno); if (r) goto out; err = mi_get(ni->mi.sbi, rno, &r); if (err) { _ntfs_bad_inode(&ni->vfs_inode); return err; } ni_add_mi(ni, r); out: if (mi) *mi = r; return 0; } /* * ni_load_mi - Load mft_inode corresponded list_entry. */ int ni_load_mi(struct ntfs_inode *ni, const struct ATTR_LIST_ENTRY *le, struct mft_inode **mi) { CLST rno; if (!le) { *mi = &ni->mi; return 0; } rno = ino_get(&le->ref); if (rno == ni->mi.rno) { *mi = &ni->mi; return 0; } return ni_load_mi_ex(ni, rno, mi); } /* * ni_find_attr * * Return: Attribute and record this attribute belongs to. */ struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr, struct ATTR_LIST_ENTRY **le_o, enum ATTR_TYPE type, const __le16 *name, u8 name_len, const CLST *vcn, struct mft_inode **mi) { struct ATTR_LIST_ENTRY *le; struct mft_inode *m; if (!ni->attr_list.size || (!name_len && (type == ATTR_LIST || type == ATTR_STD))) { if (le_o) *le_o = NULL; if (mi) *mi = &ni->mi; /* Look for required attribute in primary record. */ return mi_find_attr(ni, &ni->mi, attr, type, name, name_len, NULL); } /* First look for list entry of required type. */ le = al_find_ex(ni, le_o ? *le_o : NULL, type, name, name_len, vcn); if (!le) return NULL; if (le_o) *le_o = le; /* Load record that contains this attribute. */ if (ni_load_mi(ni, le, &m)) return NULL; /* Look for required attribute. 
*/ attr = mi_find_attr(ni, m, NULL, type, name, name_len, &le->id); if (!attr) goto out; if (!attr->non_res) { if (vcn && *vcn) goto out; } else if (!vcn) { if (attr->nres.svcn) goto out; } else if (le64_to_cpu(attr->nres.svcn) > *vcn || *vcn > le64_to_cpu(attr->nres.evcn)) { goto out; } if (mi) *mi = m; return attr; out: _ntfs_bad_inode(&ni->vfs_inode); return NULL; } /* * ni_enum_attr_ex - Enumerates attributes in ntfs_inode. */ struct ATTRIB *ni_enum_attr_ex(struct ntfs_inode *ni, struct ATTRIB *attr, struct ATTR_LIST_ENTRY **le, struct mft_inode **mi) { struct mft_inode *mi2; struct ATTR_LIST_ENTRY *le2; /* Do we have an attribute list? */ if (!ni->attr_list.size) { *le = NULL; if (mi) *mi = &ni->mi; /* Enum attributes in primary record. */ return mi_enum_attr(ni, &ni->mi, attr); } /* Get next list entry. */ le2 = *le = al_enumerate(ni, attr ? *le : NULL); if (!le2) return NULL; /* Load record that contains the required attribute. */ if (ni_load_mi(ni, le2, &mi2)) return NULL; if (mi) *mi = mi2; /* Find attribute in loaded record. */ return rec_find_attr_le(ni, mi2, le2); } /* * ni_load_all_mi - Load all subrecords. */ int ni_load_all_mi(struct ntfs_inode *ni) { int err; struct ATTR_LIST_ENTRY *le; if (!ni->attr_list.size) return 0; le = NULL; while ((le = al_enumerate(ni, le))) { CLST rno = ino_get(&le->ref); if (rno == ni->mi.rno) continue; err = ni_load_mi_ex(ni, rno, NULL); if (err) return err; } return 0; } /* * ni_add_subrecord - Allocate + format + attach a new subrecord. */ bool ni_add_subrecord(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi) { struct mft_inode *m; m = kzalloc(sizeof(struct mft_inode), GFP_NOFS); if (!m) return false; if (mi_format_new(m, ni->mi.sbi, rno, 0, ni->mi.rno == MFT_REC_MFT)) { mi_put(m); return false; } mi_get_ref(&ni->mi, &m->mrec->parent_ref); ni_add_mi(ni, m); *mi = m; return true; } /* * ni_remove_attr - Remove all attributes for the given type/name/id. */ int ni_remove_attr(struct ntfs_inode *ni, enum ATTR_TYPE type, const __le16 *name, u8 name_len, bool base_only, const __le16 *id) { int err; struct ATTRIB *attr; struct ATTR_LIST_ENTRY *le; struct mft_inode *mi; u32 type_in; int diff; if (base_only || type == ATTR_LIST || !ni->attr_list.size) { attr = mi_find_attr(ni, &ni->mi, NULL, type, name, name_len, id); if (!attr) return -ENOENT; mi_remove_attr(ni, &ni->mi, attr); return 0; } type_in = le32_to_cpu(type); le = NULL; for (;;) { le = al_enumerate(ni, le); if (!le) return 0; next_le2: diff = le32_to_cpu(le->type) - type_in; if (diff < 0) continue; if (diff > 0) return 0; if (le->name_len != name_len) continue; if (name_len && memcmp(le_name(le), name, name_len * sizeof(short))) continue; if (id && le->id != *id) continue; err = ni_load_mi(ni, le, &mi); if (err) return err; al_remove_le(ni, le); attr = mi_find_attr(ni, mi, NULL, type, name, name_len, id); if (!attr) return -ENOENT; mi_remove_attr(ni, mi, attr); if (PtrOffset(ni->attr_list.le, le) >= ni->attr_list.size) return 0; goto next_le2; } } /* * ni_ins_new_attr - Insert the attribute into record. * * Return: Not full constructed attribute or NULL if not possible to create. 
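Aside: the body of ni_ins_new_attr() that follows notes that al_add_le() can end up in ni_expand_list() and relocate attributes, so 'name' is re-read from the new list entry instead of being trusted as a pointer. A small userspace analogy of that rule (re-derive pointers after the container may have moved), not kernel code:

#include <stdlib.h>
#include <string.h>

struct buf {
        char *data;
        size_t len, cap;
};

// Append may realloc and move buf->data, so callers must not keep raw
// pointers into the old storage across this call; keep offsets instead.
static int buf_append(struct buf *b, const char *s, size_t n)
{
        if (b->len + n > b->cap) {
                size_t ncap = (b->len + n) * 2;
                char *nd = realloc(b->data, ncap);

                if (!nd)
                        return -1;
                b->data = nd;           // old pointers into b->data are stale now
                b->cap = ncap;
        }
        memcpy(b->data + b->len, s, n);
        b->len += n;
        return 0;
}

// Correct usage: remember the offset, re-derive the pointer afterwards,
// just as ni_ins_new_attr() re-reads 'name' from the list entry.
static const char *append_and_get(struct buf *b, const char *s, size_t n)
{
        size_t off = b->len;

        if (buf_append(b, s, n))
                return NULL;
        return b->data + off;           // re-derived, not a saved pointer
}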
*/ static struct ATTRIB * ni_ins_new_attr(struct ntfs_inode *ni, struct mft_inode *mi, struct ATTR_LIST_ENTRY *le, enum ATTR_TYPE type, const __le16 *name, u8 name_len, u32 asize, u16 name_off, CLST svcn, struct ATTR_LIST_ENTRY **ins_le) { int err; struct ATTRIB *attr; bool le_added = false; struct MFT_REF ref; mi_get_ref(mi, &ref); if (type != ATTR_LIST && !le && ni->attr_list.size) { err = al_add_le(ni, type, name, name_len, svcn, cpu_to_le16(-1), &ref, &le); if (err) { /* No memory or no space. */ return ERR_PTR(err); } le_added = true; /* * al_add_le -> attr_set_size (list) -> ni_expand_list * which moves some attributes out of primary record * this means that name may point into moved memory * reinit 'name' from le. */ name = le->name; } attr = mi_insert_attr(ni, mi, type, name, name_len, asize, name_off); if (!attr) { if (le_added) al_remove_le(ni, le); return NULL; } if (type == ATTR_LIST) { /* Attr list is not in list entry array. */ goto out; } if (!le) goto out; /* Update ATTRIB Id and record reference. */ le->id = attr->id; ni->attr_list.dirty = true; le->ref = ref; out: if (ins_le) *ins_le = le; return attr; } /* * ni_repack * * Random write access to sparsed or compressed file may result to * not optimized packed runs. * Here is the place to optimize it. */ static int ni_repack(struct ntfs_inode *ni) { #if 1 return 0; #else int err = 0; struct ntfs_sb_info *sbi = ni->mi.sbi; struct mft_inode *mi, *mi_p = NULL; struct ATTRIB *attr = NULL, *attr_p; struct ATTR_LIST_ENTRY *le = NULL, *le_p; CLST alloc = 0; u8 cluster_bits = sbi->cluster_bits; CLST svcn, evcn = 0, svcn_p, evcn_p, next_svcn; u32 roff, rs = sbi->record_size; struct runs_tree run; run_init(&run); while ((attr = ni_enum_attr_ex(ni, attr, &le, &mi))) { if (!attr->non_res) continue; svcn = le64_to_cpu(attr->nres.svcn); if (svcn != le64_to_cpu(le->vcn)) { err = -EINVAL; break; } if (!svcn) { alloc = le64_to_cpu(attr->nres.alloc_size) >> cluster_bits; mi_p = NULL; } else if (svcn != evcn + 1) { err = -EINVAL; break; } evcn = le64_to_cpu(attr->nres.evcn); if (svcn > evcn + 1) { err = -EINVAL; break; } if (!mi_p) { /* Do not try if not enough free space. */ if (le32_to_cpu(mi->mrec->used) + 8 >= rs) continue; /* Do not try if last attribute segment. */ if (evcn + 1 == alloc) continue; run_close(&run); } roff = le16_to_cpu(attr->nres.run_off); if (roff > le32_to_cpu(attr->size)) { err = -EINVAL; break; } err = run_unpack(&run, sbi, ni->mi.rno, svcn, evcn, svcn, Add2Ptr(attr, roff), le32_to_cpu(attr->size) - roff); if (err < 0) break; if (!mi_p) { mi_p = mi; attr_p = attr; svcn_p = svcn; evcn_p = evcn; le_p = le; err = 0; continue; } /* * Run contains data from two records: mi_p and mi * Try to pack in one. */ err = mi_pack_runs(mi_p, attr_p, &run, evcn + 1 - svcn_p); if (err) break; next_svcn = le64_to_cpu(attr_p->nres.evcn) + 1; if (next_svcn >= evcn + 1) { /* We can remove this attribute segment. */ al_remove_le(ni, le); mi_remove_attr(NULL, mi, attr); le = le_p; continue; } attr->nres.svcn = le->vcn = cpu_to_le64(next_svcn); mi->dirty = true; ni->attr_list.dirty = true; if (evcn + 1 == alloc) { err = mi_pack_runs(mi, attr, &run, evcn + 1 - next_svcn); if (err) break; mi_p = NULL; } else { mi_p = mi; attr_p = attr; svcn_p = next_svcn; evcn_p = evcn; le_p = le; run_truncate_head(&run, next_svcn); } } if (err) { ntfs_inode_warn(&ni->vfs_inode, "repack problem"); ntfs_set_state(sbi, NTFS_DIRTY_ERROR); /* Pack loaded but not packed runs. 
*/ if (mi_p) mi_pack_runs(mi_p, attr_p, &run, evcn_p + 1 - svcn_p); } run_close(&run); return err; #endif } /* * ni_try_remove_attr_list * * Can we remove attribute list? * Check the case when primary record contains enough space for all attributes. */ static int ni_try_remove_attr_list(struct ntfs_inode *ni) { int err = 0; struct ntfs_sb_info *sbi = ni->mi.sbi; struct ATTRIB *attr, *attr_list, *attr_ins; struct ATTR_LIST_ENTRY *le; struct mft_inode *mi; u32 asize, free; struct MFT_REF ref; struct MFT_REC *mrec; __le16 id; if (!ni->attr_list.dirty) return 0; err = ni_repack(ni); if (err) return err; attr_list = mi_find_attr(ni, &ni->mi, NULL, ATTR_LIST, NULL, 0, NULL); if (!attr_list) return 0; asize = le32_to_cpu(attr_list->size); /* Free space in primary record without attribute list. */ free = sbi->record_size - le32_to_cpu(ni->mi.mrec->used) + asize; mi_get_ref(&ni->mi, &ref); le = NULL; while ((le = al_enumerate(ni, le))) { if (!memcmp(&le->ref, &ref, sizeof(ref))) continue; if (le->vcn) return 0; mi = ni_find_mi(ni, ino_get(&le->ref)); if (!mi) return 0; attr = mi_find_attr(ni, mi, NULL, le->type, le_name(le), le->name_len, &le->id); if (!attr) return 0; asize = le32_to_cpu(attr->size); if (asize > free) return 0; free -= asize; } /* Make a copy of primary record to restore if error. */ mrec = kmemdup(ni->mi.mrec, sbi->record_size, GFP_NOFS); if (!mrec) return 0; /* Not critical. */ /* It seems that attribute list can be removed from primary record. */ mi_remove_attr(NULL, &ni->mi, attr_list); /* * Repeat the cycle above and copy all attributes to primary record. * Do not remove original attributes from subrecords! * It should be success! */ le = NULL; while ((le = al_enumerate(ni, le))) { if (!memcmp(&le->ref, &ref, sizeof(ref))) continue; mi = ni_find_mi(ni, ino_get(&le->ref)); if (!mi) { /* Should never happened, 'cause already checked. */ goto out; } attr = mi_find_attr(ni, mi, NULL, le->type, le_name(le), le->name_len, &le->id); if (!attr) { /* Should never happened, 'cause already checked. */ goto out; } asize = le32_to_cpu(attr->size); /* Insert into primary record. */ attr_ins = mi_insert_attr(ni, &ni->mi, le->type, le_name(le), le->name_len, asize, le16_to_cpu(attr->name_off)); if (!attr_ins) { /* * No space in primary record (already checked). */ goto out; } /* Copy all except id. */ id = attr_ins->id; memcpy(attr_ins, attr, asize); attr_ins->id = id; } /* * Repeat the cycle above and remove all attributes from subrecords. */ le = NULL; while ((le = al_enumerate(ni, le))) { if (!memcmp(&le->ref, &ref, sizeof(ref))) continue; mi = ni_find_mi(ni, ino_get(&le->ref)); if (!mi) continue; attr = mi_find_attr(ni, mi, NULL, le->type, le_name(le), le->name_len, &le->id); if (!attr) continue; /* Remove from original record. */ mi_remove_attr(NULL, mi, attr); } run_deallocate(sbi, &ni->attr_list.run, true); run_close(&ni->attr_list.run); ni->attr_list.size = 0; kvfree(ni->attr_list.le); ni->attr_list.le = NULL; ni->attr_list.dirty = false; kfree(mrec); return 0; out: /* Restore primary record. */ swap(mrec, ni->mi.mrec); kfree(mrec); return 0; } /* * ni_create_attr_list - Generates an attribute list for this primary record. 
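For reference: ni_create_attr_list() below writes one $ATTRIBUTE_LIST entry per attribute, each sized by le_size(name_len) and 8-byte aligned. The model below approximates that computation; the layout only mimics struct ATTR_LIST_ENTRY (packed little-endian fields in ntfs.h), so treat the offsets as assumptions.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ALIGN8(x) (((x) + 7) & ~(size_t)7)

// Illustrative model of an on-disk $ATTRIBUTE_LIST entry header.
struct le_hdr {
        uint32_t type;
        uint16_t size;
        uint8_t  name_len;
        uint8_t  name_off;
        uint64_t vcn;
        uint64_t ref;                   // packed MFT_REF in the real structure
        uint16_t id;
        uint16_t name[];                // UTF-16LE name follows
};

// Entry size as written by ni_create_attr_list(): header plus UTF-16 name,
// rounded up to 8 bytes (mirrors le_size() in ntfs_fs.h).
static size_t le_entry_size(uint8_t name_len)
{
        return ALIGN8(offsetof(struct le_hdr, name) +
                      (size_t)name_len * sizeof(uint16_t));
}

int main(void)
{
        printf("unnamed : %zu bytes\n", le_entry_size(0));  // 32 with this model
        printf("4 chars : %zu bytes\n", le_entry_size(4));  // 40 with this model
        return 0;
}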
*/ int ni_create_attr_list(struct ntfs_inode *ni) { struct ntfs_sb_info *sbi = ni->mi.sbi; int err; u32 lsize; struct ATTRIB *attr; struct ATTRIB *arr_move[7]; struct ATTR_LIST_ENTRY *le, *le_b[7]; struct MFT_REC *rec; bool is_mft; CLST rno = 0; struct mft_inode *mi; u32 free_b, nb, to_free, rs; u16 sz; is_mft = ni->mi.rno == MFT_REC_MFT; rec = ni->mi.mrec; rs = sbi->record_size; /* * Skip estimating exact memory requirement. * Looks like one record_size is always enough. */ le = kmalloc(al_aligned(rs), GFP_NOFS); if (!le) return -ENOMEM; mi_get_ref(&ni->mi, &le->ref); ni->attr_list.le = le; attr = NULL; nb = 0; free_b = 0; attr = NULL; for (; (attr = mi_enum_attr(ni, &ni->mi, attr)); le = Add2Ptr(le, sz)) { sz = le_size(attr->name_len); le->type = attr->type; le->size = cpu_to_le16(sz); le->name_len = attr->name_len; le->name_off = offsetof(struct ATTR_LIST_ENTRY, name); le->vcn = 0; if (le != ni->attr_list.le) le->ref = ni->attr_list.le->ref; le->id = attr->id; if (attr->name_len) memcpy(le->name, attr_name(attr), sizeof(short) * attr->name_len); else if (attr->type == ATTR_STD) continue; else if (attr->type == ATTR_LIST) continue; else if (is_mft && attr->type == ATTR_DATA) continue; if (!nb || nb < ARRAY_SIZE(arr_move)) { le_b[nb] = le; arr_move[nb++] = attr; free_b += le32_to_cpu(attr->size); } } lsize = PtrOffset(ni->attr_list.le, le); ni->attr_list.size = lsize; to_free = le32_to_cpu(rec->used) + lsize + SIZEOF_RESIDENT; if (to_free <= rs) { to_free = 0; } else { to_free -= rs; if (to_free > free_b) { err = -EINVAL; goto out; } } /* Allocate child MFT. */ err = ntfs_look_free_mft(sbi, &rno, is_mft, ni, &mi); if (err) goto out; err = -EINVAL; /* Call mi_remove_attr() in reverse order to keep pointers 'arr_move' valid. */ while (to_free > 0) { struct ATTRIB *b = arr_move[--nb]; u32 asize = le32_to_cpu(b->size); u16 name_off = le16_to_cpu(b->name_off); attr = mi_insert_attr(ni, mi, b->type, Add2Ptr(b, name_off), b->name_len, asize, name_off); if (!attr) goto out; mi_get_ref(mi, &le_b[nb]->ref); le_b[nb]->id = attr->id; /* Copy all except id. */ memcpy(attr, b, asize); attr->id = le_b[nb]->id; /* Remove from primary record. */ if (!mi_remove_attr(NULL, &ni->mi, b)) goto out; if (to_free <= asize) break; to_free -= asize; if (!nb) goto out; } attr = mi_insert_attr(ni, &ni->mi, ATTR_LIST, NULL, 0, lsize + SIZEOF_RESIDENT, SIZEOF_RESIDENT); if (!attr) goto out; attr->non_res = 0; attr->flags = 0; attr->res.data_size = cpu_to_le32(lsize); attr->res.data_off = SIZEOF_RESIDENT_LE; attr->res.flags = 0; attr->res.res = 0; memcpy(resident_data_ex(attr, lsize), ni->attr_list.le, lsize); ni->attr_list.dirty = false; mark_inode_dirty(&ni->vfs_inode); return 0; out: kvfree(ni->attr_list.le); ni->attr_list.le = NULL; ni->attr_list.size = 0; return err; } /* * ni_ins_attr_ext - Add an external attribute to the ntfs_inode. */ static int ni_ins_attr_ext(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le, enum ATTR_TYPE type, const __le16 *name, u8 name_len, u32 asize, CLST svcn, u16 name_off, bool force_ext, struct ATTRIB **ins_attr, struct mft_inode **ins_mi, struct ATTR_LIST_ENTRY **ins_le) { struct ATTRIB *attr; struct mft_inode *mi; CLST rno; u64 vbo; struct rb_node *node; int err; bool is_mft, is_mft_data; struct ntfs_sb_info *sbi = ni->mi.sbi; is_mft = ni->mi.rno == MFT_REC_MFT; is_mft_data = is_mft && type == ATTR_DATA && !name_len; if (asize > sbi->max_bytes_per_attr) { err = -EINVAL; goto out; } /* * Standard information and attr_list cannot be made external. 
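A worked example: the 'to_free' arithmetic above decides how many attribute bytes must leave the primary record so that the record content plus a resident $ATTRIBUTE_LIST still fits. The sketch below restates that computation; 0x18 for the resident header size (SIZEOF_RESIDENT) is an assumption taken from the driver's headers.

#include <stdint.h>
#include <stdio.h>

// Bytes that must move out of the primary MFT record so that the new
// resident $ATTRIBUTE_LIST (header + list data) still fits.
static uint32_t bytes_to_evict(uint32_t record_size, uint32_t used,
                               uint32_t list_size)
{
        uint32_t need = used + list_size + 0x18;        // SIZEOF_RESIDENT (assumed)

        return need <= record_size ? 0 : need - record_size;
}

int main(void)
{
        // 1 KiB record, 900 bytes used, 200-byte attribute list:
        // 900 + 200 + 24 = 1124 > 1024, so 100 bytes must be evicted.
        printf("%u\n", bytes_to_evict(1024, 900, 200));
        return 0;
}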
* The Log File cannot have any external attributes. */ if (type == ATTR_STD || type == ATTR_LIST || ni->mi.rno == MFT_REC_LOG) { err = -EINVAL; goto out; } /* Create attribute list if it is not already existed. */ if (!ni->attr_list.size) { err = ni_create_attr_list(ni); if (err) goto out; } vbo = is_mft_data ? ((u64)svcn << sbi->cluster_bits) : 0; if (force_ext) goto insert_ext; /* Load all subrecords into memory. */ err = ni_load_all_mi(ni); if (err) goto out; /* Check each of loaded subrecord. */ for (node = rb_first(&ni->mi_tree); node; node = rb_next(node)) { mi = rb_entry(node, struct mft_inode, node); if (is_mft_data && (mi_enum_attr(ni, mi, NULL) || vbo <= ((u64)mi->rno << sbi->record_bits))) { /* We can't accept this record 'cause MFT's bootstrapping. */ continue; } if (is_mft && mi_find_attr(ni, mi, NULL, ATTR_DATA, NULL, 0, NULL)) { /* * This child record already has a ATTR_DATA. * So it can't accept any other records. */ continue; } if ((type != ATTR_NAME || name_len) && mi_find_attr(ni, mi, NULL, type, name, name_len, NULL)) { /* Only indexed attributes can share same record. */ continue; } /* * Do not try to insert this attribute * if there is no room in record. */ if (le32_to_cpu(mi->mrec->used) + asize > sbi->record_size) continue; /* Try to insert attribute into this subrecord. */ attr = ni_ins_new_attr(ni, mi, le, type, name, name_len, asize, name_off, svcn, ins_le); if (!attr) continue; if (IS_ERR(attr)) return PTR_ERR(attr); if (ins_attr) *ins_attr = attr; if (ins_mi) *ins_mi = mi; return 0; } insert_ext: /* We have to allocate a new child subrecord. */ err = ntfs_look_free_mft(sbi, &rno, is_mft_data, ni, &mi); if (err) goto out; if (is_mft_data && vbo <= ((u64)rno << sbi->record_bits)) { err = -EINVAL; goto out1; } attr = ni_ins_new_attr(ni, mi, le, type, name, name_len, asize, name_off, svcn, ins_le); if (!attr) { err = -EINVAL; goto out2; } if (IS_ERR(attr)) { err = PTR_ERR(attr); goto out2; } if (ins_attr) *ins_attr = attr; if (ins_mi) *ins_mi = mi; return 0; out2: ni_remove_mi(ni, mi); mi_put(mi); out1: ntfs_mark_rec_free(sbi, rno, is_mft); out: return err; } /* * ni_insert_attr - Insert an attribute into the file. * * If the primary record has room, it will just insert the attribute. * If not, it may make the attribute external. * For $MFT::Data it may make room for the attribute by * making other attributes external. * * NOTE: * The ATTR_LIST and ATTR_STD cannot be made external. * This function does not fill new attribute full. * It only fills 'size'/'type'/'id'/'name_len' fields. */ static int ni_insert_attr(struct ntfs_inode *ni, enum ATTR_TYPE type, const __le16 *name, u8 name_len, u32 asize, u16 name_off, CLST svcn, struct ATTRIB **ins_attr, struct mft_inode **ins_mi, struct ATTR_LIST_ENTRY **ins_le) { struct ntfs_sb_info *sbi = ni->mi.sbi; int err; struct ATTRIB *attr, *eattr; struct MFT_REC *rec; bool is_mft; struct ATTR_LIST_ENTRY *le; u32 list_reserve, max_free, free, used, t32; __le16 id; u16 t16; is_mft = ni->mi.rno == MFT_REC_MFT; rec = ni->mi.mrec; list_reserve = SIZEOF_NONRESIDENT + 3 * (1 + 2 * sizeof(u32)); used = le32_to_cpu(rec->used); free = sbi->record_size - used; if (is_mft && type != ATTR_LIST) { /* Reserve space for the ATTRIB list. 
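Aside: the list_reserve logic just above holds back room in the $MFT base record for a future attribute list. A sketch of the same bookkeeping follows; 0x40 for SIZEOF_NONRESIDENT is an assumption, and the kernel applies the reserve when is_mft && type != ATTR_LIST, which is collapsed into a single flag here.

#include <stdint.h>
#include <stdio.h>

// Free space usable for a new attribute, after holding back room for a
// possible $ATTRIBUTE_LIST (nonresident header + a tiny run list).
static uint32_t usable_free(uint32_t record_size, uint32_t used,
                            int reserve_for_list)
{
        uint32_t free = record_size - used;
        uint32_t list_reserve = 0x40 + 3 * (1 + 2 * sizeof(uint32_t)); // 91

        if (!reserve_for_list)
                return free;
        return free < list_reserve ? 0 : free - list_reserve;
}

int main(void)
{
        // 1 KiB record with 950 bytes used: 74 bytes are free, but all of it
        // is held back on $MFT because 74 < 91.
        printf("%u\n", usable_free(1024, 950, 1));
        return 0;
}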
*/ if (free < list_reserve) free = 0; else free -= list_reserve; } if (asize <= free) { attr = ni_ins_new_attr(ni, &ni->mi, NULL, type, name, name_len, asize, name_off, svcn, ins_le); if (IS_ERR(attr)) { err = PTR_ERR(attr); goto out; } if (attr) { if (ins_attr) *ins_attr = attr; if (ins_mi) *ins_mi = &ni->mi; err = 0; goto out; } } if (!is_mft || type != ATTR_DATA || svcn) { /* This ATTRIB will be external. */ err = ni_ins_attr_ext(ni, NULL, type, name, name_len, asize, svcn, name_off, false, ins_attr, ins_mi, ins_le); goto out; } /* * Here we have: "is_mft && type == ATTR_DATA && !svcn" * * The first chunk of the $MFT::Data ATTRIB must be the base record. * Evict as many other attributes as possible. */ max_free = free; /* Estimate the result of moving all possible attributes away. */ attr = NULL; while ((attr = mi_enum_attr(ni, &ni->mi, attr))) { if (attr->type == ATTR_STD) continue; if (attr->type == ATTR_LIST) continue; max_free += le32_to_cpu(attr->size); } if (max_free < asize + list_reserve) { /* Impossible to insert this attribute into primary record. */ err = -EINVAL; goto out; } /* Start real attribute moving. */ attr = NULL; for (;;) { attr = mi_enum_attr(ni, &ni->mi, attr); if (!attr) { /* We should never be here 'cause we have already check this case. */ err = -EINVAL; goto out; } /* Skip attributes that MUST be primary record. */ if (attr->type == ATTR_STD || attr->type == ATTR_LIST) continue; le = NULL; if (ni->attr_list.size) { le = al_find_le(ni, NULL, attr); if (!le) { /* Really this is a serious bug. */ err = -EINVAL; goto out; } } t32 = le32_to_cpu(attr->size); t16 = le16_to_cpu(attr->name_off); err = ni_ins_attr_ext(ni, le, attr->type, Add2Ptr(attr, t16), attr->name_len, t32, attr_svcn(attr), t16, false, &eattr, NULL, NULL); if (err) return err; id = eattr->id; memcpy(eattr, attr, t32); eattr->id = id; /* Remove from primary record. */ mi_remove_attr(NULL, &ni->mi, attr); /* attr now points to next attribute. */ if (attr->type == ATTR_END) goto out; } while (asize + list_reserve > sbi->record_size - le32_to_cpu(rec->used)) ; attr = ni_ins_new_attr(ni, &ni->mi, NULL, type, name, name_len, asize, name_off, svcn, ins_le); if (!attr) { err = -EINVAL; goto out; } if (IS_ERR(attr)) { err = PTR_ERR(attr); goto out; } if (ins_attr) *ins_attr = attr; if (ins_mi) *ins_mi = &ni->mi; out: return err; } /* ni_expand_mft_list - Split ATTR_DATA of $MFT. */ static int ni_expand_mft_list(struct ntfs_inode *ni) { int err = 0; struct runs_tree *run = &ni->file.run; u32 asize, run_size, done = 0; struct ATTRIB *attr; struct rb_node *node; CLST mft_min, mft_new, svcn, evcn, plen; struct mft_inode *mi, *mi_min, *mi_new; struct ntfs_sb_info *sbi = ni->mi.sbi; /* Find the nearest MFT. */ mft_min = 0; mft_new = 0; mi_min = NULL; for (node = rb_first(&ni->mi_tree); node; node = rb_next(node)) { mi = rb_entry(node, struct mft_inode, node); attr = mi_enum_attr(ni, mi, NULL); if (!attr) { mft_min = mi->rno; mi_min = mi; break; } } if (ntfs_look_free_mft(sbi, &mft_new, true, ni, &mi_new)) { mft_new = 0; /* Really this is not critical. 
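Aside: before evicting anything for $MFT::Data, ni_insert_attr() above first checks whether evicting every movable attribute could possibly make room. A minimal model of that early check, with attribute sizes passed in as plain numbers rather than enumerated from the record:

#include <stddef.h>
#include <stdint.h>

// Would moving every movable attribute (everything except STD and LIST)
// out of the base record leave room for 'asize' plus the list reserve?
static int can_make_room(uint32_t free_now, const uint32_t *movable_sizes,
                         size_t n, uint32_t asize, uint32_t list_reserve)
{
        uint32_t max_free = free_now;
        size_t i;

        for (i = 0; i < n; i++)
                max_free += movable_sizes[i];
        return max_free >= asize + list_reserve;
}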
*/ } else if (mft_min > mft_new) { mft_min = mft_new; mi_min = mi_new; } else { ntfs_mark_rec_free(sbi, mft_new, true); mft_new = 0; ni_remove_mi(ni, mi_new); } attr = mi_find_attr(ni, &ni->mi, NULL, ATTR_DATA, NULL, 0, NULL); if (!attr) { err = -EINVAL; goto out; } asize = le32_to_cpu(attr->size); evcn = le64_to_cpu(attr->nres.evcn); svcn = bytes_to_cluster(sbi, (u64)(mft_min + 1) << sbi->record_bits); if (evcn + 1 >= svcn) { err = -EINVAL; goto out; } /* * Split primary attribute [0 evcn] in two parts [0 svcn) + [svcn evcn]. * * Update first part of ATTR_DATA in 'primary MFT. */ err = run_pack(run, 0, svcn, Add2Ptr(attr, SIZEOF_NONRESIDENT), asize - SIZEOF_NONRESIDENT, &plen); if (err < 0) goto out; run_size = ALIGN(err, 8); err = 0; if (plen < svcn) { err = -EINVAL; goto out; } attr->nres.evcn = cpu_to_le64(svcn - 1); attr->size = cpu_to_le32(run_size + SIZEOF_NONRESIDENT); /* 'done' - How many bytes of primary MFT becomes free. */ done = asize - run_size - SIZEOF_NONRESIDENT; le32_sub_cpu(&ni->mi.mrec->used, done); /* Estimate packed size (run_buf=NULL). */ err = run_pack(run, svcn, evcn + 1 - svcn, NULL, sbi->record_size, &plen); if (err < 0) goto out; run_size = ALIGN(err, 8); err = 0; if (plen < evcn + 1 - svcn) { err = -EINVAL; goto out; } /* * This function may implicitly call expand attr_list. * Insert second part of ATTR_DATA in 'mi_min'. */ attr = ni_ins_new_attr(ni, mi_min, NULL, ATTR_DATA, NULL, 0, SIZEOF_NONRESIDENT + run_size, SIZEOF_NONRESIDENT, svcn, NULL); if (!attr) { err = -EINVAL; goto out; } if (IS_ERR(attr)) { err = PTR_ERR(attr); goto out; } attr->non_res = 1; attr->name_off = SIZEOF_NONRESIDENT_LE; attr->flags = 0; /* This function can't fail - cause already checked above. */ run_pack(run, svcn, evcn + 1 - svcn, Add2Ptr(attr, SIZEOF_NONRESIDENT), run_size, &plen); attr->nres.svcn = cpu_to_le64(svcn); attr->nres.evcn = cpu_to_le64(evcn); attr->nres.run_off = cpu_to_le16(SIZEOF_NONRESIDENT); out: if (mft_new) { ntfs_mark_rec_free(sbi, mft_new, true); ni_remove_mi(ni, mi_new); } return !err && !done ? -EOPNOTSUPP : err; } /* * ni_expand_list - Move all possible attributes out of primary record. */ int ni_expand_list(struct ntfs_inode *ni) { int err = 0; u32 asize, done = 0; struct ATTRIB *attr, *ins_attr; struct ATTR_LIST_ENTRY *le; bool is_mft = ni->mi.rno == MFT_REC_MFT; struct MFT_REF ref; mi_get_ref(&ni->mi, &ref); le = NULL; while ((le = al_enumerate(ni, le))) { if (le->type == ATTR_STD) continue; if (memcmp(&ref, &le->ref, sizeof(struct MFT_REF))) continue; if (is_mft && le->type == ATTR_DATA) continue; /* Find attribute in primary record. */ attr = rec_find_attr_le(ni, &ni->mi, le); if (!attr) { err = -EINVAL; goto out; } asize = le32_to_cpu(attr->size); /* Always insert into new record to avoid collisions (deep recursive). */ err = ni_ins_attr_ext(ni, le, attr->type, attr_name(attr), attr->name_len, asize, attr_svcn(attr), le16_to_cpu(attr->name_off), true, &ins_attr, NULL, NULL); if (err) goto out; memcpy(ins_attr, attr, asize); ins_attr->id = le->id; /* Remove from primary record. */ mi_remove_attr(NULL, &ni->mi, attr); done += asize; goto out; } if (!is_mft) { err = -EFBIG; /* Attr list is too big(?) */ goto out; } /* Split MFT data as much as possible. */ err = ni_expand_mft_list(ni); out: return !err && !done ? -EOPNOTSUPP : err; } /* * ni_insert_nonresident - Insert new nonresident attribute. 
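For reference: ni_expand_mft_list() above and ni_insert_nonresident() below both call run_pack() twice, first with a NULL buffer to learn the packed size, then again to fill the attribute. The same measure-then-fill pattern in plain C with snprintf, purely as an analogy:

#include <stdio.h>
#include <stdlib.h>

// Size the output with a NULL buffer, allocate exactly, then fill.
static char *format_owned(long lcn, long len)
{
        int n = snprintf(NULL, 0, "lcn=%ld len=%ld", lcn, len);  // sizing pass
        char *s;

        if (n < 0)
                return NULL;
        s = malloc(n + 1);
        if (!s)
                return NULL;
        snprintf(s, n + 1, "lcn=%ld len=%ld", lcn, len);         // fill pass
        return s;
}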
*/ int ni_insert_nonresident(struct ntfs_inode *ni, enum ATTR_TYPE type, const __le16 *name, u8 name_len, const struct runs_tree *run, CLST svcn, CLST len, __le16 flags, struct ATTRIB **new_attr, struct mft_inode **mi, struct ATTR_LIST_ENTRY **le) { int err; CLST plen; struct ATTRIB *attr; bool is_ext = (flags & (ATTR_FLAG_SPARSED | ATTR_FLAG_COMPRESSED)) && !svcn; u32 name_size = ALIGN(name_len * sizeof(short), 8); u32 name_off = is_ext ? SIZEOF_NONRESIDENT_EX : SIZEOF_NONRESIDENT; u32 run_off = name_off + name_size; u32 run_size, asize; struct ntfs_sb_info *sbi = ni->mi.sbi; /* Estimate packed size (run_buf=NULL). */ err = run_pack(run, svcn, len, NULL, sbi->max_bytes_per_attr - run_off, &plen); if (err < 0) goto out; run_size = ALIGN(err, 8); if (plen < len) { err = -EINVAL; goto out; } asize = run_off + run_size; if (asize > sbi->max_bytes_per_attr) { err = -EINVAL; goto out; } err = ni_insert_attr(ni, type, name, name_len, asize, name_off, svcn, &attr, mi, le); if (err) goto out; attr->non_res = 1; attr->name_off = cpu_to_le16(name_off); attr->flags = flags; /* This function can't fail - cause already checked above. */ run_pack(run, svcn, len, Add2Ptr(attr, run_off), run_size, &plen); attr->nres.svcn = cpu_to_le64(svcn); attr->nres.evcn = cpu_to_le64((u64)svcn + len - 1); if (new_attr) *new_attr = attr; *(__le64 *)&attr->nres.run_off = cpu_to_le64(run_off); attr->nres.alloc_size = svcn ? 0 : cpu_to_le64((u64)len << ni->mi.sbi->cluster_bits); attr->nres.data_size = attr->nres.alloc_size; attr->nres.valid_size = attr->nres.alloc_size; if (is_ext) { if (flags & ATTR_FLAG_COMPRESSED) attr->nres.c_unit = NTFS_LZNT_CUNIT; attr->nres.total_size = attr->nres.alloc_size; } out: return err; } /* * ni_insert_resident - Inserts new resident attribute. */ int ni_insert_resident(struct ntfs_inode *ni, u32 data_size, enum ATTR_TYPE type, const __le16 *name, u8 name_len, struct ATTRIB **new_attr, struct mft_inode **mi, struct ATTR_LIST_ENTRY **le) { int err; u32 name_size = ALIGN(name_len * sizeof(short), 8); u32 asize = SIZEOF_RESIDENT + name_size + ALIGN(data_size, 8); struct ATTRIB *attr; err = ni_insert_attr(ni, type, name, name_len, asize, SIZEOF_RESIDENT, 0, &attr, mi, le); if (err) return err; attr->non_res = 0; attr->flags = 0; attr->res.data_size = cpu_to_le32(data_size); attr->res.data_off = cpu_to_le16(SIZEOF_RESIDENT + name_size); if (type == ATTR_NAME) { attr->res.flags = RESIDENT_FLAG_INDEXED; /* is_attr_indexed(attr)) == true */ le16_add_cpu(&ni->mi.mrec->hard_links, 1); ni->mi.dirty = true; } attr->res.res = 0; if (new_attr) *new_attr = attr; return 0; } /* * ni_remove_attr_le - Remove attribute from record. */ void ni_remove_attr_le(struct ntfs_inode *ni, struct ATTRIB *attr, struct mft_inode *mi, struct ATTR_LIST_ENTRY *le) { mi_remove_attr(ni, mi, attr); if (le) al_remove_le(ni, le); } /* * ni_delete_all - Remove all attributes and frees allocates space. * * ntfs_evict_inode->ntfs_clear_inode->ni_delete_all (if no links). 
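A worked example: ni_insert_resident() above lays a resident attribute out as header, then the 8-byte aligned UTF-16 name, then the 8-byte aligned value. The sketch below restates that size and offset math; 0x18 for SIZEOF_RESIDENT is an assumption from the driver's headers.

#include <stdint.h>
#include <stdio.h>

#define ALIGN8(x) (((x) + 7u) & ~7u)

// Size and value offset of a resident attribute: header, aligned name,
// aligned value.
static void resident_layout(uint8_t name_len, uint32_t data_size,
                            uint32_t *asize, uint32_t *data_off)
{
        uint32_t name_size = ALIGN8((uint32_t)name_len * 2);

        *data_off = 0x18 + name_size;
        *asize = 0x18 + name_size + ALIGN8(data_size);
}

int main(void)
{
        uint32_t asize, off;

        // Unnamed attribute with 90 bytes of value:
        // 0x18 + 0 + ALIGN8(90) = 24 + 96 = 120, value starts at offset 24.
        resident_layout(0, 90, &asize, &off);
        printf("asize=%u data_off=%u\n", asize, off);
        return 0;
}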
*/ int ni_delete_all(struct ntfs_inode *ni) { int err; struct ATTR_LIST_ENTRY *le = NULL; struct ATTRIB *attr = NULL; struct rb_node *node; u16 roff; u32 asize; CLST svcn, evcn; struct ntfs_sb_info *sbi = ni->mi.sbi; bool nt3 = is_ntfs3(sbi); struct MFT_REF ref; while ((attr = ni_enum_attr_ex(ni, attr, &le, NULL))) { if (!nt3 || attr->name_len) { ; } else if (attr->type == ATTR_REPARSE) { mi_get_ref(&ni->mi, &ref); ntfs_remove_reparse(sbi, 0, &ref); } else if (attr->type == ATTR_ID && !attr->non_res && le32_to_cpu(attr->res.data_size) >= sizeof(struct GUID)) { ntfs_objid_remove(sbi, resident_data(attr)); } if (!attr->non_res) continue; svcn = le64_to_cpu(attr->nres.svcn); evcn = le64_to_cpu(attr->nres.evcn); if (evcn + 1 <= svcn) continue; asize = le32_to_cpu(attr->size); roff = le16_to_cpu(attr->nres.run_off); if (roff > asize) { /* ni_enum_attr_ex checks this case. */ continue; } /* run==1 means unpack and deallocate. */ run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn, Add2Ptr(attr, roff), asize - roff); } if (ni->attr_list.size) { run_deallocate(ni->mi.sbi, &ni->attr_list.run, true); al_destroy(ni); } /* Free all subrecords. */ for (node = rb_first(&ni->mi_tree); node;) { struct rb_node *next = rb_next(node); struct mft_inode *mi = rb_entry(node, struct mft_inode, node); clear_rec_inuse(mi->mrec); mi->dirty = true; mi_write(mi, 0); ntfs_mark_rec_free(sbi, mi->rno, false); ni_remove_mi(ni, mi); mi_put(mi); node = next; } /* Free base record. */ clear_rec_inuse(ni->mi.mrec); ni->mi.dirty = true; err = mi_write(&ni->mi, 0); ntfs_mark_rec_free(sbi, ni->mi.rno, false); return err; } /* ni_fname_name * * Return: File name attribute by its value. */ struct ATTR_FILE_NAME *ni_fname_name(struct ntfs_inode *ni, const struct le_str *uni, const struct MFT_REF *home_dir, struct mft_inode **mi, struct ATTR_LIST_ENTRY **le) { struct ATTRIB *attr = NULL; struct ATTR_FILE_NAME *fname; if (le) *le = NULL; /* Enumerate all names. */ next: attr = ni_find_attr(ni, attr, le, ATTR_NAME, NULL, 0, NULL, mi); if (!attr) return NULL; fname = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME); if (!fname) goto next; if (home_dir && memcmp(home_dir, &fname->home, sizeof(*home_dir))) goto next; if (!uni) return fname; if (uni->len != fname->name_len) goto next; if (ntfs_cmp_names(uni->name, uni->len, fname->name, uni->len, NULL, false)) goto next; return fname; } /* * ni_fname_type * * Return: File name attribute with given type. */ struct ATTR_FILE_NAME *ni_fname_type(struct ntfs_inode *ni, u8 name_type, struct mft_inode **mi, struct ATTR_LIST_ENTRY **le) { struct ATTRIB *attr = NULL; struct ATTR_FILE_NAME *fname; *le = NULL; if (name_type == FILE_NAME_POSIX) return NULL; /* Enumerate all names. */ for (;;) { attr = ni_find_attr(ni, attr, le, ATTR_NAME, NULL, 0, NULL, mi); if (!attr) return NULL; fname = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME); if (fname && name_type == fname->type) return fname; } } /* * ni_new_attr_flags * * Process compressed/sparsed in special way. * NOTE: You need to set ni->std_fa = new_fa * after this function to keep internal structures in consistency. 
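Aside: the next function, ni_new_attr_flags(), translates FILE_ATTRIBUTE_SPARSE_FILE / FILE_ATTRIBUTE_COMPRESSED into attribute flags and refuses to set both at once. An illustrative translation with stand-in flag values (not the on-disk constants) follows:

#include <errno.h>
#include <stdint.h>

#define FA_SPARSE      0x00000200u      // FILE_ATTRIBUTE_SPARSE_FILE
#define FA_COMPRESSED  0x00000800u      // FILE_ATTRIBUTE_COMPRESSED
#define AF_SPARSED     0x8000u          // stand-in for ATTR_FLAG_SPARSED
#define AF_COMPRESSED  0x0001u          // stand-in for ATTR_FLAG_COMPRESSED

// Map new file attributes onto attribute flags; sparse and compressed are
// mutually exclusive on a single data attribute.
static int translate_attr_flags(uint32_t new_fa, uint16_t old_aflags,
                                uint16_t *new_aflags)
{
        uint16_t f = old_aflags;

        f = (new_fa & FA_SPARSE) ? (f | AF_SPARSED) : (f & ~AF_SPARSED);
        f = (new_fa & FA_COMPRESSED) ? (f | AF_COMPRESSED) : (f & ~AF_COMPRESSED);

        if ((f & (AF_SPARSED | AF_COMPRESSED)) == (AF_SPARSED | AF_COMPRESSED))
                return -EOPNOTSUPP;     // "can't be sparsed and compressed"

        *new_aflags = f;
        return 0;
}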
*/ int ni_new_attr_flags(struct ntfs_inode *ni, enum FILE_ATTRIBUTE new_fa) { struct ATTRIB *attr; struct mft_inode *mi; __le16 new_aflags; u32 new_asize; attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi); if (!attr) return -EINVAL; new_aflags = attr->flags; if (new_fa & FILE_ATTRIBUTE_SPARSE_FILE) new_aflags |= ATTR_FLAG_SPARSED; else new_aflags &= ~ATTR_FLAG_SPARSED; if (new_fa & FILE_ATTRIBUTE_COMPRESSED) new_aflags |= ATTR_FLAG_COMPRESSED; else new_aflags &= ~ATTR_FLAG_COMPRESSED; if (new_aflags == attr->flags) return 0; if ((new_aflags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED)) == (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED)) { ntfs_inode_warn(&ni->vfs_inode, "file can't be sparsed and compressed"); return -EOPNOTSUPP; } if (!attr->non_res) goto out; if (attr->nres.data_size) { ntfs_inode_warn( &ni->vfs_inode, "one can change sparsed/compressed only for empty files"); return -EOPNOTSUPP; } /* Resize nonresident empty attribute in-place only. */ new_asize = (new_aflags & (ATTR_FLAG_COMPRESSED | ATTR_FLAG_SPARSED)) ? (SIZEOF_NONRESIDENT_EX + 8) : (SIZEOF_NONRESIDENT + 8); if (!mi_resize_attr(mi, attr, new_asize - le32_to_cpu(attr->size))) return -EOPNOTSUPP; if (new_aflags & ATTR_FLAG_SPARSED) { attr->name_off = SIZEOF_NONRESIDENT_EX_LE; /* Windows uses 16 clusters per frame but supports one cluster per frame too. */ attr->nres.c_unit = 0; ni->vfs_inode.i_mapping->a_ops = &ntfs_aops; } else if (new_aflags & ATTR_FLAG_COMPRESSED) { attr->name_off = SIZEOF_NONRESIDENT_EX_LE; /* The only allowed: 16 clusters per frame. */ attr->nres.c_unit = NTFS_LZNT_CUNIT; ni->vfs_inode.i_mapping->a_ops = &ntfs_aops_cmpr; } else { attr->name_off = SIZEOF_NONRESIDENT_LE; /* Normal files. */ attr->nres.c_unit = 0; ni->vfs_inode.i_mapping->a_ops = &ntfs_aops; } attr->nres.run_off = attr->name_off; out: attr->flags = new_aflags; mi->dirty = true; return 0; } /* * ni_parse_reparse * * buffer - memory for reparse buffer header */ enum REPARSE_SIGN ni_parse_reparse(struct ntfs_inode *ni, struct ATTRIB *attr, struct REPARSE_DATA_BUFFER *buffer) { const struct REPARSE_DATA_BUFFER *rp = NULL; u8 bits; u16 len; typeof(rp->CompressReparseBuffer) *cmpr; /* Try to estimate reparse point. */ if (!attr->non_res) { rp = resident_data_ex(attr, sizeof(struct REPARSE_DATA_BUFFER)); } else if (le64_to_cpu(attr->nres.data_size) >= sizeof(struct REPARSE_DATA_BUFFER)) { struct runs_tree run; run_init(&run); if (!attr_load_runs_vcn(ni, ATTR_REPARSE, NULL, 0, &run, 0) && !ntfs_read_run_nb(ni->mi.sbi, &run, 0, buffer, sizeof(struct REPARSE_DATA_BUFFER), NULL)) { rp = buffer; } run_close(&run); } if (!rp) return REPARSE_NONE; len = le16_to_cpu(rp->ReparseDataLength); switch (rp->ReparseTag) { case (IO_REPARSE_TAG_MICROSOFT | IO_REPARSE_TAG_SYMBOLIC_LINK): break; /* Symbolic link. */ case IO_REPARSE_TAG_MOUNT_POINT: break; /* Mount points and junctions. */ case IO_REPARSE_TAG_SYMLINK: break; case IO_REPARSE_TAG_COMPRESS: /* * WOF - Windows Overlay Filter - Used to compress files with * LZX/Xpress. * * Unlike native NTFS file compression, the Windows * Overlay Filter supports only read operations. This means * that it doesn't need to sector-align each compressed chunk, * so the compressed data can be packed more tightly together. * If you open the file for writing, the WOF just decompresses * the entire file, turning it back into a plain file. * * Ntfs3 driver decompresses the entire file only on write or * change size requests. 
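For reference: the switch a few lines below maps each WOF compression format to the log2 of its chunk size before caching it via ni_set_ext_compress_bits(). A compact model of that mapping; the enum names stand in for the WOF_COMPRESSION_* constants.

#include <stdint.h>

enum wof_format { XPRESS4K, XPRESS8K, XPRESS16K, LZX32K };

// Chunk ("frame") size per WOF compression format; the cached value is the
// log2 of the chunk size, unknown formats fall back to 64 KiB.
static uint8_t wof_chunk_bits(enum wof_format fmt)
{
        switch (fmt) {
        case XPRESS4K:  return 0x0c;    // 1 << 12 = 4 KiB
        case XPRESS8K:  return 0x0d;    // 8 KiB
        case XPRESS16K: return 0x0e;    // 16 KiB
        case LZX32K:    return 0x0f;    // 32 KiB
        default:        return 0x10;    // 64 KiB fallback
        }
}

static uint32_t wof_chunk_size(enum wof_format fmt)
{
        return 1u << wof_chunk_bits(fmt);
}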
*/ cmpr = &rp->CompressReparseBuffer; if (len < sizeof(*cmpr) || cmpr->WofVersion != WOF_CURRENT_VERSION || cmpr->WofProvider != WOF_PROVIDER_SYSTEM || cmpr->ProviderVer != WOF_PROVIDER_CURRENT_VERSION) { return REPARSE_NONE; } switch (cmpr->CompressionFormat) { case WOF_COMPRESSION_XPRESS4K: bits = 0xc; // 4k break; case WOF_COMPRESSION_XPRESS8K: bits = 0xd; // 8k break; case WOF_COMPRESSION_XPRESS16K: bits = 0xe; // 16k break; case WOF_COMPRESSION_LZX32K: bits = 0xf; // 32k break; default: bits = 0x10; // 64k break; } ni_set_ext_compress_bits(ni, bits); return REPARSE_COMPRESSED; case IO_REPARSE_TAG_DEDUP: ni->ni_flags |= NI_FLAG_DEDUPLICATED; return REPARSE_DEDUPLICATED; default: if (rp->ReparseTag & IO_REPARSE_TAG_NAME_SURROGATE) break; return REPARSE_NONE; } if (buffer != rp) memcpy(buffer, rp, sizeof(struct REPARSE_DATA_BUFFER)); /* Looks like normal symlink. */ return REPARSE_LINK; } /* * ni_fiemap - Helper for file_fiemap(). * * Assumed ni_lock. * TODO: Less aggressive locks. */ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo, __u64 vbo, __u64 len) { int err = 0; struct ntfs_sb_info *sbi = ni->mi.sbi; u8 cluster_bits = sbi->cluster_bits; struct runs_tree run; struct ATTRIB *attr; CLST vcn = vbo >> cluster_bits; CLST lcn, clen; u64 valid = ni->i_valid; u64 lbo, bytes; u64 end, alloc_size; size_t idx = -1; u32 flags; bool ok; run_init(&run); if (S_ISDIR(ni->vfs_inode.i_mode)) { attr = ni_find_attr(ni, NULL, NULL, ATTR_ALLOC, I30_NAME, ARRAY_SIZE(I30_NAME), NULL, NULL); } else { attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, NULL); if (!attr) { err = -EINVAL; goto out; } if (is_attr_compressed(attr)) { /* Unfortunately cp -r incorrectly treats compressed clusters. */ err = -EOPNOTSUPP; ntfs_inode_warn( &ni->vfs_inode, "fiemap is not supported for compressed file (cp -r)"); goto out; } } if (!attr || !attr->non_res) { err = fiemap_fill_next_extent( fieinfo, 0, 0, attr ? le32_to_cpu(attr->res.data_size) : 0, FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_LAST | FIEMAP_EXTENT_MERGED); goto out; } end = vbo + len; alloc_size = le64_to_cpu(attr->nres.alloc_size); if (end > alloc_size) end = alloc_size; while (vbo < end) { if (idx == -1) { ok = run_lookup_entry(&run, vcn, &lcn, &clen, &idx); } else { CLST vcn_next = vcn; ok = run_get_entry(&run, ++idx, &vcn, &lcn, &clen) && vcn == vcn_next; if (!ok) vcn = vcn_next; } if (!ok) { err = attr_load_runs_vcn(ni, attr->type, attr_name(attr), attr->name_len, &run, vcn); if (err) break; ok = run_lookup_entry(&run, vcn, &lcn, &clen, &idx); if (!ok) { err = -EINVAL; break; } } if (!clen) { err = -EINVAL; // ? 
break; } if (lcn == SPARSE_LCN) { vcn += clen; vbo = (u64)vcn << cluster_bits; continue; } flags = FIEMAP_EXTENT_MERGED; if (S_ISDIR(ni->vfs_inode.i_mode)) { ; } else if (is_attr_compressed(attr)) { CLST clst_data; err = attr_is_frame_compressed(ni, attr, vcn >> attr->nres.c_unit, &clst_data, &run); if (err) break; if (clst_data < NTFS_LZNT_CLUSTERS) flags |= FIEMAP_EXTENT_ENCODED; } else if (is_attr_encrypted(attr)) { flags |= FIEMAP_EXTENT_DATA_ENCRYPTED; } vbo = (u64)vcn << cluster_bits; bytes = (u64)clen << cluster_bits; lbo = (u64)lcn << cluster_bits; vcn += clen; if (vbo + bytes >= end) bytes = end - vbo; if (vbo + bytes <= valid) { ; } else if (vbo >= valid) { flags |= FIEMAP_EXTENT_UNWRITTEN; } else { /* vbo < valid && valid < vbo + bytes */ u64 dlen = valid - vbo; if (vbo + dlen >= end) flags |= FIEMAP_EXTENT_LAST; err = fiemap_fill_next_extent(fieinfo, vbo, lbo, dlen, flags); if (err < 0) break; if (err == 1) { err = 0; break; } vbo = valid; bytes -= dlen; if (!bytes) continue; lbo += dlen; flags |= FIEMAP_EXTENT_UNWRITTEN; } if (vbo + bytes >= end) flags |= FIEMAP_EXTENT_LAST; err = fiemap_fill_next_extent(fieinfo, vbo, lbo, bytes, flags); if (err < 0) break; if (err == 1) { err = 0; break; } vbo += bytes; } out: run_close(&run); return err; } /* * ni_readpage_cmpr * * When decompressing, we typically obtain more than one page per reference. * We inject the additional pages into the page cache. */ int ni_readpage_cmpr(struct ntfs_inode *ni, struct folio *folio) { int err; struct ntfs_sb_info *sbi = ni->mi.sbi; struct address_space *mapping = folio->mapping; pgoff_t index = folio->index; u64 frame_vbo, vbo = (u64)index << PAGE_SHIFT; struct page **pages = NULL; /* Array of at most 16 pages. stack? */ u8 frame_bits; CLST frame; u32 i, idx, frame_size, pages_per_frame; gfp_t gfp_mask; struct page *pg; if (vbo >= i_size_read(&ni->vfs_inode)) { folio_zero_range(folio, 0, folio_size(folio)); folio_mark_uptodate(folio); err = 0; goto out; } if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) { /* Xpress or LZX. */ frame_bits = ni_ext_compress_bits(ni); } else { /* LZNT compression. */ frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits; } frame_size = 1u << frame_bits; frame = vbo >> frame_bits; frame_vbo = (u64)frame << frame_bits; idx = (vbo - frame_vbo) >> PAGE_SHIFT; pages_per_frame = frame_size >> PAGE_SHIFT; pages = kcalloc(pages_per_frame, sizeof(struct page *), GFP_NOFS); if (!pages) { err = -ENOMEM; goto out; } pages[idx] = &folio->page; index = frame_vbo >> PAGE_SHIFT; gfp_mask = mapping_gfp_mask(mapping); for (i = 0; i < pages_per_frame; i++, index++) { if (i == idx) continue; pg = find_or_create_page(mapping, index, gfp_mask); if (!pg) { err = -ENOMEM; goto out1; } pages[i] = pg; } err = ni_read_frame(ni, frame_vbo, pages, pages_per_frame); out1: for (i = 0; i < pages_per_frame; i++) { pg = pages[i]; if (i == idx || !pg) continue; unlock_page(pg); put_page(pg); } out: /* At this point, err contains 0 or -EIO depending on the "critical" page. */ kfree(pages); folio_unlock(folio); return err; } #ifdef CONFIG_NTFS3_LZX_XPRESS /* * ni_decompress_file - Decompress LZX/Xpress compressed file. * * Remove ATTR_DATA::WofCompressedData. * Remove ATTR_REPARSE. 
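A worked example: ni_readpage_cmpr() above locates the compression frame that contains a page and the page's slot within that frame. The sketch below restates that geometry, assuming 4 KiB pages and using a 32 KiB (LZX-sized) frame in the example.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                   // assumes 4 KiB pages

struct frame_geo {
        uint64_t frame;                 // frame number
        uint64_t frame_vbo;             // byte offset of the frame
        uint32_t idx;                   // page index of 'vbo' within the frame
        uint32_t pages_per_frame;
};

static struct frame_geo locate_frame(uint64_t vbo, uint8_t frame_bits)
{
        struct frame_geo g;

        g.frame = vbo >> frame_bits;
        g.frame_vbo = g.frame << frame_bits;
        g.idx = (uint32_t)((vbo - g.frame_vbo) >> PAGE_SHIFT);
        g.pages_per_frame = (1u << frame_bits) >> PAGE_SHIFT;
        return g;
}

int main(void)
{
        // Page 11 of a file with 32 KiB frames (frame_bits = 15):
        // frame 1, frame_vbo 32768, page 3 of 8 within the frame.
        struct frame_geo g = locate_frame((uint64_t)11 << PAGE_SHIFT, 15);

        printf("frame=%llu vbo=%llu idx=%u of %u\n",
               (unsigned long long)g.frame, (unsigned long long)g.frame_vbo,
               g.idx, g.pages_per_frame);
        return 0;
}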
*/ int ni_decompress_file(struct ntfs_inode *ni) { struct ntfs_sb_info *sbi = ni->mi.sbi; struct inode *inode = &ni->vfs_inode; loff_t i_size = i_size_read(inode); struct address_space *mapping = inode->i_mapping; gfp_t gfp_mask = mapping_gfp_mask(mapping); struct page **pages = NULL; struct ATTR_LIST_ENTRY *le; struct ATTRIB *attr; CLST vcn, cend, lcn, clen, end; pgoff_t index; u64 vbo; u8 frame_bits; u32 i, frame_size, pages_per_frame, bytes; struct mft_inode *mi; int err; /* Clusters for decompressed data. */ cend = bytes_to_cluster(sbi, i_size); if (!i_size) goto remove_wof; /* Check in advance. */ if (cend > wnd_zeroes(&sbi->used.bitmap)) { err = -ENOSPC; goto out; } frame_bits = ni_ext_compress_bits(ni); frame_size = 1u << frame_bits; pages_per_frame = frame_size >> PAGE_SHIFT; pages = kcalloc(pages_per_frame, sizeof(struct page *), GFP_NOFS); if (!pages) { err = -ENOMEM; goto out; } /* * Step 1: Decompress data and copy to new allocated clusters. */ index = 0; for (vbo = 0; vbo < i_size; vbo += bytes) { u32 nr_pages; bool new; if (vbo + frame_size > i_size) { bytes = i_size - vbo; nr_pages = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT; } else { nr_pages = pages_per_frame; bytes = frame_size; } end = bytes_to_cluster(sbi, vbo + bytes); for (vcn = vbo >> sbi->cluster_bits; vcn < end; vcn += clen) { err = attr_data_get_block(ni, vcn, cend - vcn, &lcn, &clen, &new, false); if (err) goto out; } for (i = 0; i < pages_per_frame; i++, index++) { struct page *pg; pg = find_or_create_page(mapping, index, gfp_mask); if (!pg) { while (i--) { unlock_page(pages[i]); put_page(pages[i]); } err = -ENOMEM; goto out; } pages[i] = pg; } err = ni_read_frame(ni, vbo, pages, pages_per_frame); if (!err) { down_read(&ni->file.run_lock); err = ntfs_bio_pages(sbi, &ni->file.run, pages, nr_pages, vbo, bytes, REQ_OP_WRITE); up_read(&ni->file.run_lock); } for (i = 0; i < pages_per_frame; i++) { unlock_page(pages[i]); put_page(pages[i]); } if (err) goto out; cond_resched(); } remove_wof: /* * Step 2: Deallocate attributes ATTR_DATA::WofCompressedData * and ATTR_REPARSE. */ attr = NULL; le = NULL; while ((attr = ni_enum_attr_ex(ni, attr, &le, NULL))) { CLST svcn, evcn; u32 asize, roff; if (attr->type == ATTR_REPARSE) { struct MFT_REF ref; mi_get_ref(&ni->mi, &ref); ntfs_remove_reparse(sbi, 0, &ref); } if (!attr->non_res) continue; if (attr->type != ATTR_REPARSE && (attr->type != ATTR_DATA || attr->name_len != ARRAY_SIZE(WOF_NAME) || memcmp(attr_name(attr), WOF_NAME, sizeof(WOF_NAME)))) continue; svcn = le64_to_cpu(attr->nres.svcn); evcn = le64_to_cpu(attr->nres.evcn); if (evcn + 1 <= svcn) continue; asize = le32_to_cpu(attr->size); roff = le16_to_cpu(attr->nres.run_off); if (roff > asize) { err = -EINVAL; goto out; } /*run==1 Means unpack and deallocate. */ run_unpack_ex(RUN_DEALLOCATE, sbi, ni->mi.rno, svcn, evcn, svcn, Add2Ptr(attr, roff), asize - roff); } /* * Step 3: Remove attribute ATTR_DATA::WofCompressedData. */ err = ni_remove_attr(ni, ATTR_DATA, WOF_NAME, ARRAY_SIZE(WOF_NAME), false, NULL); if (err) goto out; /* * Step 4: Remove ATTR_REPARSE. */ err = ni_remove_attr(ni, ATTR_REPARSE, NULL, 0, false, NULL); if (err) goto out; /* * Step 5: Remove sparse flag from data attribute. */ attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL, &mi); if (!attr) { err = -EINVAL; goto out; } if (attr->non_res && is_attr_sparsed(attr)) { /* Sparsed attribute header is 8 bytes bigger than normal. 
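Aside: the block that follows shrinks a nonresident attribute header by 8 bytes when the sparse flag is dropped, sliding the mapping-pairs data back and reducing the affected sizes and offsets. A flat-buffer model of that move; offsets and parameters are illustrative.

#include <stdint.h>
#include <string.h>

// 'rec' is a flat byte image of an MFT record; the attribute starts at
// attr_off and its run data starts run_off bytes into the attribute.
static void drop_ext_header(uint8_t *rec, uint32_t *rec_used,
                            uint32_t attr_off, uint32_t *attr_size,
                            uint16_t *run_off)
{
        uint8_t *run = rec + attr_off + *run_off;
        size_t tail = *rec_used - (attr_off + *run_off);

        memmove(run - 8, run, tail);    // pull the run data (and what follows) back
        *attr_size -= 8;
        *run_off -= 8;
        *rec_used -= 8;
}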
*/ struct MFT_REC *rec = mi->mrec; u32 used = le32_to_cpu(rec->used); u32 asize = le32_to_cpu(attr->size); u16 roff = le16_to_cpu(attr->nres.run_off); char *rbuf = Add2Ptr(attr, roff); memmove(rbuf - 8, rbuf, used - PtrOffset(rec, rbuf)); attr->size = cpu_to_le32(asize - 8); attr->flags &= ~ATTR_FLAG_SPARSED; attr->nres.run_off = cpu_to_le16(roff - 8); attr->nres.c_unit = 0; rec->used = cpu_to_le32(used - 8); mi->dirty = true; ni->std_fa &= ~(FILE_ATTRIBUTE_SPARSE_FILE | FILE_ATTRIBUTE_REPARSE_POINT); mark_inode_dirty(inode); } /* Clear cached flag. */ ni->ni_flags &= ~NI_FLAG_COMPRESSED_MASK; if (ni->file.offs_folio) { folio_put(ni->file.offs_folio); ni->file.offs_folio = NULL; } mapping->a_ops = &ntfs_aops; out: kfree(pages); if (err) _ntfs_bad_inode(inode); return err; } /* * decompress_lzx_xpress - External compression LZX/Xpress. */ static int decompress_lzx_xpress(struct ntfs_sb_info *sbi, const char *cmpr, size_t cmpr_size, void *unc, size_t unc_size, u32 frame_size) { int err; void *ctx; if (cmpr_size == unc_size) { /* Frame not compressed. */ memcpy(unc, cmpr, unc_size); return 0; } err = 0; if (frame_size == 0x8000) { mutex_lock(&sbi->compress.mtx_lzx); /* LZX: Frame compressed. */ ctx = sbi->compress.lzx; if (!ctx) { /* Lazy initialize LZX decompress context. */ ctx = lzx_allocate_decompressor(); if (!ctx) { err = -ENOMEM; goto out1; } sbi->compress.lzx = ctx; } if (lzx_decompress(ctx, cmpr, cmpr_size, unc, unc_size)) { /* Treat all errors as "invalid argument". */ err = -EINVAL; } out1: mutex_unlock(&sbi->compress.mtx_lzx); } else { /* XPRESS: Frame compressed. */ mutex_lock(&sbi->compress.mtx_xpress); ctx = sbi->compress.xpress; if (!ctx) { /* Lazy initialize Xpress decompress context. */ ctx = xpress_allocate_decompressor(); if (!ctx) { err = -ENOMEM; goto out2; } sbi->compress.xpress = ctx; } if (xpress_decompress(ctx, cmpr, cmpr_size, unc, unc_size)) { /* Treat all errors as "invalid argument". */ err = -EINVAL; } out2: mutex_unlock(&sbi->compress.mtx_xpress); } return err; } #endif /* * ni_read_frame * * Pages - Array of locked pages. */ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages, u32 pages_per_frame) { int err; struct ntfs_sb_info *sbi = ni->mi.sbi; u8 cluster_bits = sbi->cluster_bits; char *frame_ondisk = NULL; char *frame_mem = NULL; struct page **pages_disk = NULL; struct ATTR_LIST_ENTRY *le = NULL; struct runs_tree *run = &ni->file.run; u64 valid_size = ni->i_valid; u64 vbo_disk; size_t unc_size; u32 frame_size, i, npages_disk, ondisk_size; struct page *pg; struct ATTRIB *attr; CLST frame, clst_data; /* * To simplify decompress algorithm do vmap for source * and target pages. 
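For reference: decompress_lzx_xpress() above picks its path purely from sizes: a chunk whose on-disk size equals its uncompressed size was stored raw and is copied, otherwise a 32 KiB frame means LZX and anything smaller means XPRESS. A minimal restatement of that dispatch:

#include <stddef.h>
#include <stdint.h>

enum codec { STORED, LZX, XPRESS };

static enum codec pick_codec(size_t cmpr_size, size_t unc_size,
                             uint32_t frame_size)
{
        if (cmpr_size == unc_size)
                return STORED;                  // frame not compressed
        return frame_size == 0x8000 ? LZX : XPRESS;
}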
*/ for (i = 0; i < pages_per_frame; i++) kmap(pages[i]); frame_size = pages_per_frame << PAGE_SHIFT; frame_mem = vmap(pages, pages_per_frame, VM_MAP, PAGE_KERNEL); if (!frame_mem) { err = -ENOMEM; goto out; } attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL, NULL); if (!attr) { err = -ENOENT; goto out1; } if (!attr->non_res) { u32 data_size = le32_to_cpu(attr->res.data_size); memset(frame_mem, 0, frame_size); if (frame_vbo < data_size) { ondisk_size = data_size - frame_vbo; memcpy(frame_mem, resident_data(attr) + frame_vbo, min(ondisk_size, frame_size)); } err = 0; goto out1; } if (frame_vbo >= valid_size) { memset(frame_mem, 0, frame_size); err = 0; goto out1; } if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) { #ifndef CONFIG_NTFS3_LZX_XPRESS err = -EOPNOTSUPP; goto out1; #else loff_t i_size = i_size_read(&ni->vfs_inode); u32 frame_bits = ni_ext_compress_bits(ni); u64 frame64 = frame_vbo >> frame_bits; u64 frames, vbo_data; if (frame_size != (1u << frame_bits)) { err = -EINVAL; goto out1; } switch (frame_size) { case 0x1000: case 0x2000: case 0x4000: case 0x8000: break; default: /* Unknown compression. */ err = -EOPNOTSUPP; goto out1; } attr = ni_find_attr(ni, attr, &le, ATTR_DATA, WOF_NAME, ARRAY_SIZE(WOF_NAME), NULL, NULL); if (!attr) { ntfs_inode_err( &ni->vfs_inode, "external compressed file should contains data attribute \"WofCompressedData\""); err = -EINVAL; goto out1; } if (!attr->non_res) { run = NULL; } else { run = run_alloc(); if (!run) { err = -ENOMEM; goto out1; } } frames = (i_size - 1) >> frame_bits; err = attr_wof_frame_info(ni, attr, run, frame64, frames, frame_bits, &ondisk_size, &vbo_data); if (err) goto out2; if (frame64 == frames) { unc_size = 1 + ((i_size - 1) & (frame_size - 1)); ondisk_size = attr_size(attr) - vbo_data; } else { unc_size = frame_size; } if (ondisk_size > frame_size) { err = -EINVAL; goto out2; } if (!attr->non_res) { if (vbo_data + ondisk_size > le32_to_cpu(attr->res.data_size)) { err = -EINVAL; goto out1; } err = decompress_lzx_xpress( sbi, Add2Ptr(resident_data(attr), vbo_data), ondisk_size, frame_mem, unc_size, frame_size); goto out1; } vbo_disk = vbo_data; /* Load all runs to read [vbo_disk-vbo_to). */ err = attr_load_runs_range(ni, ATTR_DATA, WOF_NAME, ARRAY_SIZE(WOF_NAME), run, vbo_disk, vbo_data + ondisk_size); if (err) goto out2; npages_disk = (ondisk_size + (vbo_disk & (PAGE_SIZE - 1)) + PAGE_SIZE - 1) >> PAGE_SHIFT; #endif } else if (is_attr_compressed(attr)) { /* LZNT compression. */ if (sbi->cluster_size > NTFS_LZNT_MAX_CLUSTER) { err = -EOPNOTSUPP; goto out1; } if (attr->nres.c_unit != NTFS_LZNT_CUNIT) { err = -EOPNOTSUPP; goto out1; } down_write(&ni->file.run_lock); run_truncate_around(run, le64_to_cpu(attr->nres.svcn)); frame = frame_vbo >> (cluster_bits + NTFS_LZNT_CUNIT); err = attr_is_frame_compressed(ni, attr, frame, &clst_data, run); up_write(&ni->file.run_lock); if (err) goto out1; if (!clst_data) { memset(frame_mem, 0, frame_size); goto out1; } frame_size = sbi->cluster_size << NTFS_LZNT_CUNIT; ondisk_size = clst_data << cluster_bits; if (clst_data >= NTFS_LZNT_CLUSTERS) { /* Frame is not compressed. 
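Aside: the LZNT read path just above classifies a frame by how many clusters it occupies: none means a sparse hole, a full frame's worth means it was stored raw, anything in between is compressed. A sketch of that classification, assuming NTFS_LZNT_CLUSTERS is 16 (one frame = 16 clusters):

#include <stdint.h>

enum lznt_frame { FRAME_SPARSE, FRAME_COMPRESSED, FRAME_RAW };

static enum lznt_frame classify_frame(uint32_t clst_data)
{
        if (!clst_data)
                return FRAME_SPARSE;            // no clusters backing the frame
        return clst_data >= 16 ? FRAME_RAW : FRAME_COMPRESSED;
}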
*/ down_read(&ni->file.run_lock); err = ntfs_bio_pages(sbi, run, pages, pages_per_frame, frame_vbo, ondisk_size, REQ_OP_READ); up_read(&ni->file.run_lock); goto out1; } vbo_disk = frame_vbo; npages_disk = (ondisk_size + PAGE_SIZE - 1) >> PAGE_SHIFT; } else { __builtin_unreachable(); err = -EINVAL; goto out1; } pages_disk = kcalloc(npages_disk, sizeof(*pages_disk), GFP_NOFS); if (!pages_disk) { err = -ENOMEM; goto out2; } for (i = 0; i < npages_disk; i++) { pg = alloc_page(GFP_KERNEL); if (!pg) { err = -ENOMEM; goto out3; } pages_disk[i] = pg; lock_page(pg); kmap(pg); } /* Read 'ondisk_size' bytes from disk. */ down_read(&ni->file.run_lock); err = ntfs_bio_pages(sbi, run, pages_disk, npages_disk, vbo_disk, ondisk_size, REQ_OP_READ); up_read(&ni->file.run_lock); if (err) goto out3; /* * To simplify decompress algorithm do vmap for source and target pages. */ frame_ondisk = vmap(pages_disk, npages_disk, VM_MAP, PAGE_KERNEL_RO); if (!frame_ondisk) { err = -ENOMEM; goto out3; } /* Decompress: Frame_ondisk -> frame_mem. */ #ifdef CONFIG_NTFS3_LZX_XPRESS if (run != &ni->file.run) { /* LZX or XPRESS */ err = decompress_lzx_xpress( sbi, frame_ondisk + (vbo_disk & (PAGE_SIZE - 1)), ondisk_size, frame_mem, unc_size, frame_size); } else #endif { /* LZNT - Native NTFS compression. */ unc_size = decompress_lznt(frame_ondisk, ondisk_size, frame_mem, frame_size); if ((ssize_t)unc_size < 0) err = unc_size; else if (!unc_size || unc_size > frame_size) err = -EINVAL; } if (!err && valid_size < frame_vbo + frame_size) { size_t ok = valid_size - frame_vbo; memset(frame_mem + ok, 0, frame_size - ok); } vunmap(frame_ondisk); out3: for (i = 0; i < npages_disk; i++) { pg = pages_disk[i]; if (pg) { kunmap(pg); unlock_page(pg); put_page(pg); } } kfree(pages_disk); out2: #ifdef CONFIG_NTFS3_LZX_XPRESS if (run != &ni->file.run) run_free(run); #endif out1: vunmap(frame_mem); out: for (i = 0; i < pages_per_frame; i++) { pg = pages[i]; kunmap(pg); SetPageUptodate(pg); } return err; } /* * ni_write_frame * * Pages - Array of locked pages. */ int ni_write_frame(struct ntfs_inode *ni, struct page **pages, u32 pages_per_frame) { int err; struct ntfs_sb_info *sbi = ni->mi.sbi; struct folio *folio = page_folio(pages[0]); u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits; u32 frame_size = sbi->cluster_size << NTFS_LZNT_CUNIT; u64 frame_vbo = folio_pos(folio); CLST frame = frame_vbo >> frame_bits; char *frame_ondisk = NULL; struct page **pages_disk = NULL; struct ATTR_LIST_ENTRY *le = NULL; char *frame_mem; struct ATTRIB *attr; struct mft_inode *mi; u32 i; struct page *pg; size_t compr_size, ondisk_size; struct lznt *lznt; attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL, &mi); if (!attr) { err = -ENOENT; goto out; } if (WARN_ON(!is_attr_compressed(attr))) { err = -EINVAL; goto out; } if (sbi->cluster_size > NTFS_LZNT_MAX_CLUSTER) { err = -EOPNOTSUPP; goto out; } if (!attr->non_res) { down_write(&ni->file.run_lock); err = attr_make_nonresident(ni, attr, le, mi, le32_to_cpu(attr->res.data_size), &ni->file.run, &attr, pages[0]); up_write(&ni->file.run_lock); if (err) goto out; } if (attr->nres.c_unit != NTFS_LZNT_CUNIT) { err = -EOPNOTSUPP; goto out; } pages_disk = kcalloc(pages_per_frame, sizeof(struct page *), GFP_NOFS); if (!pages_disk) { err = -ENOMEM; goto out; } for (i = 0; i < pages_per_frame; i++) { pg = alloc_page(GFP_KERNEL); if (!pg) { err = -ENOMEM; goto out1; } pages_disk[i] = pg; lock_page(pg); kmap(pg); } /* To simplify compress algorithm do vmap for source and target pages. 
*/ frame_ondisk = vmap(pages_disk, pages_per_frame, VM_MAP, PAGE_KERNEL); if (!frame_ondisk) { err = -ENOMEM; goto out1; } for (i = 0; i < pages_per_frame; i++) kmap(pages[i]); /* Map in-memory frame for read-only. */ frame_mem = vmap(pages, pages_per_frame, VM_MAP, PAGE_KERNEL_RO); if (!frame_mem) { err = -ENOMEM; goto out2; } mutex_lock(&sbi->compress.mtx_lznt); lznt = NULL; if (!sbi->compress.lznt) { /* * LZNT implements two levels of compression: * 0 - Standard compression * 1 - Best compression, requires a lot of cpu * use mount option? */ lznt = get_lznt_ctx(0); if (!lznt) { mutex_unlock(&sbi->compress.mtx_lznt); err = -ENOMEM; goto out3; } sbi->compress.lznt = lznt; lznt = NULL; } /* Compress: frame_mem -> frame_ondisk */ compr_size = compress_lznt(frame_mem, frame_size, frame_ondisk, frame_size, sbi->compress.lznt); mutex_unlock(&sbi->compress.mtx_lznt); kfree(lznt); if (compr_size + sbi->cluster_size > frame_size) { /* Frame is not compressed. */ compr_size = frame_size; ondisk_size = frame_size; } else if (compr_size) { /* Frame is compressed. */ ondisk_size = ntfs_up_cluster(sbi, compr_size); memset(frame_ondisk + compr_size, 0, ondisk_size - compr_size); } else { /* Frame is sparsed. */ ondisk_size = 0; } down_write(&ni->file.run_lock); run_truncate_around(&ni->file.run, le64_to_cpu(attr->nres.svcn)); err = attr_allocate_frame(ni, frame, compr_size, ni->i_valid); up_write(&ni->file.run_lock); if (err) goto out2; if (!ondisk_size) goto out2; down_read(&ni->file.run_lock); err = ntfs_bio_pages(sbi, &ni->file.run, ondisk_size < frame_size ? pages_disk : pages, pages_per_frame, frame_vbo, ondisk_size, REQ_OP_WRITE); up_read(&ni->file.run_lock); out3: vunmap(frame_mem); out2: for (i = 0; i < pages_per_frame; i++) kunmap(pages[i]); vunmap(frame_ondisk); out1: for (i = 0; i < pages_per_frame; i++) { pg = pages_disk[i]; if (pg) { kunmap(pg); unlock_page(pg); put_page(pg); } } kfree(pages_disk); out: return err; } /* * ni_remove_name - Removes name 'de' from MFT and from directory. * 'de2' and 'undo_step' are used to restore MFT/dir, if error occurs. */ int ni_remove_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni, struct NTFS_DE *de, struct NTFS_DE **de2, int *undo_step) { int err; struct ntfs_sb_info *sbi = ni->mi.sbi; struct ATTR_FILE_NAME *de_name = (struct ATTR_FILE_NAME *)(de + 1); struct ATTR_FILE_NAME *fname; struct ATTR_LIST_ENTRY *le; struct mft_inode *mi; u16 de_key_size = le16_to_cpu(de->key_size); u8 name_type; *undo_step = 0; /* Find name in record. */ mi_get_ref(&dir_ni->mi, &de_name->home); fname = ni_fname_name(ni, (struct le_str *)&de_name->name_len, &de_name->home, &mi, &le); if (!fname) return -ENOENT; memcpy(&de_name->dup, &fname->dup, sizeof(struct NTFS_DUP_INFO)); name_type = paired_name(fname->type); /* Mark ntfs as dirty. It will be cleared at umount. */ ntfs_set_state(sbi, NTFS_DIRTY_DIRTY); /* Step 1: Remove name from directory. */ err = indx_delete_entry(&dir_ni->dir, dir_ni, fname, de_key_size, sbi); if (err) return err; /* Step 2: Remove name from MFT. */ ni_remove_attr_le(ni, attr_from_name(fname), mi, le); *undo_step = 2; /* Get paired name. */ fname = ni_fname_type(ni, name_type, &mi, &le); if (fname) { u16 de2_key_size = fname_full_size(fname); *de2 = Add2Ptr(de, 1024); (*de2)->key_size = cpu_to_le16(de2_key_size); memcpy(*de2 + 1, fname, de2_key_size); /* Step 3: Remove paired name from directory. */ err = indx_delete_entry(&dir_ni->dir, dir_ni, fname, de2_key_size, sbi); if (err) return err; /* Step 4: Remove paired name from MFT. 
*/ ni_remove_attr_le(ni, attr_from_name(fname), mi, le); *undo_step = 4; } return 0; } /* * ni_remove_name_undo - Paired function for ni_remove_name. * * Return: True if ok */ bool ni_remove_name_undo(struct ntfs_inode *dir_ni, struct ntfs_inode *ni, struct NTFS_DE *de, struct NTFS_DE *de2, int undo_step) { struct ntfs_sb_info *sbi = ni->mi.sbi; struct ATTRIB *attr; u16 de_key_size; switch (undo_step) { case 4: de_key_size = le16_to_cpu(de2->key_size); if (ni_insert_resident(ni, de_key_size, ATTR_NAME, NULL, 0, &attr, NULL, NULL)) return false; memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), de2 + 1, de_key_size); mi_get_ref(&ni->mi, &de2->ref); de2->size = cpu_to_le16(ALIGN(de_key_size, 8) + sizeof(struct NTFS_DE)); de2->flags = 0; de2->res = 0; if (indx_insert_entry(&dir_ni->dir, dir_ni, de2, sbi, NULL, 1)) return false; fallthrough; case 2: de_key_size = le16_to_cpu(de->key_size); if (ni_insert_resident(ni, de_key_size, ATTR_NAME, NULL, 0, &attr, NULL, NULL)) return false; memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), de + 1, de_key_size); mi_get_ref(&ni->mi, &de->ref); if (indx_insert_entry(&dir_ni->dir, dir_ni, de, sbi, NULL, 1)) return false; } return true; } /* * ni_add_name - Add new name into MFT and into directory. */ int ni_add_name(struct ntfs_inode *dir_ni, struct ntfs_inode *ni, struct NTFS_DE *de) { int err; struct ntfs_sb_info *sbi = ni->mi.sbi; struct ATTRIB *attr; struct ATTR_LIST_ENTRY *le; struct mft_inode *mi; struct ATTR_FILE_NAME *fname; struct ATTR_FILE_NAME *de_name = (struct ATTR_FILE_NAME *)(de + 1); u16 de_key_size = le16_to_cpu(de->key_size); if (sbi->options->windows_names && !valid_windows_name(sbi, (struct le_str *)&de_name->name_len)) return -EINVAL; /* If option "hide_dot_files" then set hidden attribute for dot files. */ if (ni->mi.sbi->options->hide_dot_files) { if (de_name->name_len > 0 && le16_to_cpu(de_name->name[0]) == '.') ni->std_fa |= FILE_ATTRIBUTE_HIDDEN; else ni->std_fa &= ~FILE_ATTRIBUTE_HIDDEN; } mi_get_ref(&ni->mi, &de->ref); mi_get_ref(&dir_ni->mi, &de_name->home); /* Fill duplicate from any ATTR_NAME. */ fname = ni_fname_name(ni, NULL, NULL, NULL, NULL); if (fname) memcpy(&de_name->dup, &fname->dup, sizeof(fname->dup)); de_name->dup.fa = ni->std_fa; /* Insert new name into MFT. */ err = ni_insert_resident(ni, de_key_size, ATTR_NAME, NULL, 0, &attr, &mi, &le); if (err) return err; memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), de_name, de_key_size); /* Insert new name into directory. */ err = indx_insert_entry(&dir_ni->dir, dir_ni, de, sbi, NULL, 0); if (err) ni_remove_attr_le(ni, attr, mi, le); return err; } /* * ni_rename - Remove one name and insert new name. */ int ni_rename(struct ntfs_inode *dir_ni, struct ntfs_inode *new_dir_ni, struct ntfs_inode *ni, struct NTFS_DE *de, struct NTFS_DE *new_de, bool *is_bad) { int err; struct NTFS_DE *de2 = NULL; int undo = 0; /* * There are two possible ways to rename: * 1) Add new name and remove old name. * 2) Remove old name and add new name. * * In most cases (not all!) adding new name into MFT and into directory can * allocate additional cluster(s). * Second way may result to bad inode if we can't add new name * and then can't restore (add) old name. */ /* * Way 1 - Add new + remove old. */ err = ni_add_name(new_dir_ni, ni, new_de); if (!err) { err = ni_remove_name(dir_ni, ni, de, &de2, &undo); if (err && ni_remove_name(new_dir_ni, ni, new_de, &de2, &undo)) *is_bad = true; } /* * Way 2 - Remove old + add new. 
*/ /* * err = ni_remove_name(dir_ni, ni, de, &de2, &undo); * if (!err) { * err = ni_add_name(new_dir_ni, ni, new_de); * if (err && !ni_remove_name_undo(dir_ni, ni, de, de2, undo)) * *is_bad = true; * } */ return err; } /* * ni_is_dirty - Return: True if 'ni' requires ni_write_inode. */ bool ni_is_dirty(struct inode *inode) { struct ntfs_inode *ni = ntfs_i(inode); struct rb_node *node; if (ni->mi.dirty || ni->attr_list.dirty || (ni->ni_flags & NI_FLAG_UPDATE_PARENT)) return true; for (node = rb_first(&ni->mi_tree); node; node = rb_next(node)) { if (rb_entry(node, struct mft_inode, node)->dirty) return true; } return false; } /* * ni_update_parent * * Update duplicate info of ATTR_FILE_NAME in MFT and in parent directories. */ static bool ni_update_parent(struct ntfs_inode *ni, struct NTFS_DUP_INFO *dup, int sync) { struct ATTRIB *attr; struct mft_inode *mi; struct ATTR_LIST_ENTRY *le = NULL; struct ntfs_sb_info *sbi = ni->mi.sbi; struct super_block *sb = sbi->sb; bool re_dirty = false; if (ni->mi.mrec->flags & RECORD_FLAG_DIR) { dup->fa |= FILE_ATTRIBUTE_DIRECTORY; attr = NULL; dup->alloc_size = 0; dup->data_size = 0; } else { dup->fa &= ~FILE_ATTRIBUTE_DIRECTORY; attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL, &mi); if (!attr) { dup->alloc_size = dup->data_size = 0; } else if (!attr->non_res) { u32 data_size = le32_to_cpu(attr->res.data_size); dup->alloc_size = cpu_to_le64(ALIGN(data_size, 8)); dup->data_size = cpu_to_le64(data_size); } else { u64 new_valid = ni->i_valid; u64 data_size = le64_to_cpu(attr->nres.data_size); __le64 valid_le; dup->alloc_size = is_attr_ext(attr) ? attr->nres.total_size : attr->nres.alloc_size; dup->data_size = attr->nres.data_size; if (new_valid > data_size) new_valid = data_size; valid_le = cpu_to_le64(new_valid); if (valid_le != attr->nres.valid_size) { attr->nres.valid_size = valid_le; mi->dirty = true; } } } /* TODO: Fill reparse info. */ dup->reparse = 0; dup->ea_size = 0; if (ni->ni_flags & NI_FLAG_EA) { attr = ni_find_attr(ni, attr, &le, ATTR_EA_INFO, NULL, 0, NULL, NULL); if (attr) { const struct EA_INFO *info; info = resident_data_ex(attr, sizeof(struct EA_INFO)); /* If ATTR_EA_INFO exists 'info' can't be NULL. */ if (info) dup->ea_size = info->size_pack; } } attr = NULL; le = NULL; while ((attr = ni_find_attr(ni, attr, &le, ATTR_NAME, NULL, 0, NULL, &mi))) { struct inode *dir; struct ATTR_FILE_NAME *fname; fname = resident_data_ex(attr, SIZEOF_ATTRIBUTE_FILENAME); if (!fname || !memcmp(&fname->dup, dup, sizeof(fname->dup))) continue; /* Check simple case when parent inode equals current inode. */ if (ino_get(&fname->home) == ni->vfs_inode.i_ino) { ntfs_set_state(sbi, NTFS_DIRTY_ERROR); continue; } /* ntfs_iget5 may sleep. */ dir = ntfs_iget5(sb, &fname->home, NULL); if (IS_ERR(dir)) { ntfs_inode_warn( &ni->vfs_inode, "failed to open parent directory r=%lx to update", (long)ino_get(&fname->home)); continue; } if (!is_bad_inode(dir)) { struct ntfs_inode *dir_ni = ntfs_i(dir); if (!ni_trylock(dir_ni)) { re_dirty = true; } else { indx_update_dup(dir_ni, sbi, fname, dup, sync); ni_unlock(dir_ni); memcpy(&fname->dup, dup, sizeof(fname->dup)); mi->dirty = true; } } iput(dir); } return re_dirty; } /* * ni_write_inode - Write MFT base record and all subrecords to disk. 
*/ int ni_write_inode(struct inode *inode, int sync, const char *hint) { int err = 0, err2; struct ntfs_inode *ni = ntfs_i(inode); struct super_block *sb = inode->i_sb; struct ntfs_sb_info *sbi = sb->s_fs_info; bool re_dirty = false; struct ATTR_STD_INFO *std; struct rb_node *node, *next; struct NTFS_DUP_INFO dup; if (is_bad_inode(inode) || sb_rdonly(sb)) return 0; if (unlikely(ntfs3_forced_shutdown(sb))) return -EIO; if (!ni_trylock(ni)) { /* 'ni' is under modification, skip for now. */ mark_inode_dirty_sync(inode); return 0; } if (!ni->mi.mrec) goto out; if (is_rec_inuse(ni->mi.mrec) && !(sbi->flags & NTFS_FLAGS_LOG_REPLAYING) && inode->i_nlink) { bool modified = false; struct timespec64 ts; /* Update times in standard attribute. */ std = ni_std(ni); if (!std) { err = -EINVAL; goto out; } /* Update the access times if they have changed. */ ts = inode_get_mtime(inode); dup.m_time = kernel2nt(&ts); if (std->m_time != dup.m_time) { std->m_time = dup.m_time; modified = true; } ts = inode_get_ctime(inode); dup.c_time = kernel2nt(&ts); if (std->c_time != dup.c_time) { std->c_time = dup.c_time; modified = true; } ts = inode_get_atime(inode); dup.a_time = kernel2nt(&ts); if (std->a_time != dup.a_time) { std->a_time = dup.a_time; modified = true; } dup.fa = ni->std_fa; if (std->fa != dup.fa) { std->fa = dup.fa; modified = true; } /* std attribute is always in primary MFT record. */ if (modified) ni->mi.dirty = true; if (!ntfs_is_meta_file(sbi, inode->i_ino) && (modified || (ni->ni_flags & NI_FLAG_UPDATE_PARENT)) /* Avoid __wait_on_freeing_inode(inode). */ && (sb->s_flags & SB_ACTIVE)) { dup.cr_time = std->cr_time; /* Not critical if this function fail. */ re_dirty = ni_update_parent(ni, &dup, sync); if (re_dirty) ni->ni_flags |= NI_FLAG_UPDATE_PARENT; else ni->ni_flags &= ~NI_FLAG_UPDATE_PARENT; } /* Update attribute list. */ if (ni->attr_list.size && ni->attr_list.dirty) { if (inode->i_ino != MFT_REC_MFT || sync) { err = ni_try_remove_attr_list(ni); if (err) goto out; } err = al_update(ni, sync); if (err) goto out; } } for (node = rb_first(&ni->mi_tree); node; node = next) { struct mft_inode *mi = rb_entry(node, struct mft_inode, node); bool is_empty; next = rb_next(node); if (!mi->dirty) continue; is_empty = !mi_enum_attr(ni, mi, NULL); if (is_empty) clear_rec_inuse(mi->mrec); err2 = mi_write(mi, sync); if (!err && err2) err = err2; if (is_empty) { ntfs_mark_rec_free(sbi, mi->rno, false); rb_erase(node, &ni->mi_tree); mi_put(mi); } } if (ni->mi.dirty) { err2 = mi_write(&ni->mi, sync); if (!err && err2) err = err2; } out: ni_unlock(ni); if (err) { ntfs_inode_err(inode, "%s failed, %d.", hint, err); ntfs_set_state(sbi, NTFS_DIRTY_ERROR); return err; } if (re_dirty) mark_inode_dirty_sync(inode); return 0; } /* * ni_set_compress * * Helper for 'ntfs_fileattr_set'. * Changes compression for empty files and directories only. */ int ni_set_compress(struct inode *inode, bool compr) { int err; struct ntfs_inode *ni = ntfs_i(inode); struct ATTR_STD_INFO *std; const char *bad_inode; if (is_compressed(ni) == !!compr) return 0; if (is_sparsed(ni)) { /* sparse and compress not compatible. */ return -EOPNOTSUPP; } if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode)) { /*Skip other inodes. (symlink,fifo,...) */ return -EOPNOTSUPP; } bad_inode = NULL; ni_lock(ni); std = ni_std(ni); if (!std) { bad_inode = "no std"; goto out; } if (S_ISREG(inode->i_mode)) { err = attr_set_compress(ni, compr); if (err) { if (err == -ENOENT) { /* Fix on the fly? */ /* Each file must contain data attribute. 
*/ bad_inode = "no data attribute"; } goto out; } } ni->std_fa = std->fa; if (compr) { std->fa &= ~FILE_ATTRIBUTE_SPARSE_FILE; std->fa |= FILE_ATTRIBUTE_COMPRESSED; } else { std->fa &= ~FILE_ATTRIBUTE_COMPRESSED; } if (ni->std_fa != std->fa) { ni->std_fa = std->fa; ni->mi.dirty = true; } /* update duplicate information and directory entries in ni_write_inode.*/ ni->ni_flags |= NI_FLAG_UPDATE_PARENT; err = 0; out: ni_unlock(ni); if (bad_inode) { ntfs_bad_inode(inode, bad_inode); err = -EINVAL; } return err; }
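A minimal user-space sketch of the LZNT frame arithmetic used by ni_read_frame() and ni_write_frame() above: the frame index is the byte offset shifted right by (cluster_bits + NTFS_LZNT_CUNIT), and each frame covers 2^NTFS_LZNT_CUNIT (16) clusters. The 4 KiB cluster size and the helper names are assumptions for illustration only, not part of the driver.

#include <stdint.h>
#include <stdio.h>

#define LZNT_CUNIT 4	/* 2^4 = 16 clusters per LZNT compression unit */

/* Frame index that contains byte offset 'vbo'. */
static uint64_t frame_of(uint64_t vbo, unsigned int cluster_bits)
{
	return vbo >> (cluster_bits + LZNT_CUNIT);
}

/* Byte offset at which that frame starts. */
static uint64_t frame_start(uint64_t vbo, unsigned int cluster_bits)
{
	uint64_t frame_size = (uint64_t)1 << (cluster_bits + LZNT_CUNIT);

	return vbo & ~(frame_size - 1);
}

int main(void)
{
	unsigned int cluster_bits = 12;	/* assume 4 KiB clusters */
	uint64_t vbo = 200000;		/* arbitrary file offset */

	printf("frame %llu starts at %llu, frame size %u bytes\n",
	       (unsigned long long)frame_of(vbo, cluster_bits),
	       (unsigned long long)frame_start(vbo, cluster_bits),
	       1u << (cluster_bits + LZNT_CUNIT));
	return 0;
}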
// SPDX-License-Identifier: GPL-2.0-or-later /* * Roccat Savu driver for Linux * * Copyright (c) 2012 Stefan Achatz <erazor_de@users.sourceforge.net> */ /* */ /* Roccat Savu is a gamer mouse with macro keys that can be configured in * 5 profiles. */ #include <linux/device.h> #include <linux/input.h> #include <linux/hid.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/hid-roccat.h> #include "hid-ids.h" #include "hid-roccat-common.h" #include "hid-roccat-savu.h" ROCCAT_COMMON2_BIN_ATTRIBUTE_W(control, 0x4, 0x03); ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(profile, 0x5, 0x03); ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(general, 0x6, 0x10); ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(buttons, 0x7, 0x2f); ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(macro, 0x8, 0x0823); ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(info, 0x9, 0x08); ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(sensor, 0xc, 0x04); static const struct bin_attribute *const savu_bin_attrs[] = { &bin_attr_control, &bin_attr_profile, &bin_attr_general, &bin_attr_buttons, &bin_attr_macro, &bin_attr_info, &bin_attr_sensor, NULL, }; static const struct attribute_group savu_group = { .bin_attrs_new = savu_bin_attrs, }; static const struct attribute_group *savu_groups[] = { &savu_group, NULL, }; static const struct class savu_class = { .name = "savu", .dev_groups = savu_groups, }; static int savu_init_specials(struct hid_device *hdev) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); struct usb_device *usb_dev = interface_to_usbdev(intf); struct roccat_common2_device *savu; int retval; if (intf->cur_altsetting->desc.bInterfaceProtocol != USB_INTERFACE_PROTOCOL_MOUSE) { hid_set_drvdata(hdev, NULL); return 0; } savu = kzalloc(sizeof(*savu), GFP_KERNEL); if (!savu) { hid_err(hdev, "can't alloc device descriptor\n"); return -ENOMEM; } hid_set_drvdata(hdev, savu); retval = roccat_common2_device_init_struct(usb_dev, savu); if (retval) { hid_err(hdev, "couldn't init Savu device\n"); goto exit_free; } retval = roccat_connect(&savu_class, hdev, sizeof(struct savu_roccat_report)); if (retval < 0) { hid_err(hdev, "couldn't init char dev\n"); } else { savu->chrdev_minor = retval; savu->roccat_claimed = 1; } return 0; exit_free: kfree(savu); return retval; } static void savu_remove_specials(struct hid_device *hdev) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); struct roccat_common2_device *savu; if (intf->cur_altsetting->desc.bInterfaceProtocol != USB_INTERFACE_PROTOCOL_MOUSE) return; savu = hid_get_drvdata(hdev); if (savu->roccat_claimed) roccat_disconnect(savu->chrdev_minor); kfree(savu); } static int savu_probe(struct hid_device *hdev, const struct hid_device_id *id) { int retval; if (!hid_is_usb(hdev)) return -EINVAL; retval = hid_parse(hdev); if (retval) {
hid_err(hdev, "parse failed\n"); goto exit; } retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (retval) { hid_err(hdev, "hw start failed\n"); goto exit; } retval = savu_init_specials(hdev); if (retval) { hid_err(hdev, "couldn't install mouse\n"); goto exit_stop; } return 0; exit_stop: hid_hw_stop(hdev); exit: return retval; } static void savu_remove(struct hid_device *hdev) { savu_remove_specials(hdev); hid_hw_stop(hdev); } static void savu_report_to_chrdev(struct roccat_common2_device const *savu, u8 const *data) { struct savu_roccat_report roccat_report; struct savu_mouse_report_special const *special_report; if (data[0] != SAVU_MOUSE_REPORT_NUMBER_SPECIAL) return; special_report = (struct savu_mouse_report_special const *)data; roccat_report.type = special_report->type; roccat_report.data[0] = special_report->data[0]; roccat_report.data[1] = special_report->data[1]; roccat_report_event(savu->chrdev_minor, (uint8_t const *)&roccat_report); } static int savu_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); struct roccat_common2_device *savu = hid_get_drvdata(hdev); if (intf->cur_altsetting->desc.bInterfaceProtocol != USB_INTERFACE_PROTOCOL_MOUSE) return 0; if (savu == NULL) return 0; if (savu->roccat_claimed) savu_report_to_chrdev(savu, data); return 0; } static const struct hid_device_id savu_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_SAVU) }, { } }; MODULE_DEVICE_TABLE(hid, savu_devices); static struct hid_driver savu_driver = { .name = "savu", .id_table = savu_devices, .probe = savu_probe, .remove = savu_remove, .raw_event = savu_raw_event }; static int __init savu_init(void) { int retval; retval = class_register(&savu_class); if (retval) return retval; retval = hid_register_driver(&savu_driver); if (retval) class_unregister(&savu_class); return retval; } static void __exit savu_exit(void) { hid_unregister_driver(&savu_driver); class_unregister(&savu_class); } module_init(savu_init); module_exit(savu_exit); MODULE_AUTHOR("Stefan Achatz"); MODULE_DESCRIPTION("USB Roccat Savu driver"); MODULE_LICENSE("GPL v2");
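A stand-alone sketch of the filtering done by savu_raw_event() and savu_report_to_chrdev() above: only reports carrying the "special" report number are repacked into the small record pushed to the roccat character device. The report number and byte offsets below are assumptions made for illustration; the real layout lives in hid-roccat-savu.h.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define SPECIAL_REPORT_NUMBER 3	/* hypothetical value */

struct chrdev_report {		/* loose mirror of struct savu_roccat_report */
	uint8_t type;
	uint8_t data[2];
};

/* Return 1 and fill *out when 'raw' looks like a special event, else 0. */
static int forward_special(const uint8_t *raw, size_t len,
			   struct chrdev_report *out)
{
	if (len < 4 || raw[0] != SPECIAL_REPORT_NUMBER)
		return 0;
	out->type = raw[1];	/* offsets are illustrative only */
	out->data[0] = raw[2];
	out->data[1] = raw[3];
	return 1;
}

int main(void)
{
	const uint8_t raw[] = { SPECIAL_REPORT_NUMBER, 0x20, 0x01, 0x00 };
	struct chrdev_report rep;

	if (forward_special(raw, sizeof(raw), &rep))
		printf("special event: type=0x%02x data=%02x %02x\n",
		       rep.type, rep.data[0], rep.data[1]);
	return 0;
}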
/* * Copyright (c) 2014, Ericsson AB * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation.
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "core.h" #include "bearer.h" #include "link.h" #include "name_table.h" #include "socket.h" #include "node.h" #include "net.h" #include <net/genetlink.h> #include <linux/string_helpers.h> #include <linux/tipc_config.h> /* The legacy API had an artificial message length limit called * ULTRA_STRING_MAX_LEN. */ #define ULTRA_STRING_MAX_LEN 32768 #define TIPC_SKB_MAX TLV_SPACE(ULTRA_STRING_MAX_LEN) #define REPLY_TRUNCATED "<truncated>\n" struct tipc_nl_compat_msg { u16 cmd; int rep_type; int rep_size; int req_type; int req_size; struct net *net; struct sk_buff *rep; struct tlv_desc *req; struct sock *dst_sk; }; struct tipc_nl_compat_cmd_dump { int (*header)(struct tipc_nl_compat_msg *); int (*dumpit)(struct sk_buff *, struct netlink_callback *); int (*format)(struct tipc_nl_compat_msg *msg, struct nlattr **attrs); }; struct tipc_nl_compat_cmd_doit { int (*doit)(struct sk_buff *skb, struct genl_info *info); int (*transcode)(struct tipc_nl_compat_cmd_doit *cmd, struct sk_buff *skb, struct tipc_nl_compat_msg *msg); }; static int tipc_skb_tailroom(struct sk_buff *skb) { int tailroom; int limit; tailroom = skb_tailroom(skb); limit = TIPC_SKB_MAX - skb->len; if (tailroom < limit) return tailroom; return limit; } static inline int TLV_GET_DATA_LEN(struct tlv_desc *tlv) { return TLV_GET_LEN(tlv) - TLV_SPACE(0); } static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len) { struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb); if (tipc_skb_tailroom(skb) < TLV_SPACE(len)) return -EMSGSIZE; skb_put(skb, TLV_SPACE(len)); memset(tlv, 0, TLV_SPACE(len)); tlv->tlv_type = htons(type); tlv->tlv_len = htons(TLV_LENGTH(len)); if (len && data) memcpy(TLV_DATA(tlv), data, len); return 0; } static void tipc_tlv_init(struct sk_buff *skb, u16 type) { struct tlv_desc *tlv = (struct tlv_desc *)skb->data; TLV_SET_LEN(tlv, 0); TLV_SET_TYPE(tlv, type); skb_put(skb, sizeof(struct tlv_desc)); } static __printf(2, 3) int tipc_tlv_sprintf(struct sk_buff *skb, const char *fmt, ...) 
{ int n; u16 len; u32 rem; char *buf; struct tlv_desc *tlv; va_list args; rem = tipc_skb_tailroom(skb); tlv = (struct tlv_desc *)skb->data; len = TLV_GET_LEN(tlv); buf = TLV_DATA(tlv) + len; va_start(args, fmt); n = vscnprintf(buf, rem, fmt, args); va_end(args); TLV_SET_LEN(tlv, n + len); skb_put(skb, n); return n; } static struct sk_buff *tipc_tlv_alloc(int size) { int hdr_len; struct sk_buff *buf; size = TLV_SPACE(size); hdr_len = nlmsg_total_size(GENL_HDRLEN + TIPC_GENL_HDRLEN); buf = alloc_skb(hdr_len + size, GFP_KERNEL); if (!buf) return NULL; skb_reserve(buf, hdr_len); return buf; } static struct sk_buff *tipc_get_err_tlv(char *str) { int str_len = strlen(str) + 1; struct sk_buff *buf; buf = tipc_tlv_alloc(str_len); if (buf) tipc_add_tlv(buf, TIPC_TLV_ERROR_STRING, str, str_len); return buf; } static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, struct tipc_nl_compat_msg *msg, struct sk_buff *arg) { struct genl_dumpit_info info; int len = 0; int err; struct sk_buff *buf; struct nlmsghdr *nlmsg; struct netlink_callback cb; struct nlattr **attrbuf; memset(&cb, 0, sizeof(cb)); cb.nlh = (struct nlmsghdr *)arg->data; cb.skb = arg; cb.data = &info; buf = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); if (!buf) return -ENOMEM; buf->sk = msg->dst_sk; if (__tipc_dump_start(&cb, msg->net)) { kfree_skb(buf); return -ENOMEM; } attrbuf = kcalloc(tipc_genl_family.maxattr + 1, sizeof(struct nlattr *), GFP_KERNEL); if (!attrbuf) { err = -ENOMEM; goto err_out; } info.info.attrs = attrbuf; if (nlmsg_len(cb.nlh) > 0) { err = nlmsg_parse_deprecated(cb.nlh, GENL_HDRLEN, attrbuf, tipc_genl_family.maxattr, tipc_genl_family.policy, NULL); if (err) goto err_out; } do { int rem; len = (*cmd->dumpit)(buf, &cb); nlmsg_for_each_msg(nlmsg, nlmsg_hdr(buf), len, rem) { err = nlmsg_parse_deprecated(nlmsg, GENL_HDRLEN, attrbuf, tipc_genl_family.maxattr, tipc_genl_family.policy, NULL); if (err) goto err_out; err = (*cmd->format)(msg, attrbuf); if (err) goto err_out; if (tipc_skb_tailroom(msg->rep) <= 1) { err = -EMSGSIZE; goto err_out; } } skb_reset_tail_pointer(buf); buf->len = 0; } while (len); err = 0; err_out: kfree(attrbuf); tipc_dump_done(&cb); kfree_skb(buf); if (err == -EMSGSIZE) { /* The legacy API only considered messages filling * "ULTRA_STRING_MAX_LEN" to be truncated. 
*/ if ((TIPC_SKB_MAX - msg->rep->len) <= 1) { char *tail = skb_tail_pointer(msg->rep); if (*tail != '\0') sprintf(tail - sizeof(REPLY_TRUNCATED) - 1, REPLY_TRUNCATED); } return 0; } return err; } static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, struct tipc_nl_compat_msg *msg) { struct nlmsghdr *nlh; struct sk_buff *arg; int err; if (msg->req_type && (!msg->req_size || !TLV_CHECK_TYPE(msg->req, msg->req_type))) return -EINVAL; msg->rep = tipc_tlv_alloc(msg->rep_size); if (!msg->rep) return -ENOMEM; if (msg->rep_type) tipc_tlv_init(msg->rep, msg->rep_type); if (cmd->header) { err = (*cmd->header)(msg); if (err) { kfree_skb(msg->rep); msg->rep = NULL; return err; } } arg = nlmsg_new(0, GFP_KERNEL); if (!arg) { kfree_skb(msg->rep); msg->rep = NULL; return -ENOMEM; } nlh = nlmsg_put(arg, 0, 0, tipc_genl_family.id, 0, NLM_F_MULTI); if (!nlh) { kfree_skb(arg); kfree_skb(msg->rep); msg->rep = NULL; return -EMSGSIZE; } nlmsg_end(arg, nlh); err = __tipc_nl_compat_dumpit(cmd, msg, arg); if (err) { kfree_skb(msg->rep); msg->rep = NULL; } kfree_skb(arg); return err; } static int __tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd, struct tipc_nl_compat_msg *msg) { int err; struct sk_buff *doit_buf; struct sk_buff *trans_buf; struct nlattr **attrbuf; struct genl_info info; trans_buf = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!trans_buf) return -ENOMEM; attrbuf = kmalloc_array(tipc_genl_family.maxattr + 1, sizeof(struct nlattr *), GFP_KERNEL); if (!attrbuf) { err = -ENOMEM; goto trans_out; } doit_buf = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!doit_buf) { err = -ENOMEM; goto attrbuf_out; } memset(&info, 0, sizeof(info)); info.attrs = attrbuf; rtnl_lock(); err = (*cmd->transcode)(cmd, trans_buf, msg); if (err) goto doit_out; err = nla_parse_deprecated(attrbuf, tipc_genl_family.maxattr, (const struct nlattr *)trans_buf->data, trans_buf->len, NULL, NULL); if (err) goto doit_out; doit_buf->sk = msg->dst_sk; err = (*cmd->doit)(doit_buf, &info); doit_out: rtnl_unlock(); kfree_skb(doit_buf); attrbuf_out: kfree(attrbuf); trans_out: kfree_skb(trans_buf); return err; } static int tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd, struct tipc_nl_compat_msg *msg) { int err; if (msg->req_type && (!msg->req_size || !TLV_CHECK_TYPE(msg->req, msg->req_type))) return -EINVAL; err = __tipc_nl_compat_doit(cmd, msg); if (err) return err; /* The legacy API considered an empty message a success message */ msg->rep = tipc_tlv_alloc(0); if (!msg->rep) return -ENOMEM; return 0; } static int tipc_nl_compat_bearer_dump(struct tipc_nl_compat_msg *msg, struct nlattr **attrs) { struct nlattr *bearer[TIPC_NLA_BEARER_MAX + 1]; int err; if (!attrs[TIPC_NLA_BEARER]) return -EINVAL; err = nla_parse_nested_deprecated(bearer, TIPC_NLA_BEARER_MAX, attrs[TIPC_NLA_BEARER], NULL, NULL); if (err) return err; return tipc_add_tlv(msg->rep, TIPC_TLV_BEARER_NAME, nla_data(bearer[TIPC_NLA_BEARER_NAME]), nla_len(bearer[TIPC_NLA_BEARER_NAME])); } static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd, struct sk_buff *skb, struct tipc_nl_compat_msg *msg) { struct nlattr *prop; struct nlattr *bearer; struct tipc_bearer_config *b; int len; b = (struct tipc_bearer_config *)TLV_DATA(msg->req); bearer = nla_nest_start_noflag(skb, TIPC_NLA_BEARER); if (!bearer) return -EMSGSIZE; len = TLV_GET_DATA_LEN(msg->req); len -= offsetof(struct tipc_bearer_config, name); if (len <= 0) return -EINVAL; len = min_t(int, len, TIPC_MAX_BEARER_NAME); if (!string_is_terminated(b->name, len)) return -EINVAL; if 
(nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name)) return -EMSGSIZE; if (nla_put_u32(skb, TIPC_NLA_BEARER_DOMAIN, ntohl(b->disc_domain))) return -EMSGSIZE; if (ntohl(b->priority) <= TIPC_MAX_LINK_PRI) { prop = nla_nest_start_noflag(skb, TIPC_NLA_BEARER_PROP); if (!prop) return -EMSGSIZE; if (nla_put_u32(skb, TIPC_NLA_PROP_PRIO, ntohl(b->priority))) return -EMSGSIZE; nla_nest_end(skb, prop); } nla_nest_end(skb, bearer); return 0; } static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd, struct sk_buff *skb, struct tipc_nl_compat_msg *msg) { char *name; struct nlattr *bearer; int len; name = (char *)TLV_DATA(msg->req); bearer = nla_nest_start_noflag(skb, TIPC_NLA_BEARER); if (!bearer) return -EMSGSIZE; len = TLV_GET_DATA_LEN(msg->req); if (len <= 0) return -EINVAL; len = min_t(int, len, TIPC_MAX_BEARER_NAME); if (!string_is_terminated(name, len)) return -EINVAL; if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name)) return -EMSGSIZE; nla_nest_end(skb, bearer); return 0; } static inline u32 perc(u32 count, u32 total) { return (count * 100 + (total / 2)) / total; } static void __fill_bc_link_stat(struct tipc_nl_compat_msg *msg, struct nlattr *prop[], struct nlattr *stats[]) { tipc_tlv_sprintf(msg->rep, " Window:%u packets\n", nla_get_u32(prop[TIPC_NLA_PROP_WIN])); tipc_tlv_sprintf(msg->rep, " RX packets:%u fragments:%u/%u bundles:%u/%u\n", nla_get_u32(stats[TIPC_NLA_STATS_RX_INFO]), nla_get_u32(stats[TIPC_NLA_STATS_RX_FRAGMENTS]), nla_get_u32(stats[TIPC_NLA_STATS_RX_FRAGMENTED]), nla_get_u32(stats[TIPC_NLA_STATS_RX_BUNDLES]), nla_get_u32(stats[TIPC_NLA_STATS_RX_BUNDLED])); tipc_tlv_sprintf(msg->rep, " TX packets:%u fragments:%u/%u bundles:%u/%u\n", nla_get_u32(stats[TIPC_NLA_STATS_TX_INFO]), nla_get_u32(stats[TIPC_NLA_STATS_TX_FRAGMENTS]), nla_get_u32(stats[TIPC_NLA_STATS_TX_FRAGMENTED]), nla_get_u32(stats[TIPC_NLA_STATS_TX_BUNDLES]), nla_get_u32(stats[TIPC_NLA_STATS_TX_BUNDLED])); tipc_tlv_sprintf(msg->rep, " RX naks:%u defs:%u dups:%u\n", nla_get_u32(stats[TIPC_NLA_STATS_RX_NACKS]), nla_get_u32(stats[TIPC_NLA_STATS_RX_DEFERRED]), nla_get_u32(stats[TIPC_NLA_STATS_DUPLICATES])); tipc_tlv_sprintf(msg->rep, " TX naks:%u acks:%u dups:%u\n", nla_get_u32(stats[TIPC_NLA_STATS_TX_NACKS]), nla_get_u32(stats[TIPC_NLA_STATS_TX_ACKS]), nla_get_u32(stats[TIPC_NLA_STATS_RETRANSMITTED])); tipc_tlv_sprintf(msg->rep, " Congestion link:%u Send queue max:%u avg:%u", nla_get_u32(stats[TIPC_NLA_STATS_LINK_CONGS]), nla_get_u32(stats[TIPC_NLA_STATS_MAX_QUEUE]), nla_get_u32(stats[TIPC_NLA_STATS_AVG_QUEUE])); } static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg, struct nlattr **attrs) { char *name; struct nlattr *link[TIPC_NLA_LINK_MAX + 1]; struct nlattr *prop[TIPC_NLA_PROP_MAX + 1]; struct nlattr *stats[TIPC_NLA_STATS_MAX + 1]; int err; int len; if (!attrs[TIPC_NLA_LINK]) return -EINVAL; err = nla_parse_nested_deprecated(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL, NULL); if (err) return err; if (!link[TIPC_NLA_LINK_PROP]) return -EINVAL; err = nla_parse_nested_deprecated(prop, TIPC_NLA_PROP_MAX, link[TIPC_NLA_LINK_PROP], NULL, NULL); if (err) return err; if (!link[TIPC_NLA_LINK_STATS]) return -EINVAL; err = nla_parse_nested_deprecated(stats, TIPC_NLA_STATS_MAX, link[TIPC_NLA_LINK_STATS], NULL, NULL); if (err) return err; name = (char *)TLV_DATA(msg->req); len = TLV_GET_DATA_LEN(msg->req); if (len <= 0) return -EINVAL; len = min_t(int, len, TIPC_MAX_LINK_NAME); if (!string_is_terminated(name, len)) return -EINVAL; if (strcmp(name, 
nla_data(link[TIPC_NLA_LINK_NAME])) != 0) return 0; tipc_tlv_sprintf(msg->rep, "\nLink <%s>\n", (char *)nla_data(link[TIPC_NLA_LINK_NAME])); if (link[TIPC_NLA_LINK_BROADCAST]) { __fill_bc_link_stat(msg, prop, stats); return 0; } if (link[TIPC_NLA_LINK_ACTIVE]) tipc_tlv_sprintf(msg->rep, " ACTIVE"); else if (link[TIPC_NLA_LINK_UP]) tipc_tlv_sprintf(msg->rep, " STANDBY"); else tipc_tlv_sprintf(msg->rep, " DEFUNCT"); tipc_tlv_sprintf(msg->rep, " MTU:%u Priority:%u", nla_get_u32(link[TIPC_NLA_LINK_MTU]), nla_get_u32(prop[TIPC_NLA_PROP_PRIO])); tipc_tlv_sprintf(msg->rep, " Tolerance:%u ms Window:%u packets\n", nla_get_u32(prop[TIPC_NLA_PROP_TOL]), nla_get_u32(prop[TIPC_NLA_PROP_WIN])); tipc_tlv_sprintf(msg->rep, " RX packets:%u fragments:%u/%u bundles:%u/%u\n", nla_get_u32(link[TIPC_NLA_LINK_RX]) - nla_get_u32(stats[TIPC_NLA_STATS_RX_INFO]), nla_get_u32(stats[TIPC_NLA_STATS_RX_FRAGMENTS]), nla_get_u32(stats[TIPC_NLA_STATS_RX_FRAGMENTED]), nla_get_u32(stats[TIPC_NLA_STATS_RX_BUNDLES]), nla_get_u32(stats[TIPC_NLA_STATS_RX_BUNDLED])); tipc_tlv_sprintf(msg->rep, " TX packets:%u fragments:%u/%u bundles:%u/%u\n", nla_get_u32(link[TIPC_NLA_LINK_TX]) - nla_get_u32(stats[TIPC_NLA_STATS_TX_INFO]), nla_get_u32(stats[TIPC_NLA_STATS_TX_FRAGMENTS]), nla_get_u32(stats[TIPC_NLA_STATS_TX_FRAGMENTED]), nla_get_u32(stats[TIPC_NLA_STATS_TX_BUNDLES]), nla_get_u32(stats[TIPC_NLA_STATS_TX_BUNDLED])); tipc_tlv_sprintf(msg->rep, " TX profile sample:%u packets average:%u octets\n", nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_CNT]), nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_TOT]) / nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])); tipc_tlv_sprintf(msg->rep, " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% ", perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P0]), nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])), perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P1]), nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])), perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P2]), nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])), perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P3]), nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT]))); tipc_tlv_sprintf(msg->rep, "-16384:%u%% -32768:%u%% -66000:%u%%\n", perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P4]), nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])), perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P5]), nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])), perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P6]), nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT]))); tipc_tlv_sprintf(msg->rep, " RX states:%u probes:%u naks:%u defs:%u dups:%u\n", nla_get_u32(stats[TIPC_NLA_STATS_RX_STATES]), nla_get_u32(stats[TIPC_NLA_STATS_RX_PROBES]), nla_get_u32(stats[TIPC_NLA_STATS_RX_NACKS]), nla_get_u32(stats[TIPC_NLA_STATS_RX_DEFERRED]), nla_get_u32(stats[TIPC_NLA_STATS_DUPLICATES])); tipc_tlv_sprintf(msg->rep, " TX states:%u probes:%u naks:%u acks:%u dups:%u\n", nla_get_u32(stats[TIPC_NLA_STATS_TX_STATES]), nla_get_u32(stats[TIPC_NLA_STATS_TX_PROBES]), nla_get_u32(stats[TIPC_NLA_STATS_TX_NACKS]), nla_get_u32(stats[TIPC_NLA_STATS_TX_ACKS]), nla_get_u32(stats[TIPC_NLA_STATS_RETRANSMITTED])); tipc_tlv_sprintf(msg->rep, " Congestion link:%u Send queue max:%u avg:%u", nla_get_u32(stats[TIPC_NLA_STATS_LINK_CONGS]), nla_get_u32(stats[TIPC_NLA_STATS_MAX_QUEUE]), nla_get_u32(stats[TIPC_NLA_STATS_AVG_QUEUE])); return 0; } static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg, struct nlattr **attrs) { struct nlattr *link[TIPC_NLA_LINK_MAX + 1]; struct tipc_link_info link_info; int err; if (!attrs[TIPC_NLA_LINK]) return -EINVAL; 
err = nla_parse_nested_deprecated(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL, NULL); if (err) return err; link_info.dest = htonl(nla_get_flag(link[TIPC_NLA_LINK_DEST])); link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP])); nla_strscpy(link_info.str, link[TIPC_NLA_LINK_NAME], TIPC_MAX_LINK_NAME); return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info)); } static int __tipc_add_link_prop(struct sk_buff *skb, struct tipc_nl_compat_msg *msg, struct tipc_link_config *lc) { switch (msg->cmd) { case TIPC_CMD_SET_LINK_PRI: return nla_put_u32(skb, TIPC_NLA_PROP_PRIO, ntohl(lc->value)); case TIPC_CMD_SET_LINK_TOL: return nla_put_u32(skb, TIPC_NLA_PROP_TOL, ntohl(lc->value)); case TIPC_CMD_SET_LINK_WINDOW: return nla_put_u32(skb, TIPC_NLA_PROP_WIN, ntohl(lc->value)); } return -EINVAL; } static int tipc_nl_compat_media_set(struct sk_buff *skb, struct tipc_nl_compat_msg *msg) { struct nlattr *prop; struct nlattr *media; struct tipc_link_config *lc; lc = (struct tipc_link_config *)TLV_DATA(msg->req); media = nla_nest_start_noflag(skb, TIPC_NLA_MEDIA); if (!media) return -EMSGSIZE; if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name)) return -EMSGSIZE; prop = nla_nest_start_noflag(skb, TIPC_NLA_MEDIA_PROP); if (!prop) return -EMSGSIZE; __tipc_add_link_prop(skb, msg, lc); nla_nest_end(skb, prop); nla_nest_end(skb, media); return 0; } static int tipc_nl_compat_bearer_set(struct sk_buff *skb, struct tipc_nl_compat_msg *msg) { struct nlattr *prop; struct nlattr *bearer; struct tipc_link_config *lc; lc = (struct tipc_link_config *)TLV_DATA(msg->req); bearer = nla_nest_start_noflag(skb, TIPC_NLA_BEARER); if (!bearer) return -EMSGSIZE; if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name)) return -EMSGSIZE; prop = nla_nest_start_noflag(skb, TIPC_NLA_BEARER_PROP); if (!prop) return -EMSGSIZE; __tipc_add_link_prop(skb, msg, lc); nla_nest_end(skb, prop); nla_nest_end(skb, bearer); return 0; } static int __tipc_nl_compat_link_set(struct sk_buff *skb, struct tipc_nl_compat_msg *msg) { struct nlattr *prop; struct nlattr *link; struct tipc_link_config *lc; lc = (struct tipc_link_config *)TLV_DATA(msg->req); link = nla_nest_start_noflag(skb, TIPC_NLA_LINK); if (!link) return -EMSGSIZE; if (nla_put_string(skb, TIPC_NLA_LINK_NAME, lc->name)) return -EMSGSIZE; prop = nla_nest_start_noflag(skb, TIPC_NLA_LINK_PROP); if (!prop) return -EMSGSIZE; __tipc_add_link_prop(skb, msg, lc); nla_nest_end(skb, prop); nla_nest_end(skb, link); return 0; } static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd, struct sk_buff *skb, struct tipc_nl_compat_msg *msg) { struct tipc_link_config *lc; struct tipc_bearer *bearer; struct tipc_media *media; int len; lc = (struct tipc_link_config *)TLV_DATA(msg->req); len = TLV_GET_DATA_LEN(msg->req); len -= offsetof(struct tipc_link_config, name); if (len <= 0) return -EINVAL; len = min_t(int, len, TIPC_MAX_LINK_NAME); if (!string_is_terminated(lc->name, len)) return -EINVAL; media = tipc_media_find(lc->name); if (media) { cmd->doit = &__tipc_nl_media_set; return tipc_nl_compat_media_set(skb, msg); } bearer = tipc_bearer_find(msg->net, lc->name); if (bearer) { cmd->doit = &__tipc_nl_bearer_set; return tipc_nl_compat_bearer_set(skb, msg); } return __tipc_nl_compat_link_set(skb, msg); } static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd, struct sk_buff *skb, struct tipc_nl_compat_msg *msg) { char *name; struct nlattr *link; int len; name = (char *)TLV_DATA(msg->req); link = nla_nest_start_noflag(skb, TIPC_NLA_LINK); 
if (!link) return -EMSGSIZE; len = TLV_GET_DATA_LEN(msg->req); if (len <= 0) return -EINVAL; len = min_t(int, len, TIPC_MAX_LINK_NAME); if (!string_is_terminated(name, len)) return -EINVAL; if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name)) return -EMSGSIZE; nla_nest_end(skb, link); return 0; } static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg) { int i; u32 depth; struct tipc_name_table_query *ntq; static const char * const header[] = { "Type ", "Lower Upper ", "Port Identity ", "Publication Scope" }; ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req); if (TLV_GET_DATA_LEN(msg->req) < (int)sizeof(struct tipc_name_table_query)) return -EINVAL; depth = ntohl(ntq->depth); if (depth > 4) depth = 4; for (i = 0; i < depth; i++) tipc_tlv_sprintf(msg->rep, header[i]); tipc_tlv_sprintf(msg->rep, "\n"); return 0; } static int tipc_nl_compat_name_table_dump(struct tipc_nl_compat_msg *msg, struct nlattr **attrs) { char port_str[27]; struct tipc_name_table_query *ntq; struct nlattr *nt[TIPC_NLA_NAME_TABLE_MAX + 1]; struct nlattr *publ[TIPC_NLA_PUBL_MAX + 1]; u32 node, depth, type, lowbound, upbound; static const char * const scope_str[] = {"", " zone", " cluster", " node"}; int err; if (!attrs[TIPC_NLA_NAME_TABLE]) return -EINVAL; err = nla_parse_nested_deprecated(nt, TIPC_NLA_NAME_TABLE_MAX, attrs[TIPC_NLA_NAME_TABLE], NULL, NULL); if (err) return err; if (!nt[TIPC_NLA_NAME_TABLE_PUBL]) return -EINVAL; err = nla_parse_nested_deprecated(publ, TIPC_NLA_PUBL_MAX, nt[TIPC_NLA_NAME_TABLE_PUBL], NULL, NULL); if (err) return err; ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req); depth = ntohl(ntq->depth); type = ntohl(ntq->type); lowbound = ntohl(ntq->lowbound); upbound = ntohl(ntq->upbound); if (!(depth & TIPC_NTQ_ALLTYPES) && (type != nla_get_u32(publ[TIPC_NLA_PUBL_TYPE]))) return 0; if (lowbound && (lowbound > nla_get_u32(publ[TIPC_NLA_PUBL_UPPER]))) return 0; if (upbound && (upbound < nla_get_u32(publ[TIPC_NLA_PUBL_LOWER]))) return 0; tipc_tlv_sprintf(msg->rep, "%-10u ", nla_get_u32(publ[TIPC_NLA_PUBL_TYPE])); if (depth == 1) goto out; tipc_tlv_sprintf(msg->rep, "%-10u %-10u ", nla_get_u32(publ[TIPC_NLA_PUBL_LOWER]), nla_get_u32(publ[TIPC_NLA_PUBL_UPPER])); if (depth == 2) goto out; node = nla_get_u32(publ[TIPC_NLA_PUBL_NODE]); sprintf(port_str, "<%u.%u.%u:%u>", tipc_zone(node), tipc_cluster(node), tipc_node(node), nla_get_u32(publ[TIPC_NLA_PUBL_REF])); tipc_tlv_sprintf(msg->rep, "%-26s ", port_str); if (depth == 3) goto out; tipc_tlv_sprintf(msg->rep, "%-10u %s", nla_get_u32(publ[TIPC_NLA_PUBL_KEY]), scope_str[nla_get_u32(publ[TIPC_NLA_PUBL_SCOPE])]); out: tipc_tlv_sprintf(msg->rep, "\n"); return 0; } static int __tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg, struct nlattr **attrs) { u32 type, lower, upper; struct nlattr *publ[TIPC_NLA_PUBL_MAX + 1]; int err; if (!attrs[TIPC_NLA_PUBL]) return -EINVAL; err = nla_parse_nested_deprecated(publ, TIPC_NLA_PUBL_MAX, attrs[TIPC_NLA_PUBL], NULL, NULL); if (err) return err; type = nla_get_u32(publ[TIPC_NLA_PUBL_TYPE]); lower = nla_get_u32(publ[TIPC_NLA_PUBL_LOWER]); upper = nla_get_u32(publ[TIPC_NLA_PUBL_UPPER]); if (lower == upper) tipc_tlv_sprintf(msg->rep, " {%u,%u}", type, lower); else tipc_tlv_sprintf(msg->rep, " {%u,%u,%u}", type, lower, upper); return 0; } static int tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg, u32 sock) { int err; void *hdr; struct nlattr *nest; struct sk_buff *args; struct tipc_nl_compat_cmd_dump dump; args = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); if (!args) return 
-ENOMEM; hdr = genlmsg_put(args, 0, 0, &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET); if (!hdr) { kfree_skb(args); return -EMSGSIZE; } nest = nla_nest_start_noflag(args, TIPC_NLA_SOCK); if (!nest) { kfree_skb(args); return -EMSGSIZE; } if (nla_put_u32(args, TIPC_NLA_SOCK_REF, sock)) { kfree_skb(args); return -EMSGSIZE; } nla_nest_end(args, nest); genlmsg_end(args, hdr); dump.dumpit = tipc_nl_publ_dump; dump.format = __tipc_nl_compat_publ_dump; err = __tipc_nl_compat_dumpit(&dump, msg, args); kfree_skb(args); return err; } static int tipc_nl_compat_sk_dump(struct tipc_nl_compat_msg *msg, struct nlattr **attrs) { int err; u32 sock_ref; struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1]; if (!attrs[TIPC_NLA_SOCK]) return -EINVAL; err = nla_parse_nested_deprecated(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK], NULL, NULL); if (err) return err; sock_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]); tipc_tlv_sprintf(msg->rep, "%u:", sock_ref); if (sock[TIPC_NLA_SOCK_CON]) { u32 node; struct nlattr *con[TIPC_NLA_CON_MAX + 1]; err = nla_parse_nested_deprecated(con, TIPC_NLA_CON_MAX, sock[TIPC_NLA_SOCK_CON], NULL, NULL); if (err) return err; node = nla_get_u32(con[TIPC_NLA_CON_NODE]); tipc_tlv_sprintf(msg->rep, " connected to <%u.%u.%u:%u>", tipc_zone(node), tipc_cluster(node), tipc_node(node), nla_get_u32(con[TIPC_NLA_CON_SOCK])); if (con[TIPC_NLA_CON_FLAG]) tipc_tlv_sprintf(msg->rep, " via {%u,%u}\n", nla_get_u32(con[TIPC_NLA_CON_TYPE]), nla_get_u32(con[TIPC_NLA_CON_INST])); else tipc_tlv_sprintf(msg->rep, "\n"); } else if (sock[TIPC_NLA_SOCK_HAS_PUBL]) { tipc_tlv_sprintf(msg->rep, " bound to"); err = tipc_nl_compat_publ_dump(msg, sock_ref); if (err) return err; } tipc_tlv_sprintf(msg->rep, "\n"); return 0; } static int tipc_nl_compat_media_dump(struct tipc_nl_compat_msg *msg, struct nlattr **attrs) { struct nlattr *media[TIPC_NLA_MEDIA_MAX + 1]; int err; if (!attrs[TIPC_NLA_MEDIA]) return -EINVAL; err = nla_parse_nested_deprecated(media, TIPC_NLA_MEDIA_MAX, attrs[TIPC_NLA_MEDIA], NULL, NULL); if (err) return err; return tipc_add_tlv(msg->rep, TIPC_TLV_MEDIA_NAME, nla_data(media[TIPC_NLA_MEDIA_NAME]), nla_len(media[TIPC_NLA_MEDIA_NAME])); } static int tipc_nl_compat_node_dump(struct tipc_nl_compat_msg *msg, struct nlattr **attrs) { struct tipc_node_info node_info; struct nlattr *node[TIPC_NLA_NODE_MAX + 1]; int err; if (!attrs[TIPC_NLA_NODE]) return -EINVAL; err = nla_parse_nested_deprecated(node, TIPC_NLA_NODE_MAX, attrs[TIPC_NLA_NODE], NULL, NULL); if (err) return err; node_info.addr = htonl(nla_get_u32(node[TIPC_NLA_NODE_ADDR])); node_info.up = htonl(nla_get_flag(node[TIPC_NLA_NODE_UP])); return tipc_add_tlv(msg->rep, TIPC_TLV_NODE_INFO, &node_info, sizeof(node_info)); } static int tipc_nl_compat_net_set(struct tipc_nl_compat_cmd_doit *cmd, struct sk_buff *skb, struct tipc_nl_compat_msg *msg) { u32 val; struct nlattr *net; val = ntohl(*(__be32 *)TLV_DATA(msg->req)); net = nla_nest_start_noflag(skb, TIPC_NLA_NET); if (!net) return -EMSGSIZE; if (msg->cmd == TIPC_CMD_SET_NODE_ADDR) { if (nla_put_u32(skb, TIPC_NLA_NET_ADDR, val)) return -EMSGSIZE; } else if (msg->cmd == TIPC_CMD_SET_NETID) { if (nla_put_u32(skb, TIPC_NLA_NET_ID, val)) return -EMSGSIZE; } nla_nest_end(skb, net); return 0; } static int tipc_nl_compat_net_dump(struct tipc_nl_compat_msg *msg, struct nlattr **attrs) { __be32 id; struct nlattr *net[TIPC_NLA_NET_MAX + 1]; int err; if (!attrs[TIPC_NLA_NET]) return -EINVAL; err = nla_parse_nested_deprecated(net, TIPC_NLA_NET_MAX, attrs[TIPC_NLA_NET], NULL, NULL); if (err) return err; id = 
htonl(nla_get_u32(net[TIPC_NLA_NET_ID])); return tipc_add_tlv(msg->rep, TIPC_TLV_UNSIGNED, &id, sizeof(id)); } static int tipc_cmd_show_stats_compat(struct tipc_nl_compat_msg *msg) { msg->rep = tipc_tlv_alloc(ULTRA_STRING_MAX_LEN); if (!msg->rep) return -ENOMEM; tipc_tlv_init(msg->rep, TIPC_TLV_ULTRA_STRING); tipc_tlv_sprintf(msg->rep, "TIPC version " TIPC_MOD_VER "\n"); return 0; } static int tipc_nl_compat_handle(struct tipc_nl_compat_msg *msg) { struct tipc_nl_compat_cmd_dump dump; struct tipc_nl_compat_cmd_doit doit; memset(&dump, 0, sizeof(dump)); memset(&doit, 0, sizeof(doit)); switch (msg->cmd) { case TIPC_CMD_NOOP: msg->rep = tipc_tlv_alloc(0); if (!msg->rep) return -ENOMEM; return 0; case TIPC_CMD_GET_BEARER_NAMES: msg->rep_size = MAX_BEARERS * TLV_SPACE(TIPC_MAX_BEARER_NAME); dump.dumpit = tipc_nl_bearer_dump; dump.format = tipc_nl_compat_bearer_dump; return tipc_nl_compat_dumpit(&dump, msg); case TIPC_CMD_ENABLE_BEARER: msg->req_type = TIPC_TLV_BEARER_CONFIG; doit.doit = __tipc_nl_bearer_enable; doit.transcode = tipc_nl_compat_bearer_enable; return tipc_nl_compat_doit(&doit, msg); case TIPC_CMD_DISABLE_BEARER: msg->req_type = TIPC_TLV_BEARER_NAME; doit.doit = __tipc_nl_bearer_disable; doit.transcode = tipc_nl_compat_bearer_disable; return tipc_nl_compat_doit(&doit, msg); case TIPC_CMD_SHOW_LINK_STATS: msg->req_type = TIPC_TLV_LINK_NAME; msg->rep_size = ULTRA_STRING_MAX_LEN; msg->rep_type = TIPC_TLV_ULTRA_STRING; dump.dumpit = tipc_nl_node_dump_link; dump.format = tipc_nl_compat_link_stat_dump; return tipc_nl_compat_dumpit(&dump, msg); case TIPC_CMD_GET_LINKS: msg->req_type = TIPC_TLV_NET_ADDR; msg->rep_size = ULTRA_STRING_MAX_LEN; dump.dumpit = tipc_nl_node_dump_link; dump.format = tipc_nl_compat_link_dump; return tipc_nl_compat_dumpit(&dump, msg); case TIPC_CMD_SET_LINK_TOL: case TIPC_CMD_SET_LINK_PRI: case TIPC_CMD_SET_LINK_WINDOW: msg->req_type = TIPC_TLV_LINK_CONFIG; doit.doit = tipc_nl_node_set_link; doit.transcode = tipc_nl_compat_link_set; return tipc_nl_compat_doit(&doit, msg); case TIPC_CMD_RESET_LINK_STATS: msg->req_type = TIPC_TLV_LINK_NAME; doit.doit = tipc_nl_node_reset_link_stats; doit.transcode = tipc_nl_compat_link_reset_stats; return tipc_nl_compat_doit(&doit, msg); case TIPC_CMD_SHOW_NAME_TABLE: msg->req_type = TIPC_TLV_NAME_TBL_QUERY; msg->rep_size = ULTRA_STRING_MAX_LEN; msg->rep_type = TIPC_TLV_ULTRA_STRING; dump.header = tipc_nl_compat_name_table_dump_header; dump.dumpit = tipc_nl_name_table_dump; dump.format = tipc_nl_compat_name_table_dump; return tipc_nl_compat_dumpit(&dump, msg); case TIPC_CMD_SHOW_PORTS: msg->rep_size = ULTRA_STRING_MAX_LEN; msg->rep_type = TIPC_TLV_ULTRA_STRING; dump.dumpit = tipc_nl_sk_dump; dump.format = tipc_nl_compat_sk_dump; return tipc_nl_compat_dumpit(&dump, msg); case TIPC_CMD_GET_MEDIA_NAMES: msg->rep_size = MAX_MEDIA * TLV_SPACE(TIPC_MAX_MEDIA_NAME); dump.dumpit = tipc_nl_media_dump; dump.format = tipc_nl_compat_media_dump; return tipc_nl_compat_dumpit(&dump, msg); case TIPC_CMD_GET_NODES: msg->rep_size = ULTRA_STRING_MAX_LEN; dump.dumpit = tipc_nl_node_dump; dump.format = tipc_nl_compat_node_dump; return tipc_nl_compat_dumpit(&dump, msg); case TIPC_CMD_SET_NODE_ADDR: msg->req_type = TIPC_TLV_NET_ADDR; doit.doit = __tipc_nl_net_set; doit.transcode = tipc_nl_compat_net_set; return tipc_nl_compat_doit(&doit, msg); case TIPC_CMD_SET_NETID: msg->req_type = TIPC_TLV_UNSIGNED; doit.doit = __tipc_nl_net_set; doit.transcode = tipc_nl_compat_net_set; return tipc_nl_compat_doit(&doit, msg); case TIPC_CMD_GET_NETID: msg->rep_size = 
sizeof(u32); dump.dumpit = tipc_nl_net_dump; dump.format = tipc_nl_compat_net_dump; return tipc_nl_compat_dumpit(&dump, msg); case TIPC_CMD_SHOW_STATS: return tipc_cmd_show_stats_compat(msg); } return -EOPNOTSUPP; } static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info) { int err; int len; struct tipc_nl_compat_msg msg; struct nlmsghdr *req_nlh; struct nlmsghdr *rep_nlh; struct tipc_genlmsghdr *req_userhdr = genl_info_userhdr(info); memset(&msg, 0, sizeof(msg)); req_nlh = (struct nlmsghdr *)skb->data; msg.req = nlmsg_data(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN; msg.cmd = req_userhdr->cmd; msg.net = genl_info_net(info); msg.dst_sk = skb->sk; if ((msg.cmd & 0xC000) && (!netlink_net_capable(skb, CAP_NET_ADMIN))) { msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_NET_ADMIN); err = -EACCES; goto send; } msg.req_size = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN); if (msg.req_size && !TLV_OK(msg.req, msg.req_size)) { msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED); err = -EOPNOTSUPP; goto send; } err = tipc_nl_compat_handle(&msg); if ((err == -EOPNOTSUPP) || (err == -EPERM)) msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED); else if (err == -EINVAL) msg.rep = tipc_get_err_tlv(TIPC_CFG_TLV_ERROR); send: if (!msg.rep) return err; len = nlmsg_total_size(GENL_HDRLEN + TIPC_GENL_HDRLEN); skb_push(msg.rep, len); rep_nlh = nlmsg_hdr(msg.rep); memcpy(rep_nlh, info->nlhdr, len); rep_nlh->nlmsg_len = msg.rep->len; genlmsg_unicast(msg.net, msg.rep, NETLINK_CB(skb).portid); return err; } static const struct genl_small_ops tipc_genl_compat_ops[] = { { .cmd = TIPC_GENL_CMD, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = tipc_nl_compat_recv, }, }; static struct genl_family tipc_genl_compat_family __ro_after_init = { .name = TIPC_GENL_NAME, .version = TIPC_GENL_VERSION, .hdrsize = TIPC_GENL_HDRLEN, .maxattr = 0, .netnsok = true, .module = THIS_MODULE, .small_ops = tipc_genl_compat_ops, .n_small_ops = ARRAY_SIZE(tipc_genl_compat_ops), .resv_start_op = TIPC_GENL_CMD + 1, }; int __init tipc_netlink_compat_start(void) { int res; res = genl_register_family(&tipc_genl_compat_family); if (res) { pr_err("Failed to register legacy compat interface\n"); return res; } return 0; } void tipc_netlink_compat_stop(void) { genl_unregister_family(&tipc_genl_compat_family); }
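The compat layer above is built around one repeated shape: every legacy TIPC_CMD_* value is bound either to a dump pair (a modern dumpit handler plus a format callback that rewrites netlink attributes back into the old TLV layout) or to a doit pair (a transcode callback that builds the new attribute message plus the modern handler that consumes it). The following standalone sketch only illustrates that table-driven transcode-then-dispatch shape with hypothetical types and handlers of its own; it is not the kernel API, merely the pattern followed by tipc_nl_compat_handle().

/* Hypothetical, self-contained illustration of the transcode-then-dispatch
 * shape used by the legacy handler table; none of these types or functions
 * belong to the kernel.
 */
#include <stdio.h>

struct legacy_msg {
        int cmd;
        const char *payload;        /* stands in for the old TLV body */
};

struct compat_doit {
        int (*transcode)(const struct legacy_msg *m);   /* old TLV -> new attributes */
        int (*doit)(const struct legacy_msg *m);        /* modern handler */
};

static int demo_transcode(const struct legacy_msg *m)
{
        printf("transcoding cmd %d, payload '%s'\n", m->cmd, m->payload);
        return 0;
}

static int demo_doit(const struct legacy_msg *m)
{
        printf("executing cmd %d\n", m->cmd);
        return 0;
}

static int compat_handle(const struct legacy_msg *m)
{
        /* One entry of the per-command table: pair a transcoder with the
         * handler that consumes the transcoded message.
         */
        struct compat_doit op = {
                .transcode = demo_transcode,
                .doit = demo_doit,
        };
        int err = op.transcode(m);

        return err ? err : op.doit(m);
}

int main(void)
{
        struct legacy_msg m = { .cmd = 1, .payload = "netid=4711" };

        return compat_handle(&m);
}

Keeping the transcode step separate from the handler is what lets the same modern doit/dumpit functions serve both the old TLV interface and the native netlink one.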
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) * Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk) * Copyright Tomi Manninen OH2BNS (oh2bns@sral.fi) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/slab.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <net/arp.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <net/sock.h> #include <linux/uaccess.h> #include <linux/fcntl.h> #include <linux/termios.h> /* For TIOCINQ/OUTQ */ #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <linux/init.h> #include <linux/spinlock.h> #include <net/netrom.h> #include <linux/seq_file.h> #include <linux/export.h> static unsigned int nr_neigh_no = 1; static HLIST_HEAD(nr_node_list); static DEFINE_SPINLOCK(nr_node_list_lock); static HLIST_HEAD(nr_neigh_list); static DEFINE_SPINLOCK(nr_neigh_list_lock); static struct nr_node *nr_node_get(ax25_address *callsign) { struct nr_node *found = NULL; struct nr_node *nr_node; spin_lock_bh(&nr_node_list_lock); nr_node_for_each(nr_node, &nr_node_list) if (ax25cmp(callsign, &nr_node->callsign) == 0) { nr_node_hold(nr_node); found = nr_node; break; } spin_unlock_bh(&nr_node_list_lock); return found; } static struct nr_neigh *nr_neigh_get_dev(ax25_address *callsign, struct net_device *dev) { struct nr_neigh *found = NULL; struct nr_neigh *nr_neigh; spin_lock_bh(&nr_neigh_list_lock); nr_neigh_for_each(nr_neigh, &nr_neigh_list) if (ax25cmp(callsign, &nr_neigh->callsign) == 0 && nr_neigh->dev == dev) { nr_neigh_hold(nr_neigh); found = nr_neigh; break; } spin_unlock_bh(&nr_neigh_list_lock); return found; } static void nr_remove_neigh(struct nr_neigh *); /* re-sort the routes in quality order. */ static void re_sort_routes(struct nr_node *nr_node, int x, int y) { if (nr_node->routes[y].quality > nr_node->routes[x].quality) { if (nr_node->which == x) nr_node->which = y; else if (nr_node->which == y) nr_node->which = x; swap(nr_node->routes[x], nr_node->routes[y]); } } /* * Add a new route to a node, and in the process add the node and the * neighbour if it is new. */ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic, ax25_address *ax25, ax25_digi *ax25_digi, struct net_device *dev, int quality, int obs_count) { struct nr_node *nr_node; struct nr_neigh *nr_neigh; int i, found; struct net_device *odev; if ((odev=nr_dev_get(nr)) != NULL) { /* Can't add routes to ourself */ dev_put(odev); return -EINVAL; } nr_node = nr_node_get(nr); nr_neigh = nr_neigh_get_dev(ax25, dev); /* * The L2 link to a neighbour has failed in the past * and now a frame comes from this neighbour.
We assume * it was a temporary trouble with the link and reset the * routes now (and not wait for a node broadcast). */ if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) { struct nr_node *nr_nodet; spin_lock_bh(&nr_node_list_lock); nr_node_for_each(nr_nodet, &nr_node_list) { nr_node_lock(nr_nodet); for (i = 0; i < nr_nodet->count; i++) if (nr_nodet->routes[i].neighbour == nr_neigh) if (i < nr_nodet->which) nr_nodet->which = i; nr_node_unlock(nr_nodet); } spin_unlock_bh(&nr_node_list_lock); } if (nr_neigh != NULL) nr_neigh->failed = 0; if (quality == 0 && nr_neigh != NULL && nr_node != NULL) { nr_neigh_put(nr_neigh); nr_node_put(nr_node); return 0; } if (nr_neigh == NULL) { if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL) { if (nr_node) nr_node_put(nr_node); return -ENOMEM; } nr_neigh->callsign = *ax25; nr_neigh->digipeat = NULL; nr_neigh->ax25 = NULL; nr_neigh->dev = dev; nr_neigh->quality = READ_ONCE(sysctl_netrom_default_path_quality); nr_neigh->locked = 0; nr_neigh->count = 0; nr_neigh->number = nr_neigh_no++; nr_neigh->failed = 0; refcount_set(&nr_neigh->refcount, 1); if (ax25_digi != NULL && ax25_digi->ndigi > 0) { nr_neigh->digipeat = kmemdup(ax25_digi, sizeof(*ax25_digi), GFP_KERNEL); if (nr_neigh->digipeat == NULL) { kfree(nr_neigh); if (nr_node) nr_node_put(nr_node); return -ENOMEM; } } spin_lock_bh(&nr_neigh_list_lock); hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list); nr_neigh_hold(nr_neigh); spin_unlock_bh(&nr_neigh_list_lock); } if (quality != 0 && ax25cmp(nr, ax25) == 0 && !nr_neigh->locked) nr_neigh->quality = quality; if (nr_node == NULL) { if ((nr_node = kmalloc(sizeof(*nr_node), GFP_ATOMIC)) == NULL) { if (nr_neigh) nr_neigh_put(nr_neigh); return -ENOMEM; } nr_node->callsign = *nr; strscpy(nr_node->mnemonic, mnemonic); nr_node->which = 0; nr_node->count = 1; refcount_set(&nr_node->refcount, 1); spin_lock_init(&nr_node->node_lock); nr_node->routes[0].quality = quality; nr_node->routes[0].obs_count = obs_count; nr_node->routes[0].neighbour = nr_neigh; nr_neigh_hold(nr_neigh); nr_neigh->count++; spin_lock_bh(&nr_node_list_lock); hlist_add_head(&nr_node->node_node, &nr_node_list); /* refcount initialized at 1 */ spin_unlock_bh(&nr_node_list_lock); nr_neigh_put(nr_neigh); return 0; } nr_node_lock(nr_node); if (quality != 0) strscpy(nr_node->mnemonic, mnemonic); for (found = 0, i = 0; i < nr_node->count; i++) { if (nr_node->routes[i].neighbour == nr_neigh) { nr_node->routes[i].quality = quality; nr_node->routes[i].obs_count = obs_count; found = 1; break; } } if (!found) { /* We have space at the bottom, slot it in */ if (nr_node->count < 3) { nr_node->routes[2] = nr_node->routes[1]; nr_node->routes[1] = nr_node->routes[0]; nr_node->routes[0].quality = quality; nr_node->routes[0].obs_count = obs_count; nr_node->routes[0].neighbour = nr_neigh; nr_node->which++; nr_node->count++; nr_neigh_hold(nr_neigh); nr_neigh->count++; } else { /* It must be better than the worst */ if (quality > nr_node->routes[2].quality) { nr_node->routes[2].neighbour->count--; nr_neigh_put(nr_node->routes[2].neighbour); if (nr_node->routes[2].neighbour->count == 0 && !nr_node->routes[2].neighbour->locked) nr_remove_neigh(nr_node->routes[2].neighbour); nr_node->routes[2].quality = quality; nr_node->routes[2].obs_count = obs_count; nr_node->routes[2].neighbour = nr_neigh; nr_neigh_hold(nr_neigh); nr_neigh->count++; } } } /* Now re-sort the routes in quality order */ switch (nr_node->count) { case 3: re_sort_routes(nr_node, 0, 1); re_sort_routes(nr_node, 1, 2); fallthrough; 
case 2: re_sort_routes(nr_node, 0, 1); break; case 1: break; } for (i = 0; i < nr_node->count; i++) { if (nr_node->routes[i].neighbour == nr_neigh) { if (i < nr_node->which) nr_node->which = i; break; } } nr_neigh_put(nr_neigh); nr_node_unlock(nr_node); nr_node_put(nr_node); return 0; } static void nr_remove_node_locked(struct nr_node *nr_node) { lockdep_assert_held(&nr_node_list_lock); hlist_del_init(&nr_node->node_node); nr_node_put(nr_node); } static inline void __nr_remove_neigh(struct nr_neigh *nr_neigh) { hlist_del_init(&nr_neigh->neigh_node); nr_neigh_put(nr_neigh); } #define nr_remove_neigh_locked(__neigh) \ __nr_remove_neigh(__neigh) static void nr_remove_neigh(struct nr_neigh *nr_neigh) { spin_lock_bh(&nr_neigh_list_lock); __nr_remove_neigh(nr_neigh); spin_unlock_bh(&nr_neigh_list_lock); } /* * "Delete" a node. Strictly speaking remove a route to a node. The node * is only deleted if no routes are left to it. */ static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct net_device *dev) { struct nr_node *nr_node; struct nr_neigh *nr_neigh; int i; nr_node = nr_node_get(callsign); if (nr_node == NULL) return -EINVAL; nr_neigh = nr_neigh_get_dev(neighbour, dev); if (nr_neigh == NULL) { nr_node_put(nr_node); return -EINVAL; } spin_lock_bh(&nr_node_list_lock); nr_node_lock(nr_node); for (i = 0; i < nr_node->count; i++) { if (nr_node->routes[i].neighbour == nr_neigh) { nr_neigh->count--; nr_neigh_put(nr_neigh); if (nr_neigh->count == 0 && !nr_neigh->locked) nr_remove_neigh(nr_neigh); nr_neigh_put(nr_neigh); nr_node->count--; if (nr_node->count == 0) { nr_remove_node_locked(nr_node); } else { switch (i) { case 0: nr_node->routes[0] = nr_node->routes[1]; fallthrough; case 1: nr_node->routes[1] = nr_node->routes[2]; fallthrough; case 2: break; } nr_node_put(nr_node); } nr_node_unlock(nr_node); spin_unlock_bh(&nr_node_list_lock); return 0; } } nr_neigh_put(nr_neigh); nr_node_unlock(nr_node); spin_unlock_bh(&nr_node_list_lock); nr_node_put(nr_node); return -EINVAL; } /* * Lock a neighbour with a quality. */ static int __must_check nr_add_neigh(ax25_address *callsign, ax25_digi *ax25_digi, struct net_device *dev, unsigned int quality) { struct nr_neigh *nr_neigh; nr_neigh = nr_neigh_get_dev(callsign, dev); if (nr_neigh) { nr_neigh->quality = quality; nr_neigh->locked = 1; nr_neigh_put(nr_neigh); return 0; } if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL) return -ENOMEM; nr_neigh->callsign = *callsign; nr_neigh->digipeat = NULL; nr_neigh->ax25 = NULL; nr_neigh->dev = dev; nr_neigh->quality = quality; nr_neigh->locked = 1; nr_neigh->count = 0; nr_neigh->number = nr_neigh_no++; nr_neigh->failed = 0; refcount_set(&nr_neigh->refcount, 1); if (ax25_digi != NULL && ax25_digi->ndigi > 0) { nr_neigh->digipeat = kmemdup(ax25_digi, sizeof(*ax25_digi), GFP_KERNEL); if (nr_neigh->digipeat == NULL) { kfree(nr_neigh); return -ENOMEM; } } spin_lock_bh(&nr_neigh_list_lock); hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list); /* refcount is initialized at 1 */ spin_unlock_bh(&nr_neigh_list_lock); return 0; } /* * "Delete" a neighbour. The neighbour is only removed if the number * of nodes that may use it is zero. 
*/ static int nr_del_neigh(ax25_address *callsign, struct net_device *dev, unsigned int quality) { struct nr_neigh *nr_neigh; nr_neigh = nr_neigh_get_dev(callsign, dev); if (nr_neigh == NULL) return -EINVAL; nr_neigh->quality = quality; nr_neigh->locked = 0; if (nr_neigh->count == 0) nr_remove_neigh(nr_neigh); nr_neigh_put(nr_neigh); return 0; } /* * Decrement the obsolescence count by one. If a route is reduced to a * count of zero, remove it. Also remove any unlocked neighbours with * zero nodes routing via it. */ static int nr_dec_obs(void) { struct nr_neigh *nr_neigh; struct nr_node *s; struct hlist_node *nodet; int i; spin_lock_bh(&nr_node_list_lock); nr_node_for_each_safe(s, nodet, &nr_node_list) { nr_node_lock(s); for (i = 0; i < s->count; i++) { switch (s->routes[i].obs_count) { case 0: /* A locked entry */ break; case 1: /* From 1 -> 0 */ nr_neigh = s->routes[i].neighbour; nr_neigh->count--; nr_neigh_put(nr_neigh); if (nr_neigh->count == 0 && !nr_neigh->locked) nr_remove_neigh(nr_neigh); s->count--; switch (i) { case 0: s->routes[0] = s->routes[1]; fallthrough; case 1: s->routes[1] = s->routes[2]; break; case 2: break; } break; default: s->routes[i].obs_count--; break; } } if (s->count <= 0) nr_remove_node_locked(s); nr_node_unlock(s); } spin_unlock_bh(&nr_node_list_lock); return 0; } /* * A device has been removed. Remove its routes and neighbours. */ void nr_rt_device_down(struct net_device *dev) { struct nr_neigh *s; struct hlist_node *nodet, *node2t; struct nr_node *t; int i; spin_lock_bh(&nr_neigh_list_lock); nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) { if (s->dev == dev) { spin_lock_bh(&nr_node_list_lock); nr_node_for_each_safe(t, node2t, &nr_node_list) { nr_node_lock(t); for (i = 0; i < t->count; i++) { if (t->routes[i].neighbour == s) { t->count--; switch (i) { case 0: t->routes[0] = t->routes[1]; fallthrough; case 1: t->routes[1] = t->routes[2]; break; case 2: break; } } } if (t->count <= 0) nr_remove_node_locked(t); nr_node_unlock(t); } spin_unlock_bh(&nr_node_list_lock); nr_remove_neigh_locked(s); } } spin_unlock_bh(&nr_neigh_list_lock); } /* * Check that the device given is a valid AX.25 interface that is "up". * Or a valid ethernet interface with an AX.25 callsign binding. */ static struct net_device *nr_ax25_dev_get(char *devname) { struct net_device *dev; if ((dev = dev_get_by_name(&init_net, devname)) == NULL) return NULL; if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25) return dev; dev_put(dev); return NULL; } /* * Find the first active NET/ROM device, usually "nr0". */ struct net_device *nr_dev_first(void) { struct net_device *dev, *first = NULL; rcu_read_lock(); for_each_netdev_rcu(&init_net, dev) { if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM) if (first == NULL || strncmp(dev->name, first->name, 3) < 0) first = dev; } dev_hold(first); rcu_read_unlock(); return first; } /* * Find the NET/ROM device for the given callsign. 
*/ struct net_device *nr_dev_get(ax25_address *addr) { struct net_device *dev; rcu_read_lock(); for_each_netdev_rcu(&init_net, dev) { if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM && ax25cmp(addr, (const ax25_address *)dev->dev_addr) == 0) { dev_hold(dev); goto out; } } dev = NULL; out: rcu_read_unlock(); return dev; } static ax25_digi *nr_call_to_digi(ax25_digi *digi, int ndigis, ax25_address *digipeaters) { int i; if (ndigis == 0) return NULL; for (i = 0; i < ndigis; i++) { digi->calls[i] = digipeaters[i]; digi->repeated[i] = 0; } digi->ndigi = ndigis; digi->lastrepeat = -1; return digi; } /* * Handle the ioctls that control the routing functions. */ int nr_rt_ioctl(unsigned int cmd, void __user *arg) { struct nr_route_struct nr_route; struct net_device *dev; ax25_digi digi; int ret; switch (cmd) { case SIOCADDRT: if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct))) return -EFAULT; if (nr_route.ndigis > AX25_MAX_DIGIS) return -EINVAL; if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL) return -EINVAL; switch (nr_route.type) { case NETROM_NODE: if (strnlen(nr_route.mnemonic, 7) == 7) { ret = -EINVAL; break; } ret = nr_add_node(&nr_route.callsign, nr_route.mnemonic, &nr_route.neighbour, nr_call_to_digi(&digi, nr_route.ndigis, nr_route.digipeaters), dev, nr_route.quality, nr_route.obs_count); break; case NETROM_NEIGH: ret = nr_add_neigh(&nr_route.callsign, nr_call_to_digi(&digi, nr_route.ndigis, nr_route.digipeaters), dev, nr_route.quality); break; default: ret = -EINVAL; } dev_put(dev); return ret; case SIOCDELRT: if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct))) return -EFAULT; if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL) return -EINVAL; switch (nr_route.type) { case NETROM_NODE: ret = nr_del_node(&nr_route.callsign, &nr_route.neighbour, dev); break; case NETROM_NEIGH: ret = nr_del_neigh(&nr_route.callsign, dev, nr_route.quality); break; default: ret = -EINVAL; } dev_put(dev); return ret; case SIOCNRDECOBS: return nr_dec_obs(); default: return -EINVAL; } return 0; } /* * A level 2 link has timed out, therefore it appears to be a poor link, * then don't use that neighbour until it is reset. */ void nr_link_failed(ax25_cb *ax25, int reason) { struct nr_neigh *s, *nr_neigh = NULL; struct nr_node *nr_node = NULL; spin_lock_bh(&nr_neigh_list_lock); nr_neigh_for_each(s, &nr_neigh_list) { if (s->ax25 == ax25) { nr_neigh_hold(s); nr_neigh = s; break; } } spin_unlock_bh(&nr_neigh_list_lock); if (nr_neigh == NULL) return; nr_neigh->ax25 = NULL; ax25_cb_put(ax25); if (++nr_neigh->failed < READ_ONCE(sysctl_netrom_link_fails_count)) { nr_neigh_put(nr_neigh); return; } spin_lock_bh(&nr_node_list_lock); nr_node_for_each(nr_node, &nr_node_list) { nr_node_lock(nr_node); if (nr_node->which < nr_node->count && nr_node->routes[nr_node->which].neighbour == nr_neigh) nr_node->which++; nr_node_unlock(nr_node); } spin_unlock_bh(&nr_node_list_lock); nr_neigh_put(nr_neigh); } /* * Route a frame to an appropriate AX.25 connection. A NULL ax25_cb * indicates an internally generated frame. */ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25) { ax25_address *nr_src, *nr_dest; struct nr_neigh *nr_neigh; struct nr_node *nr_node; struct net_device *dev; unsigned char *dptr; ax25_cb *ax25s; int ret; struct sk_buff *skbn; /* * Reject malformed packets early. 
Check that it contains at least 2 * addresses and 1 byte more for Time-To-Live */ if (skb->len < 2 * sizeof(ax25_address) + 1) return 0; nr_src = (ax25_address *)(skb->data + 0); nr_dest = (ax25_address *)(skb->data + 7); if (ax25 != NULL) { ret = nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat, ax25->ax25_dev->dev, 0, READ_ONCE(sysctl_netrom_obsolescence_count_initialiser)); if (ret) return ret; } if ((dev = nr_dev_get(nr_dest)) != NULL) { /* Its for me */ if (ax25 == NULL) /* Its from me */ ret = nr_loopback_queue(skb); else ret = nr_rx_frame(skb, dev); dev_put(dev); return ret; } if (!READ_ONCE(sysctl_netrom_routing_control) && ax25 != NULL) return 0; /* Its Time-To-Live has expired */ if (skb->data[14] == 1) { return 0; } nr_node = nr_node_get(nr_dest); if (nr_node == NULL) return 0; nr_node_lock(nr_node); if (nr_node->which >= nr_node->count) { nr_node_unlock(nr_node); nr_node_put(nr_node); return 0; } nr_neigh = nr_node->routes[nr_node->which].neighbour; if ((dev = nr_dev_first()) == NULL) { nr_node_unlock(nr_node); nr_node_put(nr_node); return 0; } /* We are going to change the netrom headers so we should get our own skb, we also did not know until now how much header space we had to reserve... - RXQ */ if ((skbn=skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC)) == NULL) { nr_node_unlock(nr_node); nr_node_put(nr_node); dev_put(dev); return 0; } kfree_skb(skb); skb=skbn; skb->data[14]--; dptr = skb_push(skb, 1); *dptr = AX25_P_NETROM; ax25s = nr_neigh->ax25; nr_neigh->ax25 = ax25_send_frame(skb, 256, (const ax25_address *)dev->dev_addr, &nr_neigh->callsign, nr_neigh->digipeat, nr_neigh->dev); if (ax25s) ax25_cb_put(ax25s); dev_put(dev); ret = (nr_neigh->ax25 != NULL); nr_node_unlock(nr_node); nr_node_put(nr_node); return ret; } #ifdef CONFIG_PROC_FS static void *nr_node_start(struct seq_file *seq, loff_t *pos) __acquires(&nr_node_list_lock) { spin_lock_bh(&nr_node_list_lock); return seq_hlist_start_head(&nr_node_list, *pos); } static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos) { return seq_hlist_next(v, &nr_node_list, pos); } static void nr_node_stop(struct seq_file *seq, void *v) __releases(&nr_node_list_lock) { spin_unlock_bh(&nr_node_list_lock); } static int nr_node_show(struct seq_file *seq, void *v) { char buf[11]; int i; if (v == SEQ_START_TOKEN) seq_puts(seq, "callsign mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n"); else { struct nr_node *nr_node = hlist_entry(v, struct nr_node, node_node); nr_node_lock(nr_node); seq_printf(seq, "%-9s %-7s %d %d", ax2asc(buf, &nr_node->callsign), (nr_node->mnemonic[0] == '\0') ? 
"*" : nr_node->mnemonic, nr_node->which + 1, nr_node->count); for (i = 0; i < nr_node->count; i++) { seq_printf(seq, " %3d %d %05d", nr_node->routes[i].quality, nr_node->routes[i].obs_count, nr_node->routes[i].neighbour->number); } nr_node_unlock(nr_node); seq_puts(seq, "\n"); } return 0; } const struct seq_operations nr_node_seqops = { .start = nr_node_start, .next = nr_node_next, .stop = nr_node_stop, .show = nr_node_show, }; static void *nr_neigh_start(struct seq_file *seq, loff_t *pos) __acquires(&nr_neigh_list_lock) { spin_lock_bh(&nr_neigh_list_lock); return seq_hlist_start_head(&nr_neigh_list, *pos); } static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos) { return seq_hlist_next(v, &nr_neigh_list, pos); } static void nr_neigh_stop(struct seq_file *seq, void *v) __releases(&nr_neigh_list_lock) { spin_unlock_bh(&nr_neigh_list_lock); } static int nr_neigh_show(struct seq_file *seq, void *v) { char buf[11]; int i; if (v == SEQ_START_TOKEN) seq_puts(seq, "addr callsign dev qual lock count failed digipeaters\n"); else { struct nr_neigh *nr_neigh; nr_neigh = hlist_entry(v, struct nr_neigh, neigh_node); seq_printf(seq, "%05d %-9s %-4s %3d %d %3d %3d", nr_neigh->number, ax2asc(buf, &nr_neigh->callsign), nr_neigh->dev ? nr_neigh->dev->name : "???", nr_neigh->quality, nr_neigh->locked, nr_neigh->count, nr_neigh->failed); if (nr_neigh->digipeat != NULL) { for (i = 0; i < nr_neigh->digipeat->ndigi; i++) seq_printf(seq, " %s", ax2asc(buf, &nr_neigh->digipeat->calls[i])); } seq_puts(seq, "\n"); } return 0; } const struct seq_operations nr_neigh_seqops = { .start = nr_neigh_start, .next = nr_neigh_next, .stop = nr_neigh_stop, .show = nr_neigh_show, }; #endif /* * Free all memory associated with the nodes and routes lists. */ void nr_rt_free(void) { struct nr_neigh *s = NULL; struct nr_node *t = NULL; struct hlist_node *nodet; spin_lock_bh(&nr_neigh_list_lock); spin_lock_bh(&nr_node_list_lock); nr_node_for_each_safe(t, nodet, &nr_node_list) { nr_node_lock(t); nr_remove_node_locked(t); nr_node_unlock(t); } nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) { while(s->count) { s->count--; nr_neigh_put(s); } nr_remove_neigh_locked(s); } spin_unlock_bh(&nr_node_list_lock); spin_unlock_bh(&nr_neigh_list_lock); }
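nr_add_node() above keeps at most three routes per destination node and relies on re_sort_routes() doing adjacent pairwise swaps so that routes[0] always ends up holding the highest-quality route, with routes[2] as the eviction candidate. The following self-contained sketch of that insert-and-bubble step uses a simplified route structure rather than the kernel's nr_node/nr_route types, but behaves the same way:

/* Self-contained sketch of the "keep the best three routes, highest quality
 * first" behaviour; simplified structures, not the kernel's nr_node types.
 */
#include <stdio.h>

struct demo_route {
        int quality;
        int neigh;
};

static void sort_pair(struct demo_route *r, int x, int y)
{
        if (r[y].quality > r[x].quality) {
                struct demo_route tmp = r[x];

                r[x] = r[y];
                r[y] = tmp;
        }
}

/* Mirror of the insertion step: new routes enter at slot 0 while there is
 * room, otherwise they replace the worst route only if they beat it, and a
 * couple of pairwise passes restore descending quality order.
 */
static void add_route(struct demo_route *r, int *count, int quality, int neigh)
{
        if (*count < 3) {
                r[2] = r[1];
                r[1] = r[0];
                r[0].quality = quality;
                r[0].neigh = neigh;
                (*count)++;
        } else if (quality > r[2].quality) {
                r[2].quality = quality;
                r[2].neigh = neigh;
        }

        switch (*count) {
        case 3:
                sort_pair(r, 0, 1);
                sort_pair(r, 1, 2);
                /* fall through */
        case 2:
                sort_pair(r, 0, 1);
                break;
        default:
                break;
        }
}

int main(void)
{
        struct demo_route r[3] = { { 0, 0 } };
        int count = 0, i;

        add_route(r, &count, 100, 1);
        add_route(r, &count, 250, 2);
        add_route(r, &count, 180, 3);
        add_route(r, &count, 200, 4);       /* evicts the quality-100 route */

        for (i = 0; i < count; i++)
                printf("slot %d: quality %d via neighbour %d\n",
                       i, r[i].quality, r[i].neigh);
        return 0;
}

With only three slots, the two pairwise passes (plus the fall-through pass) are enough to restore full ordering after a single insertion or replacement, which is why the kernel code gets away without a general-purpose sort.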
/* RFCOMM implementation for Linux Bluetooth stack (BlueZ). Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com> Copyright (C) 2002 Marcel Holtmann <marcel@holtmann.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* * RFCOMM sockets.
*/ #include <linux/compat.h> #include <linux/export.h> #include <linux/debugfs.h> #include <linux/sched/signal.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #include <net/bluetooth/rfcomm.h> static const struct proto_ops rfcomm_sock_ops; static struct bt_sock_list rfcomm_sk_list = { .lock = __RW_LOCK_UNLOCKED(rfcomm_sk_list.lock) }; static void rfcomm_sock_close(struct sock *sk); static void rfcomm_sock_kill(struct sock *sk); /* ---- DLC callbacks ---- * * called under rfcomm_dlc_lock() */ static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb) { struct sock *sk = d->owner; if (!sk) return; atomic_add(skb->len, &sk->sk_rmem_alloc); skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_data_ready(sk); if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) rfcomm_dlc_throttle(d); } static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err) { struct sock *sk = d->owner, *parent; if (!sk) return; BT_DBG("dlc %p state %ld err %d", d, d->state, err); lock_sock(sk); if (err) sk->sk_err = err; sk->sk_state = d->state; parent = bt_sk(sk)->parent; if (parent) { if (d->state == BT_CLOSED) { sock_set_flag(sk, SOCK_ZAPPED); bt_accept_unlink(sk); } parent->sk_data_ready(parent); } else { if (d->state == BT_CONNECTED) rfcomm_session_getaddr(d->session, &rfcomm_pi(sk)->src, NULL); sk->sk_state_change(sk); } release_sock(sk); if (parent && sock_flag(sk, SOCK_ZAPPED)) { /* We have to drop DLC lock here, otherwise * rfcomm_sock_destruct() will dead lock. */ rfcomm_dlc_unlock(d); rfcomm_sock_kill(sk); rfcomm_dlc_lock(d); } } /* ---- Socket functions ---- */ static struct sock *__rfcomm_get_listen_sock_by_addr(u8 channel, bdaddr_t *src) { struct sock *sk = NULL; sk_for_each(sk, &rfcomm_sk_list.head) { if (rfcomm_pi(sk)->channel != channel) continue; if (bacmp(&rfcomm_pi(sk)->src, src)) continue; if (sk->sk_state == BT_BOUND || sk->sk_state == BT_LISTEN) break; } return sk ? sk : NULL; } /* Find socket with channel and source bdaddr. * Returns closest match. */ static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src) { struct sock *sk = NULL, *sk1 = NULL; read_lock(&rfcomm_sk_list.lock); sk_for_each(sk, &rfcomm_sk_list.head) { if (state && sk->sk_state != state) continue; if (rfcomm_pi(sk)->channel == channel) { /* Exact match. */ if (!bacmp(&rfcomm_pi(sk)->src, src)) break; /* Closest match */ if (!bacmp(&rfcomm_pi(sk)->src, BDADDR_ANY)) sk1 = sk; } } read_unlock(&rfcomm_sk_list.lock); return sk ? sk : sk1; } static void rfcomm_sock_destruct(struct sock *sk) { struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; BT_DBG("sk %p dlc %p", sk, d); skb_queue_purge(&sk->sk_receive_queue); skb_queue_purge(&sk->sk_write_queue); rfcomm_dlc_lock(d); rfcomm_pi(sk)->dlc = NULL; /* Detach DLC if it's owned by this socket */ if (d->owner == sk) d->owner = NULL; rfcomm_dlc_unlock(d); rfcomm_dlc_put(d); } static void rfcomm_sock_cleanup_listen(struct sock *parent) { struct sock *sk; BT_DBG("parent %p", parent); /* Close not yet accepted dlcs */ while ((sk = bt_accept_dequeue(parent, NULL))) { rfcomm_sock_close(sk); rfcomm_sock_kill(sk); } parent->sk_state = BT_CLOSED; sock_set_flag(parent, SOCK_ZAPPED); } /* Kill socket (only if zapped and orphan) * Must be called on unlocked socket. 
*/ static void rfcomm_sock_kill(struct sock *sk) { if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) return; BT_DBG("sk %p state %d refcnt %d", sk, sk->sk_state, refcount_read(&sk->sk_refcnt)); /* Kill poor orphan */ bt_sock_unlink(&rfcomm_sk_list, sk); sock_set_flag(sk, SOCK_DEAD); sock_put(sk); } static void __rfcomm_sock_close(struct sock *sk) { struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket); switch (sk->sk_state) { case BT_LISTEN: rfcomm_sock_cleanup_listen(sk); break; case BT_CONNECT: case BT_CONNECT2: case BT_CONFIG: case BT_CONNECTED: rfcomm_dlc_close(d, 0); fallthrough; default: sock_set_flag(sk, SOCK_ZAPPED); break; } } /* Close socket. * Must be called on unlocked socket. */ static void rfcomm_sock_close(struct sock *sk) { lock_sock(sk); __rfcomm_sock_close(sk); release_sock(sk); } static void rfcomm_sock_init(struct sock *sk, struct sock *parent) { struct rfcomm_pinfo *pi = rfcomm_pi(sk); BT_DBG("sk %p", sk); if (parent) { sk->sk_type = parent->sk_type; pi->dlc->defer_setup = test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags); pi->sec_level = rfcomm_pi(parent)->sec_level; pi->role_switch = rfcomm_pi(parent)->role_switch; security_sk_clone(parent, sk); } else { pi->dlc->defer_setup = 0; pi->sec_level = BT_SECURITY_LOW; pi->role_switch = 0; } pi->dlc->sec_level = pi->sec_level; pi->dlc->role_switch = pi->role_switch; } static struct proto rfcomm_proto = { .name = "RFCOMM", .owner = THIS_MODULE, .obj_size = sizeof(struct rfcomm_pinfo) }; static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio, int kern) { struct rfcomm_dlc *d; struct sock *sk; d = rfcomm_dlc_alloc(prio); if (!d) return NULL; sk = bt_sock_alloc(net, sock, &rfcomm_proto, proto, prio, kern); if (!sk) { rfcomm_dlc_free(d); return NULL; } d->data_ready = rfcomm_sk_data_ready; d->state_change = rfcomm_sk_state_change; rfcomm_pi(sk)->dlc = d; d->owner = sk; sk->sk_destruct = rfcomm_sock_destruct; sk->sk_sndtimeo = RFCOMM_CONN_TIMEOUT; sk->sk_sndbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10; sk->sk_rcvbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10; bt_sock_link(&rfcomm_sk_list, sk); BT_DBG("sk %p", sk); return sk; } static int rfcomm_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; BT_DBG("sock %p", sock); sock->state = SS_UNCONNECTED; if (sock->type != SOCK_STREAM && sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; sock->ops = &rfcomm_sock_ops; sk = rfcomm_sock_alloc(net, sock, protocol, GFP_ATOMIC, kern); if (!sk) return -ENOMEM; rfcomm_sock_init(sk, NULL); return 0; } static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { struct sockaddr_rc sa; struct sock *sk = sock->sk; int len, err = 0; if (!addr || addr_len < offsetofend(struct sockaddr, sa_family) || addr->sa_family != AF_BLUETOOTH) return -EINVAL; memset(&sa, 0, sizeof(sa)); len = min_t(unsigned int, sizeof(sa), addr_len); memcpy(&sa, addr, len); BT_DBG("sk %p %pMR", sk, &sa.rc_bdaddr); lock_sock(sk); if (sk->sk_state != BT_OPEN) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; goto done; } write_lock(&rfcomm_sk_list.lock); if (sa.rc_channel && __rfcomm_get_listen_sock_by_addr(sa.rc_channel, &sa.rc_bdaddr)) { err = -EADDRINUSE; } else { /* Save source address */ bacpy(&rfcomm_pi(sk)->src, &sa.rc_bdaddr); rfcomm_pi(sk)->channel = sa.rc_channel; sk->sk_state = BT_BOUND; } write_unlock(&rfcomm_sk_list.lock); done: release_sock(sk); return err; } 
static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { struct sockaddr_rc *sa = (struct sockaddr_rc *) addr; struct sock *sk = sock->sk; struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; int err = 0; BT_DBG("sk %p", sk); if (alen < sizeof(struct sockaddr_rc) || addr->sa_family != AF_BLUETOOTH) return -EINVAL; sock_hold(sk); lock_sock(sk); if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; goto done; } sk->sk_state = BT_CONNECT; bacpy(&rfcomm_pi(sk)->dst, &sa->rc_bdaddr); rfcomm_pi(sk)->channel = sa->rc_channel; d->sec_level = rfcomm_pi(sk)->sec_level; d->role_switch = rfcomm_pi(sk)->role_switch; /* Drop sock lock to avoid potential deadlock with the RFCOMM lock */ release_sock(sk); err = rfcomm_dlc_open(d, &rfcomm_pi(sk)->src, &sa->rc_bdaddr, sa->rc_channel); lock_sock(sk); if (!err && !sock_flag(sk, SOCK_ZAPPED)) err = bt_sock_wait_state(sk, BT_CONNECTED, sock_sndtimeo(sk, flags & O_NONBLOCK)); done: release_sock(sk); sock_put(sk); return err; } static int rfcomm_sock_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; int err = 0; BT_DBG("sk %p backlog %d", sk, backlog); lock_sock(sk); if (sk->sk_state != BT_BOUND) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; goto done; } if (!rfcomm_pi(sk)->channel) { bdaddr_t *src = &rfcomm_pi(sk)->src; u8 channel; err = -EINVAL; write_lock(&rfcomm_sk_list.lock); for (channel = 1; channel < 31; channel++) if (!__rfcomm_get_listen_sock_by_addr(channel, src)) { rfcomm_pi(sk)->channel = channel; err = 0; break; } write_unlock(&rfcomm_sk_list.lock); if (err < 0) goto done; } sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; sk->sk_state = BT_LISTEN; done: release_sock(sk); return err; } static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, struct proto_accept_arg *arg) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct sock *sk = sock->sk, *nsk; long timeo; int err = 0; lock_sock_nested(sk, SINGLE_DEPTH_NESTING); if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; goto done; } timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK); BT_DBG("sk %p timeo %ld", sk, timeo); /* Wait for an incoming connection. (wake-one). 
*/ add_wait_queue_exclusive(sk_sleep(sk), &wait); while (1) { if (sk->sk_state != BT_LISTEN) { err = -EBADFD; break; } nsk = bt_accept_dequeue(sk, newsock); if (nsk) break; if (!timeo) { err = -EAGAIN; break; } if (signal_pending(current)) { err = sock_intr_errno(timeo); break; } release_sock(sk); timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); lock_sock_nested(sk, SINGLE_DEPTH_NESTING); } remove_wait_queue(sk_sleep(sk), &wait); if (err) goto done; newsock->state = SS_CONNECTED; BT_DBG("new socket %p", nsk); done: release_sock(sk); return err; } static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int peer) { struct sockaddr_rc *sa = (struct sockaddr_rc *) addr; struct sock *sk = sock->sk; BT_DBG("sock %p, sk %p", sock, sk); if (peer && sk->sk_state != BT_CONNECTED && sk->sk_state != BT_CONNECT && sk->sk_state != BT_CONNECT2) return -ENOTCONN; memset(sa, 0, sizeof(*sa)); sa->rc_family = AF_BLUETOOTH; sa->rc_channel = rfcomm_pi(sk)->channel; if (peer) bacpy(&sa->rc_bdaddr, &rfcomm_pi(sk)->dst); else bacpy(&sa->rc_bdaddr, &rfcomm_pi(sk)->src); return sizeof(struct sockaddr_rc); } static int rfcomm_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; struct sk_buff *skb; int sent; if (test_bit(RFCOMM_DEFER_SETUP, &d->flags)) return -ENOTCONN; if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; if (sk->sk_shutdown & SEND_SHUTDOWN) return -EPIPE; BT_DBG("sock %p, sk %p", sock, sk); lock_sock(sk); sent = bt_sock_wait_ready(sk, msg->msg_flags); release_sock(sk); if (sent) return sent; skb = bt_skb_sendmmsg(sk, msg, len, d->mtu, RFCOMM_SKB_HEAD_RESERVE, RFCOMM_SKB_TAIL_RESERVE); if (IS_ERR(skb)) return PTR_ERR(skb); sent = rfcomm_dlc_send(d, skb); if (sent < 0) kfree_skb(skb); return sent; } static int rfcomm_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; int len; if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { rfcomm_dlc_accept(d); return 0; } len = bt_sock_stream_recvmsg(sock, msg, size, flags); lock_sock(sk); if (!(flags & MSG_PEEK) && len > 0) atomic_sub(len, &sk->sk_rmem_alloc); if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2)) rfcomm_dlc_unthrottle(rfcomm_pi(sk)->dlc); release_sock(sk); return len; } static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; int err = 0; u32 opt; BT_DBG("sk %p", sk); lock_sock(sk); switch (optname) { case RFCOMM_LM: err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen); if (err) break; if (opt & RFCOMM_LM_FIPS) { err = -EINVAL; break; } if (opt & RFCOMM_LM_AUTH) rfcomm_pi(sk)->sec_level = BT_SECURITY_LOW; if (opt & RFCOMM_LM_ENCRYPT) rfcomm_pi(sk)->sec_level = BT_SECURITY_MEDIUM; if (opt & RFCOMM_LM_SECURE) rfcomm_pi(sk)->sec_level = BT_SECURITY_HIGH; rfcomm_pi(sk)->role_switch = (opt & RFCOMM_LM_MASTER); break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct bt_security sec; int err = 0; u32 opt; BT_DBG("sk %p", sk); if (level == SOL_RFCOMM) return rfcomm_sock_setsockopt_old(sock, optname, optval, optlen); if (level != SOL_BLUETOOTH) return -ENOPROTOOPT; lock_sock(sk); switch (optname) { case BT_SECURITY: if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; break; } 
sec.level = BT_SECURITY_LOW; err = copy_safe_from_sockptr(&sec, sizeof(sec), optval, optlen); if (err) break; if (sec.level > BT_SECURITY_HIGH) { err = -EINVAL; break; } rfcomm_pi(sk)->sec_level = sec.level; break; case BT_DEFER_SETUP: if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { err = -EINVAL; break; } err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen); if (err) break; if (opt) set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); else clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct sock *l2cap_sk; struct l2cap_conn *conn; struct rfcomm_conninfo cinfo; int err = 0; size_t len; u32 opt; BT_DBG("sk %p", sk); if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); switch (optname) { case RFCOMM_LM: switch (rfcomm_pi(sk)->sec_level) { case BT_SECURITY_LOW: opt = RFCOMM_LM_AUTH; break; case BT_SECURITY_MEDIUM: opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT; break; case BT_SECURITY_HIGH: opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT | RFCOMM_LM_SECURE; break; case BT_SECURITY_FIPS: opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT | RFCOMM_LM_SECURE | RFCOMM_LM_FIPS; break; default: opt = 0; break; } if (rfcomm_pi(sk)->role_switch) opt |= RFCOMM_LM_MASTER; if (put_user(opt, (u32 __user *) optval)) err = -EFAULT; break; case RFCOMM_CONNINFO: if (sk->sk_state != BT_CONNECTED && !rfcomm_pi(sk)->dlc->defer_setup) { err = -ENOTCONN; break; } l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk; conn = l2cap_pi(l2cap_sk)->chan->conn; memset(&cinfo, 0, sizeof(cinfo)); cinfo.hci_handle = conn->hcon->handle; memcpy(cinfo.dev_class, conn->hcon->dev_class, 3); len = min(len, sizeof(cinfo)); if (copy_to_user(optval, (char *) &cinfo, len)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct bt_security sec; int err = 0; size_t len; BT_DBG("sk %p", sk); if (level == SOL_RFCOMM) return rfcomm_sock_getsockopt_old(sock, optname, optval, optlen); if (level != SOL_BLUETOOTH) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); switch (optname) { case BT_SECURITY: if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; break; } sec.level = rfcomm_pi(sk)->sec_level; sec.key_size = 0; len = min(len, sizeof(sec)); if (copy_to_user(optval, (char *) &sec, len)) err = -EFAULT; break; case BT_DEFER_SETUP: if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { err = -EINVAL; break; } if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags), (u32 __user *) optval)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int rfcomm_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk __maybe_unused = sock->sk; int err; BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg); err = bt_sock_ioctl(sock, cmd, arg); if (err == -ENOIOCTLCMD) { #ifdef CONFIG_BT_RFCOMM_TTY err = rfcomm_dev_ioctl(sk, cmd, (void __user *) arg); #else err = -EOPNOTSUPP; #endif } return err; } #ifdef CONFIG_COMPAT static int rfcomm_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { return rfcomm_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg)); } #endif static int rfcomm_sock_shutdown(struct socket *sock, int 
how) { struct sock *sk = sock->sk; int err = 0; BT_DBG("sock %p, sk %p", sock, sk); if (!sk) return 0; lock_sock(sk); if (!sk->sk_shutdown) { sk->sk_shutdown = SHUTDOWN_MASK; release_sock(sk); __rfcomm_sock_close(sk); lock_sock(sk); if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && !(current->flags & PF_EXITING)) err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); } release_sock(sk); return err; } static int rfcomm_sock_release(struct socket *sock) { struct sock *sk = sock->sk; int err; BT_DBG("sock %p, sk %p", sock, sk); if (!sk) return 0; err = rfcomm_sock_shutdown(sock, 2); sock_orphan(sk); rfcomm_sock_kill(sk); return err; } /* ---- RFCOMM core layer callbacks ---- * * called under rfcomm_lock() */ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc **d) { struct sock *sk, *parent; bdaddr_t src, dst; int result = 0; BT_DBG("session %p channel %d", s, channel); rfcomm_session_getaddr(s, &src, &dst); /* Check if we have socket listening on channel */ parent = rfcomm_get_sock_by_channel(BT_LISTEN, channel, &src); if (!parent) return 0; lock_sock(parent); /* Check for backlog size */ if (sk_acceptq_is_full(parent)) { BT_DBG("backlog full %d", parent->sk_ack_backlog); goto done; } sk = rfcomm_sock_alloc(sock_net(parent), NULL, BTPROTO_RFCOMM, GFP_ATOMIC, 0); if (!sk) goto done; bt_sock_reclassify_lock(sk, BTPROTO_RFCOMM); rfcomm_sock_init(sk, parent); bacpy(&rfcomm_pi(sk)->src, &src); bacpy(&rfcomm_pi(sk)->dst, &dst); rfcomm_pi(sk)->channel = channel; sk->sk_state = BT_CONFIG; bt_accept_enqueue(parent, sk, true); /* Accept connection and return socket DLC */ *d = rfcomm_pi(sk)->dlc; result = 1; done: release_sock(parent); if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) parent->sk_state_change(parent); return result; } static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p) { struct sock *sk; read_lock(&rfcomm_sk_list.lock); sk_for_each(sk, &rfcomm_sk_list.head) { seq_printf(f, "%pMR %pMR %d %d\n", &rfcomm_pi(sk)->src, &rfcomm_pi(sk)->dst, sk->sk_state, rfcomm_pi(sk)->channel); } read_unlock(&rfcomm_sk_list.lock); return 0; } DEFINE_SHOW_ATTRIBUTE(rfcomm_sock_debugfs); static struct dentry *rfcomm_sock_debugfs; static const struct proto_ops rfcomm_sock_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .release = rfcomm_sock_release, .bind = rfcomm_sock_bind, .connect = rfcomm_sock_connect, .listen = rfcomm_sock_listen, .accept = rfcomm_sock_accept, .getname = rfcomm_sock_getname, .sendmsg = rfcomm_sock_sendmsg, .recvmsg = rfcomm_sock_recvmsg, .shutdown = rfcomm_sock_shutdown, .setsockopt = rfcomm_sock_setsockopt, .getsockopt = rfcomm_sock_getsockopt, .ioctl = rfcomm_sock_ioctl, .gettstamp = sock_gettstamp, .poll = bt_sock_poll, .socketpair = sock_no_socketpair, .mmap = sock_no_mmap, #ifdef CONFIG_COMPAT .compat_ioctl = rfcomm_sock_compat_ioctl, #endif }; static const struct net_proto_family rfcomm_sock_family_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .create = rfcomm_sock_create }; int __init rfcomm_init_sockets(void) { int err; BUILD_BUG_ON(sizeof(struct sockaddr_rc) > sizeof(struct sockaddr)); err = proto_register(&rfcomm_proto, 0); if (err < 0) return err; err = bt_sock_register(BTPROTO_RFCOMM, &rfcomm_sock_family_ops); if (err < 0) { BT_ERR("RFCOMM socket layer registration failed"); goto error; } err = bt_procfs_init(&init_net, "rfcomm", &rfcomm_sk_list, NULL); if (err < 0) { BT_ERR("Failed to create RFCOMM proc file"); bt_sock_unregister(BTPROTO_RFCOMM); goto error; } BT_INFO("RFCOMM socket layer initialized"); if 
(IS_ERR_OR_NULL(bt_debugfs)) return 0; rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444, bt_debugfs, NULL, &rfcomm_sock_debugfs_fops); return 0; error: proto_unregister(&rfcomm_proto); return err; } void __exit rfcomm_cleanup_sockets(void) { bt_procfs_cleanup(&init_net, "rfcomm"); debugfs_remove(rfcomm_sock_debugfs); bt_sock_unregister(BTPROTO_RFCOMM); proto_unregister(&rfcomm_proto); }
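For reference, the bind/connect/sendmsg handlers above are what an ordinary user-space AF_BLUETOOTH stream socket ends up calling. A minimal client sketch follows; it assumes BlueZ's libbluetooth headers and its str2ba() helper, and the peer address and channel are placeholders, so treat it as an illustration rather than a complete program.

/* Minimal user-space RFCOMM client sketch; the peer address and channel are
 * placeholders and error handling is abbreviated. Assumes BlueZ's
 * libbluetooth headers and str2ba() helper.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/rfcomm.h>

int main(void)
{
        struct sockaddr_rc addr = { 0 };
        int s, err;

        s = socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM);
        if (s < 0) {
                perror("socket");
                return 1;
        }

        addr.rc_family = AF_BLUETOOTH;
        addr.rc_channel = 1;                            /* placeholder channel */
        str2ba("00:11:22:33:44:55", &addr.rc_bdaddr);   /* placeholder peer */

        err = connect(s, (struct sockaddr *)&addr, sizeof(addr));
        if (!err)
                err = write(s, "hello\n", 6) < 0 ? -1 : 0;

        close(s);
        return err ? 1 : 0;
}

Built with something like cc rfcomm_client.c -lbluetooth, the connect() call here roughly drives rfcomm_sock_connect(), and the write() reaches rfcomm_sock_sendmsg() through the stream socket layer.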
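/*
 * Editor's illustrative sketch (not part of the original sources) of the
 * special MAC layouts used by the bridge loop avoidance code that follows:
 * the 6-byte claim destination (carried in the ARP target hardware address)
 * holds the magic ff:43:05, the claim type and the 16-bit group id, while an
 * ANNOUNCE frame's ARP sender hardware address holds 43:05:43:05 plus the
 * CRC16 over the backbone's claims. Userspace-style C, purely for
 * illustration; the "example_" names and the literal values in main() are
 * hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* build ff:43:05:<type>:<group_hi>:<group_lo>, cf. batadv_bla_send_claim() */
static void example_build_claim_dst(uint8_t dst[6], uint8_t claim_type,
                                    uint16_t group)
{
        const uint8_t magic[3] = { 0xff, 0x43, 0x05 };

        memcpy(dst, magic, 3);
        dst[3] = claim_type;
        dst[4] = group >> 8;    /* group id, transmitted high byte first */
        dst[5] = group & 0xff;
}

/* build 43:05:43:05:<crc_hi>:<crc_lo>, cf. batadv_bla_send_announce() */
static void example_build_announce_src(uint8_t src[6], uint16_t crc)
{
        const uint8_t announce[4] = { 0x43, 0x05, 0x43, 0x05 };

        memcpy(src, announce, 4);
        src[4] = crc >> 8;      /* CRC16 of the claims, high byte first */
        src[5] = crc & 0xff;
}

int main(void)
{
        uint8_t dst[6], src[6];

        /* hypothetical values: claim type 0x00, group id 0x1a2b, crc 0x3c4d */
        example_build_claim_dst(dst, 0x00, 0x1a2b);
        example_build_announce_src(src, 0x3c4d);

        printf("claim dst:    %02x:%02x:%02x:%02x:%02x:%02x\n",
               dst[0], dst[1], dst[2], dst[3], dst[4], dst[5]);
        printf("announce src: %02x:%02x:%02x:%02x:%02x:%02x\n",
               src[0], src[1], src[2], src[3], src[4], src[5]);
        return 0;
}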
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
 *
 * Simon Wunderlich
 */

#include "bridge_loop_avoidance.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/compiler.h>
#include <linux/container_of.h>
#include <linux/crc16.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sprintf.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/string_choices.h>
#include <linux/workqueue.h>
#include <net/arp.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>

#include "hard-interface.h"
#include "hash.h"
#include "log.h"
#include "netlink.h"
#include "originator.h"
#include "translation-table.h"

static const u8 batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};

static void batadv_bla_periodic_work(struct work_struct *work);
static void
batadv_bla_send_announce(struct batadv_priv *bat_priv,
                         struct batadv_bla_backbone_gw *backbone_gw);

/**
 * batadv_choose_claim() - choose the right bucket for a claim.
 * @data: data to hash
 * @size: size of the hash table
 *
 * Return: the hash index of the claim
 */
static inline u32 batadv_choose_claim(const void *data, u32 size)
{
        const struct batadv_bla_claim *claim = data;
        u32 hash = 0;

        hash = jhash(&claim->addr, sizeof(claim->addr), hash);
        hash = jhash(&claim->vid, sizeof(claim->vid), hash);

        return hash % size;
}

/**
 * batadv_choose_backbone_gw() - choose the right bucket for a backbone gateway.
* @data: data to hash * @size: size of the hash table * * Return: the hash index of the backbone gateway */ static inline u32 batadv_choose_backbone_gw(const void *data, u32 size) { const struct batadv_bla_backbone_gw *gw; u32 hash = 0; gw = data; hash = jhash(&gw->orig, sizeof(gw->orig), hash); hash = jhash(&gw->vid, sizeof(gw->vid), hash); return hash % size; } /** * batadv_compare_backbone_gw() - compare address and vid of two backbone gws * @node: list node of the first entry to compare * @data2: pointer to the second backbone gateway * * Return: true if the backbones have the same data, false otherwise */ static bool batadv_compare_backbone_gw(const struct hlist_node *node, const void *data2) { const void *data1 = container_of(node, struct batadv_bla_backbone_gw, hash_entry); const struct batadv_bla_backbone_gw *gw1 = data1; const struct batadv_bla_backbone_gw *gw2 = data2; if (!batadv_compare_eth(gw1->orig, gw2->orig)) return false; if (gw1->vid != gw2->vid) return false; return true; } /** * batadv_compare_claim() - compare address and vid of two claims * @node: list node of the first entry to compare * @data2: pointer to the second claims * * Return: true if the claim have the same data, 0 otherwise */ static bool batadv_compare_claim(const struct hlist_node *node, const void *data2) { const void *data1 = container_of(node, struct batadv_bla_claim, hash_entry); const struct batadv_bla_claim *cl1 = data1; const struct batadv_bla_claim *cl2 = data2; if (!batadv_compare_eth(cl1->addr, cl2->addr)) return false; if (cl1->vid != cl2->vid) return false; return true; } /** * batadv_backbone_gw_release() - release backbone gw from lists and queue for * free after rcu grace period * @ref: kref pointer of the backbone gw */ static void batadv_backbone_gw_release(struct kref *ref) { struct batadv_bla_backbone_gw *backbone_gw; backbone_gw = container_of(ref, struct batadv_bla_backbone_gw, refcount); kfree_rcu(backbone_gw, rcu); } /** * batadv_backbone_gw_put() - decrement the backbone gw refcounter and possibly * release it * @backbone_gw: backbone gateway to be free'd */ static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw) { if (!backbone_gw) return; kref_put(&backbone_gw->refcount, batadv_backbone_gw_release); } /** * batadv_claim_release() - release claim from lists and queue for free after * rcu grace period * @ref: kref pointer of the claim */ static void batadv_claim_release(struct kref *ref) { struct batadv_bla_claim *claim; struct batadv_bla_backbone_gw *old_backbone_gw; claim = container_of(ref, struct batadv_bla_claim, refcount); spin_lock_bh(&claim->backbone_lock); old_backbone_gw = claim->backbone_gw; claim->backbone_gw = NULL; spin_unlock_bh(&claim->backbone_lock); spin_lock_bh(&old_backbone_gw->crc_lock); old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); spin_unlock_bh(&old_backbone_gw->crc_lock); batadv_backbone_gw_put(old_backbone_gw); kfree_rcu(claim, rcu); } /** * batadv_claim_put() - decrement the claim refcounter and possibly release it * @claim: claim to be free'd */ static void batadv_claim_put(struct batadv_bla_claim *claim) { if (!claim) return; kref_put(&claim->refcount, batadv_claim_release); } /** * batadv_claim_hash_find() - looks for a claim in the claim hash * @bat_priv: the bat priv with all the mesh interface information * @data: search data (may be local/static data) * * Return: claim if found or NULL otherwise. 
*/ static struct batadv_bla_claim * batadv_claim_hash_find(struct batadv_priv *bat_priv, struct batadv_bla_claim *data) { struct batadv_hashtable *hash = bat_priv->bla.claim_hash; struct hlist_head *head; struct batadv_bla_claim *claim; struct batadv_bla_claim *claim_tmp = NULL; int index; if (!hash) return NULL; index = batadv_choose_claim(data, hash->size); head = &hash->table[index]; rcu_read_lock(); hlist_for_each_entry_rcu(claim, head, hash_entry) { if (!batadv_compare_claim(&claim->hash_entry, data)) continue; if (!kref_get_unless_zero(&claim->refcount)) continue; claim_tmp = claim; break; } rcu_read_unlock(); return claim_tmp; } /** * batadv_backbone_hash_find() - looks for a backbone gateway in the hash * @bat_priv: the bat priv with all the mesh interface information * @addr: the address of the originator * @vid: the VLAN ID * * Return: backbone gateway if found or NULL otherwise */ static struct batadv_bla_backbone_gw * batadv_backbone_hash_find(struct batadv_priv *bat_priv, const u8 *addr, unsigned short vid) { struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; struct hlist_head *head; struct batadv_bla_backbone_gw search_entry, *backbone_gw; struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL; int index; if (!hash) return NULL; ether_addr_copy(search_entry.orig, addr); search_entry.vid = vid; index = batadv_choose_backbone_gw(&search_entry, hash->size); head = &hash->table[index]; rcu_read_lock(); hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) { if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry, &search_entry)) continue; if (!kref_get_unless_zero(&backbone_gw->refcount)) continue; backbone_gw_tmp = backbone_gw; break; } rcu_read_unlock(); return backbone_gw_tmp; } /** * batadv_bla_del_backbone_claims() - delete all claims for a backbone * @backbone_gw: backbone gateway where the claims should be removed */ static void batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw) { struct batadv_hashtable *hash; struct hlist_node *node_tmp; struct hlist_head *head; struct batadv_bla_claim *claim; int i; spinlock_t *list_lock; /* protects write access to the hash lists */ hash = backbone_gw->bat_priv->bla.claim_hash; if (!hash) return; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; list_lock = &hash->list_locks[i]; spin_lock_bh(list_lock); hlist_for_each_entry_safe(claim, node_tmp, head, hash_entry) { if (claim->backbone_gw != backbone_gw) continue; batadv_claim_put(claim); hlist_del_rcu(&claim->hash_entry); } spin_unlock_bh(list_lock); } /* all claims gone, initialize CRC */ spin_lock_bh(&backbone_gw->crc_lock); backbone_gw->crc = BATADV_BLA_CRC_INIT; spin_unlock_bh(&backbone_gw->crc_lock); } /** * batadv_bla_send_claim() - sends a claim frame according to the provided info * @bat_priv: the bat priv with all the mesh interface information * @mac: the mac address to be announced within the claim * @vid: the VLAN ID * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...) 
*/ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, const u8 *mac, unsigned short vid, int claimtype) { struct sk_buff *skb; struct ethhdr *ethhdr; struct batadv_hard_iface *primary_if; struct net_device *mesh_iface; u8 *hw_src; struct batadv_bla_claim_dst local_claim_dest; __be32 zeroip = 0; primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if) return; memcpy(&local_claim_dest, &bat_priv->bla.claim_dest, sizeof(local_claim_dest)); local_claim_dest.type = claimtype; mesh_iface = primary_if->mesh_iface; skb = arp_create(ARPOP_REPLY, ETH_P_ARP, /* IP DST: 0.0.0.0 */ zeroip, primary_if->mesh_iface, /* IP SRC: 0.0.0.0 */ zeroip, /* Ethernet DST: Broadcast */ NULL, /* Ethernet SRC/HW SRC: originator mac */ primary_if->net_dev->dev_addr, /* HW DST: FF:43:05:XX:YY:YY * with XX = claim type * and YY:YY = group id */ (u8 *)&local_claim_dest); if (!skb) goto out; ethhdr = (struct ethhdr *)skb->data; hw_src = (u8 *)ethhdr + ETH_HLEN + sizeof(struct arphdr); /* now we pretend that the client would have sent this ... */ switch (claimtype) { case BATADV_CLAIM_TYPE_CLAIM: /* normal claim frame * set Ethernet SRC to the clients mac */ ether_addr_copy(ethhdr->h_source, mac); batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): CLAIM %pM on vid %d\n", __func__, mac, batadv_print_vid(vid)); break; case BATADV_CLAIM_TYPE_UNCLAIM: /* unclaim frame * set HW SRC to the clients mac */ ether_addr_copy(hw_src, mac); batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): UNCLAIM %pM on vid %d\n", __func__, mac, batadv_print_vid(vid)); break; case BATADV_CLAIM_TYPE_ANNOUNCE: /* announcement frame * set HW SRC to the special mac containing the crc */ ether_addr_copy(hw_src, mac); batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): ANNOUNCE of %pM on vid %d\n", __func__, ethhdr->h_source, batadv_print_vid(vid)); break; case BATADV_CLAIM_TYPE_REQUEST: /* request frame * set HW SRC and header destination to the receiving backbone * gws mac */ ether_addr_copy(hw_src, mac); ether_addr_copy(ethhdr->h_dest, mac); batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): REQUEST of %pM to %pM on vid %d\n", __func__, ethhdr->h_source, ethhdr->h_dest, batadv_print_vid(vid)); break; case BATADV_CLAIM_TYPE_LOOPDETECT: ether_addr_copy(ethhdr->h_source, mac); batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): LOOPDETECT of %pM to %pM on vid %d\n", __func__, ethhdr->h_source, ethhdr->h_dest, batadv_print_vid(vid)); break; } if (vid & BATADV_VLAN_HAS_TAG) { skb = vlan_insert_tag(skb, htons(ETH_P_8021Q), vid & VLAN_VID_MASK); if (!skb) goto out; } skb_reset_mac_header(skb); skb->protocol = eth_type_trans(skb, mesh_iface); batadv_inc_counter(bat_priv, BATADV_CNT_RX); batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, skb->len + ETH_HLEN); netif_rx(skb); out: batadv_hardif_put(primary_if); } /** * batadv_bla_loopdetect_report() - worker for reporting the loop * @work: work queue item * * Throws an uevent, as the loopdetect check function can't do that itself * since the kernel may sleep while throwing uevents. 
*/ static void batadv_bla_loopdetect_report(struct work_struct *work) { struct batadv_bla_backbone_gw *backbone_gw; struct batadv_priv *bat_priv; char vid_str[6] = { '\0' }; backbone_gw = container_of(work, struct batadv_bla_backbone_gw, report_work); bat_priv = backbone_gw->bat_priv; batadv_info(bat_priv->mesh_iface, "Possible loop on VLAN %d detected which can't be handled by BLA - please check your network setup!\n", batadv_print_vid(backbone_gw->vid)); snprintf(vid_str, sizeof(vid_str), "%d", batadv_print_vid(backbone_gw->vid)); vid_str[sizeof(vid_str) - 1] = 0; batadv_throw_uevent(bat_priv, BATADV_UEV_BLA, BATADV_UEV_LOOPDETECT, vid_str); batadv_backbone_gw_put(backbone_gw); } /** * batadv_bla_get_backbone_gw() - finds or creates a backbone gateway * @bat_priv: the bat priv with all the mesh interface information * @orig: the mac address of the originator * @vid: the VLAN ID * @own_backbone: set if the requested backbone is local * * Return: the (possibly created) backbone gateway or NULL on error */ static struct batadv_bla_backbone_gw * batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, const u8 *orig, unsigned short vid, bool own_backbone) { struct batadv_bla_backbone_gw *entry; struct batadv_orig_node *orig_node; int hash_added; entry = batadv_backbone_hash_find(bat_priv, orig, vid); if (entry) return entry; batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): not found (%pM, %d), creating new entry\n", __func__, orig, batadv_print_vid(vid)); entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) return NULL; entry->vid = vid; entry->lasttime = jiffies; entry->crc = BATADV_BLA_CRC_INIT; entry->bat_priv = bat_priv; spin_lock_init(&entry->crc_lock); atomic_set(&entry->request_sent, 0); atomic_set(&entry->wait_periods, 0); ether_addr_copy(entry->orig, orig); INIT_WORK(&entry->report_work, batadv_bla_loopdetect_report); kref_init(&entry->refcount); kref_get(&entry->refcount); hash_added = batadv_hash_add(bat_priv->bla.backbone_hash, batadv_compare_backbone_gw, batadv_choose_backbone_gw, entry, &entry->hash_entry); if (unlikely(hash_added != 0)) { /* hash failed, free the structure */ kfree(entry); return NULL; } /* this is a gateway now, remove any TT entry on this VLAN */ orig_node = batadv_orig_hash_find(bat_priv, orig); if (orig_node) { batadv_tt_global_del_orig(bat_priv, orig_node, vid, "became a backbone gateway"); batadv_orig_node_put(orig_node); } if (own_backbone) { batadv_bla_send_announce(bat_priv, entry); /* this will be decreased in the worker thread */ atomic_inc(&entry->request_sent); atomic_set(&entry->wait_periods, BATADV_BLA_WAIT_PERIODS); atomic_inc(&bat_priv->bla.num_requests); } return entry; } /** * batadv_bla_update_own_backbone_gw() - updates the own backbone gw for a VLAN * @bat_priv: the bat priv with all the mesh interface information * @primary_if: the selected primary interface * @vid: VLAN identifier * * update or add the own backbone gw to make sure we announce * where we receive other backbone gws */ static void batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, unsigned short vid) { struct batadv_bla_backbone_gw *backbone_gw; backbone_gw = batadv_bla_get_backbone_gw(bat_priv, primary_if->net_dev->dev_addr, vid, true); if (unlikely(!backbone_gw)) return; backbone_gw->lasttime = jiffies; batadv_backbone_gw_put(backbone_gw); } /** * batadv_bla_answer_request() - answer a bla request by sending own claims * @bat_priv: the bat priv with all the mesh interface information * @primary_if: interface where the 
request came on * @vid: the vid where the request came on * * Repeat all of our own claims, and finally send an ANNOUNCE frame * to allow the requester another check if the CRC is correct now. */ static void batadv_bla_answer_request(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, unsigned short vid) { struct hlist_head *head; struct batadv_hashtable *hash; struct batadv_bla_claim *claim; struct batadv_bla_backbone_gw *backbone_gw; int i; batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): received a claim request, send all of our own claims again\n", __func__); backbone_gw = batadv_backbone_hash_find(bat_priv, primary_if->net_dev->dev_addr, vid); if (!backbone_gw) return; hash = bat_priv->bla.claim_hash; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); hlist_for_each_entry_rcu(claim, head, hash_entry) { /* only own claims are interesting */ if (claim->backbone_gw != backbone_gw) continue; batadv_bla_send_claim(bat_priv, claim->addr, claim->vid, BATADV_CLAIM_TYPE_CLAIM); } rcu_read_unlock(); } /* finally, send an announcement frame */ batadv_bla_send_announce(bat_priv, backbone_gw); batadv_backbone_gw_put(backbone_gw); } /** * batadv_bla_send_request() - send a request to repeat claims * @backbone_gw: the backbone gateway from whom we are out of sync * * When the crc is wrong, ask the backbone gateway for a full table update. * After the request, it will repeat all of his own claims and finally * send an announcement claim with which we can check again. */ static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw) { /* first, remove all old entries */ batadv_bla_del_backbone_claims(backbone_gw); batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv, "Sending REQUEST to %pM\n", backbone_gw->orig); /* send request */ batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig, backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST); /* no local broadcasts should be sent or received, for now. */ if (!atomic_read(&backbone_gw->request_sent)) { atomic_inc(&backbone_gw->bat_priv->bla.num_requests); atomic_set(&backbone_gw->request_sent, 1); } } /** * batadv_bla_send_announce() - Send an announcement frame * @bat_priv: the bat priv with all the mesh interface information * @backbone_gw: our backbone gateway which should be announced */ static void batadv_bla_send_announce(struct batadv_priv *bat_priv, struct batadv_bla_backbone_gw *backbone_gw) { u8 mac[ETH_ALEN]; __be16 crc; memcpy(mac, batadv_announce_mac, 4); spin_lock_bh(&backbone_gw->crc_lock); crc = htons(backbone_gw->crc); spin_unlock_bh(&backbone_gw->crc_lock); memcpy(&mac[4], &crc, 2); batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid, BATADV_CLAIM_TYPE_ANNOUNCE); } /** * batadv_bla_add_claim() - Adds a claim in the claim hash * @bat_priv: the bat priv with all the mesh interface information * @mac: the mac address of the claim * @vid: the VLAN ID of the frame * @backbone_gw: the backbone gateway which claims it */ static void batadv_bla_add_claim(struct batadv_priv *bat_priv, const u8 *mac, const unsigned short vid, struct batadv_bla_backbone_gw *backbone_gw) { struct batadv_bla_backbone_gw *old_backbone_gw; struct batadv_bla_claim *claim; struct batadv_bla_claim search_claim; bool remove_crc = false; int hash_added; ether_addr_copy(search_claim.addr, mac); search_claim.vid = vid; claim = batadv_claim_hash_find(bat_priv, &search_claim); /* create a new claim entry if it does not exist yet. 
*/ if (!claim) { claim = kzalloc(sizeof(*claim), GFP_ATOMIC); if (!claim) return; ether_addr_copy(claim->addr, mac); spin_lock_init(&claim->backbone_lock); claim->vid = vid; claim->lasttime = jiffies; kref_get(&backbone_gw->refcount); claim->backbone_gw = backbone_gw; kref_init(&claim->refcount); batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): adding new entry %pM, vid %d to hash ...\n", __func__, mac, batadv_print_vid(vid)); kref_get(&claim->refcount); hash_added = batadv_hash_add(bat_priv->bla.claim_hash, batadv_compare_claim, batadv_choose_claim, claim, &claim->hash_entry); if (unlikely(hash_added != 0)) { /* only local changes happened. */ kfree(claim); return; } } else { claim->lasttime = jiffies; if (claim->backbone_gw == backbone_gw) /* no need to register a new backbone */ goto claim_free_ref; batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): changing ownership for %pM, vid %d to gw %pM\n", __func__, mac, batadv_print_vid(vid), backbone_gw->orig); remove_crc = true; } /* replace backbone_gw atomically and adjust reference counters */ spin_lock_bh(&claim->backbone_lock); old_backbone_gw = claim->backbone_gw; kref_get(&backbone_gw->refcount); claim->backbone_gw = backbone_gw; spin_unlock_bh(&claim->backbone_lock); if (remove_crc) { /* remove claim address from old backbone_gw */ spin_lock_bh(&old_backbone_gw->crc_lock); old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); spin_unlock_bh(&old_backbone_gw->crc_lock); } batadv_backbone_gw_put(old_backbone_gw); /* add claim address to new backbone_gw */ spin_lock_bh(&backbone_gw->crc_lock); backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN); spin_unlock_bh(&backbone_gw->crc_lock); backbone_gw->lasttime = jiffies; claim_free_ref: batadv_claim_put(claim); } /** * batadv_bla_claim_get_backbone_gw() - Get valid reference for backbone_gw of * claim * @claim: claim whose backbone_gw should be returned * * Return: valid reference to claim::backbone_gw */ static struct batadv_bla_backbone_gw * batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim) { struct batadv_bla_backbone_gw *backbone_gw; spin_lock_bh(&claim->backbone_lock); backbone_gw = claim->backbone_gw; kref_get(&backbone_gw->refcount); spin_unlock_bh(&claim->backbone_lock); return backbone_gw; } /** * batadv_bla_del_claim() - delete a claim from the claim hash * @bat_priv: the bat priv with all the mesh interface information * @mac: mac address of the claim to be removed * @vid: VLAN id for the claim to be removed */ static void batadv_bla_del_claim(struct batadv_priv *bat_priv, const u8 *mac, const unsigned short vid) { struct batadv_bla_claim search_claim, *claim; struct batadv_bla_claim *claim_removed_entry; struct hlist_node *claim_removed_node; ether_addr_copy(search_claim.addr, mac); search_claim.vid = vid; claim = batadv_claim_hash_find(bat_priv, &search_claim); if (!claim) return; batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__, mac, batadv_print_vid(vid)); claim_removed_node = batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim, batadv_choose_claim, claim); if (!claim_removed_node) goto free_claim; /* reference from the hash is gone */ claim_removed_entry = hlist_entry(claim_removed_node, struct batadv_bla_claim, hash_entry); batadv_claim_put(claim_removed_entry); free_claim: /* don't need the reference from hash_find() anymore */ batadv_claim_put(claim); } /** * batadv_handle_announce() - check for ANNOUNCE frame * @bat_priv: the bat priv with all the mesh interface information * @an_addr: announcement mac address (ARP Sender HW 
address) * @backbone_addr: originator address of the sender (Ethernet source MAC) * @vid: the VLAN ID of the frame * * Return: true if handled */ static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr, u8 *backbone_addr, unsigned short vid) { struct batadv_bla_backbone_gw *backbone_gw; u16 backbone_crc, crc; if (memcmp(an_addr, batadv_announce_mac, 4) != 0) return false; backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid, false); if (unlikely(!backbone_gw)) return true; /* handle as ANNOUNCE frame */ backbone_gw->lasttime = jiffies; crc = ntohs(*((__force __be16 *)(&an_addr[4]))); batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n", __func__, batadv_print_vid(vid), backbone_gw->orig, crc); spin_lock_bh(&backbone_gw->crc_lock); backbone_crc = backbone_gw->crc; spin_unlock_bh(&backbone_gw->crc_lock); if (backbone_crc != crc) { batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv, "%s(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n", __func__, backbone_gw->orig, batadv_print_vid(backbone_gw->vid), backbone_crc, crc); batadv_bla_send_request(backbone_gw); } else { /* if we have sent a request and the crc was OK, * we can allow traffic again. */ if (atomic_read(&backbone_gw->request_sent)) { atomic_dec(&backbone_gw->bat_priv->bla.num_requests); atomic_set(&backbone_gw->request_sent, 0); } } batadv_backbone_gw_put(backbone_gw); return true; } /** * batadv_handle_request() - check for REQUEST frame * @bat_priv: the bat priv with all the mesh interface information * @primary_if: the primary hard interface of this batman mesh interface * @backbone_addr: backbone address to be requested (ARP sender HW MAC) * @ethhdr: ethernet header of a packet * @vid: the VLAN ID of the frame * * Return: true if handled */ static bool batadv_handle_request(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, u8 *backbone_addr, struct ethhdr *ethhdr, unsigned short vid) { /* check for REQUEST frame */ if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest)) return false; /* sanity check, this should not happen on a normal switch, * we ignore it in this case. 
*/ if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr)) return true; batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): REQUEST vid %d (sent by %pM)...\n", __func__, batadv_print_vid(vid), ethhdr->h_source); batadv_bla_answer_request(bat_priv, primary_if, vid); return true; } /** * batadv_handle_unclaim() - check for UNCLAIM frame * @bat_priv: the bat priv with all the mesh interface information * @primary_if: the primary hard interface of this batman mesh interface * @backbone_addr: originator address of the backbone (Ethernet source) * @claim_addr: Client to be unclaimed (ARP sender HW MAC) * @vid: the VLAN ID of the frame * * Return: true if handled */ static bool batadv_handle_unclaim(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, const u8 *backbone_addr, const u8 *claim_addr, unsigned short vid) { struct batadv_bla_backbone_gw *backbone_gw; /* unclaim in any case if it is our own */ if (primary_if && batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr)) batadv_bla_send_claim(bat_priv, claim_addr, vid, BATADV_CLAIM_TYPE_UNCLAIM); backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid); if (!backbone_gw) return true; /* this must be an UNCLAIM frame */ batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): UNCLAIM %pM on vid %d (sent by %pM)...\n", __func__, claim_addr, batadv_print_vid(vid), backbone_gw->orig); batadv_bla_del_claim(bat_priv, claim_addr, vid); batadv_backbone_gw_put(backbone_gw); return true; } /** * batadv_handle_claim() - check for CLAIM frame * @bat_priv: the bat priv with all the mesh interface information * @primary_if: the primary hard interface of this batman mesh interface * @backbone_addr: originator address of the backbone (Ethernet Source) * @claim_addr: client mac address to be claimed (ARP sender HW MAC) * @vid: the VLAN ID of the frame * * Return: true if handled */ static bool batadv_handle_claim(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, const u8 *backbone_addr, const u8 *claim_addr, unsigned short vid) { struct batadv_bla_backbone_gw *backbone_gw; /* register the gateway if not yet available, and add the claim. */ backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid, false); if (unlikely(!backbone_gw)) return true; /* this must be a CLAIM frame */ batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw); if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr)) batadv_bla_send_claim(bat_priv, claim_addr, vid, BATADV_CLAIM_TYPE_CLAIM); /* TODO: we could call something like tt_local_del() here. */ batadv_backbone_gw_put(backbone_gw); return true; } /** * batadv_check_claim_group() - check for claim group membership * @bat_priv: the bat priv with all the mesh interface information * @primary_if: the primary interface of this batman interface * @hw_src: the Hardware source in the ARP Header * @hw_dst: the Hardware destination in the ARP Header * @ethhdr: pointer to the Ethernet header of the claim frame * * checks if it is a claim packet and if it's on the same group. * This function also applies the group ID of the sender * if it is in the same mesh. 
* * Return: * 2 - if it is a claim packet and on the same group * 1 - if is a claim packet from another group * 0 - if it is not a claim packet */ static int batadv_check_claim_group(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, u8 *hw_src, u8 *hw_dst, struct ethhdr *ethhdr) { u8 *backbone_addr; struct batadv_orig_node *orig_node; struct batadv_bla_claim_dst *bla_dst, *bla_dst_own; bla_dst = (struct batadv_bla_claim_dst *)hw_dst; bla_dst_own = &bat_priv->bla.claim_dest; /* if announcement packet, use the source, * otherwise assume it is in the hw_src */ switch (bla_dst->type) { case BATADV_CLAIM_TYPE_CLAIM: backbone_addr = hw_src; break; case BATADV_CLAIM_TYPE_REQUEST: case BATADV_CLAIM_TYPE_ANNOUNCE: case BATADV_CLAIM_TYPE_UNCLAIM: backbone_addr = ethhdr->h_source; break; default: return 0; } /* don't accept claim frames from ourselves */ if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr)) return 0; /* if its already the same group, it is fine. */ if (bla_dst->group == bla_dst_own->group) return 2; /* lets see if this originator is in our mesh */ orig_node = batadv_orig_hash_find(bat_priv, backbone_addr); /* don't accept claims from gateways which are not in * the same mesh or group. */ if (!orig_node) return 1; /* if our mesh friends mac is bigger, use it for ourselves. */ if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) { batadv_dbg(BATADV_DBG_BLA, bat_priv, "taking other backbones claim group: %#.4x\n", ntohs(bla_dst->group)); bla_dst_own->group = bla_dst->group; } batadv_orig_node_put(orig_node); return 2; } /** * batadv_bla_process_claim() - Check if this is a claim frame, and process it * @bat_priv: the bat priv with all the mesh interface information * @primary_if: the primary hard interface of this batman mesh interface * @skb: the frame to be checked * * Return: true if it was a claim frame, otherwise return false to * tell the callee that it can use the frame on its own. */ static bool batadv_bla_process_claim(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, struct sk_buff *skb) { struct batadv_bla_claim_dst *bla_dst, *bla_dst_own; u8 *hw_src, *hw_dst; struct vlan_hdr *vhdr, vhdr_buf; struct ethhdr *ethhdr; struct arphdr *arphdr; unsigned short vid; int vlan_depth = 0; __be16 proto; int headlen; int ret; vid = batadv_get_vid(skb, 0); ethhdr = eth_hdr(skb); proto = ethhdr->h_proto; headlen = ETH_HLEN; if (vid & BATADV_VLAN_HAS_TAG) { /* Traverse the VLAN/Ethertypes. * * At this point it is known that the first protocol is a VLAN * header, so start checking at the encapsulated protocol. * * The depth of the VLAN headers is recorded to drop BLA claim * frames encapsulated into multiple VLAN headers (QinQ). */ do { vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN, &vhdr_buf); if (!vhdr) return false; proto = vhdr->h_vlan_encapsulated_proto; headlen += VLAN_HLEN; vlan_depth++; } while (proto == htons(ETH_P_8021Q)); } if (proto != htons(ETH_P_ARP)) return false; /* not a claim frame */ /* this must be a ARP frame. check if it is a claim. 
*/ if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev)))) return false; /* pskb_may_pull() may have modified the pointers, get ethhdr again */ ethhdr = eth_hdr(skb); arphdr = (struct arphdr *)((u8 *)ethhdr + headlen); /* Check whether the ARP frame carries a valid * IP information */ if (arphdr->ar_hrd != htons(ARPHRD_ETHER)) return false; if (arphdr->ar_pro != htons(ETH_P_IP)) return false; if (arphdr->ar_hln != ETH_ALEN) return false; if (arphdr->ar_pln != 4) return false; hw_src = (u8 *)arphdr + sizeof(struct arphdr); hw_dst = hw_src + ETH_ALEN + 4; bla_dst = (struct batadv_bla_claim_dst *)hw_dst; bla_dst_own = &bat_priv->bla.claim_dest; /* check if it is a claim frame in general */ if (memcmp(bla_dst->magic, bla_dst_own->magic, sizeof(bla_dst->magic)) != 0) return false; /* check if there is a claim frame encapsulated deeper in (QinQ) and * drop that, as this is not supported by BLA but should also not be * sent via the mesh. */ if (vlan_depth > 1) return true; /* Let the loopdetect frames on the mesh in any case. */ if (bla_dst->type == BATADV_CLAIM_TYPE_LOOPDETECT) return false; /* check if it is a claim frame. */ ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst, ethhdr); if (ret == 1) batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n", __func__, ethhdr->h_source, batadv_print_vid(vid), hw_src, hw_dst); if (ret < 2) return !!ret; /* become a backbone gw ourselves on this vlan if not happened yet */ batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid); /* check for the different types of claim frames ... */ switch (bla_dst->type) { case BATADV_CLAIM_TYPE_CLAIM: if (batadv_handle_claim(bat_priv, primary_if, hw_src, ethhdr->h_source, vid)) return true; break; case BATADV_CLAIM_TYPE_UNCLAIM: if (batadv_handle_unclaim(bat_priv, primary_if, ethhdr->h_source, hw_src, vid)) return true; break; case BATADV_CLAIM_TYPE_ANNOUNCE: if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source, vid)) return true; break; case BATADV_CLAIM_TYPE_REQUEST: if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr, vid)) return true; break; } batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n", __func__, ethhdr->h_source, batadv_print_vid(vid), hw_src, hw_dst); return true; } /** * batadv_bla_purge_backbone_gw() - Remove backbone gateways after a timeout or * immediately * @bat_priv: the bat priv with all the mesh interface information * @now: whether the whole hash shall be wiped now * * Check when we last heard from other nodes, and remove them in case of * a time out, or clean all backbone gws if now is set. 
*/ static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now) { struct batadv_bla_backbone_gw *backbone_gw; struct hlist_node *node_tmp; struct hlist_head *head; struct batadv_hashtable *hash; spinlock_t *list_lock; /* protects write access to the hash lists */ int i; hash = bat_priv->bla.backbone_hash; if (!hash) return; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; list_lock = &hash->list_locks[i]; spin_lock_bh(list_lock); hlist_for_each_entry_safe(backbone_gw, node_tmp, head, hash_entry) { if (now) goto purge_now; if (!batadv_has_timed_out(backbone_gw->lasttime, BATADV_BLA_BACKBONE_TIMEOUT)) continue; batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv, "%s(): backbone gw %pM timed out\n", __func__, backbone_gw->orig); purge_now: /* don't wait for the pending request anymore */ if (atomic_read(&backbone_gw->request_sent)) atomic_dec(&bat_priv->bla.num_requests); batadv_bla_del_backbone_claims(backbone_gw); hlist_del_rcu(&backbone_gw->hash_entry); batadv_backbone_gw_put(backbone_gw); } spin_unlock_bh(list_lock); } } /** * batadv_bla_purge_claims() - Remove claims after a timeout or immediately * @bat_priv: the bat priv with all the mesh interface information * @primary_if: the selected primary interface, may be NULL if now is set * @now: whether the whole hash shall be wiped now * * Check when we heard last time from our own claims, and remove them in case of * a time out, or clean all claims if now is set */ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, int now) { struct batadv_bla_backbone_gw *backbone_gw; struct batadv_bla_claim *claim; struct hlist_head *head; struct batadv_hashtable *hash; int i; hash = bat_priv->bla.claim_hash; if (!hash) return; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); hlist_for_each_entry_rcu(claim, head, hash_entry) { backbone_gw = batadv_bla_claim_get_backbone_gw(claim); if (now) goto purge_now; if (!batadv_compare_eth(backbone_gw->orig, primary_if->net_dev->dev_addr)) goto skip; if (!batadv_has_timed_out(claim->lasttime, BATADV_BLA_CLAIM_TIMEOUT)) goto skip; batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): timed out.\n", __func__); purge_now: batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__, claim->addr, claim->vid); batadv_handle_unclaim(bat_priv, primary_if, backbone_gw->orig, claim->addr, claim->vid); skip: batadv_backbone_gw_put(backbone_gw); } rcu_read_unlock(); } } /** * batadv_bla_update_orig_address() - Update the backbone gateways when the own * originator address changes * @bat_priv: the bat priv with all the mesh interface information * @primary_if: the new selected primary_if * @oldif: the old primary interface, may be NULL */ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, struct batadv_hard_iface *oldif) { struct batadv_bla_backbone_gw *backbone_gw; struct hlist_head *head; struct batadv_hashtable *hash; __be16 group; int i; /* reset bridge loop avoidance group id */ group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN)); bat_priv->bla.claim_dest.group = group; /* purge everything when bridge loop avoidance is turned off */ if (!atomic_read(&bat_priv->bridge_loop_avoidance)) oldif = NULL; if (!oldif) { batadv_bla_purge_claims(bat_priv, NULL, 1); batadv_bla_purge_backbone_gw(bat_priv, 1); return; } hash = bat_priv->bla.backbone_hash; if (!hash) return; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); 
hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) { /* own orig still holds the old value. */ if (!batadv_compare_eth(backbone_gw->orig, oldif->net_dev->dev_addr)) continue; ether_addr_copy(backbone_gw->orig, primary_if->net_dev->dev_addr); /* send an announce frame so others will ask for our * claims and update their tables. */ batadv_bla_send_announce(bat_priv, backbone_gw); } rcu_read_unlock(); } } /** * batadv_bla_send_loopdetect() - send a loopdetect frame * @bat_priv: the bat priv with all the mesh interface information * @backbone_gw: the backbone gateway for which a loop should be detected * * To detect loops that the bridge loop avoidance can't handle, send a loop * detection packet on the backbone. Unlike other BLA frames, this frame will * be allowed on the mesh by other nodes. If it is received on the mesh, this * indicates that there is a loop. */ static void batadv_bla_send_loopdetect(struct batadv_priv *bat_priv, struct batadv_bla_backbone_gw *backbone_gw) { batadv_dbg(BATADV_DBG_BLA, bat_priv, "Send loopdetect frame for vid %d\n", backbone_gw->vid); batadv_bla_send_claim(bat_priv, bat_priv->bla.loopdetect_addr, backbone_gw->vid, BATADV_CLAIM_TYPE_LOOPDETECT); } /** * batadv_bla_status_update() - purge bla interfaces if necessary * @net_dev: the mesh interface net device */ void batadv_bla_status_update(struct net_device *net_dev) { struct batadv_priv *bat_priv = netdev_priv(net_dev); struct batadv_hard_iface *primary_if; primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if) return; /* this function already purges everything when bla is disabled, * so just call that one. */ batadv_bla_update_orig_address(bat_priv, primary_if, primary_if); batadv_hardif_put(primary_if); } /** * batadv_bla_periodic_work() - performs periodic bla work * @work: kernel work struct * * periodic work to do: * * purge structures when they are too old * * send announcements */ static void batadv_bla_periodic_work(struct work_struct *work) { struct delayed_work *delayed_work; struct batadv_priv *bat_priv; struct batadv_priv_bla *priv_bla; struct hlist_head *head; struct batadv_bla_backbone_gw *backbone_gw; struct batadv_hashtable *hash; struct batadv_hard_iface *primary_if; bool send_loopdetect = false; int i; delayed_work = to_delayed_work(work); priv_bla = container_of(delayed_work, struct batadv_priv_bla, work); bat_priv = container_of(priv_bla, struct batadv_priv, bla); primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if) goto out; batadv_bla_purge_claims(bat_priv, primary_if, 0); batadv_bla_purge_backbone_gw(bat_priv, 0); if (!atomic_read(&bat_priv->bridge_loop_avoidance)) goto out; if (atomic_dec_and_test(&bat_priv->bla.loopdetect_next)) { /* set a new random mac address for the next bridge loop * detection frames. Set the locally administered bit to avoid * collisions with users mac addresses. 
*/ eth_random_addr(bat_priv->bla.loopdetect_addr); bat_priv->bla.loopdetect_addr[0] = 0xba; bat_priv->bla.loopdetect_addr[1] = 0xbe; bat_priv->bla.loopdetect_lasttime = jiffies; atomic_set(&bat_priv->bla.loopdetect_next, BATADV_BLA_LOOPDETECT_PERIODS); /* mark for sending loop detect on all VLANs */ send_loopdetect = true; } hash = bat_priv->bla.backbone_hash; if (!hash) goto out; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) { if (!batadv_compare_eth(backbone_gw->orig, primary_if->net_dev->dev_addr)) continue; backbone_gw->lasttime = jiffies; batadv_bla_send_announce(bat_priv, backbone_gw); if (send_loopdetect) batadv_bla_send_loopdetect(bat_priv, backbone_gw); /* request_sent is only set after creation to avoid * problems when we are not yet known as backbone gw * in the backbone. * * We can reset this now after we waited some periods * to give bridge forward delays and bla group forming * some grace time. */ if (atomic_read(&backbone_gw->request_sent) == 0) continue; if (!atomic_dec_and_test(&backbone_gw->wait_periods)) continue; atomic_dec(&backbone_gw->bat_priv->bla.num_requests); atomic_set(&backbone_gw->request_sent, 0); } rcu_read_unlock(); } out: batadv_hardif_put(primary_if); queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work, msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH)); } /* The hash for claim and backbone hash receive the same key because they * are getting initialized by hash_new with the same key. Reinitializing * them with to different keys to allow nested locking without generating * lockdep warnings */ static struct lock_class_key batadv_claim_hash_lock_class_key; static struct lock_class_key batadv_backbone_hash_lock_class_key; /** * batadv_bla_init() - initialize all bla structures * @bat_priv: the bat priv with all the mesh interface information * * Return: 0 on success, < 0 on error. 
*/ int batadv_bla_init(struct batadv_priv *bat_priv) { int i; u8 claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00}; struct batadv_hard_iface *primary_if; u16 crc; unsigned long entrytime; spin_lock_init(&bat_priv->bla.bcast_duplist_lock); batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n"); /* setting claim destination address */ memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3); bat_priv->bla.claim_dest.type = 0; primary_if = batadv_primary_if_get_selected(bat_priv); if (primary_if) { crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN); bat_priv->bla.claim_dest.group = htons(crc); batadv_hardif_put(primary_if); } else { bat_priv->bla.claim_dest.group = 0; /* will be set later */ } /* initialize the duplicate list */ entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT); for (i = 0; i < BATADV_DUPLIST_SIZE; i++) bat_priv->bla.bcast_duplist[i].entrytime = entrytime; bat_priv->bla.bcast_duplist_curr = 0; atomic_set(&bat_priv->bla.loopdetect_next, BATADV_BLA_LOOPDETECT_PERIODS); if (bat_priv->bla.claim_hash) return 0; bat_priv->bla.claim_hash = batadv_hash_new(128); if (!bat_priv->bla.claim_hash) return -ENOMEM; bat_priv->bla.backbone_hash = batadv_hash_new(32); if (!bat_priv->bla.backbone_hash) { batadv_hash_destroy(bat_priv->bla.claim_hash); return -ENOMEM; } batadv_hash_set_lock_class(bat_priv->bla.claim_hash, &batadv_claim_hash_lock_class_key); batadv_hash_set_lock_class(bat_priv->bla.backbone_hash, &batadv_backbone_hash_lock_class_key); batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n"); INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work); queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work, msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH)); return 0; } /** * batadv_bla_check_duplist() - Check if a frame is in the broadcast dup. * @bat_priv: the bat priv with all the mesh interface information * @skb: contains the multicast packet to be checked * @payload_ptr: pointer to position inside the head buffer of the skb * marking the start of the data to be CRC'ed * @orig: originator mac address, NULL if unknown * * Check if it is on our broadcast list. Another gateway might have sent the * same packet because it is connected to the same backbone, so we have to * remove this duplicate. * * This is performed by checking the CRC, which will tell us * with a good chance that it is the same packet. If it is furthermore * sent by another host, drop it. We allow equal packets from * the same host however as this might be intended. * * Return: true if a packet is in the duplicate list, false otherwise. */ static bool batadv_bla_check_duplist(struct batadv_priv *bat_priv, struct sk_buff *skb, u8 *payload_ptr, const u8 *orig) { struct batadv_bcast_duplist_entry *entry; bool ret = false; int i, curr; __be32 crc; /* calculate the crc ... */ crc = batadv_skb_crc32(skb, payload_ptr); spin_lock_bh(&bat_priv->bla.bcast_duplist_lock); for (i = 0; i < BATADV_DUPLIST_SIZE; i++) { curr = (bat_priv->bla.bcast_duplist_curr + i); curr %= BATADV_DUPLIST_SIZE; entry = &bat_priv->bla.bcast_duplist[curr]; /* we can stop searching if the entry is too old ; * later entries will be even older */ if (batadv_has_timed_out(entry->entrytime, BATADV_DUPLIST_TIMEOUT)) break; if (entry->crc != crc) continue; /* are the originators both known and not anonymous? 
*/ if (orig && !is_zero_ether_addr(orig) && !is_zero_ether_addr(entry->orig)) { /* If known, check if the new frame came from * the same originator: * We are safe to take identical frames from the * same orig, if known, as multiplications in * the mesh are detected via the (orig, seqno) pair. * So we can be a bit more liberal here and allow * identical frames from the same orig which the source * host might have sent multiple times on purpose. */ if (batadv_compare_eth(entry->orig, orig)) continue; } /* this entry seems to match: same crc, not too old, * and from another gw. therefore return true to forbid it. */ ret = true; goto out; } /* not found, add a new entry (overwrite the oldest entry) * and allow it, its the first occurrence. */ curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1); curr %= BATADV_DUPLIST_SIZE; entry = &bat_priv->bla.bcast_duplist[curr]; entry->crc = crc; entry->entrytime = jiffies; /* known originator */ if (orig) ether_addr_copy(entry->orig, orig); /* anonymous originator */ else eth_zero_addr(entry->orig); bat_priv->bla.bcast_duplist_curr = curr; out: spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock); return ret; } /** * batadv_bla_check_ucast_duplist() - Check if a frame is in the broadcast dup. * @bat_priv: the bat priv with all the mesh interface information * @skb: contains the multicast packet to be checked, decapsulated from a * unicast_packet * * Check if it is on our broadcast list. Another gateway might have sent the * same packet because it is connected to the same backbone, so we have to * remove this duplicate. * * Return: true if a packet is in the duplicate list, false otherwise. */ static bool batadv_bla_check_ucast_duplist(struct batadv_priv *bat_priv, struct sk_buff *skb) { return batadv_bla_check_duplist(bat_priv, skb, (u8 *)skb->data, NULL); } /** * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup. * @bat_priv: the bat priv with all the mesh interface information * @skb: contains the bcast_packet to be checked * * Check if it is on our broadcast list. Another gateway might have sent the * same packet because it is connected to the same backbone, so we have to * remove this duplicate. * * Return: true if a packet is in the duplicate list, false otherwise. */ bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, struct sk_buff *skb) { struct batadv_bcast_packet *bcast_packet; u8 *payload_ptr; bcast_packet = (struct batadv_bcast_packet *)skb->data; payload_ptr = (u8 *)(bcast_packet + 1); return batadv_bla_check_duplist(bat_priv, skb, payload_ptr, bcast_packet->orig); } /** * batadv_bla_is_backbone_gw_orig() - Check if the originator is a gateway for * the VLAN identified by vid. * @bat_priv: the bat priv with all the mesh interface information * @orig: originator mac address * @vid: VLAN identifier * * Return: true if orig is a backbone for this vid, false otherwise. 
*/ bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig, unsigned short vid) { struct batadv_hashtable *hash = bat_priv->bla.backbone_hash; struct hlist_head *head; struct batadv_bla_backbone_gw *backbone_gw; int i; if (!atomic_read(&bat_priv->bridge_loop_avoidance)) return false; if (!hash) return false; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) { if (batadv_compare_eth(backbone_gw->orig, orig) && backbone_gw->vid == vid) { rcu_read_unlock(); return true; } } rcu_read_unlock(); } return false; } /** * batadv_bla_is_backbone_gw() - check if originator is a backbone gw for a VLAN * @skb: the frame to be checked * @orig_node: the orig_node of the frame * @hdr_size: maximum length of the frame * * Return: true if the orig_node is also a gateway on the mesh interface, * otherwise it returns false. */ bool batadv_bla_is_backbone_gw(struct sk_buff *skb, struct batadv_orig_node *orig_node, int hdr_size) { struct batadv_bla_backbone_gw *backbone_gw; unsigned short vid; if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance)) return false; /* first, find out the vid. */ if (!pskb_may_pull(skb, hdr_size + ETH_HLEN)) return false; vid = batadv_get_vid(skb, hdr_size); /* see if this originator is a backbone gw for this VLAN */ backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv, orig_node->orig, vid); if (!backbone_gw) return false; batadv_backbone_gw_put(backbone_gw); return true; } /** * batadv_bla_free() - free all bla structures * @bat_priv: the bat priv with all the mesh interface information * * for meshinterface free or module unload */ void batadv_bla_free(struct batadv_priv *bat_priv) { struct batadv_hard_iface *primary_if; cancel_delayed_work_sync(&bat_priv->bla.work); primary_if = batadv_primary_if_get_selected(bat_priv); if (bat_priv->bla.claim_hash) { batadv_bla_purge_claims(bat_priv, primary_if, 1); batadv_hash_destroy(bat_priv->bla.claim_hash); bat_priv->bla.claim_hash = NULL; } if (bat_priv->bla.backbone_hash) { batadv_bla_purge_backbone_gw(bat_priv, 1); batadv_hash_destroy(bat_priv->bla.backbone_hash); bat_priv->bla.backbone_hash = NULL; } batadv_hardif_put(primary_if); } /** * batadv_bla_loopdetect_check() - check and handle a detected loop * @bat_priv: the bat priv with all the mesh interface information * @skb: the packet to check * @primary_if: interface where the request came on * @vid: the VLAN ID of the frame * * Checks if this packet is a loop detect frame which has been sent by us, * throws an uevent and logs the event if that is the case. * * Return: true if it is a loop detect frame which is to be dropped, false * otherwise. */ static bool batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb, struct batadv_hard_iface *primary_if, unsigned short vid) { struct batadv_bla_backbone_gw *backbone_gw; struct ethhdr *ethhdr; bool ret; ethhdr = eth_hdr(skb); /* Only check for the MAC address and skip more checks here for * performance reasons - this function is on the hotpath, after all. */ if (!batadv_compare_eth(ethhdr->h_source, bat_priv->bla.loopdetect_addr)) return false; /* If the packet came too late, don't forward it on the mesh * but don't consider that as loop. It might be a coincidence. 
*/ if (batadv_has_timed_out(bat_priv->bla.loopdetect_lasttime, BATADV_BLA_LOOPDETECT_TIMEOUT)) return true; backbone_gw = batadv_bla_get_backbone_gw(bat_priv, primary_if->net_dev->dev_addr, vid, true); if (unlikely(!backbone_gw)) return true; ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work); /* backbone_gw is unreferenced in the report work function * if queue_work() call was successful */ if (!ret) batadv_backbone_gw_put(backbone_gw); return true; } /** * batadv_bla_rx() - check packets coming from the mesh. * @bat_priv: the bat priv with all the mesh interface information * @skb: the frame to be checked * @vid: the VLAN ID of the frame * @packet_type: the batman packet type this frame came in * * batadv_bla_rx avoidance checks if: * * we have to race for a claim * * if the frame is allowed on the LAN * * In these cases, the skb is further handled by this function * * Return: true if handled, otherwise it returns false and the caller shall * further process the skb. */ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned short vid, int packet_type) { struct batadv_bla_backbone_gw *backbone_gw; struct ethhdr *ethhdr; struct batadv_bla_claim search_claim, *claim = NULL; struct batadv_hard_iface *primary_if; bool own_claim; bool ret; ethhdr = eth_hdr(skb); primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if) goto handled; if (!atomic_read(&bat_priv->bridge_loop_avoidance)) goto allow; if (batadv_bla_loopdetect_check(bat_priv, skb, primary_if, vid)) goto handled; if (unlikely(atomic_read(&bat_priv->bla.num_requests))) /* don't allow multicast packets while requests are in flight */ if (is_multicast_ether_addr(ethhdr->h_dest)) /* Both broadcast flooding or multicast-via-unicasts * delivery might send to multiple backbone gateways * sharing the same LAN and therefore need to coordinate * which backbone gateway forwards into the LAN, * by claiming the payload source address. * * Broadcast flooding and multicast-via-unicasts * delivery use the following two batman packet types. * Note: explicitly exclude BATADV_UNICAST_4ADDR, * as the DHCP gateway feature will send explicitly * to only one BLA gateway, so the claiming process * should be avoided there. */ if (packet_type == BATADV_BCAST || packet_type == BATADV_UNICAST) goto handled; /* potential duplicates from foreign BLA backbone gateways via * multicast-in-unicast packets */ if (is_multicast_ether_addr(ethhdr->h_dest) && packet_type == BATADV_UNICAST && batadv_bla_check_ucast_duplist(bat_priv, skb)) goto handled; ether_addr_copy(search_claim.addr, ethhdr->h_source); search_claim.vid = vid; claim = batadv_claim_hash_find(bat_priv, &search_claim); if (!claim) { bool local = batadv_is_my_client(bat_priv, ethhdr->h_source, vid); /* possible optimization: race for a claim */ /* No claim exists yet, claim it for us! */ batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): Unclaimed MAC %pM found. Claim it. Local: %s\n", __func__, ethhdr->h_source, str_yes_no(local)); batadv_handle_claim(bat_priv, primary_if, primary_if->net_dev->dev_addr, ethhdr->h_source, vid); goto allow; } /* if it is our own claim ... */ backbone_gw = batadv_bla_claim_get_backbone_gw(claim); own_claim = batadv_compare_eth(backbone_gw->orig, primary_if->net_dev->dev_addr); batadv_backbone_gw_put(backbone_gw); if (own_claim) { /* ... allow it in any case */ claim->lasttime = jiffies; goto allow; } /* if it is a multicast ... 
*/ if (is_multicast_ether_addr(ethhdr->h_dest) && (packet_type == BATADV_BCAST || packet_type == BATADV_UNICAST)) { /* ... drop it. the responsible gateway is in charge. * * We need to check packet type because with the gateway * feature, broadcasts (like DHCP requests) may be sent * using a unicast 4 address packet type. See comment above. */ goto handled; } else { /* seems the client considers us as its best gateway. * send a claim and update the claim table * immediately. */ batadv_handle_claim(bat_priv, primary_if, primary_if->net_dev->dev_addr, ethhdr->h_source, vid); goto allow; } allow: batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid); ret = false; goto out; handled: kfree_skb(skb); ret = true; out: batadv_hardif_put(primary_if); batadv_claim_put(claim); return ret; } /** * batadv_bla_tx() - check packets going into the mesh * @bat_priv: the bat priv with all the mesh interface information * @skb: the frame to be checked * @vid: the VLAN ID of the frame * * batadv_bla_tx checks if: * * a claim was received which has to be processed * * the frame is allowed on the mesh * * in these cases, the skb is further handled by this function. * * This call might reallocate skb data. * * Return: true if handled, otherwise it returns false and the caller shall * further process the skb. */ bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned short vid) { struct ethhdr *ethhdr; struct batadv_bla_claim search_claim, *claim = NULL; struct batadv_bla_backbone_gw *backbone_gw; struct batadv_hard_iface *primary_if; bool client_roamed; bool ret = false; primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if) goto out; if (!atomic_read(&bat_priv->bridge_loop_avoidance)) goto allow; if (batadv_bla_process_claim(bat_priv, primary_if, skb)) goto handled; ethhdr = eth_hdr(skb); if (unlikely(atomic_read(&bat_priv->bla.num_requests))) /* don't allow broadcasts while requests are in flight */ if (is_multicast_ether_addr(ethhdr->h_dest)) goto handled; ether_addr_copy(search_claim.addr, ethhdr->h_source); search_claim.vid = vid; claim = batadv_claim_hash_find(bat_priv, &search_claim); /* if no claim exists, allow it. */ if (!claim) goto allow; /* check if we are responsible. */ backbone_gw = batadv_bla_claim_get_backbone_gw(claim); client_roamed = batadv_compare_eth(backbone_gw->orig, primary_if->net_dev->dev_addr); batadv_backbone_gw_put(backbone_gw); if (client_roamed) { /* if yes, the client has roamed and we have * to unclaim it. */ if (batadv_has_timed_out(claim->lasttime, 100)) { /* only unclaim if the last claim entry is * older than 100 ms to make sure we really * have a roaming client here. */ batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): Roaming client %pM detected. Unclaim it.\n", __func__, ethhdr->h_source); batadv_handle_unclaim(bat_priv, primary_if, primary_if->net_dev->dev_addr, ethhdr->h_source, vid); goto allow; } else { batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): Race for claim %pM detected. Drop packet.\n", __func__, ethhdr->h_source); goto handled; } } /* check if it is a multicast/broadcast frame */ if (is_multicast_ether_addr(ethhdr->h_dest)) { /* drop it. the responsible gateway has forwarded it into * the backbone network. */ goto handled; } else { /* we must allow it. at least if we are * responsible for the DESTINATION. 
*/ goto allow; } allow: batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid); ret = false; goto out; handled: ret = true; out: batadv_hardif_put(primary_if); batadv_claim_put(claim); return ret; } /** * batadv_bla_claim_dump_entry() - dump one entry of the claim table * to a netlink socket * @msg: buffer for the message * @portid: netlink port * @cb: Control block containing additional options * @primary_if: primary interface * @claim: entry to dump * * Return: 0 or error code. */ static int batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid, struct netlink_callback *cb, struct batadv_hard_iface *primary_if, struct batadv_bla_claim *claim) { const u8 *primary_addr = primary_if->net_dev->dev_addr; u16 backbone_crc; bool is_own; void *hdr; int ret = -EINVAL; hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq, &batadv_netlink_family, NLM_F_MULTI, BATADV_CMD_GET_BLA_CLAIM); if (!hdr) { ret = -ENOBUFS; goto out; } genl_dump_check_consistent(cb, hdr); is_own = batadv_compare_eth(claim->backbone_gw->orig, primary_addr); spin_lock_bh(&claim->backbone_gw->crc_lock); backbone_crc = claim->backbone_gw->crc; spin_unlock_bh(&claim->backbone_gw->crc_lock); if (is_own) if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) { genlmsg_cancel(msg, hdr); goto out; } if (nla_put(msg, BATADV_ATTR_BLA_ADDRESS, ETH_ALEN, claim->addr) || nla_put_u16(msg, BATADV_ATTR_BLA_VID, claim->vid) || nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN, claim->backbone_gw->orig) || nla_put_u16(msg, BATADV_ATTR_BLA_CRC, backbone_crc)) { genlmsg_cancel(msg, hdr); goto out; } genlmsg_end(msg, hdr); ret = 0; out: return ret; } /** * batadv_bla_claim_dump_bucket() - dump one bucket of the claim table * to a netlink socket * @msg: buffer for the message * @portid: netlink port * @cb: Control block containing additional options * @primary_if: primary interface * @hash: hash to dump * @bucket: bucket index to dump * @idx_skip: How many entries to skip * * Return: always 0. */ static int batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid, struct netlink_callback *cb, struct batadv_hard_iface *primary_if, struct batadv_hashtable *hash, unsigned int bucket, int *idx_skip) { struct batadv_bla_claim *claim; int idx = 0; int ret = 0; spin_lock_bh(&hash->list_locks[bucket]); cb->seq = atomic_read(&hash->generation) << 1 | 1; hlist_for_each_entry(claim, &hash->table[bucket], hash_entry) { if (idx++ < *idx_skip) continue; ret = batadv_bla_claim_dump_entry(msg, portid, cb, primary_if, claim); if (ret) { *idx_skip = idx - 1; goto unlock; } } *idx_skip = 0; unlock: spin_unlock_bh(&hash->list_locks[bucket]); return ret; } /** * batadv_bla_claim_dump() - dump claim table to a netlink socket * @msg: buffer for the message * @cb: callback structure containing arguments * * Return: message length. 
*/ int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb) { struct batadv_hard_iface *primary_if = NULL; int portid = NETLINK_CB(cb->skb).portid; struct net_device *mesh_iface; struct batadv_hashtable *hash; struct batadv_priv *bat_priv; int bucket = cb->args[0]; int idx = cb->args[1]; int ret = 0; mesh_iface = batadv_netlink_get_meshif(cb); if (IS_ERR(mesh_iface)) return PTR_ERR(mesh_iface); bat_priv = netdev_priv(mesh_iface); hash = bat_priv->bla.claim_hash; primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) { ret = -ENOENT; goto out; } while (bucket < hash->size) { if (batadv_bla_claim_dump_bucket(msg, portid, cb, primary_if, hash, bucket, &idx)) break; bucket++; } cb->args[0] = bucket; cb->args[1] = idx; ret = msg->len; out: batadv_hardif_put(primary_if); dev_put(mesh_iface); return ret; } /** * batadv_bla_backbone_dump_entry() - dump one entry of the backbone table to a * netlink socket * @msg: buffer for the message * @portid: netlink port * @cb: Control block containing additional options * @primary_if: primary interface * @backbone_gw: entry to dump * * Return: 0 or error code. */ static int batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid, struct netlink_callback *cb, struct batadv_hard_iface *primary_if, struct batadv_bla_backbone_gw *backbone_gw) { const u8 *primary_addr = primary_if->net_dev->dev_addr; u16 backbone_crc; bool is_own; int msecs; void *hdr; int ret = -EINVAL; hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq, &batadv_netlink_family, NLM_F_MULTI, BATADV_CMD_GET_BLA_BACKBONE); if (!hdr) { ret = -ENOBUFS; goto out; } genl_dump_check_consistent(cb, hdr); is_own = batadv_compare_eth(backbone_gw->orig, primary_addr); spin_lock_bh(&backbone_gw->crc_lock); backbone_crc = backbone_gw->crc; spin_unlock_bh(&backbone_gw->crc_lock); msecs = jiffies_to_msecs(jiffies - backbone_gw->lasttime); if (is_own) if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) { genlmsg_cancel(msg, hdr); goto out; } if (nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN, backbone_gw->orig) || nla_put_u16(msg, BATADV_ATTR_BLA_VID, backbone_gw->vid) || nla_put_u16(msg, BATADV_ATTR_BLA_CRC, backbone_crc) || nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, msecs)) { genlmsg_cancel(msg, hdr); goto out; } genlmsg_end(msg, hdr); ret = 0; out: return ret; } /** * batadv_bla_backbone_dump_bucket() - dump one bucket of the backbone table to * a netlink socket * @msg: buffer for the message * @portid: netlink port * @cb: Control block containing additional options * @primary_if: primary interface * @hash: hash to dump * @bucket: bucket index to dump * @idx_skip: How many entries to skip * * Return: always 0. 
*/ static int batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid, struct netlink_callback *cb, struct batadv_hard_iface *primary_if, struct batadv_hashtable *hash, unsigned int bucket, int *idx_skip) { struct batadv_bla_backbone_gw *backbone_gw; int idx = 0; int ret = 0; spin_lock_bh(&hash->list_locks[bucket]); cb->seq = atomic_read(&hash->generation) << 1 | 1; hlist_for_each_entry(backbone_gw, &hash->table[bucket], hash_entry) { if (idx++ < *idx_skip) continue; ret = batadv_bla_backbone_dump_entry(msg, portid, cb, primary_if, backbone_gw); if (ret) { *idx_skip = idx - 1; goto unlock; } } *idx_skip = 0; unlock: spin_unlock_bh(&hash->list_locks[bucket]); return ret; } /** * batadv_bla_backbone_dump() - dump backbone table to a netlink socket * @msg: buffer for the message * @cb: callback structure containing arguments * * Return: message length. */ int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb) { struct batadv_hard_iface *primary_if = NULL; int portid = NETLINK_CB(cb->skb).portid; struct net_device *mesh_iface; struct batadv_hashtable *hash; struct batadv_priv *bat_priv; int bucket = cb->args[0]; int idx = cb->args[1]; int ret = 0; mesh_iface = batadv_netlink_get_meshif(cb); if (IS_ERR(mesh_iface)) return PTR_ERR(mesh_iface); bat_priv = netdev_priv(mesh_iface); hash = bat_priv->bla.backbone_hash; primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) { ret = -ENOENT; goto out; } while (bucket < hash->size) { if (batadv_bla_backbone_dump_bucket(msg, portid, cb, primary_if, hash, bucket, &idx)) break; bucket++; } cb->args[0] = bucket; cb->args[1] = idx; ret = msg->len; out: batadv_hardif_put(primary_if); dev_put(mesh_iface); return ret; } #ifdef CONFIG_BATMAN_ADV_DAT /** * batadv_bla_check_claim() - check if address is claimed * * @bat_priv: the bat priv with all the mesh interface information * @addr: mac address of which the claim status is checked * @vid: the VLAN ID * * addr is checked if this address is claimed by the local device itself. * * Return: true if bla is disabled or the mac is claimed by the device, * false if the device addr is already claimed by another gateway */ bool batadv_bla_check_claim(struct batadv_priv *bat_priv, u8 *addr, unsigned short vid) { struct batadv_bla_claim search_claim; struct batadv_bla_claim *claim = NULL; struct batadv_hard_iface *primary_if = NULL; bool ret = true; if (!atomic_read(&bat_priv->bridge_loop_avoidance)) return ret; primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if) return ret; /* First look if the mac address is claimed */ ether_addr_copy(search_claim.addr, addr); search_claim.vid = vid; claim = batadv_claim_hash_find(bat_priv, &search_claim); /* If there is a claim and we are not owner of the claim, * return false. */ if (claim) { if (!batadv_compare_eth(claim->backbone_gw->orig, primary_if->net_dev->dev_addr)) ret = false; batadv_claim_put(claim); } batadv_hardif_put(primary_if); return ret; } #endif
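The duplicate-list walk above (batadv_bla_check_duplist() and its bcast/ucast wrappers) boils down to a small ring buffer keyed by payload CRC and originator, with a freshness timeout: a fresh entry with the same CRC but a different originator means another gateway already injected the frame. The user-space sketch below reproduces just that idea for clarity; the helper name duplist_check(), the constants and the time source are illustrative stand-ins, not batman-adv API.

/*
 * Minimal user-space sketch of the duplicate-list idea used by
 * batadv_bla_check_duplist() above.  All names and constants here are
 * stand-ins for illustration only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <time.h>

#define DUPLIST_SIZE    6       /* stand-in for BATADV_DUPLIST_SIZE */
#define DUPLIST_TIMEOUT 500     /* ms, stand-in for BATADV_DUPLIST_TIMEOUT */
#define ETH_ALEN        6

struct dup_entry {
	uint32_t crc;
	uint8_t orig[ETH_ALEN];
	uint64_t entrytime_ms;
};

static struct dup_entry duplist[DUPLIST_SIZE];
static int duplist_curr;

static uint64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Return true if the frame should be dropped as a duplicate. */
static bool duplist_check(uint32_t crc, const uint8_t *orig)
{
	uint64_t now = now_ms();
	int curr, i;

	for (i = 0; i < DUPLIST_SIZE; i++) {
		struct dup_entry *e;

		curr = (duplist_curr + i) % DUPLIST_SIZE;
		e = &duplist[curr];

		if (now - e->entrytime_ms > DUPLIST_TIMEOUT)
			continue;               /* entry too old */
		if (e->crc != crc)
			continue;               /* different payload */
		if (orig && memcmp(e->orig, orig, ETH_ALEN) == 0)
			continue;               /* same originator: tolerated */
		return true;                    /* same crc, other gateway: drop */
	}

	/* Not found: overwrite the oldest slot and allow the frame. */
	curr = (duplist_curr + DUPLIST_SIZE - 1) % DUPLIST_SIZE;
	duplist[curr].crc = crc;
	duplist[curr].entrytime_ms = now;
	if (orig)
		memcpy(duplist[curr].orig, orig, ETH_ALEN);
	else
		memset(duplist[curr].orig, 0, ETH_ALEN);
	duplist_curr = curr;

	return false;
}

As in the kernel code, an unknown (zero) originator never benefits from the "same originator" exemption, so anonymous repeats of the same payload are still treated as duplicates.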
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __KVM_X86_MMU_INTERNAL_H #define __KVM_X86_MMU_INTERNAL_H #include <linux/types.h> #include <linux/kvm_host.h> #include <asm/kvm_host.h> #include "mmu.h" #ifdef CONFIG_KVM_PROVE_MMU #define KVM_MMU_WARN_ON(x) WARN_ON_ONCE(x) #else #define KVM_MMU_WARN_ON(x) BUILD_BUG_ON_INVALID(x) #endif /* Page table builder macros common to shadow (host) PTEs and guest PTEs. */ #define __PT_BASE_ADDR_MASK GENMASK_ULL(51, 12) #define __PT_LEVEL_SHIFT(level, bits_per_level) \ (PAGE_SHIFT + ((level) - 1) * (bits_per_level)) #define __PT_INDEX(address, level, bits_per_level) \ (((address) >> __PT_LEVEL_SHIFT(level, bits_per_level)) & ((1 << (bits_per_level)) - 1)) #define __PT_LVL_ADDR_MASK(base_addr_mask, level, bits_per_level) \ ((base_addr_mask) & ~((1ULL << (PAGE_SHIFT + (((level) - 1) * (bits_per_level)))) - 1)) #define __PT_LVL_OFFSET_MASK(base_addr_mask, level, bits_per_level) \ ((base_addr_mask) & ((1ULL << (PAGE_SHIFT + (((level) - 1) * (bits_per_level)))) - 1)) #define __PT_ENT_PER_PAGE(bits_per_level) (1 << (bits_per_level)) /* * Unlike regular MMU roots, PAE "roots", a.k.a. PDPTEs/PDPTRs, have a PRESENT * bit, and thus are guaranteed to be non-zero when valid. And, when a guest * PDPTR is !PRESENT, its corresponding PAE root cannot be set to INVALID_PAGE, * as the CPU would treat that as PRESENT PDPTR with reserved bits set. Use * '0' instead of INVALID_PAGE to indicate an invalid PAE root. */ #define INVALID_PAE_ROOT 0 #define IS_VALID_PAE_ROOT(x) (!!(x)) static inline hpa_t kvm_mmu_get_dummy_root(void) { return my_zero_pfn(0) << PAGE_SHIFT; } static inline bool kvm_mmu_is_dummy_root(hpa_t shadow_page) { return is_zero_pfn(shadow_page >> PAGE_SHIFT); } typedef u64 __rcu *tdp_ptep_t; struct kvm_mmu_page { /* * Note, "link" through "spt" fit in a single 64 byte cache line on * 64-bit kernels, keep it that way unless there's a reason not to.
*/ struct list_head link; struct hlist_node hash_link; bool tdp_mmu_page; bool unsync; union { u8 mmu_valid_gen; /* Only accessed under slots_lock. */ bool tdp_mmu_scheduled_root_to_zap; }; /* * The shadow page can't be replaced by an equivalent huge page * because it is being used to map an executable page in the guest * and the NX huge page mitigation is enabled. */ bool nx_huge_page_disallowed; /* * The following two entries are used to key the shadow page in the * hash table. */ union kvm_mmu_page_role role; gfn_t gfn; u64 *spt; /* * Stores the result of the guest translation being shadowed by each * SPTE. KVM shadows two types of guest translations: nGPA -> GPA * (shadow EPT/NPT) and GVA -> GPA (traditional shadow paging). In both * cases the result of the translation is a GPA and a set of access * constraints. * * The GFN is stored in the upper bits (PAGE_SHIFT) and the shadowed * access permissions are stored in the lower bits. Note, for * convenience and uniformity across guests, the access permissions are * stored in KVM format (e.g. ACC_EXEC_MASK) not the raw guest format. */ u64 *shadowed_translation; /* Currently serving as active root */ union { int root_count; refcount_t tdp_mmu_root_count; }; union { /* These two members aren't used for TDP MMU */ struct { unsigned int unsync_children; /* * Number of writes since the last time traversal * visited this page. */ atomic_t write_flooding_count; }; /* * Page table page of external PT. * Passed to TDX module, not accessed by KVM. */ void *external_spt; }; union { struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */ tdp_ptep_t ptep; }; DECLARE_BITMAP(unsync_child_bitmap, 512); /* * Tracks shadow pages that, if zapped, would allow KVM to create an NX * huge page. A shadow page will have nx_huge_page_disallowed set but * not be on the list if a huge page is disallowed for other reasons, * e.g. because KVM is shadowing a PTE at the same gfn, the memslot * isn't properly aligned, etc... */ struct list_head possible_nx_huge_page_link; #ifdef CONFIG_X86_32 /* * Used out of the mmu-lock to avoid reading spte values while an * update is in progress; see the comments in __get_spte_lockless(). */ int clear_spte_count; #endif #ifdef CONFIG_X86_64 /* Used for freeing the page asynchronously if it is a TDP MMU page. */ struct rcu_head rcu_head; #endif }; extern struct kmem_cache *mmu_page_header_cache; static inline int kvm_mmu_role_as_id(union kvm_mmu_page_role role) { return role.smm ? 1 : 0; } static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp) { return kvm_mmu_role_as_id(sp->role); } static inline bool is_mirror_sp(const struct kvm_mmu_page *sp) { return sp->role.is_mirror; } static inline void kvm_mmu_alloc_external_spt(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) { /* * external_spt is allocated for TDX module to hold private EPT mappings, * TDX module will initialize the page by itself. * Therefore, KVM does not need to initialize or access external_spt. * KVM only interacts with sp->spt for private EPT operations. */ sp->external_spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_external_spt_cache); } static inline gfn_t kvm_gfn_root_bits(const struct kvm *kvm, const struct kvm_mmu_page *root) { /* * Since mirror SPs are used only for TDX, which maps private memory * at its "natural" GFN, no mask needs to be applied to them - and, dually, * we expect that the bits is only used for the shared PT. 
*/ if (is_mirror_sp(root)) return 0; return kvm_gfn_direct_bits(kvm); } static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp) { /* * When using the EPT page-modification log, the GPAs in the CPU dirty * log would come from L2 rather than L1. Therefore, we need to rely * on write protection to record dirty pages, which bypasses PML, since * writes now result in a vmexit. Note, the check on CPU dirty logging * being enabled is mandatory as the bits used to denote WP-only SPTEs * are reserved for PAE paging (32-bit KVM). */ return kvm_x86_ops.cpu_dirty_log_size && sp->role.guest_mode; } static inline gfn_t gfn_round_for_level(gfn_t gfn, int level) { return gfn & -KVM_PAGES_PER_HPAGE(level); } int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn, bool synchronizing, bool prefetch); void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn); void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn); bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, struct kvm_memory_slot *slot, u64 gfn, int min_level); /* Flush the given page (huge or not) of guest memory. */ static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level) { kvm_flush_remote_tlbs_range(kvm, gfn_round_for_level(gfn, level), KVM_PAGES_PER_HPAGE(level)); } unsigned int pte_list_count(struct kvm_rmap_head *rmap_head); extern int nx_huge_pages; static inline bool is_nx_huge_page_enabled(struct kvm *kvm) { return READ_ONCE(nx_huge_pages) && !kvm->arch.disable_nx_huge_pages; } struct kvm_page_fault { /* arguments to kvm_mmu_do_page_fault. */ const gpa_t addr; const u64 error_code; const bool prefetch; /* Derived from error_code. */ const bool exec; const bool write; const bool present; const bool rsvd; const bool user; /* Derived from mmu and global state. */ const bool is_tdp; const bool is_private; const bool nx_huge_page_workaround_enabled; /* * Whether a >4KB mapping can be created or is forbidden due to NX * hugepages. */ bool huge_page_disallowed; /* * Maximum page size that can be created for this fault; input to * FNAME(fetch), direct_map() and kvm_tdp_mmu_map(). */ u8 max_level; /* * Page size that can be created based on the max_level and the * page size used by the host mapping. */ u8 req_level; /* * Page size that will be created based on the req_level and * huge_page_disallowed. */ u8 goal_level; /* * Shifted addr, or result of guest page table walk if addr is a gva. In * the case of VM where memslot's can be mapped at multiple GPA aliases * (i.e. TDX), the gfn field does not contain the bit that selects between * the aliases (i.e. the shared bit for TDX). */ gfn_t gfn; /* The memslot containing gfn. May be NULL. */ struct kvm_memory_slot *slot; /* Outputs of kvm_mmu_faultin_pfn(). */ unsigned long mmu_seq; kvm_pfn_t pfn; struct page *refcounted_page; bool map_writable; /* * Indicates the guest is trying to write a gfn that contains one or * more of the PTEs used to translate the write itself, i.e. the access * is changing its own translation in the guest page tables. */ bool write_fault_to_shadow_pgtable; }; int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault); /* * Return values of handle_mmio_page_fault(), mmu.page_fault(), fast_page_fault(), * and of course kvm_mmu_do_page_fault(). * * RET_PF_CONTINUE: So far, so good, keep handling the page fault. * RET_PF_RETRY: let CPU fault again on the address. * RET_PF_EMULATE: mmio page fault, emulate the instruction directly. 
* RET_PF_WRITE_PROTECTED: the gfn is write-protected, either unprotected the * gfn and retry, or emulate the instruction directly. * RET_PF_INVALID: the spte is invalid, let the real page fault path update it. * RET_PF_FIXED: The faulting entry has been fixed. * RET_PF_SPURIOUS: The faulting entry was already fixed, e.g. by another vCPU. * * Any names added to this enum should be exported to userspace for use in * tracepoints via TRACE_DEFINE_ENUM() in mmutrace.h * * Note, all values must be greater than or equal to zero so as not to encroach * on -errno return values. */ enum { RET_PF_CONTINUE = 0, RET_PF_RETRY, RET_PF_EMULATE, RET_PF_WRITE_PROTECTED, RET_PF_INVALID, RET_PF_FIXED, RET_PF_SPURIOUS, }; /* * Define RET_PF_CONTINUE as 0 to allow for * - efficient machine code when checking for CONTINUE, e.g. * "TEST %rax, %rax, JNZ", as all "stop!" values are non-zero, * - kvm_mmu_do_page_fault() to return other RET_PF_* as a positive value. */ static_assert(RET_PF_CONTINUE == 0); static inline void kvm_mmu_prepare_memory_fault_exit(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { kvm_prepare_memory_fault_exit(vcpu, fault->gfn << PAGE_SHIFT, PAGE_SIZE, fault->write, fault->exec, fault->is_private); } static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err, bool prefetch, int *emulation_type, u8 *level) { struct kvm_page_fault fault = { .addr = cr2_or_gpa, .error_code = err, .exec = err & PFERR_FETCH_MASK, .write = err & PFERR_WRITE_MASK, .present = err & PFERR_PRESENT_MASK, .rsvd = err & PFERR_RSVD_MASK, .user = err & PFERR_USER_MASK, .prefetch = prefetch, .is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault), .nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(vcpu->kvm), .max_level = KVM_MAX_HUGEPAGE_LEVEL, .req_level = PG_LEVEL_4K, .goal_level = PG_LEVEL_4K, .is_private = err & PFERR_PRIVATE_ACCESS, .pfn = KVM_PFN_ERR_FAULT, }; int r; if (vcpu->arch.mmu->root_role.direct) { /* * Things like memslots don't understand the concept of a shared * bit. Strip it so that the GFN can be used like normal, and the * fault.addr can be used when the shared bit is needed. */ fault.gfn = gpa_to_gfn(fault.addr) & ~kvm_gfn_direct_bits(vcpu->kvm); fault.slot = kvm_vcpu_gfn_to_memslot(vcpu, fault.gfn); } /* * With retpoline being active an indirect call is rather expensive, * so do a direct call in the most common case. */ if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && fault.is_tdp) r = kvm_tdp_page_fault(vcpu, &fault); else r = vcpu->arch.mmu->page_fault(vcpu, &fault); /* * Not sure what's happening, but punt to userspace and hope that * they can fix it by changing memory to shared, or they can * provide a better error. */ if (r == RET_PF_EMULATE && fault.is_private) { pr_warn_ratelimited("kvm: unexpected emulation request on private memory\n"); kvm_mmu_prepare_memory_fault_exit(vcpu, &fault); return -EFAULT; } if (fault.write_fault_to_shadow_pgtable && emulation_type) *emulation_type |= EMULTYPE_WRITE_PF_TO_SP; if (level) *level = fault.goal_level; return r; } int kvm_mmu_max_mapping_level(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn); void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault); void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level); void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp); void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp); #endif /* __KVM_X86_MMU_INTERNAL_H */
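The __PT_LEVEL_SHIFT()/__PT_INDEX() builder macros near the top of this header are easiest to see with concrete numbers. The stand-alone sketch below reuses the two macros to split a sample address into per-level indices for 4-level paging with 9 bits per level; PAGE_SHIFT, main() and the sample address are assumptions made only for this illustration, which is not KVM code.

/*
 * Stand-alone illustration of __PT_LEVEL_SHIFT()/__PT_INDEX() from this
 * header: decompose an address into per-level page-table indices for
 * 4-level paging with 9 bits per level.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4K pages for the example */

#define __PT_LEVEL_SHIFT(level, bits_per_level) \
	(PAGE_SHIFT + ((level) - 1) * (bits_per_level))
#define __PT_INDEX(address, level, bits_per_level) \
	(((address) >> __PT_LEVEL_SHIFT(level, bits_per_level)) & \
	 ((1 << (bits_per_level)) - 1))

int main(void)
{
	uint64_t addr = 0x7f1234567000ULL;	/* arbitrary sample address */
	int level;

	/* Level 4 is the root table, level 1 holds the 4K PTE. */
	for (level = 4; level >= 1; level--)
		printf("level %d: shift %2d, index %3llu\n", level,
		       __PT_LEVEL_SHIFT(level, 9),
		       (unsigned long long)__PT_INDEX(addr, level, 9));

	return 0;
}

Each level consumes 9 address bits above the 12-bit page offset, which is exactly what __PT_LVL_ADDR_MASK() and __PT_ENT_PER_PAGE() encode for the shadow and guest page-table walkers.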
// SPDX-License-Identifier: GPL-2.0-only #include <linux/blkdev.h> #include <linux/wait.h> #include <linux/rbtree.h> #include <linux/kthread.h> #include <linux/backing-dev.h> #include <linux/blk-cgroup.h> #include <linux/freezer.h> #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/mm.h> #include <linux/sched/mm.h> #include <linux/sched.h> #include <linux/module.h> #include <linux/writeback.h> #include <linux/device.h> #include <trace/events/writeback.h> #include "internal.h" struct backing_dev_info noop_backing_dev_info; EXPORT_SYMBOL_GPL(noop_backing_dev_info); static const char *bdi_unknown_name = "(unknown)"; /* * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU * reader side locking.
*/ DEFINE_SPINLOCK(bdi_lock); static u64 bdi_id_cursor; static struct rb_root bdi_tree = RB_ROOT; LIST_HEAD(bdi_list); /* bdi_wq serves all asynchronous writeback tasks */ struct workqueue_struct *bdi_wq; #ifdef CONFIG_DEBUG_FS #include <linux/debugfs.h> #include <linux/seq_file.h> struct wb_stats { unsigned long nr_dirty; unsigned long nr_io; unsigned long nr_more_io; unsigned long nr_dirty_time; unsigned long nr_writeback; unsigned long nr_reclaimable; unsigned long nr_dirtied; unsigned long nr_written; unsigned long dirty_thresh; unsigned long wb_thresh; }; static struct dentry *bdi_debug_root; static void bdi_debug_init(void) { bdi_debug_root = debugfs_create_dir("bdi", NULL); } static void collect_wb_stats(struct wb_stats *stats, struct bdi_writeback *wb) { struct inode *inode; spin_lock(&wb->list_lock); list_for_each_entry(inode, &wb->b_dirty, i_io_list) stats->nr_dirty++; list_for_each_entry(inode, &wb->b_io, i_io_list) stats->nr_io++; list_for_each_entry(inode, &wb->b_more_io, i_io_list) stats->nr_more_io++; list_for_each_entry(inode, &wb->b_dirty_time, i_io_list) if (inode->i_state & I_DIRTY_TIME) stats->nr_dirty_time++; spin_unlock(&wb->list_lock); stats->nr_writeback += wb_stat(wb, WB_WRITEBACK); stats->nr_reclaimable += wb_stat(wb, WB_RECLAIMABLE); stats->nr_dirtied += wb_stat(wb, WB_DIRTIED); stats->nr_written += wb_stat(wb, WB_WRITTEN); stats->wb_thresh += wb_calc_thresh(wb, stats->dirty_thresh); } #ifdef CONFIG_CGROUP_WRITEBACK static void bdi_collect_stats(struct backing_dev_info *bdi, struct wb_stats *stats) { struct bdi_writeback *wb; rcu_read_lock(); list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node) { if (!wb_tryget(wb)) continue; collect_wb_stats(stats, wb); wb_put(wb); } rcu_read_unlock(); } #else static void bdi_collect_stats(struct backing_dev_info *bdi, struct wb_stats *stats) { collect_wb_stats(stats, &bdi->wb); } #endif static int bdi_debug_stats_show(struct seq_file *m, void *v) { struct backing_dev_info *bdi = m->private; unsigned long background_thresh; unsigned long dirty_thresh; struct wb_stats stats; unsigned long tot_bw; global_dirty_limits(&background_thresh, &dirty_thresh); memset(&stats, 0, sizeof(stats)); stats.dirty_thresh = dirty_thresh; bdi_collect_stats(bdi, &stats); tot_bw = atomic_long_read(&bdi->tot_write_bandwidth); seq_printf(m, "BdiWriteback: %10lu kB\n" "BdiReclaimable: %10lu kB\n" "BdiDirtyThresh: %10lu kB\n" "DirtyThresh: %10lu kB\n" "BackgroundThresh: %10lu kB\n" "BdiDirtied: %10lu kB\n" "BdiWritten: %10lu kB\n" "BdiWriteBandwidth: %10lu kBps\n" "b_dirty: %10lu\n" "b_io: %10lu\n" "b_more_io: %10lu\n" "b_dirty_time: %10lu\n" "bdi_list: %10u\n" "state: %10lx\n", K(stats.nr_writeback), K(stats.nr_reclaimable), K(stats.wb_thresh), K(dirty_thresh), K(background_thresh), K(stats.nr_dirtied), K(stats.nr_written), K(tot_bw), stats.nr_dirty, stats.nr_io, stats.nr_more_io, stats.nr_dirty_time, !list_empty(&bdi->bdi_list), bdi->wb.state); return 0; } DEFINE_SHOW_ATTRIBUTE(bdi_debug_stats); static void wb_stats_show(struct seq_file *m, struct bdi_writeback *wb, struct wb_stats *stats) { seq_printf(m, "WbCgIno: %10lu\n" "WbWriteback: %10lu kB\n" "WbReclaimable: %10lu kB\n" "WbDirtyThresh: %10lu kB\n" "WbDirtied: %10lu kB\n" "WbWritten: %10lu kB\n" "WbWriteBandwidth: %10lu kBps\n" "b_dirty: %10lu\n" "b_io: %10lu\n" "b_more_io: %10lu\n" "b_dirty_time: %10lu\n" "state: %10lx\n\n", #ifdef CONFIG_CGROUP_WRITEBACK cgroup_ino(wb->memcg_css->cgroup), #else 1ul, #endif K(stats->nr_writeback), K(stats->nr_reclaimable), K(stats->wb_thresh), 
K(stats->nr_dirtied), K(stats->nr_written), K(wb->avg_write_bandwidth), stats->nr_dirty, stats->nr_io, stats->nr_more_io, stats->nr_dirty_time, wb->state); } static int cgwb_debug_stats_show(struct seq_file *m, void *v) { struct backing_dev_info *bdi = m->private; unsigned long background_thresh; unsigned long dirty_thresh; struct bdi_writeback *wb; global_dirty_limits(&background_thresh, &dirty_thresh); rcu_read_lock(); list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node) { struct wb_stats stats = { .dirty_thresh = dirty_thresh }; if (!wb_tryget(wb)) continue; collect_wb_stats(&stats, wb); /* * Calculate thresh of wb in writeback cgroup which is min of * thresh in global domain and thresh in cgroup domain. Drop * rcu lock because cgwb_calc_thresh may sleep in * cgroup_rstat_flush. We can do so here because we have a ref. */ if (mem_cgroup_wb_domain(wb)) { rcu_read_unlock(); stats.wb_thresh = min(stats.wb_thresh, cgwb_calc_thresh(wb)); rcu_read_lock(); } wb_stats_show(m, wb, &stats); wb_put(wb); } rcu_read_unlock(); return 0; } DEFINE_SHOW_ATTRIBUTE(cgwb_debug_stats); static void bdi_debug_register(struct backing_dev_info *bdi, const char *name) { bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root); debugfs_create_file("stats", 0444, bdi->debug_dir, bdi, &bdi_debug_stats_fops); debugfs_create_file("wb_stats", 0444, bdi->debug_dir, bdi, &cgwb_debug_stats_fops); } static void bdi_debug_unregister(struct backing_dev_info *bdi) { debugfs_remove_recursive(bdi->debug_dir); } #else /* CONFIG_DEBUG_FS */ static inline void bdi_debug_init(void) { } static inline void bdi_debug_register(struct backing_dev_info *bdi, const char *name) { } static inline void bdi_debug_unregister(struct backing_dev_info *bdi) { } #endif /* CONFIG_DEBUG_FS */ static ssize_t read_ahead_kb_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct backing_dev_info *bdi = dev_get_drvdata(dev); unsigned long read_ahead_kb; ssize_t ret; ret = kstrtoul(buf, 10, &read_ahead_kb); if (ret < 0) return ret; bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10); return count; } #define BDI_SHOW(name, expr) \ static ssize_t name##_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct backing_dev_info *bdi = dev_get_drvdata(dev); \ \ return sysfs_emit(buf, "%lld\n", (long long)expr); \ } \ static DEVICE_ATTR_RW(name); BDI_SHOW(read_ahead_kb, K(bdi->ra_pages)) static ssize_t min_ratio_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct backing_dev_info *bdi = dev_get_drvdata(dev); unsigned int ratio; ssize_t ret; ret = kstrtouint(buf, 10, &ratio); if (ret < 0) return ret; ret = bdi_set_min_ratio(bdi, ratio); if (!ret) ret = count; return ret; } BDI_SHOW(min_ratio, bdi->min_ratio / BDI_RATIO_SCALE) static ssize_t min_ratio_fine_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct backing_dev_info *bdi = dev_get_drvdata(dev); unsigned int ratio; ssize_t ret; ret = kstrtouint(buf, 10, &ratio); if (ret < 0) return ret; ret = bdi_set_min_ratio_no_scale(bdi, ratio); if (!ret) ret = count; return ret; } BDI_SHOW(min_ratio_fine, bdi->min_ratio) static ssize_t max_ratio_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct backing_dev_info *bdi = dev_get_drvdata(dev); unsigned int ratio; ssize_t ret; ret = kstrtouint(buf, 10, &ratio); if (ret < 0) return ret; ret = bdi_set_max_ratio(bdi, ratio); if (!ret) ret = count; return ret; } 
BDI_SHOW(max_ratio, bdi->max_ratio / BDI_RATIO_SCALE) static ssize_t max_ratio_fine_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct backing_dev_info *bdi = dev_get_drvdata(dev); unsigned int ratio; ssize_t ret; ret = kstrtouint(buf, 10, &ratio); if (ret < 0) return ret; ret = bdi_set_max_ratio_no_scale(bdi, ratio); if (!ret) ret = count; return ret; } BDI_SHOW(max_ratio_fine, bdi->max_ratio) static ssize_t min_bytes_show(struct device *dev, struct device_attribute *attr, char *buf) { struct backing_dev_info *bdi = dev_get_drvdata(dev); return sysfs_emit(buf, "%llu\n", bdi_get_min_bytes(bdi)); } static ssize_t min_bytes_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct backing_dev_info *bdi = dev_get_drvdata(dev); u64 bytes; ssize_t ret; ret = kstrtoull(buf, 10, &bytes); if (ret < 0) return ret; ret = bdi_set_min_bytes(bdi, bytes); if (!ret) ret = count; return ret; } static DEVICE_ATTR_RW(min_bytes); static ssize_t max_bytes_show(struct device *dev, struct device_attribute *attr, char *buf) { struct backing_dev_info *bdi = dev_get_drvdata(dev); return sysfs_emit(buf, "%llu\n", bdi_get_max_bytes(bdi)); } static ssize_t max_bytes_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct backing_dev_info *bdi = dev_get_drvdata(dev); u64 bytes; ssize_t ret; ret = kstrtoull(buf, 10, &bytes); if (ret < 0) return ret; ret = bdi_set_max_bytes(bdi, bytes); if (!ret) ret = count; return ret; } static DEVICE_ATTR_RW(max_bytes); static ssize_t stable_pages_required_show(struct device *dev, struct device_attribute *attr, char *buf) { dev_warn_once(dev, "the stable_pages_required attribute has been removed. Use the stable_writes queue attribute instead.\n"); return sysfs_emit(buf, "%d\n", 0); } static DEVICE_ATTR_RO(stable_pages_required); static ssize_t strict_limit_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct backing_dev_info *bdi = dev_get_drvdata(dev); unsigned int strict_limit; ssize_t ret; ret = kstrtouint(buf, 10, &strict_limit); if (ret < 0) return ret; ret = bdi_set_strict_limit(bdi, strict_limit); if (!ret) ret = count; return ret; } static ssize_t strict_limit_show(struct device *dev, struct device_attribute *attr, char *buf) { struct backing_dev_info *bdi = dev_get_drvdata(dev); return sysfs_emit(buf, "%d\n", !!(bdi->capabilities & BDI_CAP_STRICTLIMIT)); } static DEVICE_ATTR_RW(strict_limit); static struct attribute *bdi_dev_attrs[] = { &dev_attr_read_ahead_kb.attr, &dev_attr_min_ratio.attr, &dev_attr_min_ratio_fine.attr, &dev_attr_max_ratio.attr, &dev_attr_max_ratio_fine.attr, &dev_attr_min_bytes.attr, &dev_attr_max_bytes.attr, &dev_attr_stable_pages_required.attr, &dev_attr_strict_limit.attr, NULL, }; ATTRIBUTE_GROUPS(bdi_dev); static const struct class bdi_class = { .name = "bdi", .dev_groups = bdi_dev_groups, }; static __init int bdi_class_init(void) { int ret; ret = class_register(&bdi_class); if (ret) return ret; bdi_debug_init(); return 0; } postcore_initcall(bdi_class_init); static int __init default_bdi_init(void) { bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 0); if (!bdi_wq) return -ENOMEM; return 0; } subsys_initcall(default_bdi_init); static void wb_update_bandwidth_workfn(struct work_struct *work) { struct bdi_writeback *wb = container_of(to_delayed_work(work), struct bdi_writeback, bw_dwork); wb_update_bandwidth(wb); } /* * Initial write bandwidth: 100 MB/s */ 
#define INIT_BW (100 << (20 - PAGE_SHIFT)) static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi, gfp_t gfp) { int err; memset(wb, 0, sizeof(*wb)); wb->bdi = bdi; wb->last_old_flush = jiffies; INIT_LIST_HEAD(&wb->b_dirty); INIT_LIST_HEAD(&wb->b_io); INIT_LIST_HEAD(&wb->b_more_io); INIT_LIST_HEAD(&wb->b_dirty_time); spin_lock_init(&wb->list_lock); atomic_set(&wb->writeback_inodes, 0); wb->bw_time_stamp = jiffies; wb->balanced_dirty_ratelimit = INIT_BW; wb->dirty_ratelimit = INIT_BW; wb->write_bandwidth = INIT_BW; wb->avg_write_bandwidth = INIT_BW; spin_lock_init(&wb->work_lock); INIT_LIST_HEAD(&wb->work_list); INIT_DELAYED_WORK(&wb->dwork, wb_workfn); INIT_DELAYED_WORK(&wb->bw_dwork, wb_update_bandwidth_workfn); err = fprop_local_init_percpu(&wb->completions, gfp); if (err) return err; err = percpu_counter_init_many(wb->stat, 0, gfp, NR_WB_STAT_ITEMS); if (err) fprop_local_destroy_percpu(&wb->completions); return err; } static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb); /* * Remove bdi from the global list and shutdown any threads we have running */ static void wb_shutdown(struct bdi_writeback *wb) { /* Make sure nobody queues further work */ spin_lock_irq(&wb->work_lock); if (!test_and_clear_bit(WB_registered, &wb->state)) { spin_unlock_irq(&wb->work_lock); return; } spin_unlock_irq(&wb->work_lock); cgwb_remove_from_bdi_list(wb); /* * Drain work list and shutdown the delayed_work. !WB_registered * tells wb_workfn() that @wb is dying and its work_list needs to * be drained no matter what. */ mod_delayed_work(bdi_wq, &wb->dwork, 0); flush_delayed_work(&wb->dwork); WARN_ON(!list_empty(&wb->work_list)); flush_delayed_work(&wb->bw_dwork); } static void wb_exit(struct bdi_writeback *wb) { WARN_ON(delayed_work_pending(&wb->dwork)); percpu_counter_destroy_many(wb->stat, NR_WB_STAT_ITEMS); fprop_local_destroy_percpu(&wb->completions); } #ifdef CONFIG_CGROUP_WRITEBACK #include <linux/memcontrol.h> /* * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, offline_cgwbs and * memcg->cgwb_list. bdi->cgwb_tree is also RCU protected. 
*/ static DEFINE_SPINLOCK(cgwb_lock); static struct workqueue_struct *cgwb_release_wq; static LIST_HEAD(offline_cgwbs); static void cleanup_offline_cgwbs_workfn(struct work_struct *work); static DECLARE_WORK(cleanup_offline_cgwbs_work, cleanup_offline_cgwbs_workfn); static void cgwb_free_rcu(struct rcu_head *rcu_head) { struct bdi_writeback *wb = container_of(rcu_head, struct bdi_writeback, rcu); percpu_ref_exit(&wb->refcnt); kfree(wb); } static void cgwb_release_workfn(struct work_struct *work) { struct bdi_writeback *wb = container_of(work, struct bdi_writeback, release_work); struct backing_dev_info *bdi = wb->bdi; mutex_lock(&wb->bdi->cgwb_release_mutex); wb_shutdown(wb); css_put(wb->memcg_css); css_put(wb->blkcg_css); mutex_unlock(&wb->bdi->cgwb_release_mutex); /* triggers blkg destruction if no online users left */ blkcg_unpin_online(wb->blkcg_css); fprop_local_destroy_percpu(&wb->memcg_completions); spin_lock_irq(&cgwb_lock); list_del(&wb->offline_node); spin_unlock_irq(&cgwb_lock); wb_exit(wb); bdi_put(bdi); WARN_ON_ONCE(!list_empty(&wb->b_attached)); call_rcu(&wb->rcu, cgwb_free_rcu); } static void cgwb_release(struct percpu_ref *refcnt) { struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback, refcnt); queue_work(cgwb_release_wq, &wb->release_work); } static void cgwb_kill(struct bdi_writeback *wb) { lockdep_assert_held(&cgwb_lock); WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id)); list_del(&wb->memcg_node); list_del(&wb->blkcg_node); list_add(&wb->offline_node, &offline_cgwbs); percpu_ref_kill(&wb->refcnt); } static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb) { spin_lock_irq(&cgwb_lock); list_del_rcu(&wb->bdi_node); spin_unlock_irq(&cgwb_lock); } static int cgwb_create(struct backing_dev_info *bdi, struct cgroup_subsys_state *memcg_css, gfp_t gfp) { struct mem_cgroup *memcg; struct cgroup_subsys_state *blkcg_css; struct list_head *memcg_cgwb_list, *blkcg_cgwb_list; struct bdi_writeback *wb; unsigned long flags; int ret = 0; memcg = mem_cgroup_from_css(memcg_css); blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys); memcg_cgwb_list = &memcg->cgwb_list; blkcg_cgwb_list = blkcg_get_cgwb_list(blkcg_css); /* look up again under lock and discard on blkcg mismatch */ spin_lock_irqsave(&cgwb_lock, flags); wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id); if (wb && wb->blkcg_css != blkcg_css) { cgwb_kill(wb); wb = NULL; } spin_unlock_irqrestore(&cgwb_lock, flags); if (wb) goto out_put; /* need to create a new one */ wb = kmalloc(sizeof(*wb), gfp); if (!wb) { ret = -ENOMEM; goto out_put; } ret = wb_init(wb, bdi, gfp); if (ret) goto err_free; ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp); if (ret) goto err_wb_exit; ret = fprop_local_init_percpu(&wb->memcg_completions, gfp); if (ret) goto err_ref_exit; wb->memcg_css = memcg_css; wb->blkcg_css = blkcg_css; INIT_LIST_HEAD(&wb->b_attached); INIT_WORK(&wb->release_work, cgwb_release_workfn); set_bit(WB_registered, &wb->state); bdi_get(bdi); /* * The root wb determines the registered state of the whole bdi and * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate * whether they're still online. Don't link @wb if any is dead. * See wb_memcg_offline() and wb_blkcg_offline(). 
*/ ret = -ENODEV; spin_lock_irqsave(&cgwb_lock, flags); if (test_bit(WB_registered, &bdi->wb.state) && blkcg_cgwb_list->next && memcg_cgwb_list->next) { /* we might have raced another instance of this function */ ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb); if (!ret) { list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list); list_add(&wb->memcg_node, memcg_cgwb_list); list_add(&wb->blkcg_node, blkcg_cgwb_list); blkcg_pin_online(blkcg_css); css_get(memcg_css); css_get(blkcg_css); } } spin_unlock_irqrestore(&cgwb_lock, flags); if (ret) { if (ret == -EEXIST) ret = 0; goto err_fprop_exit; } goto out_put; err_fprop_exit: bdi_put(bdi); fprop_local_destroy_percpu(&wb->memcg_completions); err_ref_exit: percpu_ref_exit(&wb->refcnt); err_wb_exit: wb_exit(wb); err_free: kfree(wb); out_put: css_put(blkcg_css); return ret; } /** * wb_get_lookup - get wb for a given memcg * @bdi: target bdi * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref) * * Try to get the wb for @memcg_css on @bdi. The returned wb has its * refcount incremented. * * This function uses css_get() on @memcg_css and thus expects its refcnt * to be positive on invocation. IOW, rcu_read_lock() protection on * @memcg_css isn't enough. try_get it before calling this function. * * A wb is keyed by its associated memcg. As blkcg implicitly enables * memcg on the default hierarchy, memcg association is guaranteed to be * more specific (equal or descendant to the associated blkcg) and thus can * identify both the memcg and blkcg associations. * * Because the blkcg associated with a memcg may change as blkcg is enabled * and disabled closer to root in the hierarchy, each wb keeps track of * both the memcg and blkcg associated with it and verifies the blkcg on * each lookup. On mismatch, the existing wb is discarded and a new one is * created. */ struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi, struct cgroup_subsys_state *memcg_css) { struct bdi_writeback *wb; if (!memcg_css->parent) return &bdi->wb; rcu_read_lock(); wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id); if (wb) { struct cgroup_subsys_state *blkcg_css; /* see whether the blkcg association has changed */ blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys); if (unlikely(wb->blkcg_css != blkcg_css || !wb_tryget(wb))) wb = NULL; css_put(blkcg_css); } rcu_read_unlock(); return wb; } /** * wb_get_create - get wb for a given memcg, create if necessary * @bdi: target bdi * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref) * @gfp: allocation mask to use * * Try to get the wb for @memcg_css on @bdi. If it doesn't exist, try to * create one. See wb_get_lookup() for more details. 
*/ struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi, struct cgroup_subsys_state *memcg_css, gfp_t gfp) { struct bdi_writeback *wb; might_alloc(gfp); do { wb = wb_get_lookup(bdi, memcg_css); } while (!wb && !cgwb_create(bdi, memcg_css, gfp)); return wb; } static int cgwb_bdi_init(struct backing_dev_info *bdi) { int ret; INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC); mutex_init(&bdi->cgwb_release_mutex); init_rwsem(&bdi->wb_switch_rwsem); ret = wb_init(&bdi->wb, bdi, GFP_KERNEL); if (!ret) { bdi->wb.memcg_css = &root_mem_cgroup->css; bdi->wb.blkcg_css = blkcg_root_css; } return ret; } static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { struct radix_tree_iter iter; void **slot; struct bdi_writeback *wb; WARN_ON(test_bit(WB_registered, &bdi->wb.state)); spin_lock_irq(&cgwb_lock); radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0) cgwb_kill(*slot); spin_unlock_irq(&cgwb_lock); mutex_lock(&bdi->cgwb_release_mutex); spin_lock_irq(&cgwb_lock); while (!list_empty(&bdi->wb_list)) { wb = list_first_entry(&bdi->wb_list, struct bdi_writeback, bdi_node); spin_unlock_irq(&cgwb_lock); wb_shutdown(wb); spin_lock_irq(&cgwb_lock); } spin_unlock_irq(&cgwb_lock); mutex_unlock(&bdi->cgwb_release_mutex); } /* * cleanup_offline_cgwbs_workfn - try to release dying cgwbs * * Try to release dying cgwbs by switching attached inodes to the nearest * living ancestor's writeback. Processed wbs are placed at the end * of the list to guarantee the forward progress. */ static void cleanup_offline_cgwbs_workfn(struct work_struct *work) { struct bdi_writeback *wb; LIST_HEAD(processed); spin_lock_irq(&cgwb_lock); while (!list_empty(&offline_cgwbs)) { wb = list_first_entry(&offline_cgwbs, struct bdi_writeback, offline_node); list_move(&wb->offline_node, &processed); /* * If wb is dirty, cleaning up the writeback by switching * attached inodes will result in an effective removal of any * bandwidth restrictions, which isn't the goal. Instead, * it can be postponed until the next time, when all io * will be likely completed. If in the meantime some inodes * will get re-dirtied, they should be eventually switched to * a new cgwb. */ if (wb_has_dirty_io(wb)) continue; if (!wb_tryget(wb)) continue; spin_unlock_irq(&cgwb_lock); while (cleanup_offline_cgwb(wb)) cond_resched(); spin_lock_irq(&cgwb_lock); wb_put(wb); } if (!list_empty(&processed)) list_splice_tail(&processed, &offline_cgwbs); spin_unlock_irq(&cgwb_lock); } /** * wb_memcg_offline - kill all wb's associated with a memcg being offlined * @memcg: memcg being offlined * * Also prevents creation of any new wb's associated with @memcg. */ void wb_memcg_offline(struct mem_cgroup *memcg) { struct list_head *memcg_cgwb_list = &memcg->cgwb_list; struct bdi_writeback *wb, *next; spin_lock_irq(&cgwb_lock); list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node) cgwb_kill(wb); memcg_cgwb_list->next = NULL; /* prevent new wb's */ spin_unlock_irq(&cgwb_lock); queue_work(system_unbound_wq, &cleanup_offline_cgwbs_work); } /** * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined * @css: blkcg being offlined * * Also prevents creation of any new wb's associated with @blkcg. 
*/ void wb_blkcg_offline(struct cgroup_subsys_state *css) { struct bdi_writeback *wb, *next; struct list_head *list = blkcg_get_cgwb_list(css); spin_lock_irq(&cgwb_lock); list_for_each_entry_safe(wb, next, list, blkcg_node) cgwb_kill(wb); list->next = NULL; /* prevent new wb's */ spin_unlock_irq(&cgwb_lock); } static void cgwb_bdi_register(struct backing_dev_info *bdi) { spin_lock_irq(&cgwb_lock); list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list); spin_unlock_irq(&cgwb_lock); } static int __init cgwb_init(void) { /* * There can be many concurrent release work items overwhelming * system_wq. Put them in a separate wq and limit concurrency. * There's no point in executing many of these in parallel. */ cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1); if (!cgwb_release_wq) return -ENOMEM; return 0; } subsys_initcall(cgwb_init); #else /* CONFIG_CGROUP_WRITEBACK */ static int cgwb_bdi_init(struct backing_dev_info *bdi) { return wb_init(&bdi->wb, bdi, GFP_KERNEL); } static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { } static void cgwb_bdi_register(struct backing_dev_info *bdi) { list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list); } static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb) { list_del_rcu(&wb->bdi_node); } #endif /* CONFIG_CGROUP_WRITEBACK */ int bdi_init(struct backing_dev_info *bdi) { bdi->dev = NULL; kref_init(&bdi->refcnt); bdi->min_ratio = 0; bdi->max_ratio = 100 * BDI_RATIO_SCALE; bdi->max_prop_frac = FPROP_FRAC_BASE; INIT_LIST_HEAD(&bdi->bdi_list); INIT_LIST_HEAD(&bdi->wb_list); init_waitqueue_head(&bdi->wb_waitq); bdi->last_bdp_sleep = jiffies; return cgwb_bdi_init(bdi); } struct backing_dev_info *bdi_alloc(int node_id) { struct backing_dev_info *bdi; bdi = kzalloc_node(sizeof(*bdi), GFP_KERNEL, node_id); if (!bdi) return NULL; if (bdi_init(bdi)) { kfree(bdi); return NULL; } bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT; bdi->ra_pages = VM_READAHEAD_PAGES; bdi->io_pages = VM_READAHEAD_PAGES; timer_setup(&bdi->laptop_mode_wb_timer, laptop_mode_timer_fn, 0); return bdi; } EXPORT_SYMBOL(bdi_alloc); static struct rb_node **bdi_lookup_rb_node(u64 id, struct rb_node **parentp) { struct rb_node **p = &bdi_tree.rb_node; struct rb_node *parent = NULL; struct backing_dev_info *bdi; lockdep_assert_held(&bdi_lock); while (*p) { parent = *p; bdi = rb_entry(parent, struct backing_dev_info, rb_node); if (bdi->id > id) p = &(*p)->rb_left; else if (bdi->id < id) p = &(*p)->rb_right; else break; } if (parentp) *parentp = parent; return p; } /** * bdi_get_by_id - lookup and get bdi from its id * @id: bdi id to lookup * * Find bdi matching @id and get it. Returns NULL if the matching bdi * doesn't exist or is already unregistered. 
*/ struct backing_dev_info *bdi_get_by_id(u64 id) { struct backing_dev_info *bdi = NULL; struct rb_node **p; spin_lock_bh(&bdi_lock); p = bdi_lookup_rb_node(id, NULL); if (*p) { bdi = rb_entry(*p, struct backing_dev_info, rb_node); bdi_get(bdi); } spin_unlock_bh(&bdi_lock); return bdi; } int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args) { struct device *dev; struct rb_node *parent, **p; if (bdi->dev) /* The driver needs to use separate queues per device */ return 0; vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args); dev = device_create(&bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name); if (IS_ERR(dev)) return PTR_ERR(dev); cgwb_bdi_register(bdi); bdi->dev = dev; bdi_debug_register(bdi, dev_name(dev)); set_bit(WB_registered, &bdi->wb.state); spin_lock_bh(&bdi_lock); bdi->id = ++bdi_id_cursor; p = bdi_lookup_rb_node(bdi->id, &parent); rb_link_node(&bdi->rb_node, parent, p); rb_insert_color(&bdi->rb_node, &bdi_tree); list_add_tail_rcu(&bdi->bdi_list, &bdi_list); spin_unlock_bh(&bdi_lock); trace_writeback_bdi_register(bdi); return 0; } int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...) { va_list args; int ret; va_start(args, fmt); ret = bdi_register_va(bdi, fmt, args); va_end(args); return ret; } EXPORT_SYMBOL(bdi_register); void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner) { WARN_ON_ONCE(bdi->owner); bdi->owner = owner; get_device(owner); } /* * Remove bdi from bdi_list, and ensure that it is no longer visible */ static void bdi_remove_from_list(struct backing_dev_info *bdi) { spin_lock_bh(&bdi_lock); rb_erase(&bdi->rb_node, &bdi_tree); list_del_rcu(&bdi->bdi_list); spin_unlock_bh(&bdi_lock); synchronize_rcu_expedited(); } void bdi_unregister(struct backing_dev_info *bdi) { timer_delete_sync(&bdi->laptop_mode_wb_timer); /* make sure nobody finds us on the bdi_list anymore */ bdi_remove_from_list(bdi); wb_shutdown(&bdi->wb); cgwb_bdi_unregister(bdi); /* * If this BDI's min ratio has been set, use bdi_set_min_ratio() to * update the global bdi_min_ratio. */ if (bdi->min_ratio) bdi_set_min_ratio(bdi, 0); if (bdi->dev) { bdi_debug_unregister(bdi); device_unregister(bdi->dev); bdi->dev = NULL; } if (bdi->owner) { put_device(bdi->owner); bdi->owner = NULL; } } EXPORT_SYMBOL(bdi_unregister); static void release_bdi(struct kref *ref) { struct backing_dev_info *bdi = container_of(ref, struct backing_dev_info, refcnt); WARN_ON_ONCE(test_bit(WB_registered, &bdi->wb.state)); WARN_ON_ONCE(bdi->dev); wb_exit(&bdi->wb); kfree(bdi); } void bdi_put(struct backing_dev_info *bdi) { kref_put(&bdi->refcnt, release_bdi); } EXPORT_SYMBOL(bdi_put); struct backing_dev_info *inode_to_bdi(struct inode *inode) { struct super_block *sb; if (!inode) return &noop_backing_dev_info; sb = inode->i_sb; #ifdef CONFIG_BLOCK if (sb_is_blkdev_sb(sb)) return I_BDEV(inode)->bd_disk->bdi; #endif return sb->s_bdi; } EXPORT_SYMBOL(inode_to_bdi); const char *bdi_dev_name(struct backing_dev_info *bdi) { if (!bdi || !bdi->dev) return bdi_unknown_name; return bdi->dev_name; } EXPORT_SYMBOL_GPL(bdi_dev_name);
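The lookup-or-create loop in wb_get_create() above is a reusable pattern: keep calling the lookup and, on a miss, attempt creation, treating a lost creation race (-EEXIST) as success so the next lookup simply finds the object the winner inserted. Below is a minimal userspace sketch of that pattern; the structure, the cache_lookup()/cache_create() helpers and the fixed slot array are hypothetical stand-ins for the radix tree and cgwb_create(), not kernel code.

/*
 * Minimal userspace sketch (assumed names, not kernel code) of the
 * lookup-or-create retry loop used by wb_get_create().
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NSLOTS 64

struct obj { int id; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *slots[NSLOTS];	/* stand-in for bdi->cgwb_tree */

static struct obj *cache_lookup(int id)
{
	struct obj *o;

	pthread_mutex_lock(&lock);
	o = (id >= 0 && id < NSLOTS) ? slots[id] : NULL;
	pthread_mutex_unlock(&lock);
	return o;
}

/* Returns 0 on success *and* when another thread already created the entry. */
static int cache_create(int id)
{
	struct obj *new;
	int err = 0;

	if (id < 0 || id >= NSLOTS)
		return -ENODEV;

	new = malloc(sizeof(*new));
	if (!new)
		return -ENOMEM;
	new->id = id;

	pthread_mutex_lock(&lock);
	if (slots[id])
		err = -EEXIST;		/* lost the creation race */
	else
		slots[id] = new;
	pthread_mutex_unlock(&lock);

	if (err) {
		free(new);
		if (err == -EEXIST)	/* treated as success, like cgwb_create() */
			err = 0;
	}
	return err;
}

static struct obj *get_or_create(int id)
{
	struct obj *o;

	/* same shape as the do/while loop in wb_get_create() */
	do {
		o = cache_lookup(id);
	} while (!o && !cache_create(id));

	return o;
}

int main(void)
{
	struct obj *o = get_or_create(7);

	printf("id=%d\n", o ? o->id : -1);
	return 0;
}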
1 1 1 1 24 1 1 1 1 1 1 1 1 1 1 2 1 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 /* * Copyright (c) 2014-2016, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU Lesser General Public License, * version 2.1, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for * more details. */ #ifndef __NDCTL_H__ #define __NDCTL_H__ #include <linux/types.h> struct nd_cmd_dimm_flags { __u32 status; __u32 flags; } __packed; struct nd_cmd_get_config_size { __u32 status; __u32 config_size; __u32 max_xfer; } __packed; struct nd_cmd_get_config_data_hdr { __u32 in_offset; __u32 in_length; __u32 status; __u8 out_buf[]; } __packed; struct nd_cmd_set_config_hdr { __u32 in_offset; __u32 in_length; __u8 in_buf[]; } __packed; struct nd_cmd_vendor_hdr { __u32 opcode; __u32 in_length; __u8 in_buf[]; } __packed; struct nd_cmd_vendor_tail { __u32 status; __u32 out_length; __u8 out_buf[]; } __packed; struct nd_cmd_ars_cap { __u64 address; __u64 length; __u32 status; __u32 max_ars_out; __u32 clear_err_unit; __u16 flags; __u16 reserved; } __packed; struct nd_cmd_ars_start { __u64 address; __u64 length; __u16 type; __u8 flags; __u8 reserved[5]; __u32 status; __u32 scrub_time; } __packed; struct nd_cmd_ars_status { __u32 status; __u32 out_length; __u64 address; __u64 length; __u64 restart_address; __u64 restart_length; __u16 type; __u16 flags; __u32 num_records; struct nd_ars_record { __u32 handle; __u32 reserved; __u64 err_address; __u64 length; } __packed records[]; } __packed; struct nd_cmd_clear_error { __u64 address; __u64 length; __u32 status; __u8 reserved[4]; __u64 cleared; } __packed; enum { ND_CMD_IMPLEMENTED = 0, /* bus commands */ ND_CMD_ARS_CAP = 1, ND_CMD_ARS_START = 2, ND_CMD_ARS_STATUS = 3, ND_CMD_CLEAR_ERROR = 4, /* per-dimm commands */ ND_CMD_SMART = 1, ND_CMD_SMART_THRESHOLD = 2, ND_CMD_DIMM_FLAGS = 3, ND_CMD_GET_CONFIG_SIZE = 4, ND_CMD_GET_CONFIG_DATA = 5, ND_CMD_SET_CONFIG_DATA = 6, ND_CMD_VENDOR_EFFECT_LOG_SIZE = 7, ND_CMD_VENDOR_EFFECT_LOG = 8, ND_CMD_VENDOR = 9, ND_CMD_CALL = 10, }; enum { ND_ARS_VOLATILE = 1, ND_ARS_PERSISTENT = 2, ND_ARS_RETURN_PREV_DATA = 1 << 1, ND_CONFIG_LOCKED = 1, }; static inline const char *nvdimm_bus_cmd_name(unsigned cmd) { switch (cmd) { case ND_CMD_ARS_CAP: return "ars_cap"; case ND_CMD_ARS_START: return "ars_start"; case ND_CMD_ARS_STATUS: return "ars_status"; case ND_CMD_CLEAR_ERROR: return "clear_error"; case 
ND_CMD_CALL: return "cmd_call"; default: return "unknown"; } } static inline const char *nvdimm_cmd_name(unsigned cmd) { switch (cmd) { case ND_CMD_SMART: return "smart"; case ND_CMD_SMART_THRESHOLD: return "smart_thresh"; case ND_CMD_DIMM_FLAGS: return "flags"; case ND_CMD_GET_CONFIG_SIZE: return "get_size"; case ND_CMD_GET_CONFIG_DATA: return "get_data"; case ND_CMD_SET_CONFIG_DATA: return "set_data"; case ND_CMD_VENDOR_EFFECT_LOG_SIZE: return "effect_size"; case ND_CMD_VENDOR_EFFECT_LOG: return "effect_log"; case ND_CMD_VENDOR: return "vendor"; case ND_CMD_CALL: return "cmd_call"; default: return "unknown"; } } #define ND_IOCTL 'N' #define ND_IOCTL_DIMM_FLAGS _IOWR(ND_IOCTL, ND_CMD_DIMM_FLAGS,\ struct nd_cmd_dimm_flags) #define ND_IOCTL_GET_CONFIG_SIZE _IOWR(ND_IOCTL, ND_CMD_GET_CONFIG_SIZE,\ struct nd_cmd_get_config_size) #define ND_IOCTL_GET_CONFIG_DATA _IOWR(ND_IOCTL, ND_CMD_GET_CONFIG_DATA,\ struct nd_cmd_get_config_data_hdr) #define ND_IOCTL_SET_CONFIG_DATA _IOWR(ND_IOCTL, ND_CMD_SET_CONFIG_DATA,\ struct nd_cmd_set_config_hdr) #define ND_IOCTL_VENDOR _IOWR(ND_IOCTL, ND_CMD_VENDOR,\ struct nd_cmd_vendor_hdr) #define ND_IOCTL_ARS_CAP _IOWR(ND_IOCTL, ND_CMD_ARS_CAP,\ struct nd_cmd_ars_cap) #define ND_IOCTL_ARS_START _IOWR(ND_IOCTL, ND_CMD_ARS_START,\ struct nd_cmd_ars_start) #define ND_IOCTL_ARS_STATUS _IOWR(ND_IOCTL, ND_CMD_ARS_STATUS,\ struct nd_cmd_ars_status) #define ND_IOCTL_CLEAR_ERROR _IOWR(ND_IOCTL, ND_CMD_CLEAR_ERROR,\ struct nd_cmd_clear_error) #define ND_DEVICE_DIMM 1 /* nd_dimm: container for "config data" */ #define ND_DEVICE_REGION_PMEM 2 /* nd_region: (parent of PMEM namespaces) */ #define ND_DEVICE_REGION_BLK 3 /* nd_region: (parent of BLK namespaces) */ #define ND_DEVICE_NAMESPACE_IO 4 /* legacy persistent memory */ #define ND_DEVICE_NAMESPACE_PMEM 5 /* PMEM namespace (may alias with BLK) */ #define ND_DEVICE_DAX_PMEM 7 /* Device DAX interface to pmem */ enum nd_driver_flags { ND_DRIVER_DIMM = 1 << ND_DEVICE_DIMM, ND_DRIVER_REGION_PMEM = 1 << ND_DEVICE_REGION_PMEM, ND_DRIVER_REGION_BLK = 1 << ND_DEVICE_REGION_BLK, ND_DRIVER_NAMESPACE_IO = 1 << ND_DEVICE_NAMESPACE_IO, ND_DRIVER_NAMESPACE_PMEM = 1 << ND_DEVICE_NAMESPACE_PMEM, ND_DRIVER_DAX_PMEM = 1 << ND_DEVICE_DAX_PMEM, }; enum ars_masks { ARS_STATUS_MASK = 0x0000FFFF, ARS_EXT_STATUS_SHIFT = 16, }; /* * struct nd_cmd_pkg * * is a wrapper to a quasi pass thru interface for invoking firmware * associated with nvdimms. * * INPUT PARAMETERS * * nd_family corresponds to the firmware (e.g. DSM) interface. * * nd_command are the function index advertised by the firmware. * * nd_size_in is the size of the input parameters being passed to firmware * * OUTPUT PARAMETERS * * nd_fw_size is the size of the data firmware wants to return for * the call. If nd_fw_size is greater than size of nd_size_out, only * the first nd_size_out bytes are returned. 
*/ struct nd_cmd_pkg { __u64 nd_family; /* family of commands */ __u64 nd_command; __u32 nd_size_in; /* INPUT: size of input args */ __u32 nd_size_out; /* INPUT: size of payload */ __u32 nd_reserved2[9]; /* reserved must be zero */ __u32 nd_fw_size; /* OUTPUT: size fw wants to return */ unsigned char nd_payload[]; /* Contents of call */ }; /* These NVDIMM families represent pre-standardization command sets */ #define NVDIMM_FAMILY_INTEL 0 #define NVDIMM_FAMILY_HPE1 1 #define NVDIMM_FAMILY_HPE2 2 #define NVDIMM_FAMILY_MSFT 3 #define NVDIMM_FAMILY_HYPERV 4 #define NVDIMM_FAMILY_PAPR 5 #define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_PAPR #define NVDIMM_BUS_FAMILY_NFIT 0 #define NVDIMM_BUS_FAMILY_INTEL 1 #define NVDIMM_BUS_FAMILY_MAX NVDIMM_BUS_FAMILY_INTEL #define ND_IOCTL_CALL _IOWR(ND_IOCTL, ND_CMD_CALL,\ struct nd_cmd_pkg) #endif /* __NDCTL_H__ */
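The header above is a userspace ABI, so the structures and ND_IOCTL_* numbers it defines can be used directly from an application. The sketch below issues ND_CMD_GET_CONFIG_SIZE against a DIMM device node; it assumes the header is installed as <linux/ndctl.h> and that a node such as /dev/nmem0 exists on the running system (both are assumptions about the environment, not guarantees made by this header).

/*
 * Userspace sketch: query the label-area size of an NVDIMM with
 * ND_IOCTL_GET_CONFIG_SIZE. Device path is illustrative.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ndctl.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/dev/nmem0";
	struct nd_cmd_get_config_size cmd;
	int fd, rc;

	fd = open(path, O_RDWR);
	if (fd < 0) {
		perror(path);
		return 1;
	}

	memset(&cmd, 0, sizeof(cmd));
	rc = ioctl(fd, ND_IOCTL_GET_CONFIG_SIZE, &cmd);
	if (rc < 0) {
		perror("ND_IOCTL_GET_CONFIG_SIZE");
		close(fd);
		return 1;
	}

	/* status is reported by the command payload itself, 0 means success */
	printf("status=%#x config_size=%u max_xfer=%u\n",
	       cmd.status, cmd.config_size, cmd.max_xfer);
	close(fd);
	return 0;
}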
406 405 407 407 406 272 418 384 316 261 151 406 450 3 161 272 414 2 203 18 151 511 382 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _BCACHEFS_ALLOC_BACKGROUND_H #define _BCACHEFS_ALLOC_BACKGROUND_H #include "bcachefs.h" #include "alloc_types.h" #include "buckets.h" #include "debug.h" #include "super.h" /* How out of date a pointer gen is allowed to be: */ #define BUCKET_GC_GEN_MAX 96U static inline bool bch2_dev_bucket_exists(struct bch_fs *c, struct bpos pos) { rcu_read_lock(); struct bch_dev *ca = bch2_dev_rcu_noerror(c, pos.inode); bool ret = ca && bucket_valid(ca, pos.offset); rcu_read_unlock(); return ret; } static inline u64 bucket_to_u64(struct bpos bucket) { return (bucket.inode << 48) | bucket.offset; } static inline struct bpos u64_to_bucket(u64 bucket) { return POS(bucket >> 48, bucket & ~(~0ULL << 48)); } static inline u8 alloc_gc_gen(struct bch_alloc_v4 a) { return a.gen - a.oldest_gen; } static inline void alloc_to_bucket(struct bucket *dst, struct bch_alloc_v4 src) { dst->gen = src.gen; dst->data_type = src.data_type; dst->stripe_sectors = src.stripe_sectors; dst->dirty_sectors = src.dirty_sectors; dst->cached_sectors = src.cached_sectors; dst->stripe = src.stripe; } static inline void __bucket_m_to_alloc(struct bch_alloc_v4 *dst, struct bucket src) { dst->gen = src.gen; dst->data_type = src.data_type; dst->stripe_sectors = src.stripe_sectors; dst->dirty_sectors = src.dirty_sectors; dst->cached_sectors = src.cached_sectors; dst->stripe = src.stripe; } static inline struct bch_alloc_v4 bucket_m_to_alloc(struct bucket b) { struct bch_alloc_v4 ret = {}; __bucket_m_to_alloc(&ret, b); return ret; } static inline enum bch_data_type bucket_data_type(enum bch_data_type data_type) { switch (data_type) { case BCH_DATA_cached: case BCH_DATA_stripe: return BCH_DATA_user; default: return data_type; } } static inline bool bucket_data_type_mismatch(enum bch_data_type bucket, enum bch_data_type ptr) { return !data_type_is_empty(bucket) && bucket_data_type(bucket) != bucket_data_type(ptr); } /* * It is my general preference to use unsigned types for unsigned quantities - * however, these helpers are used in disk accounting calculations run by * triggers where the output will be 
negated and added to an s64. unsigned is * right out even though all these quantities will fit in 32 bits, since it * won't be sign extended correctly; u64 will negate "correctly", but s64 is the * simpler option here. */ static inline s64 bch2_bucket_sectors_total(struct bch_alloc_v4 a) { return a.stripe_sectors + a.dirty_sectors + a.cached_sectors; } static inline s64 bch2_bucket_sectors_dirty(struct bch_alloc_v4 a) { return a.stripe_sectors + a.dirty_sectors; } static inline s64 bch2_bucket_sectors(struct bch_alloc_v4 a) { return a.data_type == BCH_DATA_cached ? a.cached_sectors : bch2_bucket_sectors_dirty(a); } static inline s64 bch2_bucket_sectors_fragmented(struct bch_dev *ca, struct bch_alloc_v4 a) { int d = bch2_bucket_sectors(a); return d ? max(0, ca->mi.bucket_size - d) : 0; } static inline s64 bch2_gc_bucket_sectors_fragmented(struct bch_dev *ca, struct bucket a) { int d = a.stripe_sectors + a.dirty_sectors; return d ? max(0, ca->mi.bucket_size - d) : 0; } static inline s64 bch2_bucket_sectors_unstriped(struct bch_alloc_v4 a) { return a.data_type == BCH_DATA_stripe ? a.dirty_sectors : 0; } static inline enum bch_data_type alloc_data_type(struct bch_alloc_v4 a, enum bch_data_type data_type) { if (a.stripe) return data_type == BCH_DATA_parity ? data_type : BCH_DATA_stripe; if (bch2_bucket_sectors_dirty(a)) return bucket_data_type(data_type); if (a.cached_sectors) return BCH_DATA_cached; if (BCH_ALLOC_V4_NEED_DISCARD(&a)) return BCH_DATA_need_discard; if (alloc_gc_gen(a) >= BUCKET_GC_GEN_MAX) return BCH_DATA_need_gc_gens; return BCH_DATA_free; } static inline void alloc_data_type_set(struct bch_alloc_v4 *a, enum bch_data_type data_type) { a->data_type = alloc_data_type(*a, data_type); } static inline u64 alloc_lru_idx_read(struct bch_alloc_v4 a) { return a.data_type == BCH_DATA_cached ? 
a.io_time[READ] & LRU_TIME_MAX : 0; } #define DATA_TYPES_MOVABLE \ ((1U << BCH_DATA_btree)| \ (1U << BCH_DATA_user)| \ (1U << BCH_DATA_stripe)) static inline bool data_type_movable(enum bch_data_type type) { return (1U << type) & DATA_TYPES_MOVABLE; } static inline u64 alloc_lru_idx_fragmentation(struct bch_alloc_v4 a, struct bch_dev *ca) { if (a.data_type >= BCH_DATA_NR) return 0; if (!data_type_movable(a.data_type) || !bch2_bucket_sectors_fragmented(ca, a)) return 0; /* * avoid overflowing LRU_TIME_BITS on a corrupted fs, when * bucket_sectors_dirty is (much) bigger than bucket_size */ u64 d = min_t(s64, bch2_bucket_sectors_dirty(a), ca->mi.bucket_size); return div_u64(d * (1ULL << 31), ca->mi.bucket_size); } static inline u64 alloc_freespace_genbits(struct bch_alloc_v4 a) { return ((u64) alloc_gc_gen(a) >> 4) << 56; } static inline struct bpos alloc_freespace_pos(struct bpos pos, struct bch_alloc_v4 a) { pos.offset |= alloc_freespace_genbits(a); return pos; } static inline unsigned alloc_v4_u64s_noerror(const struct bch_alloc_v4 *a) { return (BCH_ALLOC_V4_BACKPOINTERS_START(a) ?: BCH_ALLOC_V4_U64s_V0) + BCH_ALLOC_V4_NR_BACKPOINTERS(a) * (sizeof(struct bch_backpointer) / sizeof(u64)); } static inline unsigned alloc_v4_u64s(const struct bch_alloc_v4 *a) { unsigned ret = alloc_v4_u64s_noerror(a); BUG_ON(ret > U8_MAX - BKEY_U64s); return ret; } static inline void set_alloc_v4_u64s(struct bkey_i_alloc_v4 *a) { set_bkey_val_u64s(&a->k, alloc_v4_u64s(&a->v)); } struct bkey_i_alloc_v4 * bch2_trans_start_alloc_update_noupdate(struct btree_trans *, struct btree_iter *, struct bpos); struct bkey_i_alloc_v4 * bch2_trans_start_alloc_update(struct btree_trans *, struct bpos, enum btree_iter_update_trigger_flags); void __bch2_alloc_to_v4(struct bkey_s_c, struct bch_alloc_v4 *); static inline const struct bch_alloc_v4 *bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *convert) { const struct bch_alloc_v4 *ret; if (unlikely(k.k->type != KEY_TYPE_alloc_v4)) goto slowpath; ret = bkey_s_c_to_alloc_v4(k).v; if (BCH_ALLOC_V4_BACKPOINTERS_START(ret) != BCH_ALLOC_V4_U64s) goto slowpath; return ret; slowpath: __bch2_alloc_to_v4(k, convert); return convert; } struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *, struct bkey_s_c); int bch2_bucket_io_time_reset(struct btree_trans *, unsigned, size_t, int); int bch2_alloc_v1_validate(struct bch_fs *, struct bkey_s_c, struct bkey_validate_context); int bch2_alloc_v2_validate(struct bch_fs *, struct bkey_s_c, struct bkey_validate_context); int bch2_alloc_v3_validate(struct bch_fs *, struct bkey_s_c, struct bkey_validate_context); int bch2_alloc_v4_validate(struct bch_fs *, struct bkey_s_c, struct bkey_validate_context); void bch2_alloc_v4_swab(struct bkey_s); void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); #define bch2_bkey_ops_alloc ((struct bkey_ops) { \ .key_validate = bch2_alloc_v1_validate, \ .val_to_text = bch2_alloc_to_text, \ .trigger = bch2_trigger_alloc, \ .min_val_size = 8, \ }) #define bch2_bkey_ops_alloc_v2 ((struct bkey_ops) { \ .key_validate = bch2_alloc_v2_validate, \ .val_to_text = bch2_alloc_to_text, \ .trigger = bch2_trigger_alloc, \ .min_val_size = 8, \ }) #define bch2_bkey_ops_alloc_v3 ((struct bkey_ops) { \ .key_validate = bch2_alloc_v3_validate, \ .val_to_text = bch2_alloc_to_text, \ .trigger = bch2_trigger_alloc, \ .min_val_size = 16, \ }) #define bch2_bkey_ops_alloc_v4 ((struct bkey_ops) { \ .key_validate = bch2_alloc_v4_validate, \ .val_to_text = bch2_alloc_to_text, \ .swab = 
bch2_alloc_v4_swab, \ .trigger = bch2_trigger_alloc, \ .min_val_size = 48, \ }) int bch2_bucket_gens_validate(struct bch_fs *, struct bkey_s_c, struct bkey_validate_context); void bch2_bucket_gens_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); #define bch2_bkey_ops_bucket_gens ((struct bkey_ops) { \ .key_validate = bch2_bucket_gens_validate, \ .val_to_text = bch2_bucket_gens_to_text, \ }) int bch2_bucket_gens_init(struct bch_fs *); static inline bool bkey_is_alloc(const struct bkey *k) { return k->type == KEY_TYPE_alloc || k->type == KEY_TYPE_alloc_v2 || k->type == KEY_TYPE_alloc_v3; } int bch2_alloc_read(struct bch_fs *); int bch2_alloc_key_to_dev_counters(struct btree_trans *, struct bch_dev *, const struct bch_alloc_v4 *, const struct bch_alloc_v4 *, unsigned); int bch2_trigger_alloc(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_s, enum btree_iter_update_trigger_flags); int bch2_check_discard_freespace_key(struct btree_trans *, struct btree_iter *, u8 *, bool); int bch2_check_alloc_info(struct bch_fs *); int bch2_check_alloc_to_lru_refs(struct bch_fs *); void bch2_dev_do_discards(struct bch_dev *); void bch2_do_discards(struct bch_fs *); static inline u64 should_invalidate_buckets(struct bch_dev *ca, struct bch_dev_usage u) { u64 want_free = ca->mi.nbuckets >> 7; u64 free = max_t(s64, 0, u.buckets[BCH_DATA_free] + u.buckets[BCH_DATA_need_discard] - bch2_dev_buckets_reserved(ca, BCH_WATERMARK_stripe)); return clamp_t(s64, want_free - free, 0, u.buckets[BCH_DATA_cached]); } void bch2_dev_do_invalidates(struct bch_dev *); void bch2_do_invalidates(struct bch_fs *); static inline struct bch_backpointer *alloc_v4_backpointers(struct bch_alloc_v4 *a) { return (void *) ((u64 *) &a->v + (BCH_ALLOC_V4_BACKPOINTERS_START(a) ?: BCH_ALLOC_V4_U64s_V0)); } static inline const struct bch_backpointer *alloc_v4_backpointers_c(const struct bch_alloc_v4 *a) { return (void *) ((u64 *) &a->v + BCH_ALLOC_V4_BACKPOINTERS_START(a)); } int bch2_dev_freespace_init(struct bch_fs *, struct bch_dev *, u64, u64); int bch2_fs_freespace_init(struct bch_fs *); int bch2_dev_remove_alloc(struct bch_fs *, struct bch_dev *); void bch2_recalc_capacity(struct bch_fs *); u64 bch2_min_rw_member_capacity(struct bch_fs *); void bch2_dev_allocator_remove(struct bch_fs *, struct bch_dev *); void bch2_dev_allocator_add(struct bch_fs *, struct bch_dev *); void bch2_dev_allocator_background_exit(struct bch_dev *); void bch2_dev_allocator_background_init(struct bch_dev *); void bch2_fs_allocator_background_init(struct bch_fs *); #endif /* _BCACHEFS_ALLOC_BACKGROUND_H */
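bucket_to_u64() and u64_to_bucket() above pack a device index and a bucket offset into one 64-bit key: the device (bpos.inode) occupies the top 16 bits and the offset the low 48. The standalone sketch below reproduces just that packing with a local struct so the round trip can be checked in isolation; it is an illustration, not bcachefs code.

/*
 * Standalone model of the bucket <-> u64 packing: device index in the top
 * 16 bits, bucket offset in the low 48 bits.
 */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct bucket_pos {
	uint64_t inode;		/* device index */
	uint64_t offset;	/* bucket number within the device */
};

static uint64_t bucket_to_u64(struct bucket_pos b)
{
	return (b.inode << 48) | b.offset;
}

static struct bucket_pos u64_to_bucket(uint64_t v)
{
	struct bucket_pos b = {
		.inode  = v >> 48,
		.offset = v & ~(~0ULL << 48),	/* keep the low 48 bits */
	};
	return b;
}

int main(void)
{
	struct bucket_pos b = { .inode = 3, .offset = 0x123456789aULL };
	uint64_t packed = bucket_to_u64(b);
	struct bucket_pos back = u64_to_bucket(packed);

	assert(back.inode == b.inode && back.offset == b.offset);
	printf("packed=%#" PRIx64 " inode=%" PRIu64 " offset=%#" PRIx64 "\n",
	       packed, back.inode, back.offset);
	return 0;
}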
3 2 1 1 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2006 Patrick McHardy <kaber@trash.net> * * Based on ipt_random and ipt_nth by Fabrice MARIE <fabrice@netfilter.org>. */ #include <linux/init.h> #include <linux/spinlock.h> #include <linux/skbuff.h> #include <linux/net.h> #include <linux/slab.h> #include <linux/netfilter/xt_statistic.h> #include <linux/netfilter/x_tables.h> #include <linux/module.h> struct xt_statistic_priv { atomic_t count; } ____cacheline_aligned_in_smp; MODULE_LICENSE("GPL"); MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); MODULE_DESCRIPTION("Xtables: statistics-based matching (\"Nth\", random)"); MODULE_ALIAS("ipt_statistic"); MODULE_ALIAS("ip6t_statistic"); static bool statistic_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_statistic_info *info = par->matchinfo; bool ret = info->flags & XT_STATISTIC_INVERT; int nval, oval; switch (info->mode) { case XT_STATISTIC_MODE_RANDOM: if ((get_random_u32() & 0x7FFFFFFF) < info->u.random.probability) ret = !ret; break; case XT_STATISTIC_MODE_NTH: do { oval = atomic_read(&info->master->count); nval = (oval == info->u.nth.every) ? 0 : oval + 1; } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval); if (nval == 0) ret = !ret; break; } return ret; } static int statistic_mt_check(const struct xt_mtchk_param *par) { struct xt_statistic_info *info = par->matchinfo; if (info->mode > XT_STATISTIC_MODE_MAX || info->flags & ~XT_STATISTIC_MASK) return -EINVAL; info->master = kzalloc(sizeof(*info->master), GFP_KERNEL); if (info->master == NULL) return -ENOMEM; atomic_set(&info->master->count, info->u.nth.count); return 0; } static void statistic_mt_destroy(const struct xt_mtdtor_param *par) { const struct xt_statistic_info *info = par->matchinfo; kfree(info->master); } static struct xt_match xt_statistic_mt_reg __read_mostly = { .name = "statistic", .revision = 0, .family = NFPROTO_UNSPEC, .match = statistic_mt, .checkentry = statistic_mt_check, .destroy = statistic_mt_destroy, .matchsize = sizeof(struct xt_statistic_info), .usersize = offsetof(struct xt_statistic_info, master), .me = THIS_MODULE, }; static int __init statistic_mt_init(void) { return xt_register_match(&xt_statistic_mt_reg); } static void __exit statistic_mt_exit(void) { xt_unregister_match(&xt_statistic_mt_reg); } module_init(statistic_mt_init); module_exit(statistic_mt_exit);
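The XT_STATISTIC_MODE_NTH branch above relies on a compare-and-swap loop so that exactly one packet in every (every + 1) matches, no matter how many CPUs update the shared counter concurrently. The userspace model below rebuilds that counter on C11 atomics; the function names and the "packet" loop are illustrative only, not part of the module.

/*
 * Userspace model of xt_statistic's "nth" mode: a CAS loop counts events
 * and reports a match exactly once every (every + 1) events.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic int count;

static bool nth_match(int every)
{
	int oval, nval;

	do {
		oval = atomic_load(&count);
		nval = (oval == every) ? 0 : oval + 1;
	} while (!atomic_compare_exchange_weak(&count, &oval, nval));

	return nval == 0;	/* match when the counter wraps to zero */
}

int main(void)
{
	int every = 4;		/* match 1 packet out of every 5 */
	int matches = 0;

	for (int pkt = 0; pkt < 20; pkt++)
		if (nth_match(every))
			matches++;

	printf("matched %d of 20 packets\n", matches);
	return 0;
}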
61 6 61 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 // SPDX-License-Identifier: GPL-2.0+ #include <linux/kernel.h> #include <linux/minmax.h> #include <drm/drm_blend.h> #include <drm/drm_rect.h> #include <drm/drm_fixed.h> #include "vkms_formats.h" /** * packed_pixels_offset() - Get the offset of the block containing the pixel at coordinates x/y * * @frame_info: Buffer metadata * @x: The x coordinate of the wanted pixel in the buffer * @y: The y coordinate of the wanted pixel in the buffer * @plane_index: The index of the plane to use * @offset: The returned offset inside the buffer of the block * @rem_x: The returned X coordinate of the requested pixel in the block * @rem_y: The returned Y coordinate of the requested pixel in the block * * As some pixel formats store multiple pixels in a block (DRM_FORMAT_R* for example), some * pixels are not individually addressable. This function return 3 values: the offset of the * whole block, and the coordinate of the requested pixel inside this block. * For example, if the format is DRM_FORMAT_R1 and the requested coordinate is 13,5, the offset * will point to the byte 5*pitches + 13/8 (second byte of the 5th line), and the rem_x/rem_y * coordinates will be (13 % 8, 5 % 1) = (5, 0) * * With this function, the caller just have to extract the correct pixel from the block. 
*/ static void packed_pixels_offset(const struct vkms_frame_info *frame_info, int x, int y, int plane_index, int *offset, int *rem_x, int *rem_y) { struct drm_framebuffer *fb = frame_info->fb; const struct drm_format_info *format = frame_info->fb->format; /* Directly using x and y to multiply pitches and format->ccp is not sufficient because * in some formats a block can represent multiple pixels. * * Dividing x and y by the block size allows to extract the correct offset of the block * containing the pixel. */ int block_x = x / drm_format_info_block_width(format, plane_index); int block_y = y / drm_format_info_block_height(format, plane_index); int block_pitch = fb->pitches[plane_index] * drm_format_info_block_height(format, plane_index); *rem_x = x % drm_format_info_block_width(format, plane_index); *rem_y = y % drm_format_info_block_height(format, plane_index); *offset = fb->offsets[plane_index] + block_y * block_pitch + block_x * format->char_per_block[plane_index]; } /** * packed_pixels_addr() - Get the pointer to the block containing the pixel at the given * coordinates * * @frame_info: Buffer metadata * @x: The x (width) coordinate inside the plane * @y: The y (height) coordinate inside the plane * @plane_index: The index of the plane * @addr: The returned pointer * @rem_x: The returned X coordinate of the requested pixel in the block * @rem_y: The returned Y coordinate of the requested pixel in the block * * Takes the information stored in the frame_info, a pair of coordinates, and returns the address * of the block containing this pixel and the pixel position inside this block. * * See @packed_pixels_offset for details about rem_x/rem_y behavior. */ static void packed_pixels_addr(const struct vkms_frame_info *frame_info, int x, int y, int plane_index, u8 **addr, int *rem_x, int *rem_y) { int offset; packed_pixels_offset(frame_info, x, y, plane_index, &offset, rem_x, rem_y); *addr = (u8 *)frame_info->map[0].vaddr + offset; } /** * get_block_step_bytes() - Common helper to compute the correct step value between each pixel block * to read in a certain direction. * * @fb: Framebuffer to iter on * @direction: Direction of the reading * @plane_index: Plane to get the step from * * As the returned count is the number of bytes between two consecutive blocks in a direction, * the caller may have to read multiple pixels before using the next one (for example, to read from * left to right in a DRM_FORMAT_R1 plane, each block contains 8 pixels, so the step must be used * only every 8 pixels). */ static int get_block_step_bytes(struct drm_framebuffer *fb, enum pixel_read_direction direction, int plane_index) { switch (direction) { case READ_LEFT_TO_RIGHT: return fb->format->char_per_block[plane_index]; case READ_RIGHT_TO_LEFT: return -fb->format->char_per_block[plane_index]; case READ_TOP_TO_BOTTOM: return (int)fb->pitches[plane_index] * drm_format_info_block_width(fb->format, plane_index); case READ_BOTTOM_TO_TOP: return -(int)fb->pitches[plane_index] * drm_format_info_block_width(fb->format, plane_index); } return 0; } /** * packed_pixels_addr_1x1() - Get the pointer to the block containing the pixel at the given * coordinates * * @frame_info: Buffer metadata * @x: The x (width) coordinate inside the plane * @y: The y (height) coordinate inside the plane * @plane_index: The index of the plane * @addr: The returned pointer * * This function can only be used with format where block_h == block_w == 1. 
*/ static void packed_pixels_addr_1x1(const struct vkms_frame_info *frame_info, int x, int y, int plane_index, u8 **addr) { int offset, rem_x, rem_y; WARN_ONCE(drm_format_info_block_width(frame_info->fb->format, plane_index) != 1, "%s() only support formats with block_w == 1", __func__); WARN_ONCE(drm_format_info_block_height(frame_info->fb->format, plane_index) != 1, "%s() only support formats with block_h == 1", __func__); packed_pixels_offset(frame_info, x, y, plane_index, &offset, &rem_x, &rem_y); *addr = (u8 *)frame_info->map[0].vaddr + offset; } /* * The following functions take pixel data (a, r, g, b, pixel, ...) and convert them to * &struct pixel_argb_u16 * * They are used in the `read_line`s functions to avoid duplicate work for some pixel formats. */ static struct pixel_argb_u16 argb_u16_from_u8888(u8 a, u8 r, u8 g, u8 b) { struct pixel_argb_u16 out_pixel; /* * The 257 is the "conversion ratio". This number is obtained by the * (2^16 - 1) / (2^8 - 1) division. Which, in this case, tries to get * the best color value in a pixel format with more possibilities. * A similar idea applies to others RGB color conversions. */ out_pixel.a = (u16)a * 257; out_pixel.r = (u16)r * 257; out_pixel.g = (u16)g * 257; out_pixel.b = (u16)b * 257; return out_pixel; } static struct pixel_argb_u16 argb_u16_from_u16161616(u16 a, u16 r, u16 g, u16 b) { struct pixel_argb_u16 out_pixel; out_pixel.a = a; out_pixel.r = r; out_pixel.g = g; out_pixel.b = b; return out_pixel; } static struct pixel_argb_u16 argb_u16_from_le16161616(__le16 a, __le16 r, __le16 g, __le16 b) { return argb_u16_from_u16161616(le16_to_cpu(a), le16_to_cpu(r), le16_to_cpu(g), le16_to_cpu(b)); } static struct pixel_argb_u16 argb_u16_from_RGB565(const __le16 *pixel) { struct pixel_argb_u16 out_pixel; s64 fp_rb_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(31)); s64 fp_g_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(63)); u16 rgb_565 = le16_to_cpu(*pixel); s64 fp_r = drm_int2fixp((rgb_565 >> 11) & 0x1f); s64 fp_g = drm_int2fixp((rgb_565 >> 5) & 0x3f); s64 fp_b = drm_int2fixp(rgb_565 & 0x1f); out_pixel.a = (u16)0xffff; out_pixel.r = drm_fixp2int_round(drm_fixp_mul(fp_r, fp_rb_ratio)); out_pixel.g = drm_fixp2int_round(drm_fixp_mul(fp_g, fp_g_ratio)); out_pixel.b = drm_fixp2int_round(drm_fixp_mul(fp_b, fp_rb_ratio)); return out_pixel; } /* * The following functions are read_line function for each pixel format supported by VKMS. * * They read a line starting at the point @x_start,@y_start following the @direction. The result * is stored in @out_pixel and in the format ARGB16161616. * * These functions are very repetitive, but the innermost pixel loops must be kept inside these * functions for performance reasons. Some benchmarking was done in [1] where having the innermost * loop factored out of these functions showed a slowdown by a factor of three. 
* * [1]: https://lore.kernel.org/dri-devel/d258c8dc-78e9-4509-9037-a98f7f33b3a3@riseup.net/ */ static void ARGB8888_read_line(const struct vkms_plane_state *plane, int x_start, int y_start, enum pixel_read_direction direction, int count, struct pixel_argb_u16 out_pixel[]) { struct pixel_argb_u16 *end = out_pixel + count; u8 *src_pixels; packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels); int step = get_block_step_bytes(plane->frame_info->fb, direction, 0); while (out_pixel < end) { u8 *px = (u8 *)src_pixels; *out_pixel = argb_u16_from_u8888(px[3], px[2], px[1], px[0]); out_pixel += 1; src_pixels += step; } } static void XRGB8888_read_line(const struct vkms_plane_state *plane, int x_start, int y_start, enum pixel_read_direction direction, int count, struct pixel_argb_u16 out_pixel[]) { struct pixel_argb_u16 *end = out_pixel + count; u8 *src_pixels; packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels); int step = get_block_step_bytes(plane->frame_info->fb, direction, 0); while (out_pixel < end) { u8 *px = (u8 *)src_pixels; *out_pixel = argb_u16_from_u8888(255, px[2], px[1], px[0]); out_pixel += 1; src_pixels += step; } } static void ABGR8888_read_line(const struct vkms_plane_state *plane, int x_start, int y_start, enum pixel_read_direction direction, int count, struct pixel_argb_u16 out_pixel[]) { struct pixel_argb_u16 *end = out_pixel + count; u8 *src_pixels; packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels); int step = get_block_step_bytes(plane->frame_info->fb, direction, 0); while (out_pixel < end) { u8 *px = (u8 *)src_pixels; /* Switch blue and red pixels. */ *out_pixel = argb_u16_from_u8888(px[3], px[0], px[1], px[2]); out_pixel += 1; src_pixels += step; } } static void ARGB16161616_read_line(const struct vkms_plane_state *plane, int x_start, int y_start, enum pixel_read_direction direction, int count, struct pixel_argb_u16 out_pixel[]) { struct pixel_argb_u16 *end = out_pixel + count; u8 *src_pixels; packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels); int step = get_block_step_bytes(plane->frame_info->fb, direction, 0); while (out_pixel < end) { u16 *px = (u16 *)src_pixels; *out_pixel = argb_u16_from_u16161616(px[3], px[2], px[1], px[0]); out_pixel += 1; src_pixels += step; } } static void XRGB16161616_read_line(const struct vkms_plane_state *plane, int x_start, int y_start, enum pixel_read_direction direction, int count, struct pixel_argb_u16 out_pixel[]) { struct pixel_argb_u16 *end = out_pixel + count; u8 *src_pixels; packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels); int step = get_block_step_bytes(plane->frame_info->fb, direction, 0); while (out_pixel < end) { __le16 *px = (__le16 *)src_pixels; *out_pixel = argb_u16_from_le16161616(cpu_to_le16(0xFFFF), px[2], px[1], px[0]); out_pixel += 1; src_pixels += step; } } static void RGB565_read_line(const struct vkms_plane_state *plane, int x_start, int y_start, enum pixel_read_direction direction, int count, struct pixel_argb_u16 out_pixel[]) { struct pixel_argb_u16 *end = out_pixel + count; u8 *src_pixels; packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels); int step = get_block_step_bytes(plane->frame_info->fb, direction, 0); while (out_pixel < end) { __le16 *px = (__le16 *)src_pixels; *out_pixel = argb_u16_from_RGB565(px); out_pixel += 1; src_pixels += step; } } /* * The following functions take one &struct pixel_argb_u16 and convert it to a specific format. 
* The result is stored in @out_pixel. * * They are used in vkms_writeback_row() to convert and store a pixel from the src_buffer to * the writeback buffer. */ static void argb_u16_to_ARGB8888(u8 *out_pixel, const struct pixel_argb_u16 *in_pixel) { /* * This sequence below is important because the format's byte order is * in little-endian. In the case of the ARGB8888 the memory is * organized this way: * * | Addr | = blue channel * | Addr + 1 | = green channel * | Addr + 2 | = Red channel * | Addr + 3 | = Alpha channel */ out_pixel[3] = DIV_ROUND_CLOSEST(in_pixel->a, 257); out_pixel[2] = DIV_ROUND_CLOSEST(in_pixel->r, 257); out_pixel[1] = DIV_ROUND_CLOSEST(in_pixel->g, 257); out_pixel[0] = DIV_ROUND_CLOSEST(in_pixel->b, 257); } static void argb_u16_to_XRGB8888(u8 *out_pixel, const struct pixel_argb_u16 *in_pixel) { out_pixel[3] = 0xff; out_pixel[2] = DIV_ROUND_CLOSEST(in_pixel->r, 257); out_pixel[1] = DIV_ROUND_CLOSEST(in_pixel->g, 257); out_pixel[0] = DIV_ROUND_CLOSEST(in_pixel->b, 257); } static void argb_u16_to_ABGR8888(u8 *out_pixel, const struct pixel_argb_u16 *in_pixel) { out_pixel[3] = DIV_ROUND_CLOSEST(in_pixel->a, 257); out_pixel[2] = DIV_ROUND_CLOSEST(in_pixel->b, 257); out_pixel[1] = DIV_ROUND_CLOSEST(in_pixel->g, 257); out_pixel[0] = DIV_ROUND_CLOSEST(in_pixel->r, 257); } static void argb_u16_to_ARGB16161616(u8 *out_pixel, const struct pixel_argb_u16 *in_pixel) { __le16 *pixel = (__le16 *)out_pixel; pixel[3] = cpu_to_le16(in_pixel->a); pixel[2] = cpu_to_le16(in_pixel->r); pixel[1] = cpu_to_le16(in_pixel->g); pixel[0] = cpu_to_le16(in_pixel->b); } static void argb_u16_to_XRGB16161616(u8 *out_pixel, const struct pixel_argb_u16 *in_pixel) { __le16 *pixel = (__le16 *)out_pixel; pixel[3] = cpu_to_le16(0xffff); pixel[2] = cpu_to_le16(in_pixel->r); pixel[1] = cpu_to_le16(in_pixel->g); pixel[0] = cpu_to_le16(in_pixel->b); } static void argb_u16_to_RGB565(u8 *out_pixel, const struct pixel_argb_u16 *in_pixel) { __le16 *pixel = (__le16 *)out_pixel; s64 fp_rb_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(31)); s64 fp_g_ratio = drm_fixp_div(drm_int2fixp(65535), drm_int2fixp(63)); s64 fp_r = drm_int2fixp(in_pixel->r); s64 fp_g = drm_int2fixp(in_pixel->g); s64 fp_b = drm_int2fixp(in_pixel->b); u16 r = drm_fixp2int(drm_fixp_div(fp_r, fp_rb_ratio)); u16 g = drm_fixp2int(drm_fixp_div(fp_g, fp_g_ratio)); u16 b = drm_fixp2int(drm_fixp_div(fp_b, fp_rb_ratio)); *pixel = cpu_to_le16(r << 11 | g << 5 | b); } /** * vkms_writeback_row() - Generic loop for all supported writeback format. It is executed just * after the blending to write a line in the writeback buffer. * * @wb: Job where to insert the final image * @src_buffer: Line to write * @y: Row to write in the writeback buffer */ void vkms_writeback_row(struct vkms_writeback_job *wb, const struct line_buffer *src_buffer, int y) { struct vkms_frame_info *frame_info = &wb->wb_frame_info; int x_dst = frame_info->dst.x1; u8 *dst_pixels; int rem_x, rem_y; packed_pixels_addr(frame_info, x_dst, y, 0, &dst_pixels, &rem_x, &rem_y); struct pixel_argb_u16 *in_pixels = src_buffer->pixels; int x_limit = min_t(size_t, drm_rect_width(&frame_info->dst), src_buffer->n_pixels); for (size_t x = 0; x < x_limit; x++, dst_pixels += frame_info->fb->format->cpp[0]) wb->pixel_write(dst_pixels, &in_pixels[x]); } /** * get_pixel_read_line_function() - Retrieve the correct read_line function for a specific * format. The returned pointer is NULL for unsupported pixel formats. The caller must ensure that * the pointer is valid before using it in a vkms_plane_state. 
* * @format: DRM_FORMAT_* value for which to obtain a conversion function (see [drm_fourcc.h]) */ pixel_read_line_t get_pixel_read_line_function(u32 format) { switch (format) { case DRM_FORMAT_ARGB8888: return &ARGB8888_read_line; case DRM_FORMAT_XRGB8888: return &XRGB8888_read_line; case DRM_FORMAT_ABGR8888: return &ABGR8888_read_line; case DRM_FORMAT_ARGB16161616: return &ARGB16161616_read_line; case DRM_FORMAT_XRGB16161616: return &XRGB16161616_read_line; case DRM_FORMAT_RGB565: return &RGB565_read_line; default: /* * This is a bug in vkms_plane_atomic_check(). All the supported * format must: * - Be listed in vkms_formats in vkms_plane.c * - Have a pixel_read callback defined here */ pr_err("Pixel format %p4cc is not supported by VKMS planes. This is a kernel bug, atomic check must forbid this configuration.\n", &format); BUG(); } } /** * get_pixel_write_function() - Retrieve the correct write_pixel function for a specific format. * The returned pointer is NULL for unsupported pixel formats. The caller must ensure that the * pointer is valid before using it in a vkms_writeback_job. * * @format: DRM_FORMAT_* value for which to obtain a conversion function (see [drm_fourcc.h]) */ pixel_write_t get_pixel_write_function(u32 format) { switch (format) { case DRM_FORMAT_ARGB8888: return &argb_u16_to_ARGB8888; case DRM_FORMAT_XRGB8888: return &argb_u16_to_XRGB8888; case DRM_FORMAT_ABGR8888: return &argb_u16_to_ABGR8888; case DRM_FORMAT_ARGB16161616: return &argb_u16_to_ARGB16161616; case DRM_FORMAT_XRGB16161616: return &argb_u16_to_XRGB16161616; case DRM_FORMAT_RGB565: return &argb_u16_to_RGB565; default: /* * This is a bug in vkms_writeback_atomic_check. All the supported * format must: * - Be listed in vkms_wb_formats in vkms_writeback.c * - Have a pixel_write callback defined here */ pr_err("Pixel format %p4cc is not supported by VKMS writeback. This is a kernel bug, atomic check must forbid this configuration.\n", &format); BUG(); } }
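The conversions above widen 8-bit channels to 16 bits by multiplying with 257 (65535 / 255) and scale RGB565 channels with the 65535/31 and 65535/63 ratios through the DRM fixed-point helpers. The sketch below redoes the same arithmetic in plain integer math with explicit rounding, as a way to sanity-check the ratios; it is a standalone illustration, not the VKMS code path.

/*
 * Standalone check of the channel-depth conversions: 8-bit -> 16-bit via
 * the 257 factor, RGB565 -> 16-bit via the 65535/31 and 65535/63 ratios.
 */
#include <stdint.h>
#include <stdio.h>

struct argb_u16 { uint16_t a, r, g, b; };

static struct argb_u16 argb_u16_from_u8888(uint8_t a, uint8_t r, uint8_t g, uint8_t b)
{
	/* 257 == 65535 / 255, so 0xff maps exactly to 0xffff */
	struct argb_u16 p = { a * 257, r * 257, g * 257, b * 257 };
	return p;
}

static struct argb_u16 argb_u16_from_rgb565(uint16_t px)
{
	struct argb_u16 p;
	uint32_t r5 = (px >> 11) & 0x1f;
	uint32_t g6 = (px >> 5) & 0x3f;
	uint32_t b5 = px & 0x1f;

	p.a = 0xffff;
	p.r = (uint16_t)((r5 * 65535 + 15) / 31);	/* ~65535/31 ratio, rounded */
	p.g = (uint16_t)((g6 * 65535 + 31) / 63);	/* ~65535/63 ratio, rounded */
	p.b = (uint16_t)((b5 * 65535 + 15) / 31);
	return p;
}

int main(void)
{
	struct argb_u16 white = argb_u16_from_rgb565(0xffff);
	struct argb_u16 grey = argb_u16_from_u8888(0xff, 0x80, 0x80, 0x80);

	printf("white: %04x %04x %04x %04x\n", white.a, white.r, white.g, white.b);
	printf("grey:  %04x %04x %04x %04x\n", grey.a, grey.r, grey.g, grey.b);
	return 0;
}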
// SPDX-License-Identifier: GPL-2.0-or-later /* * NetLabel Kernel API * * This file defines the kernel API for the NetLabel system. The NetLabel * system manages static and dynamic label mappings for network protocols such * as CIPSO and RIPSO.
* * Author: Paul Moore <paul@paul-moore.com> */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008 */ #include <linux/init.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/audit.h> #include <linux/in.h> #include <linux/in6.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/netlabel.h> #include <net/cipso_ipv4.h> #include <net/calipso.h> #include <asm/bug.h> #include <linux/atomic.h> #include "netlabel_domainhash.h" #include "netlabel_unlabeled.h" #include "netlabel_cipso_v4.h" #include "netlabel_calipso.h" #include "netlabel_user.h" #include "netlabel_mgmt.h" #include "netlabel_addrlist.h" /* * Configuration Functions */ /** * netlbl_cfg_map_del - Remove a NetLabel/LSM domain mapping * @domain: the domain mapping to remove * @family: address family * @addr: IP address * @mask: IP address mask * @audit_info: NetLabel audit information * * Description: * Removes a NetLabel/LSM domain mapping. A @domain value of NULL causes the * default domain mapping to be removed. Returns zero on success, negative * values on failure. * */ int netlbl_cfg_map_del(const char *domain, u16 family, const void *addr, const void *mask, struct netlbl_audit *audit_info) { if (addr == NULL && mask == NULL) { return netlbl_domhsh_remove(domain, family, audit_info); } else if (addr != NULL && mask != NULL) { switch (family) { case AF_INET: return netlbl_domhsh_remove_af4(domain, addr, mask, audit_info); #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: return netlbl_domhsh_remove_af6(domain, addr, mask, audit_info); #endif /* IPv6 */ default: return -EPFNOSUPPORT; } } else return -EINVAL; } /** * netlbl_cfg_unlbl_map_add - Add a new unlabeled mapping * @domain: the domain mapping to add * @family: address family * @addr: IP address * @mask: IP address mask * @audit_info: NetLabel audit information * * Description: * Adds a new unlabeled NetLabel/LSM domain mapping. A @domain value of NULL * causes a new default domain mapping to be added. Returns zero on success, * negative values on failure. 
* */ int netlbl_cfg_unlbl_map_add(const char *domain, u16 family, const void *addr, const void *mask, struct netlbl_audit *audit_info) { int ret_val = -ENOMEM; struct netlbl_dom_map *entry; struct netlbl_domaddr_map *addrmap = NULL; struct netlbl_domaddr4_map *map4 = NULL; struct netlbl_domaddr6_map *map6 = NULL; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) return -ENOMEM; if (domain != NULL) { entry->domain = kstrdup(domain, GFP_ATOMIC); if (entry->domain == NULL) goto cfg_unlbl_map_add_failure; } entry->family = family; if (addr == NULL && mask == NULL) entry->def.type = NETLBL_NLTYPE_UNLABELED; else if (addr != NULL && mask != NULL) { addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC); if (addrmap == NULL) goto cfg_unlbl_map_add_failure; INIT_LIST_HEAD(&addrmap->list4); INIT_LIST_HEAD(&addrmap->list6); switch (family) { case AF_INET: { const struct in_addr *addr4 = addr; const struct in_addr *mask4 = mask; map4 = kzalloc(sizeof(*map4), GFP_ATOMIC); if (map4 == NULL) goto cfg_unlbl_map_add_failure; map4->def.type = NETLBL_NLTYPE_UNLABELED; map4->list.addr = addr4->s_addr & mask4->s_addr; map4->list.mask = mask4->s_addr; map4->list.valid = 1; ret_val = netlbl_af4list_add(&map4->list, &addrmap->list4); if (ret_val != 0) goto cfg_unlbl_map_add_failure; break; } #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: { const struct in6_addr *addr6 = addr; const struct in6_addr *mask6 = mask; map6 = kzalloc(sizeof(*map6), GFP_ATOMIC); if (map6 == NULL) goto cfg_unlbl_map_add_failure; map6->def.type = NETLBL_NLTYPE_UNLABELED; map6->list.addr = *addr6; map6->list.addr.s6_addr32[0] &= mask6->s6_addr32[0]; map6->list.addr.s6_addr32[1] &= mask6->s6_addr32[1]; map6->list.addr.s6_addr32[2] &= mask6->s6_addr32[2]; map6->list.addr.s6_addr32[3] &= mask6->s6_addr32[3]; map6->list.mask = *mask6; map6->list.valid = 1; ret_val = netlbl_af6list_add(&map6->list, &addrmap->list6); if (ret_val != 0) goto cfg_unlbl_map_add_failure; break; } #endif /* IPv6 */ default: goto cfg_unlbl_map_add_failure; } entry->def.addrsel = addrmap; entry->def.type = NETLBL_NLTYPE_ADDRSELECT; } else { ret_val = -EINVAL; goto cfg_unlbl_map_add_failure; } ret_val = netlbl_domhsh_add(entry, audit_info); if (ret_val != 0) goto cfg_unlbl_map_add_failure; return 0; cfg_unlbl_map_add_failure: kfree(entry->domain); kfree(entry); kfree(addrmap); kfree(map4); kfree(map6); return ret_val; } /** * netlbl_cfg_unlbl_static_add - Adds a new static label * @net: network namespace * @dev_name: interface name * @addr: IP address in network byte order (struct in[6]_addr) * @mask: address mask in network byte order (struct in[6]_addr) * @family: address family * @secid: LSM secid value for the entry * @audit_info: NetLabel audit information * * Description: * Adds a new NetLabel static label to be used when protocol provided labels * are not present on incoming traffic. If @dev_name is NULL then the default * interface will be used. Returns zero on success, negative values on failure. 
* */ int netlbl_cfg_unlbl_static_add(struct net *net, const char *dev_name, const void *addr, const void *mask, u16 family, u32 secid, struct netlbl_audit *audit_info) { u32 addr_len; switch (family) { case AF_INET: addr_len = sizeof(struct in_addr); break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: addr_len = sizeof(struct in6_addr); break; #endif /* IPv6 */ default: return -EPFNOSUPPORT; } return netlbl_unlhsh_add(net, dev_name, addr, mask, addr_len, secid, audit_info); } /** * netlbl_cfg_unlbl_static_del - Removes an existing static label * @net: network namespace * @dev_name: interface name * @addr: IP address in network byte order (struct in[6]_addr) * @mask: address mask in network byte order (struct in[6]_addr) * @family: address family * @audit_info: NetLabel audit information * * Description: * Removes an existing NetLabel static label used when protocol provided labels * are not present on incoming traffic. If @dev_name is NULL then the default * interface will be used. Returns zero on success, negative values on failure. * */ int netlbl_cfg_unlbl_static_del(struct net *net, const char *dev_name, const void *addr, const void *mask, u16 family, struct netlbl_audit *audit_info) { u32 addr_len; switch (family) { case AF_INET: addr_len = sizeof(struct in_addr); break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: addr_len = sizeof(struct in6_addr); break; #endif /* IPv6 */ default: return -EPFNOSUPPORT; } return netlbl_unlhsh_remove(net, dev_name, addr, mask, addr_len, audit_info); } /** * netlbl_cfg_cipsov4_add - Add a new CIPSOv4 DOI definition * @doi_def: CIPSO DOI definition * @audit_info: NetLabel audit information * * Description: * Add a new CIPSO DOI definition as defined by @doi_def. Returns zero on * success and negative values on failure. * */ int netlbl_cfg_cipsov4_add(struct cipso_v4_doi *doi_def, struct netlbl_audit *audit_info) { return cipso_v4_doi_add(doi_def, audit_info); } /** * netlbl_cfg_cipsov4_del - Remove an existing CIPSOv4 DOI definition * @doi: CIPSO DOI * @audit_info: NetLabel audit information * * Description: * Remove an existing CIPSO DOI definition matching @doi. Returns zero on * success and negative values on failure. * */ void netlbl_cfg_cipsov4_del(u32 doi, struct netlbl_audit *audit_info) { cipso_v4_doi_remove(doi, audit_info); } /** * netlbl_cfg_cipsov4_map_add - Add a new CIPSOv4 DOI mapping * @doi: the CIPSO DOI * @domain: the domain mapping to add * @addr: IP address * @mask: IP address mask * @audit_info: NetLabel audit information * * Description: * Add a new NetLabel/LSM domain mapping for the given CIPSO DOI to the NetLabel * subsystem. A @domain value of NULL adds a new default domain mapping. * Returns zero on success, negative values on failure. 
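 *
 * Illustrative sketch (hypothetical caller, not taken from this file), where
 * @doi matches a DOI definition registered earlier with
 * netlbl_cfg_cipsov4_add():
 *
 *	rc = netlbl_cfg_cipsov4_map_add(doi, "example_domain",
 *					NULL, NULL, &audit_info);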
* */ int netlbl_cfg_cipsov4_map_add(u32 doi, const char *domain, const struct in_addr *addr, const struct in_addr *mask, struct netlbl_audit *audit_info) { int ret_val = -ENOMEM; struct cipso_v4_doi *doi_def; struct netlbl_dom_map *entry; struct netlbl_domaddr_map *addrmap = NULL; struct netlbl_domaddr4_map *addrinfo = NULL; doi_def = cipso_v4_doi_getdef(doi); if (doi_def == NULL) return -ENOENT; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) goto out_entry; entry->family = AF_INET; if (domain != NULL) { entry->domain = kstrdup(domain, GFP_ATOMIC); if (entry->domain == NULL) goto out_domain; } if (addr == NULL && mask == NULL) { entry->def.cipso = doi_def; entry->def.type = NETLBL_NLTYPE_CIPSOV4; } else if (addr != NULL && mask != NULL) { addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC); if (addrmap == NULL) goto out_addrmap; INIT_LIST_HEAD(&addrmap->list4); INIT_LIST_HEAD(&addrmap->list6); addrinfo = kzalloc(sizeof(*addrinfo), GFP_ATOMIC); if (addrinfo == NULL) goto out_addrinfo; addrinfo->def.cipso = doi_def; addrinfo->def.type = NETLBL_NLTYPE_CIPSOV4; addrinfo->list.addr = addr->s_addr & mask->s_addr; addrinfo->list.mask = mask->s_addr; addrinfo->list.valid = 1; ret_val = netlbl_af4list_add(&addrinfo->list, &addrmap->list4); if (ret_val != 0) goto cfg_cipsov4_map_add_failure; entry->def.addrsel = addrmap; entry->def.type = NETLBL_NLTYPE_ADDRSELECT; } else { ret_val = -EINVAL; goto out_addrmap; } ret_val = netlbl_domhsh_add(entry, audit_info); if (ret_val != 0) goto cfg_cipsov4_map_add_failure; return 0; cfg_cipsov4_map_add_failure: kfree(addrinfo); out_addrinfo: kfree(addrmap); out_addrmap: kfree(entry->domain); out_domain: kfree(entry); out_entry: cipso_v4_doi_putdef(doi_def); return ret_val; } /** * netlbl_cfg_calipso_add - Add a new CALIPSO DOI definition * @doi_def: CALIPSO DOI definition * @audit_info: NetLabel audit information * * Description: * Add a new CALIPSO DOI definition as defined by @doi_def. Returns zero on * success and negative values on failure. * */ int netlbl_cfg_calipso_add(struct calipso_doi *doi_def, struct netlbl_audit *audit_info) { #if IS_ENABLED(CONFIG_IPV6) return calipso_doi_add(doi_def, audit_info); #else /* IPv6 */ return -ENOSYS; #endif /* IPv6 */ } /** * netlbl_cfg_calipso_del - Remove an existing CALIPSO DOI definition * @doi: CALIPSO DOI * @audit_info: NetLabel audit information * * Description: * Remove an existing CALIPSO DOI definition matching @doi. Returns zero on * success and negative values on failure. * */ void netlbl_cfg_calipso_del(u32 doi, struct netlbl_audit *audit_info) { #if IS_ENABLED(CONFIG_IPV6) calipso_doi_remove(doi, audit_info); #endif /* IPv6 */ } /** * netlbl_cfg_calipso_map_add - Add a new CALIPSO DOI mapping * @doi: the CALIPSO DOI * @domain: the domain mapping to add * @addr: IP address * @mask: IP address mask * @audit_info: NetLabel audit information * * Description: * Add a new NetLabel/LSM domain mapping for the given CALIPSO DOI to the * NetLabel subsystem. A @domain value of NULL adds a new default domain * mapping. Returns zero on success, negative values on failure. 
* */ int netlbl_cfg_calipso_map_add(u32 doi, const char *domain, const struct in6_addr *addr, const struct in6_addr *mask, struct netlbl_audit *audit_info) { #if IS_ENABLED(CONFIG_IPV6) int ret_val = -ENOMEM; struct calipso_doi *doi_def; struct netlbl_dom_map *entry; struct netlbl_domaddr_map *addrmap = NULL; struct netlbl_domaddr6_map *addrinfo = NULL; doi_def = calipso_doi_getdef(doi); if (doi_def == NULL) return -ENOENT; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) goto out_entry; entry->family = AF_INET6; if (domain != NULL) { entry->domain = kstrdup(domain, GFP_ATOMIC); if (entry->domain == NULL) goto out_domain; } if (addr == NULL && mask == NULL) { entry->def.calipso = doi_def; entry->def.type = NETLBL_NLTYPE_CALIPSO; } else if (addr != NULL && mask != NULL) { addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC); if (addrmap == NULL) goto out_addrmap; INIT_LIST_HEAD(&addrmap->list4); INIT_LIST_HEAD(&addrmap->list6); addrinfo = kzalloc(sizeof(*addrinfo), GFP_ATOMIC); if (addrinfo == NULL) goto out_addrinfo; addrinfo->def.calipso = doi_def; addrinfo->def.type = NETLBL_NLTYPE_CALIPSO; addrinfo->list.addr = *addr; addrinfo->list.addr.s6_addr32[0] &= mask->s6_addr32[0]; addrinfo->list.addr.s6_addr32[1] &= mask->s6_addr32[1]; addrinfo->list.addr.s6_addr32[2] &= mask->s6_addr32[2]; addrinfo->list.addr.s6_addr32[3] &= mask->s6_addr32[3]; addrinfo->list.mask = *mask; addrinfo->list.valid = 1; ret_val = netlbl_af6list_add(&addrinfo->list, &addrmap->list6); if (ret_val != 0) goto cfg_calipso_map_add_failure; entry->def.addrsel = addrmap; entry->def.type = NETLBL_NLTYPE_ADDRSELECT; } else { ret_val = -EINVAL; goto out_addrmap; } ret_val = netlbl_domhsh_add(entry, audit_info); if (ret_val != 0) goto cfg_calipso_map_add_failure; return 0; cfg_calipso_map_add_failure: kfree(addrinfo); out_addrinfo: kfree(addrmap); out_addrmap: kfree(entry->domain); out_domain: kfree(entry); out_entry: calipso_doi_putdef(doi_def); return ret_val; #else /* IPv6 */ return -ENOSYS; #endif /* IPv6 */ } /* * Security Attribute Functions */ #define _CM_F_NONE 0x00000000 #define _CM_F_ALLOC 0x00000001 #define _CM_F_WALK 0x00000002 /** * _netlbl_catmap_getnode - Get a individual node from a catmap * @catmap: pointer to the category bitmap * @offset: the requested offset * @cm_flags: catmap flags, see _CM_F_* * @gfp_flags: memory allocation flags * * Description: * Iterate through the catmap looking for the node associated with @offset. * If the _CM_F_ALLOC flag is set in @cm_flags and there is no associated node, * one will be created and inserted into the catmap. If the _CM_F_WALK flag is * set in @cm_flags and there is no associated node, the next highest node will * be returned. Returns a pointer to the node on success, NULL on failure. 
* */ static struct netlbl_lsm_catmap *_netlbl_catmap_getnode( struct netlbl_lsm_catmap **catmap, u32 offset, unsigned int cm_flags, gfp_t gfp_flags) { struct netlbl_lsm_catmap *iter = *catmap; struct netlbl_lsm_catmap *prev = NULL; if (iter == NULL) goto catmap_getnode_alloc; if (offset < iter->startbit) goto catmap_getnode_walk; while (iter && offset >= (iter->startbit + NETLBL_CATMAP_SIZE)) { prev = iter; iter = iter->next; } if (iter == NULL || offset < iter->startbit) goto catmap_getnode_walk; return iter; catmap_getnode_walk: if (cm_flags & _CM_F_WALK) return iter; catmap_getnode_alloc: if (!(cm_flags & _CM_F_ALLOC)) return NULL; iter = netlbl_catmap_alloc(gfp_flags); if (iter == NULL) return NULL; iter->startbit = offset & ~(NETLBL_CATMAP_SIZE - 1); if (prev == NULL) { iter->next = *catmap; *catmap = iter; } else { iter->next = prev->next; prev->next = iter; } return iter; } /** * netlbl_catmap_walk - Walk a LSM secattr catmap looking for a bit * @catmap: the category bitmap * @offset: the offset to start searching at, in bits * * Description: * This function walks a LSM secattr category bitmap starting at @offset and * returns the spot of the first set bit or -ENOENT if no bits are set. * */ int netlbl_catmap_walk(struct netlbl_lsm_catmap *catmap, u32 offset) { struct netlbl_lsm_catmap *iter; u32 idx; u32 bit; u64 bitmap; iter = _netlbl_catmap_getnode(&catmap, offset, _CM_F_WALK, 0); if (iter == NULL) return -ENOENT; if (offset > iter->startbit) { offset -= iter->startbit; idx = offset / NETLBL_CATMAP_MAPSIZE; bit = offset % NETLBL_CATMAP_MAPSIZE; } else { idx = 0; bit = 0; } bitmap = iter->bitmap[idx] >> bit; for (;;) { if (bitmap != 0) { while ((bitmap & NETLBL_CATMAP_BIT) == 0) { bitmap >>= 1; bit++; } return iter->startbit + (NETLBL_CATMAP_MAPSIZE * idx) + bit; } if (++idx >= NETLBL_CATMAP_MAPCNT) { if (iter->next != NULL) { iter = iter->next; idx = 0; } else return -ENOENT; } bitmap = iter->bitmap[idx]; bit = 0; } return -ENOENT; } EXPORT_SYMBOL(netlbl_catmap_walk); /** * netlbl_catmap_walkrng - Find the end of a string of set bits * @catmap: the category bitmap * @offset: the offset to start searching at, in bits * * Description: * This function walks a LSM secattr category bitmap starting at @offset and * returns the spot of the first cleared bit or -ENOENT if the offset is past * the end of the bitmap. 
* */ int netlbl_catmap_walkrng(struct netlbl_lsm_catmap *catmap, u32 offset) { struct netlbl_lsm_catmap *iter; struct netlbl_lsm_catmap *prev = NULL; u32 idx; u32 bit; u64 bitmask; u64 bitmap; iter = _netlbl_catmap_getnode(&catmap, offset, _CM_F_WALK, 0); if (iter == NULL) return -ENOENT; if (offset > iter->startbit) { offset -= iter->startbit; idx = offset / NETLBL_CATMAP_MAPSIZE; bit = offset % NETLBL_CATMAP_MAPSIZE; } else { idx = 0; bit = 0; } bitmask = NETLBL_CATMAP_BIT << bit; for (;;) { bitmap = iter->bitmap[idx]; while (bitmask != 0 && (bitmap & bitmask) != 0) { bitmask <<= 1; bit++; } if (prev && idx == 0 && bit == 0) return prev->startbit + NETLBL_CATMAP_SIZE - 1; else if (bitmask != 0) return iter->startbit + (NETLBL_CATMAP_MAPSIZE * idx) + bit - 1; else if (++idx >= NETLBL_CATMAP_MAPCNT) { if (iter->next == NULL) return iter->startbit + NETLBL_CATMAP_SIZE - 1; prev = iter; iter = iter->next; idx = 0; } bitmask = NETLBL_CATMAP_BIT; bit = 0; } return -ENOENT; } /** * netlbl_catmap_getlong - Export an unsigned long bitmap * @catmap: pointer to the category bitmap * @offset: pointer to the requested offset * @bitmap: the exported bitmap * * Description: * Export a bitmap with an offset greater than or equal to @offset and return * it in @bitmap. The @offset must be aligned to an unsigned long and will be * updated on return if different from what was requested; if the catmap is * empty at the requested offset and beyond, the @offset is set to (u32)-1. * Returns zero on success, negative values on failure. * */ int netlbl_catmap_getlong(struct netlbl_lsm_catmap *catmap, u32 *offset, unsigned long *bitmap) { struct netlbl_lsm_catmap *iter; u32 off = *offset; u32 idx; /* only allow aligned offsets */ if ((off & (BITS_PER_LONG - 1)) != 0) return -EINVAL; /* a null catmap is equivalent to an empty one */ if (!catmap) { *offset = (u32)-1; return 0; } if (off < catmap->startbit) { off = catmap->startbit; *offset = off; } iter = _netlbl_catmap_getnode(&catmap, off, _CM_F_WALK, 0); if (iter == NULL) { *offset = (u32)-1; return 0; } if (off < iter->startbit) { *offset = iter->startbit; off = 0; } else off -= iter->startbit; idx = off / NETLBL_CATMAP_MAPSIZE; *bitmap = iter->bitmap[idx] >> (off % NETLBL_CATMAP_MAPSIZE); return 0; } /** * netlbl_catmap_setbit - Set a bit in a LSM secattr catmap * @catmap: pointer to the category bitmap * @bit: the bit to set * @flags: memory allocation flags * * Description: * Set the bit specified by @bit in @catmap. Returns zero on success, * negative values on failure. * */ int netlbl_catmap_setbit(struct netlbl_lsm_catmap **catmap, u32 bit, gfp_t flags) { struct netlbl_lsm_catmap *iter; u32 idx; iter = _netlbl_catmap_getnode(catmap, bit, _CM_F_ALLOC, flags); if (iter == NULL) return -ENOMEM; bit -= iter->startbit; idx = bit / NETLBL_CATMAP_MAPSIZE; iter->bitmap[idx] |= NETLBL_CATMAP_BIT << (bit % NETLBL_CATMAP_MAPSIZE); return 0; } EXPORT_SYMBOL(netlbl_catmap_setbit); /** * netlbl_catmap_setrng - Set a range of bits in a LSM secattr catmap * @catmap: pointer to the category bitmap * @start: the starting bit * @end: the last bit in the string * @flags: memory allocation flags * * Description: * Set a range of bits, starting at @start and ending with @end. Returns zero * on success, negative values on failure. 
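 *
 * Illustrative sketch (hypothetical caller, not taken from this file):
 * setting categories 5 through 10 and then walking them back; the first
 * walk returns 5 and the range walk returns 10:
 *
 *	struct netlbl_lsm_catmap *catmap = NULL;
 *	int bit;
 *
 *	if (netlbl_catmap_setrng(&catmap, 5, 10, GFP_KERNEL) == 0) {
 *		bit = netlbl_catmap_walk(catmap, 0);
 *		bit = netlbl_catmap_walkrng(catmap, 5);
 *	}
 *	netlbl_catmap_free(catmap);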
* */ int netlbl_catmap_setrng(struct netlbl_lsm_catmap **catmap, u32 start, u32 end, gfp_t flags) { int rc = 0; u32 spot = start; while (rc == 0 && spot <= end) { if (((spot & (BITS_PER_LONG - 1)) == 0) && ((end - spot) > BITS_PER_LONG)) { rc = netlbl_catmap_setlong(catmap, spot, (unsigned long)-1, flags); spot += BITS_PER_LONG; } else rc = netlbl_catmap_setbit(catmap, spot++, flags); } return rc; } /** * netlbl_catmap_setlong - Import an unsigned long bitmap * @catmap: pointer to the category bitmap * @offset: offset to the start of the imported bitmap * @bitmap: the bitmap to import * @flags: memory allocation flags * * Description: * Import the bitmap specified in @bitmap into @catmap, using the offset * in @offset. The offset must be aligned to an unsigned long. Returns zero * on success, negative values on failure. * */ int netlbl_catmap_setlong(struct netlbl_lsm_catmap **catmap, u32 offset, unsigned long bitmap, gfp_t flags) { struct netlbl_lsm_catmap *iter; u32 idx; /* only allow aligned offsets */ if ((offset & (BITS_PER_LONG - 1)) != 0) return -EINVAL; iter = _netlbl_catmap_getnode(catmap, offset, _CM_F_ALLOC, flags); if (iter == NULL) return -ENOMEM; offset -= iter->startbit; idx = offset / NETLBL_CATMAP_MAPSIZE; iter->bitmap[idx] |= (u64)bitmap << (offset % NETLBL_CATMAP_MAPSIZE); return 0; } /* Bitmap functions */ /** * netlbl_bitmap_walk - Walk a bitmap looking for a bit * @bitmap: the bitmap * @bitmap_len: length in bits * @offset: starting offset * @state: if non-zero, look for a set (1) bit else look for a cleared (0) bit * * Description: * Starting at @offset, walk the bitmap from left to right until either the * desired bit is found or we reach the end. Return the bit offset, -1 if * not found. */ int netlbl_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len, u32 offset, u8 state) { u32 bit_spot; u32 byte_offset; unsigned char bitmask; unsigned char byte; if (offset >= bitmap_len) return -1; byte_offset = offset / 8; byte = bitmap[byte_offset]; bit_spot = offset; bitmask = 0x80 >> (offset % 8); while (bit_spot < bitmap_len) { if ((state && (byte & bitmask) == bitmask) || (state == 0 && (byte & bitmask) == 0)) return bit_spot; if (++bit_spot >= bitmap_len) return -1; bitmask >>= 1; if (bitmask == 0) { byte = bitmap[++byte_offset]; bitmask = 0x80; } } return -1; } EXPORT_SYMBOL(netlbl_bitmap_walk); /** * netlbl_bitmap_setbit - Sets a single bit in a bitmap * @bitmap: the bitmap * @bit: the bit * @state: if non-zero, set the bit (1) else clear the bit (0) * * Description: * Set a single bit in the bitmask. Returns zero on success, negative values * on error. */ void netlbl_bitmap_setbit(unsigned char *bitmap, u32 bit, u8 state) { u32 byte_spot; u8 bitmask; /* gcc always rounds to zero when doing integer division */ byte_spot = bit / 8; bitmask = 0x80 >> (bit % 8); if (state) bitmap[byte_spot] |= bitmask; else bitmap[byte_spot] &= ~bitmask; } EXPORT_SYMBOL(netlbl_bitmap_setbit); /* * LSM Functions */ /** * netlbl_enabled - Determine if the NetLabel subsystem is enabled * * Description: * The LSM can use this function to determine if it should use NetLabel * security attributes in it's enforcement mechanism. Currently, NetLabel is * considered to be enabled when it's configuration contains a valid setup for * at least one labeled protocol (i.e. NetLabel can understand incoming * labeled packets of at least one type); otherwise NetLabel is considered to * be disabled. 
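 *
 * Illustrative sketch (hypothetical caller, not taken from this file): an
 * LSM hook might gate its socket labeling path on this check:
 *
 *	if (netlbl_enabled())
 *		rc = netlbl_sock_setattr(sk, family, &secattr, sk_locked);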
* */ int netlbl_enabled(void) { /* At some point we probably want to expose this mechanism to the user * as well so that admins can toggle NetLabel regardless of the * configuration */ return (atomic_read(&netlabel_mgmt_protocount) > 0); } /** * netlbl_sock_setattr - Label a socket using the correct protocol * @sk: the socket to label * @family: protocol family * @secattr: the security attributes * @sk_locked: true if caller holds the socket lock * * Description: * Attach the correct label to the given socket using the security attributes * specified in @secattr. This function requires exclusive access to @sk, * which means it either needs to be in the process of being created or locked. * Returns zero on success, -EDESTADDRREQ if the domain is configured to use * network address selectors (can't blindly label the socket), and negative * values on all other failures. * */ int netlbl_sock_setattr(struct sock *sk, u16 family, const struct netlbl_lsm_secattr *secattr, bool sk_locked) { int ret_val; struct netlbl_dom_map *dom_entry; rcu_read_lock(); dom_entry = netlbl_domhsh_getentry(secattr->domain, family); if (dom_entry == NULL) { ret_val = -ENOENT; goto socket_setattr_return; } switch (family) { case AF_INET: switch (dom_entry->def.type) { case NETLBL_NLTYPE_ADDRSELECT: ret_val = -EDESTADDRREQ; break; case NETLBL_NLTYPE_CIPSOV4: ret_val = cipso_v4_sock_setattr(sk, dom_entry->def.cipso, secattr, sk_locked); break; case NETLBL_NLTYPE_UNLABELED: ret_val = 0; break; default: ret_val = -ENOENT; } break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: switch (dom_entry->def.type) { case NETLBL_NLTYPE_ADDRSELECT: ret_val = -EDESTADDRREQ; break; case NETLBL_NLTYPE_CALIPSO: ret_val = calipso_sock_setattr(sk, dom_entry->def.calipso, secattr); break; case NETLBL_NLTYPE_UNLABELED: ret_val = 0; break; default: ret_val = -ENOENT; } break; #endif /* IPv6 */ default: ret_val = -EPROTONOSUPPORT; } socket_setattr_return: rcu_read_unlock(); return ret_val; } /** * netlbl_sock_delattr - Delete all the NetLabel labels on a socket * @sk: the socket * * Description: * Remove all the NetLabel labeling from @sk. The caller is responsible for * ensuring that @sk is locked. * */ void netlbl_sock_delattr(struct sock *sk) { switch (sk->sk_family) { case AF_INET: cipso_v4_sock_delattr(sk); break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: calipso_sock_delattr(sk); break; #endif /* IPv6 */ } } /** * netlbl_sock_getattr - Determine the security attributes of a sock * @sk: the sock * @secattr: the security attributes * * Description: * Examines the given sock to see if any NetLabel style labeling has been * applied to the sock, if so it parses the socket label and returns the * security attributes in @secattr. Returns zero on success, negative values * on failure. * */ int netlbl_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) { int ret_val; switch (sk->sk_family) { case AF_INET: ret_val = cipso_v4_sock_getattr(sk, secattr); break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: ret_val = calipso_sock_getattr(sk, secattr); break; #endif /* IPv6 */ default: ret_val = -EPROTONOSUPPORT; } return ret_val; } /** * netlbl_sk_lock_check - Check if the socket lock has been acquired. 
* @sk: the socket to be checked * * Return: true if socket @sk is locked or if lock debugging is disabled at * runtime or compile-time; false otherwise * */ #ifdef CONFIG_LOCKDEP bool netlbl_sk_lock_check(struct sock *sk) { if (debug_locks) return lockdep_sock_is_held(sk); return true; } #else bool netlbl_sk_lock_check(struct sock *sk) { return true; } #endif /** * netlbl_conn_setattr - Label a connected socket using the correct protocol * @sk: the socket to label * @addr: the destination address * @secattr: the security attributes * * Description: * Attach the correct label to the given connected socket using the security * attributes specified in @secattr. The caller is responsible for ensuring * that @sk is locked. Returns zero on success, negative values on failure. * */ int netlbl_conn_setattr(struct sock *sk, struct sockaddr *addr, const struct netlbl_lsm_secattr *secattr) { int ret_val; struct sockaddr_in *addr4; #if IS_ENABLED(CONFIG_IPV6) struct sockaddr_in6 *addr6; #endif struct netlbl_dommap_def *entry; rcu_read_lock(); switch (addr->sa_family) { case AF_INET: addr4 = (struct sockaddr_in *)addr; entry = netlbl_domhsh_getentry_af4(secattr->domain, addr4->sin_addr.s_addr); if (entry == NULL) { ret_val = -ENOENT; goto conn_setattr_return; } switch (entry->type) { case NETLBL_NLTYPE_CIPSOV4: ret_val = cipso_v4_sock_setattr(sk, entry->cipso, secattr, netlbl_sk_lock_check(sk)); break; case NETLBL_NLTYPE_UNLABELED: /* just delete the protocols we support for right now * but we could remove other protocols if needed */ netlbl_sock_delattr(sk); ret_val = 0; break; default: ret_val = -ENOENT; } break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: addr6 = (struct sockaddr_in6 *)addr; entry = netlbl_domhsh_getentry_af6(secattr->domain, &addr6->sin6_addr); if (entry == NULL) { ret_val = -ENOENT; goto conn_setattr_return; } switch (entry->type) { case NETLBL_NLTYPE_CALIPSO: ret_val = calipso_sock_setattr(sk, entry->calipso, secattr); break; case NETLBL_NLTYPE_UNLABELED: /* just delete the protocols we support for right now * but we could remove other protocols if needed */ netlbl_sock_delattr(sk); ret_val = 0; break; default: ret_val = -ENOENT; } break; #endif /* IPv6 */ default: ret_val = -EPROTONOSUPPORT; } conn_setattr_return: rcu_read_unlock(); return ret_val; } /** * netlbl_req_setattr - Label a request socket using the correct protocol * @req: the request socket to label * @secattr: the security attributes * * Description: * Attach the correct label to the given socket using the security attributes * specified in @secattr. Returns zero on success, negative values on failure. 
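 *
 * Illustrative sketch (hypothetical caller, not taken from this file): a
 * caller that treats a missing domain mapping as unlabeled traffic might do:
 *
 *	rc = netlbl_req_setattr(req, &secattr);
 *	if (rc == -ENOENT)
 *		rc = 0;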
* */ int netlbl_req_setattr(struct request_sock *req, const struct netlbl_lsm_secattr *secattr) { int ret_val; struct netlbl_dommap_def *entry; struct inet_request_sock *ireq = inet_rsk(req); rcu_read_lock(); switch (req->rsk_ops->family) { case AF_INET: entry = netlbl_domhsh_getentry_af4(secattr->domain, ireq->ir_rmt_addr); if (entry == NULL) { ret_val = -ENOENT; goto req_setattr_return; } switch (entry->type) { case NETLBL_NLTYPE_CIPSOV4: ret_val = cipso_v4_req_setattr(req, entry->cipso, secattr); break; case NETLBL_NLTYPE_UNLABELED: netlbl_req_delattr(req); ret_val = 0; break; default: ret_val = -ENOENT; } break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: entry = netlbl_domhsh_getentry_af6(secattr->domain, &ireq->ir_v6_rmt_addr); if (entry == NULL) { ret_val = -ENOENT; goto req_setattr_return; } switch (entry->type) { case NETLBL_NLTYPE_CALIPSO: ret_val = calipso_req_setattr(req, entry->calipso, secattr); break; case NETLBL_NLTYPE_UNLABELED: netlbl_req_delattr(req); ret_val = 0; break; default: ret_val = -ENOENT; } break; #endif /* IPv6 */ default: ret_val = -EPROTONOSUPPORT; } req_setattr_return: rcu_read_unlock(); return ret_val; } /** * netlbl_req_delattr - Delete all the NetLabel labels on a socket * @req: the socket * * Description: * Remove all the NetLabel labeling from @req. * */ void netlbl_req_delattr(struct request_sock *req) { switch (req->rsk_ops->family) { case AF_INET: cipso_v4_req_delattr(req); break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: calipso_req_delattr(req); break; #endif /* IPv6 */ } } /** * netlbl_skbuff_setattr - Label a packet using the correct protocol * @skb: the packet * @family: protocol family * @secattr: the security attributes * * Description: * Attach the correct label to the given packet using the security attributes * specified in @secattr. Returns zero on success, negative values on failure. 
* */ int netlbl_skbuff_setattr(struct sk_buff *skb, u16 family, const struct netlbl_lsm_secattr *secattr) { int ret_val; struct iphdr *hdr4; #if IS_ENABLED(CONFIG_IPV6) struct ipv6hdr *hdr6; #endif struct netlbl_dommap_def *entry; rcu_read_lock(); switch (family) { case AF_INET: hdr4 = ip_hdr(skb); entry = netlbl_domhsh_getentry_af4(secattr->domain, hdr4->daddr); if (entry == NULL) { ret_val = -ENOENT; goto skbuff_setattr_return; } switch (entry->type) { case NETLBL_NLTYPE_CIPSOV4: ret_val = cipso_v4_skbuff_setattr(skb, entry->cipso, secattr); break; case NETLBL_NLTYPE_UNLABELED: /* just delete the protocols we support for right now * but we could remove other protocols if needed */ ret_val = cipso_v4_skbuff_delattr(skb); break; default: ret_val = -ENOENT; } break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: hdr6 = ipv6_hdr(skb); entry = netlbl_domhsh_getentry_af6(secattr->domain, &hdr6->daddr); if (entry == NULL) { ret_val = -ENOENT; goto skbuff_setattr_return; } switch (entry->type) { case NETLBL_NLTYPE_CALIPSO: ret_val = calipso_skbuff_setattr(skb, entry->calipso, secattr); break; case NETLBL_NLTYPE_UNLABELED: /* just delete the protocols we support for right now * but we could remove other protocols if needed */ ret_val = calipso_skbuff_delattr(skb); break; default: ret_val = -ENOENT; } break; #endif /* IPv6 */ default: ret_val = -EPROTONOSUPPORT; } skbuff_setattr_return: rcu_read_unlock(); return ret_val; } /** * netlbl_skbuff_getattr - Determine the security attributes of a packet * @skb: the packet * @family: protocol family * @secattr: the security attributes * * Description: * Examines the given packet to see if a recognized form of packet labeling * is present, if so it parses the packet label and returns the security * attributes in @secattr. Returns zero on success, negative values on * failure. * */ int netlbl_skbuff_getattr(const struct sk_buff *skb, u16 family, struct netlbl_lsm_secattr *secattr) { unsigned char *ptr; switch (family) { case AF_INET: ptr = cipso_v4_optptr(skb); if (ptr && cipso_v4_getattr(ptr, secattr) == 0) return 0; break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: ptr = calipso_optptr(skb); if (ptr && calipso_getattr(ptr, secattr) == 0) return 0; break; #endif /* IPv6 */ } return netlbl_unlabel_getattr(skb, family, secattr); } /** * netlbl_skbuff_err - Handle a LSM error on a sk_buff * @skb: the packet * @family: the family * @error: the error code * @gateway: true if host is acting as a gateway, false otherwise * * Description: * Deal with a LSM problem when handling the packet in @skb, typically this is * a permission denied problem (-EACCES). The correct action is determined * according to the packet's labeling protocol. * */ void netlbl_skbuff_err(struct sk_buff *skb, u16 family, int error, int gateway) { switch (family) { case AF_INET: if (cipso_v4_optptr(skb)) cipso_v4_error(skb, error, gateway); break; } } /** * netlbl_cache_invalidate - Invalidate all of the NetLabel protocol caches * * Description: * For all of the NetLabel protocols that support some form of label mapping * cache, invalidate the cache. Returns zero on success, negative values on * error. 
* */ void netlbl_cache_invalidate(void) { cipso_v4_cache_invalidate(); #if IS_ENABLED(CONFIG_IPV6) calipso_cache_invalidate(); #endif /* IPv6 */ } /** * netlbl_cache_add - Add an entry to a NetLabel protocol cache * @skb: the packet * @family: the family * @secattr: the packet's security attributes * * Description: * Add the LSM security attributes for the given packet to the underlying * NetLabel protocol's label mapping cache. Returns zero on success, negative * values on error. * */ int netlbl_cache_add(const struct sk_buff *skb, u16 family, const struct netlbl_lsm_secattr *secattr) { unsigned char *ptr; if ((secattr->flags & NETLBL_SECATTR_CACHE) == 0) return -ENOMSG; switch (family) { case AF_INET: ptr = cipso_v4_optptr(skb); if (ptr) return cipso_v4_cache_add(ptr, secattr); break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: ptr = calipso_optptr(skb); if (ptr) return calipso_cache_add(ptr, secattr); break; #endif /* IPv6 */ } return -ENOMSG; } /* * Protocol Engine Functions */ /** * netlbl_audit_start - Start an audit message * @type: audit message type * @audit_info: NetLabel audit information * * Description: * Start an audit message using the type specified in @type and fill the audit * message with some fields common to all NetLabel audit messages. This * function should only be used by protocol engines, not LSMs. Returns a * pointer to the audit buffer on success, NULL on failure. * */ struct audit_buffer *netlbl_audit_start(int type, struct netlbl_audit *audit_info) { return netlbl_audit_start_common(type, audit_info); } EXPORT_SYMBOL(netlbl_audit_start); /* * Setup Functions */ /** * netlbl_init - Initialize NetLabel * * Description: * Perform the required NetLabel initialization before first use. * */ static int __init netlbl_init(void) { int ret_val; printk(KERN_INFO "NetLabel: Initializing\n"); printk(KERN_INFO "NetLabel: domain hash size = %u\n", (1 << NETLBL_DOMHSH_BITSIZE)); printk(KERN_INFO "NetLabel: protocols = UNLABELED CIPSOv4 CALIPSO\n"); ret_val = netlbl_domhsh_init(NETLBL_DOMHSH_BITSIZE); if (ret_val != 0) goto init_failure; ret_val = netlbl_unlabel_init(NETLBL_UNLHSH_BITSIZE); if (ret_val != 0) goto init_failure; ret_val = netlbl_netlink_init(); if (ret_val != 0) goto init_failure; ret_val = netlbl_unlabel_defconf(); if (ret_val != 0) goto init_failure; printk(KERN_INFO "NetLabel: unlabeled traffic allowed by default\n"); return 0; init_failure: panic("NetLabel: failed to initialize properly (%d)\n", ret_val); } subsys_initcall(netlbl_init);
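/*
 * Illustrative usage sketch (hypothetical helper, not part of the kernel
 * sources): how an LSM might populate a struct netlbl_lsm_secattr with a
 * domain string and a category bitmap before handing it to
 * netlbl_sock_setattr().  "example_domain" and the category words in @cats
 * are placeholder values.
 */
#include <linux/slab.h>
#include <linux/types.h>
#include <net/netlabel.h>

static int __maybe_unused example_build_secattr(struct netlbl_lsm_secattr *secattr,
						const unsigned long *cats,
						u32 ncats)
{
	u32 i;
	int rc;

	netlbl_secattr_init(secattr);

	/* placeholder domain; a real caller uses its own policy domain */
	secattr->domain = kstrdup("example_domain", GFP_KERNEL);
	if (secattr->domain == NULL)
		return -ENOMEM;
	secattr->flags |= NETLBL_SECATTR_DOMAIN;

	/* import the category bits one unsigned long at a time */
	for (i = 0; i < ncats; i++) {
		rc = netlbl_catmap_setlong(&secattr->attr.mls.cat,
					   i * BITS_PER_LONG, cats[i],
					   GFP_KERNEL);
		if (rc != 0)
			return rc;
	}
	secattr->flags |= NETLBL_SECATTR_MLS_CAT;

	return 0;
}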
// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause /* * Copyright (c) Meta Platforms, Inc. and affiliates. 
* All rights reserved. * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ /* *************************************************************** * Tuning parameters *****************************************************************/ /*! * HEAPMODE : * Select how default decompression function ZSTD_decompress() allocates its context, * on stack (0), or into heap (1, default; requires malloc()). * Note that functions with explicit context such as ZSTD_decompressDCtx() are unaffected. */ #ifndef ZSTD_HEAPMODE # define ZSTD_HEAPMODE 1 #endif /*! * LEGACY_SUPPORT : * if set to 1+, ZSTD_decompress() can decode older formats (v0.1+) */ /*! * MAXWINDOWSIZE_DEFAULT : * maximum window size accepted by DStream __by default__. * Frames requiring more memory will be rejected. * It's possible to set a different limit using ZSTD_DCtx_setMaxWindowSize(). */ #ifndef ZSTD_MAXWINDOWSIZE_DEFAULT # define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) + 1) #endif /*! * NO_FORWARD_PROGRESS_MAX : * maximum allowed nb of calls to ZSTD_decompressStream() * without any forward progress * (defined as: no byte read from input, and no byte flushed to output) * before triggering an error. */ #ifndef ZSTD_NO_FORWARD_PROGRESS_MAX # define ZSTD_NO_FORWARD_PROGRESS_MAX 16 #endif /*-******************************************************* * Dependencies *********************************************************/ #include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */ #include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customCalloc, ZSTD_customFree */ #include "../common/error_private.h" #include "../common/zstd_internal.h" /* blockProperties_t */ #include "../common/mem.h" /* low level memory routines */ #include "../common/bits.h" /* ZSTD_highbit32 */ #define FSE_STATIC_LINKING_ONLY #include "../common/fse.h" #include "../common/huf.h" #include <linux/xxhash.h> /* xxh64_reset, xxh64_update, xxh64_digest, XXH64 */ #include "zstd_decompress_internal.h" /* ZSTD_DCtx */ #include "zstd_ddict.h" /* ZSTD_DDictDictContent */ #include "zstd_decompress_block.h" /* ZSTD_decompressBlock_internal */ /* *********************************** * Multiple DDicts Hashset internals * *************************************/ #define DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT 4 #define DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT 3 /* These two constants represent SIZE_MULT/COUNT_MULT load factor without using a float. * Currently, that means a 0.75 load factor. * So, if count * COUNT_MULT / size * SIZE_MULT != 0, then we've exceeded * the load factor of the ddict hash set. */ #define DDICT_HASHSET_TABLE_BASE_SIZE 64 #define DDICT_HASHSET_RESIZE_FACTOR 2 /* Hash function to determine starting position of dict insertion within the table * Returns an index between [0, hashSet->ddictPtrTableSize] */ static size_t ZSTD_DDictHashSet_getIndex(const ZSTD_DDictHashSet* hashSet, U32 dictID) { const U64 hash = xxh64(&dictID, sizeof(U32), 0); /* DDict ptr table size is a multiple of 2, use size - 1 as mask to get index within [0, hashSet->ddictPtrTableSize) */ return hash & (hashSet->ddictPtrTableSize - 1); } /* Adds DDict to a hashset without resizing it. * If inserting a DDict with a dictID that already exists in the set, replaces the one in the set. 
* Returns 0 if successful, or a zstd error code if something went wrong. */ static size_t ZSTD_DDictHashSet_emplaceDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict) { const U32 dictID = ZSTD_getDictID_fromDDict(ddict); size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID); const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1; RETURN_ERROR_IF(hashSet->ddictPtrCount == hashSet->ddictPtrTableSize, GENERIC, "Hash set is full!"); DEBUGLOG(4, "Hashed index: for dictID: %u is %zu", dictID, idx); while (hashSet->ddictPtrTable[idx] != NULL) { /* Replace existing ddict if inserting ddict with same dictID */ if (ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]) == dictID) { DEBUGLOG(4, "DictID already exists, replacing rather than adding"); hashSet->ddictPtrTable[idx] = ddict; return 0; } idx &= idxRangeMask; idx++; } DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx); hashSet->ddictPtrTable[idx] = ddict; hashSet->ddictPtrCount++; return 0; } /* Expands hash table by factor of DDICT_HASHSET_RESIZE_FACTOR and * rehashes all values, allocates new table, frees old table. * Returns 0 on success, otherwise a zstd error code. */ static size_t ZSTD_DDictHashSet_expand(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) { size_t newTableSize = hashSet->ddictPtrTableSize * DDICT_HASHSET_RESIZE_FACTOR; const ZSTD_DDict** newTable = (const ZSTD_DDict**)ZSTD_customCalloc(sizeof(ZSTD_DDict*) * newTableSize, customMem); const ZSTD_DDict** oldTable = hashSet->ddictPtrTable; size_t oldTableSize = hashSet->ddictPtrTableSize; size_t i; DEBUGLOG(4, "Expanding DDict hash table! Old size: %zu new size: %zu", oldTableSize, newTableSize); RETURN_ERROR_IF(!newTable, memory_allocation, "Expanded hashset allocation failed!"); hashSet->ddictPtrTable = newTable; hashSet->ddictPtrTableSize = newTableSize; hashSet->ddictPtrCount = 0; for (i = 0; i < oldTableSize; ++i) { if (oldTable[i] != NULL) { FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, oldTable[i]), ""); } } ZSTD_customFree((void*)oldTable, customMem); DEBUGLOG(4, "Finished re-hash"); return 0; } /* Fetches a DDict with the given dictID * Returns the ZSTD_DDict* with the requested dictID. If it doesn't exist, then returns NULL. */ static const ZSTD_DDict* ZSTD_DDictHashSet_getDDict(ZSTD_DDictHashSet* hashSet, U32 dictID) { size_t idx = ZSTD_DDictHashSet_getIndex(hashSet, dictID); const size_t idxRangeMask = hashSet->ddictPtrTableSize - 1; DEBUGLOG(4, "Hashed index: for dictID: %u is %zu", dictID, idx); for (;;) { size_t currDictID = ZSTD_getDictID_fromDDict(hashSet->ddictPtrTable[idx]); if (currDictID == dictID || currDictID == 0) { /* currDictID == 0 implies a NULL ddict entry */ break; } else { idx &= idxRangeMask; /* Goes to start of table when we reach the end */ idx++; } } DEBUGLOG(4, "Final idx after probing for dictID %u is: %zu", dictID, idx); return hashSet->ddictPtrTable[idx]; } /* Allocates space for and returns a ddict hash set * The hash set's ZSTD_DDict* table has all values automatically set to NULL to begin with. * Returns NULL if allocation failed. 
*/ static ZSTD_DDictHashSet* ZSTD_createDDictHashSet(ZSTD_customMem customMem) { ZSTD_DDictHashSet* ret = (ZSTD_DDictHashSet*)ZSTD_customMalloc(sizeof(ZSTD_DDictHashSet), customMem); DEBUGLOG(4, "Allocating new hash set"); if (!ret) return NULL; ret->ddictPtrTable = (const ZSTD_DDict**)ZSTD_customCalloc(DDICT_HASHSET_TABLE_BASE_SIZE * sizeof(ZSTD_DDict*), customMem); if (!ret->ddictPtrTable) { ZSTD_customFree(ret, customMem); return NULL; } ret->ddictPtrTableSize = DDICT_HASHSET_TABLE_BASE_SIZE; ret->ddictPtrCount = 0; return ret; } /* Frees the table of ZSTD_DDict* within a hashset, then frees the hashset itself. * Note: The ZSTD_DDict* within the table are NOT freed. */ static void ZSTD_freeDDictHashSet(ZSTD_DDictHashSet* hashSet, ZSTD_customMem customMem) { DEBUGLOG(4, "Freeing ddict hash set"); if (hashSet && hashSet->ddictPtrTable) { ZSTD_customFree((void*)hashSet->ddictPtrTable, customMem); } if (hashSet) { ZSTD_customFree(hashSet, customMem); } } /* Public function: Adds a DDict into the ZSTD_DDictHashSet, possibly triggering a resize of the hash set. * Returns 0 on success, or a ZSTD error. */ static size_t ZSTD_DDictHashSet_addDDict(ZSTD_DDictHashSet* hashSet, const ZSTD_DDict* ddict, ZSTD_customMem customMem) { DEBUGLOG(4, "Adding dict ID: %u to hashset with - Count: %zu Tablesize: %zu", ZSTD_getDictID_fromDDict(ddict), hashSet->ddictPtrCount, hashSet->ddictPtrTableSize); if (hashSet->ddictPtrCount * DDICT_HASHSET_MAX_LOAD_FACTOR_COUNT_MULT / hashSet->ddictPtrTableSize * DDICT_HASHSET_MAX_LOAD_FACTOR_SIZE_MULT != 0) { FORWARD_IF_ERROR(ZSTD_DDictHashSet_expand(hashSet, customMem), ""); } FORWARD_IF_ERROR(ZSTD_DDictHashSet_emplaceDDict(hashSet, ddict), ""); return 0; } /*-************************************************************* * Context management ***************************************************************/ size_t ZSTD_sizeof_DCtx (const ZSTD_DCtx* dctx) { if (dctx==NULL) return 0; /* support sizeof NULL */ return sizeof(*dctx) + ZSTD_sizeof_DDict(dctx->ddictLocal) + dctx->inBuffSize + dctx->outBuffSize; } size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); } static size_t ZSTD_startingInputLength(ZSTD_format_e format) { size_t const startingInputLength = ZSTD_FRAMEHEADERSIZE_PREFIX(format); /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */ assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) ); return startingInputLength; } static void ZSTD_DCtx_resetParameters(ZSTD_DCtx* dctx) { assert(dctx->streamStage == zdss_init); dctx->format = ZSTD_f_zstd1; dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT; dctx->outBufferMode = ZSTD_bm_buffered; dctx->forceIgnoreChecksum = ZSTD_d_validateChecksum; dctx->refMultipleDDicts = ZSTD_rmd_refSingleDDict; dctx->disableHufAsm = 0; dctx->maxBlockSizeParam = 0; } static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx) { dctx->staticSize = 0; dctx->ddict = NULL; dctx->ddictLocal = NULL; dctx->dictEnd = NULL; dctx->ddictIsCold = 0; dctx->dictUses = ZSTD_dont_use; dctx->inBuff = NULL; dctx->inBuffSize = 0; dctx->outBuffSize = 0; dctx->streamStage = zdss_init; dctx->noForwardProgress = 0; dctx->oversizedDuration = 0; dctx->isFrameDecompression = 1; #if DYNAMIC_BMI2 dctx->bmi2 = ZSTD_cpuSupportsBmi2(); #endif dctx->ddictSet = NULL; ZSTD_DCtx_resetParameters(dctx); #ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION dctx->dictContentEndForFuzzing = NULL; #endif } ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize) { ZSTD_DCtx* const dctx = (ZSTD_DCtx*) workspace; if ((size_t)workspace & 7) 
return NULL; /* 8-aligned */ if (workspaceSize < sizeof(ZSTD_DCtx)) return NULL; /* minimum size */ ZSTD_initDCtx_internal(dctx); dctx->staticSize = workspaceSize; dctx->inBuff = (char*)(dctx+1); return dctx; } static ZSTD_DCtx* ZSTD_createDCtx_internal(ZSTD_customMem customMem) { if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL; { ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_customMalloc(sizeof(*dctx), customMem); if (!dctx) return NULL; dctx->customMem = customMem; ZSTD_initDCtx_internal(dctx); return dctx; } } ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem) { return ZSTD_createDCtx_internal(customMem); } ZSTD_DCtx* ZSTD_createDCtx(void) { DEBUGLOG(3, "ZSTD_createDCtx"); return ZSTD_createDCtx_internal(ZSTD_defaultCMem); } static void ZSTD_clearDict(ZSTD_DCtx* dctx) { ZSTD_freeDDict(dctx->ddictLocal); dctx->ddictLocal = NULL; dctx->ddict = NULL; dctx->dictUses = ZSTD_dont_use; } size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx) { if (dctx==NULL) return 0; /* support free on NULL */ RETURN_ERROR_IF(dctx->staticSize, memory_allocation, "not compatible with static DCtx"); { ZSTD_customMem const cMem = dctx->customMem; ZSTD_clearDict(dctx); ZSTD_customFree(dctx->inBuff, cMem); dctx->inBuff = NULL; if (dctx->ddictSet) { ZSTD_freeDDictHashSet(dctx->ddictSet, cMem); dctx->ddictSet = NULL; } ZSTD_customFree(dctx, cMem); return 0; } } /* no longer useful */ void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx) { size_t const toCopy = (size_t)((char*)(&dstDCtx->inBuff) - (char*)dstDCtx); ZSTD_memcpy(dstDCtx, srcDCtx, toCopy); /* no need to copy workspace */ } /* Given a dctx with a digested frame params, re-selects the correct ZSTD_DDict based on * the requested dict ID from the frame. If there exists a reference to the correct ZSTD_DDict, then * accordingly sets the ddict to be used to decompress the frame. * * If no DDict is found, then no action is taken, and the ZSTD_DCtx::ddict remains as-is. * * ZSTD_d_refMultipleDDicts must be enabled for this function to be called. */ static void ZSTD_DCtx_selectFrameDDict(ZSTD_DCtx* dctx) { assert(dctx->refMultipleDDicts && dctx->ddictSet); DEBUGLOG(4, "Adjusting DDict based on requested dict ID from frame"); if (dctx->ddict) { const ZSTD_DDict* frameDDict = ZSTD_DDictHashSet_getDDict(dctx->ddictSet, dctx->fParams.dictID); if (frameDDict) { DEBUGLOG(4, "DDict found!"); ZSTD_clearDict(dctx); dctx->dictID = dctx->fParams.dictID; dctx->ddict = frameDDict; dctx->dictUses = ZSTD_use_indefinitely; } } } /*-************************************************************* * Frame header decoding ***************************************************************/ /*! ZSTD_isFrame() : * Tells if the content of `buffer` starts with a valid Frame Identifier. * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0. * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled. * Note 3 : Skippable Frame Identifiers are considered valid. */ unsigned ZSTD_isFrame(const void* buffer, size_t size) { if (size < ZSTD_FRAMEIDSIZE) return 0; { U32 const magic = MEM_readLE32(buffer); if (magic == ZSTD_MAGICNUMBER) return 1; if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return 1; } return 0; } /*! ZSTD_isSkippableFrame() : * Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame. * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0. 
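 *
 * Illustrative sketch (hypothetical caller, not taken from this file) :
 * skipping over a skippable frame before handling the next frame in `src` :
 *
 *     if (ZSTD_isSkippableFrame(src, srcSize)) {
 *         size_t const skip = ZSTD_findFrameCompressedSize(src, srcSize);
 *         if (!ZSTD_isError(skip)) {
 *             src = (const char*)src + skip;
 *             srcSize -= skip;
 *         }
 *     }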
*/ unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size) { if (size < ZSTD_FRAMEIDSIZE) return 0; { U32 const magic = MEM_readLE32(buffer); if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return 1; } return 0; } /* ZSTD_frameHeaderSize_internal() : * srcSize must be large enough to reach header size fields. * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless. * @return : size of the Frame Header * or an error code, which can be tested with ZSTD_isError() */ static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format) { size_t const minInputSize = ZSTD_startingInputLength(format); RETURN_ERROR_IF(srcSize < minInputSize, srcSize_wrong, ""); { BYTE const fhd = ((const BYTE*)src)[minInputSize-1]; U32 const dictID= fhd & 3; U32 const singleSegment = (fhd >> 5) & 1; U32 const fcsId = fhd >> 6; return minInputSize + !singleSegment + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId] + (singleSegment && !fcsId); } } /* ZSTD_frameHeaderSize() : * srcSize must be >= ZSTD_frameHeaderSize_prefix. * @return : size of the Frame Header, * or an error code (if srcSize is too small) */ size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize) { return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_f_zstd1); } /* ZSTD_getFrameHeader_advanced() : * decode Frame Header, or require larger `srcSize`. * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless * @return : 0, `zfhPtr` is correctly filled, * >0, `srcSize` is too small, value is wanted `srcSize` amount, ** or an error code, which can be tested using ZSTD_isError() */ size_t ZSTD_getFrameHeader_advanced(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format) { const BYTE* ip = (const BYTE*)src; size_t const minInputSize = ZSTD_startingInputLength(format); DEBUGLOG(5, "ZSTD_getFrameHeader_advanced: minInputSize = %zu, srcSize = %zu", minInputSize, srcSize); if (srcSize > 0) { /* note : technically could be considered an assert(), since it's an invalid entry */ RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter : src==NULL, but srcSize>0"); } if (srcSize < minInputSize) { if (srcSize > 0 && format != ZSTD_f_zstd1_magicless) { /* when receiving less than @minInputSize bytes, * control these bytes at least correspond to a supported magic number * in order to error out early if they don't. 
**/ size_t const toCopy = MIN(4, srcSize); unsigned char hbuf[4]; MEM_writeLE32(hbuf, ZSTD_MAGICNUMBER); assert(src != NULL); ZSTD_memcpy(hbuf, src, toCopy); if ( MEM_readLE32(hbuf) != ZSTD_MAGICNUMBER ) { /* not a zstd frame : let's check if it's a skippable frame */ MEM_writeLE32(hbuf, ZSTD_MAGIC_SKIPPABLE_START); ZSTD_memcpy(hbuf, src, toCopy); if ((MEM_readLE32(hbuf) & ZSTD_MAGIC_SKIPPABLE_MASK) != ZSTD_MAGIC_SKIPPABLE_START) { RETURN_ERROR(prefix_unknown, "first bytes don't correspond to any supported magic number"); } } } return minInputSize; } ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr)); /* not strictly necessary, but static analyzers may not understand that zfhPtr will be read only if return value is zero, since they are 2 different signals */ if ( (format != ZSTD_f_zstd1_magicless) && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) { if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */ if (srcSize < ZSTD_SKIPPABLEHEADERSIZE) return ZSTD_SKIPPABLEHEADERSIZE; /* magic number + frame length */ ZSTD_memset(zfhPtr, 0, sizeof(*zfhPtr)); zfhPtr->frameType = ZSTD_skippableFrame; zfhPtr->dictID = MEM_readLE32(src) - ZSTD_MAGIC_SKIPPABLE_START; zfhPtr->headerSize = ZSTD_SKIPPABLEHEADERSIZE; zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE); return 0; } RETURN_ERROR(prefix_unknown, ""); } /* ensure there is enough `srcSize` to fully read/decode frame header */ { size_t const fhsize = ZSTD_frameHeaderSize_internal(src, srcSize, format); if (srcSize < fhsize) return fhsize; zfhPtr->headerSize = (U32)fhsize; } { BYTE const fhdByte = ip[minInputSize-1]; size_t pos = minInputSize; U32 const dictIDSizeCode = fhdByte&3; U32 const checksumFlag = (fhdByte>>2)&1; U32 const singleSegment = (fhdByte>>5)&1; U32 const fcsID = fhdByte>>6; U64 windowSize = 0; U32 dictID = 0; U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN; RETURN_ERROR_IF((fhdByte & 0x08) != 0, frameParameter_unsupported, "reserved bits, must be zero"); if (!singleSegment) { BYTE const wlByte = ip[pos++]; U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN; RETURN_ERROR_IF(windowLog > ZSTD_WINDOWLOG_MAX, frameParameter_windowTooLarge, ""); windowSize = (1ULL << windowLog); windowSize += (windowSize >> 3) * (wlByte&7); } switch(dictIDSizeCode) { default: assert(0); /* impossible */ ZSTD_FALLTHROUGH; case 0 : break; case 1 : dictID = ip[pos]; pos++; break; case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break; case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break; } switch(fcsID) { default: assert(0); /* impossible */ ZSTD_FALLTHROUGH; case 0 : if (singleSegment) frameContentSize = ip[pos]; break; case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break; case 2 : frameContentSize = MEM_readLE32(ip+pos); break; case 3 : frameContentSize = MEM_readLE64(ip+pos); break; } if (singleSegment) windowSize = frameContentSize; zfhPtr->frameType = ZSTD_frame; zfhPtr->frameContentSize = frameContentSize; zfhPtr->windowSize = windowSize; zfhPtr->blockSizeMax = (unsigned) MIN(windowSize, ZSTD_BLOCKSIZE_MAX); zfhPtr->dictID = dictID; zfhPtr->checksumFlag = checksumFlag; } return 0; } /* ZSTD_getFrameHeader() : * decode Frame Header, or require larger `srcSize`. * note : this function does not consume input, it only reads it. 
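 *
 * Illustrative polling sketch (for illustration only; `buf`, `filled` and
 * `readMore()` are hypothetical) :
 *   ZSTD_FrameHeader zfh;
 *   size_t need = ZSTD_getFrameHeader(&zfh, buf, filled);
 *   while (!ZSTD_isError(need) && need > 0) {   // need == amount of srcSize wanted
 *       filled += readMore(buf + filled, need - filled);
 *       need = ZSTD_getFrameHeader(&zfh, buf, filled);
 *   }
 *   // need == 0 : zfh is filled ; ZSTD_isError(need) : not a valid frame header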
 * @return : 0, `zfhPtr` is correctly filled,
 *          >0, `srcSize` is too small, value is wanted `srcSize` amount,
 *           or an error code, which can be tested using ZSTD_isError() */
size_t ZSTD_getFrameHeader(ZSTD_FrameHeader* zfhPtr, const void* src, size_t srcSize)
{
    return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1);
}

/* ZSTD_getFrameContentSize() :
 * compatible with legacy mode
 * @return : decompressed size of the single frame pointed to by `src` if known, otherwise
 *         - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
 *         - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
{
    {   ZSTD_FrameHeader zfh;
        if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0)
            return ZSTD_CONTENTSIZE_ERROR;
        if (zfh.frameType == ZSTD_skippableFrame) {
            return 0;
        } else {
            return zfh.frameContentSize;
    }   }
}

static size_t readSkippableFrameSize(void const* src, size_t srcSize)
{
    size_t const skippableHeaderSize = ZSTD_SKIPPABLEHEADERSIZE;
    U32 sizeU32;

    RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong, "");

    sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE);
    RETURN_ERROR_IF((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32,
                    frameParameter_unsupported, "");
    {   size_t const skippableSize = skippableHeaderSize + sizeU32;
        RETURN_ERROR_IF(skippableSize > srcSize, srcSize_wrong, "");
        return skippableSize;
    }
}

/*! ZSTD_readSkippableFrame() :
 * Retrieves content of a skippable frame, and writes it to dst buffer.
 *
 * The parameter magicVariant will receive the magicVariant that was supplied when the frame was written,
 * i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. This can be NULL if the caller is not interested
 * in the magicVariant.
 *
 * Returns an error if destination buffer is not large enough, or if this is not a valid skippable frame.
 *
 * @return : number of bytes written or a ZSTD error.
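 *
 * Illustrative sketch (for illustration only; `src`, `srcSize` and `payload`
 * are hypothetical caller buffers) :
 *   unsigned magicVariant;
 *   size_t const n = ZSTD_readSkippableFrame(payload, sizeof(payload),
 *                                            &magicVariant, src, srcSize);
 *   if (!ZSTD_isError(n)) {
 *       // n bytes of skippable-frame payload were copied into payload
 *   }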
*/ size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, unsigned* magicVariant, /* optional, can be NULL */ const void* src, size_t srcSize) { RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong, ""); { U32 const magicNumber = MEM_readLE32(src); size_t skippableFrameSize = readSkippableFrameSize(src, srcSize); size_t skippableContentSize = skippableFrameSize - ZSTD_SKIPPABLEHEADERSIZE; /* check input validity */ RETURN_ERROR_IF(!ZSTD_isSkippableFrame(src, srcSize), frameParameter_unsupported, ""); RETURN_ERROR_IF(skippableFrameSize < ZSTD_SKIPPABLEHEADERSIZE || skippableFrameSize > srcSize, srcSize_wrong, ""); RETURN_ERROR_IF(skippableContentSize > dstCapacity, dstSize_tooSmall, ""); /* deliver payload */ if (skippableContentSize > 0 && dst != NULL) ZSTD_memcpy(dst, (const BYTE *)src + ZSTD_SKIPPABLEHEADERSIZE, skippableContentSize); if (magicVariant != NULL) *magicVariant = magicNumber - ZSTD_MAGIC_SKIPPABLE_START; return skippableContentSize; } } /* ZSTD_findDecompressedSize() : * `srcSize` must be the exact length of some number of ZSTD compressed and/or * skippable frames * note: compatible with legacy mode * @return : decompressed size of the frames contained */ unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize) { unsigned long long totalDstSize = 0; while (srcSize >= ZSTD_startingInputLength(ZSTD_f_zstd1)) { U32 const magicNumber = MEM_readLE32(src); if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { size_t const skippableSize = readSkippableFrameSize(src, srcSize); if (ZSTD_isError(skippableSize)) return ZSTD_CONTENTSIZE_ERROR; assert(skippableSize <= srcSize); src = (const BYTE *)src + skippableSize; srcSize -= skippableSize; continue; } { unsigned long long const fcs = ZSTD_getFrameContentSize(src, srcSize); if (fcs >= ZSTD_CONTENTSIZE_ERROR) return fcs; if (totalDstSize + fcs < totalDstSize) return ZSTD_CONTENTSIZE_ERROR; /* check for overflow */ totalDstSize += fcs; } /* skip to next frame */ { size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize); if (ZSTD_isError(frameSrcSize)) return ZSTD_CONTENTSIZE_ERROR; assert(frameSrcSize <= srcSize); src = (const BYTE *)src + frameSrcSize; srcSize -= frameSrcSize; } } /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */ if (srcSize) return ZSTD_CONTENTSIZE_ERROR; return totalDstSize; } /* ZSTD_getDecompressedSize() : * compatible with legacy mode * @return : decompressed size if known, 0 otherwise note : 0 can mean any of the following : - frame content is empty - decompressed size field is not present in frame header - frame header unknown / not supported - frame header not complete (`srcSize` too small) */ unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize) { unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize); ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_ERROR < ZSTD_CONTENTSIZE_UNKNOWN); return (ret >= ZSTD_CONTENTSIZE_ERROR) ? 0 : ret; } /* ZSTD_decodeFrameHeader() : * `headerSize` must be the size provided by ZSTD_frameHeaderSize(). * If multiple DDict references are enabled, also will choose the correct DDict to use. 
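 * As a side effect (see body below), the header bytes are accounted in
 * dctx->processedCSize and, when the frame carries a checksum that is not
 * force-ignored, the xxh64 checksum state is reset before block decoding.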
* @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */ static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize) { size_t const result = ZSTD_getFrameHeader_advanced(&(dctx->fParams), src, headerSize, dctx->format); if (ZSTD_isError(result)) return result; /* invalid header */ RETURN_ERROR_IF(result>0, srcSize_wrong, "headerSize too small"); /* Reference DDict requested by frame if dctx references multiple ddicts */ if (dctx->refMultipleDDicts == ZSTD_rmd_refMultipleDDicts && dctx->ddictSet) { ZSTD_DCtx_selectFrameDDict(dctx); } #ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION /* Skip the dictID check in fuzzing mode, because it makes the search * harder. */ RETURN_ERROR_IF(dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID), dictionary_wrong, ""); #endif dctx->validateChecksum = (dctx->fParams.checksumFlag && !dctx->forceIgnoreChecksum) ? 1 : 0; if (dctx->validateChecksum) xxh64_reset(&dctx->xxhState, 0); dctx->processedCSize += headerSize; return 0; } static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(size_t ret) { ZSTD_frameSizeInfo frameSizeInfo; frameSizeInfo.compressedSize = ret; frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR; return frameSizeInfo; } static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize, ZSTD_format_e format) { ZSTD_frameSizeInfo frameSizeInfo; ZSTD_memset(&frameSizeInfo, 0, sizeof(ZSTD_frameSizeInfo)); if (format == ZSTD_f_zstd1 && (srcSize >= ZSTD_SKIPPABLEHEADERSIZE) && (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize); assert(ZSTD_isError(frameSizeInfo.compressedSize) || frameSizeInfo.compressedSize <= srcSize); return frameSizeInfo; } else { const BYTE* ip = (const BYTE*)src; const BYTE* const ipstart = ip; size_t remainingSize = srcSize; size_t nbBlocks = 0; ZSTD_FrameHeader zfh; /* Extract Frame Header */ { size_t const ret = ZSTD_getFrameHeader_advanced(&zfh, src, srcSize, format); if (ZSTD_isError(ret)) return ZSTD_errorFrameSizeInfo(ret); if (ret > 0) return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong)); } ip += zfh.headerSize; remainingSize -= zfh.headerSize; /* Iterate over each block */ while (1) { blockProperties_t blockProperties; size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties); if (ZSTD_isError(cBlockSize)) return ZSTD_errorFrameSizeInfo(cBlockSize); if (ZSTD_blockHeaderSize + cBlockSize > remainingSize) return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong)); ip += ZSTD_blockHeaderSize + cBlockSize; remainingSize -= ZSTD_blockHeaderSize + cBlockSize; nbBlocks++; if (blockProperties.lastBlock) break; } /* Final frame content checksum */ if (zfh.checksumFlag) { if (remainingSize < 4) return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong)); ip += 4; } frameSizeInfo.nbBlocks = nbBlocks; frameSizeInfo.compressedSize = (size_t)(ip - ipstart); frameSizeInfo.decompressedBound = (zfh.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) ? 
zfh.frameContentSize : (unsigned long long)nbBlocks * zfh.blockSizeMax; return frameSizeInfo; } } static size_t ZSTD_findFrameCompressedSize_advanced(const void *src, size_t srcSize, ZSTD_format_e format) { ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, format); return frameSizeInfo.compressedSize; } /* ZSTD_findFrameCompressedSize() : * See docs in zstd.h * Note: compatible with legacy mode */ size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize) { return ZSTD_findFrameCompressedSize_advanced(src, srcSize, ZSTD_f_zstd1); } /* ZSTD_decompressBound() : * compatible with legacy mode * `src` must point to the start of a ZSTD frame or a skippable frame * `srcSize` must be at least as large as the frame contained * @return : the maximum decompressed size of the compressed source */ unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize) { unsigned long long bound = 0; /* Iterate over each frame */ while (srcSize > 0) { ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, ZSTD_f_zstd1); size_t const compressedSize = frameSizeInfo.compressedSize; unsigned long long const decompressedBound = frameSizeInfo.decompressedBound; if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR) return ZSTD_CONTENTSIZE_ERROR; assert(srcSize >= compressedSize); src = (const BYTE*)src + compressedSize; srcSize -= compressedSize; bound += decompressedBound; } return bound; } size_t ZSTD_decompressionMargin(void const* src, size_t srcSize) { size_t margin = 0; unsigned maxBlockSize = 0; /* Iterate over each frame */ while (srcSize > 0) { ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize, ZSTD_f_zstd1); size_t const compressedSize = frameSizeInfo.compressedSize; unsigned long long const decompressedBound = frameSizeInfo.decompressedBound; ZSTD_FrameHeader zfh; FORWARD_IF_ERROR(ZSTD_getFrameHeader(&zfh, src, srcSize), ""); if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR) return ERROR(corruption_detected); if (zfh.frameType == ZSTD_frame) { /* Add the frame header to our margin */ margin += zfh.headerSize; /* Add the checksum to our margin */ margin += zfh.checksumFlag ? 4 : 0; /* Add 3 bytes per block */ margin += 3 * frameSizeInfo.nbBlocks; /* Compute the max block size */ maxBlockSize = MAX(maxBlockSize, zfh.blockSizeMax); } else { assert(zfh.frameType == ZSTD_skippableFrame); /* Add the entire skippable frame size to our margin. */ margin += compressedSize; } assert(srcSize >= compressedSize); src = (const BYTE*)src + compressedSize; srcSize -= compressedSize; } /* Add the max block size back to the margin. */ margin += maxBlockSize; return margin; } /*-************************************************************* * Frame decoding ***************************************************************/ /* ZSTD_insertBlock() : * insert `src` block into `dctx` history. Useful to track uncompressed blocks. 
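 *
 * Illustrative sketch (for illustration only; `rawBlock` and `rawBlockSize`
 * are hypothetical) : when a caller of the block-level API copies an
 * uncompressed block itself, the history must still be registered :
 *   ZSTD_memcpy(dst, rawBlock, rawBlockSize);
 *   ZSTD_insertBlock(dctx, dst, rawBlockSize);   // keeps window tracking consistent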
*/ size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize) { DEBUGLOG(5, "ZSTD_insertBlock: %u bytes", (unsigned)blockSize); ZSTD_checkContinuity(dctx, blockStart, blockSize); dctx->previousDstEnd = (const char*)blockStart + blockSize; return blockSize; } static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize) { DEBUGLOG(5, "ZSTD_copyRawBlock"); RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall, ""); if (dst == NULL) { if (srcSize == 0) return 0; RETURN_ERROR(dstBuffer_null, ""); } ZSTD_memmove(dst, src, srcSize); return srcSize; } static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity, BYTE b, size_t regenSize) { RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall, ""); if (dst == NULL) { if (regenSize == 0) return 0; RETURN_ERROR(dstBuffer_null, ""); } ZSTD_memset(dst, b, regenSize); return regenSize; } static void ZSTD_DCtx_trace_end(ZSTD_DCtx const* dctx, U64 uncompressedSize, U64 compressedSize, int streaming) { (void)dctx; (void)uncompressedSize; (void)compressedSize; (void)streaming; } /*! ZSTD_decompressFrame() : * @dctx must be properly initialized * will update *srcPtr and *srcSizePtr, * to make *srcPtr progress by one frame. */ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void** srcPtr, size_t *srcSizePtr) { const BYTE* const istart = (const BYTE*)(*srcPtr); const BYTE* ip = istart; BYTE* const ostart = (BYTE*)dst; BYTE* const oend = dstCapacity != 0 ? ostart + dstCapacity : ostart; BYTE* op = ostart; size_t remainingSrcSize = *srcSizePtr; DEBUGLOG(4, "ZSTD_decompressFrame (srcSize:%i)", (int)*srcSizePtr); /* check */ RETURN_ERROR_IF( remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN(dctx->format)+ZSTD_blockHeaderSize, srcSize_wrong, ""); /* Frame Header */ { size_t const frameHeaderSize = ZSTD_frameHeaderSize_internal( ip, ZSTD_FRAMEHEADERSIZE_PREFIX(dctx->format), dctx->format); if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize; RETURN_ERROR_IF(remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize, srcSize_wrong, ""); FORWARD_IF_ERROR( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) , ""); ip += frameHeaderSize; remainingSrcSize -= frameHeaderSize; } /* Shrink the blockSizeMax if enabled */ if (dctx->maxBlockSizeParam != 0) dctx->fParams.blockSizeMax = MIN(dctx->fParams.blockSizeMax, (unsigned)dctx->maxBlockSizeParam); /* Loop on each block */ while (1) { BYTE* oBlockEnd = oend; size_t decodedSize; blockProperties_t blockProperties; size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties); if (ZSTD_isError(cBlockSize)) return cBlockSize; ip += ZSTD_blockHeaderSize; remainingSrcSize -= ZSTD_blockHeaderSize; RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong, ""); if (ip >= op && ip < oBlockEnd) { /* We are decompressing in-place. Limit the output pointer so that we * don't overwrite the block that we are currently reading. This will * fail decompression if the input & output pointers aren't spaced * far enough apart. * * This is important to set, even when the pointers are far enough * apart, because ZSTD_decompressBlock_internal() can decide to store * literals in the output buffer, after the block it is decompressing. * Since we don't want anything to overwrite our input, we have to tell * ZSTD_decompressBlock_internal to never write past ip. * * See ZSTD_allocateLiteralsBuffer() for reference. 
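 * (Numerically : the clamp below caps the writable region at the current read
 * position, e.g. with op == dst and ip == dst + 100, oBlockEnd becomes
 * dst + 100, so at most 100 bytes may be produced before reaching the
 * not-yet-consumed input.)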
*/ oBlockEnd = op + (ip - op); } switch(blockProperties.blockType) { case bt_compressed: assert(dctx->isFrameDecompression == 1); decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oBlockEnd-op), ip, cBlockSize, not_streaming); break; case bt_raw : /* Use oend instead of oBlockEnd because this function is safe to overlap. It uses memmove. */ decodedSize = ZSTD_copyRawBlock(op, (size_t)(oend-op), ip, cBlockSize); break; case bt_rle : decodedSize = ZSTD_setRleBlock(op, (size_t)(oBlockEnd-op), *ip, blockProperties.origSize); break; case bt_reserved : default: RETURN_ERROR(corruption_detected, "invalid block type"); } FORWARD_IF_ERROR(decodedSize, "Block decompression failure"); DEBUGLOG(5, "Decompressed block of dSize = %u", (unsigned)decodedSize); if (dctx->validateChecksum) { xxh64_update(&dctx->xxhState, op, decodedSize); } if (decodedSize) /* support dst = NULL,0 */ { op += decodedSize; } assert(ip != NULL); ip += cBlockSize; remainingSrcSize -= cBlockSize; if (blockProperties.lastBlock) break; } if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) { RETURN_ERROR_IF((U64)(op-ostart) != dctx->fParams.frameContentSize, corruption_detected, ""); } if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */ RETURN_ERROR_IF(remainingSrcSize<4, checksum_wrong, ""); if (!dctx->forceIgnoreChecksum) { U32 const checkCalc = (U32)xxh64_digest(&dctx->xxhState); U32 checkRead; checkRead = MEM_readLE32(ip); RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong, ""); } ip += 4; remainingSrcSize -= 4; } ZSTD_DCtx_trace_end(dctx, (U64)(op-ostart), (U64)(ip-istart), /* streaming */ 0); /* Allow caller to get size read */ DEBUGLOG(4, "ZSTD_decompressFrame: decompressed frame of size %i, consuming %i bytes of input", (int)(op-ostart), (int)(ip - (const BYTE*)*srcPtr)); *srcPtr = ip; *srcSizePtr = remainingSrcSize; return (size_t)(op-ostart); } static ZSTD_ALLOW_POINTER_OVERFLOW_ATTR size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict, size_t dictSize, const ZSTD_DDict* ddict) { void* const dststart = dst; int moreThan1Frame = 0; DEBUGLOG(5, "ZSTD_decompressMultiFrame"); assert(dict==NULL || ddict==NULL); /* either dict or ddict set, not both */ if (ddict) { dict = ZSTD_DDict_dictContent(ddict); dictSize = ZSTD_DDict_dictSize(ddict); } while (srcSize >= ZSTD_startingInputLength(dctx->format)) { if (dctx->format == ZSTD_f_zstd1 && srcSize >= 4) { U32 const magicNumber = MEM_readLE32(src); DEBUGLOG(5, "reading magic number %08X", (unsigned)magicNumber); if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame detected : skip it */ size_t const skippableSize = readSkippableFrameSize(src, srcSize); FORWARD_IF_ERROR(skippableSize, "invalid skippable frame"); assert(skippableSize <= srcSize); src = (const BYTE *)src + skippableSize; srcSize -= skippableSize; continue; /* check next frame */ } } if (ddict) { /* we were called from ZSTD_decompress_usingDDict */ FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(dctx, ddict), ""); } else { /* this will initialize correctly with no dict if dict == NULL, so * use this in all cases but ddict */ FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize), ""); } ZSTD_checkContinuity(dctx, dst, dstCapacity); { const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity, &src, &srcSize); RETURN_ERROR_IF( (ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown) && (moreThan1Frame==1), srcSize_wrong, "At least 
one frame successfully completed, " "but following bytes are garbage: " "it's more likely to be a srcSize error, " "specifying more input bytes than size of frame(s). " "Note: one could be unlucky, it might be a corruption error instead, " "happening right at the place where we expect zstd magic bytes. " "But this is _much_ less likely than a srcSize field error."); if (ZSTD_isError(res)) return res; assert(res <= dstCapacity); if (res != 0) dst = (BYTE*)dst + res; dstCapacity -= res; } moreThan1Frame = 1; } /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */ RETURN_ERROR_IF(srcSize, srcSize_wrong, "input not entirely consumed"); return (size_t)((BYTE*)dst - (BYTE*)dststart); } size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict, size_t dictSize) { return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL); } static ZSTD_DDict const* ZSTD_getDDict(ZSTD_DCtx* dctx) { switch (dctx->dictUses) { default: assert(0 /* Impossible */); ZSTD_FALLTHROUGH; case ZSTD_dont_use: ZSTD_clearDict(dctx); return NULL; case ZSTD_use_indefinitely: return dctx->ddict; case ZSTD_use_once: dctx->dictUses = ZSTD_dont_use; return dctx->ddict; } } size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { return ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ZSTD_getDDict(dctx)); } size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize) { #if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1) size_t regenSize; ZSTD_DCtx* const dctx = ZSTD_createDCtx_internal(ZSTD_defaultCMem); RETURN_ERROR_IF(dctx==NULL, memory_allocation, "NULL pointer!"); regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize); ZSTD_freeDCtx(dctx); return regenSize; #else /* stack mode */ ZSTD_DCtx dctx; ZSTD_initDCtx_internal(&dctx); return ZSTD_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize); #endif } /*-************************************** * Advanced Streaming Decompression API * Bufferless and synchronous ****************************************/ size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; } /* * Similar to ZSTD_nextSrcSizeToDecompress(), but when a block input can be streamed, we * allow taking a partial block as the input. Currently only raw uncompressed blocks can * be streamed. * * For blocks that can be streamed, this allows us to reduce the latency until we produce * output, and avoid copying the input. * * @param inputSize - The total amount of input that the caller currently has. 
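 * For a streamable (raw) block the result is BOUNDED(1, inputSize,
 * dctx->expected) : e.g. with expected == 4096 and inputSize == 1000, only
 * 1000 bytes are requested, so raw data can be forwarded without waiting for
 * the complete block.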
*/ static size_t ZSTD_nextSrcSizeToDecompressWithInputSize(ZSTD_DCtx* dctx, size_t inputSize) { if (!(dctx->stage == ZSTDds_decompressBlock || dctx->stage == ZSTDds_decompressLastBlock)) return dctx->expected; if (dctx->bType != bt_raw) return dctx->expected; return BOUNDED(1, inputSize, dctx->expected); } ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) { switch(dctx->stage) { default: /* should not happen */ assert(0); ZSTD_FALLTHROUGH; case ZSTDds_getFrameHeaderSize: ZSTD_FALLTHROUGH; case ZSTDds_decodeFrameHeader: return ZSTDnit_frameHeader; case ZSTDds_decodeBlockHeader: return ZSTDnit_blockHeader; case ZSTDds_decompressBlock: return ZSTDnit_block; case ZSTDds_decompressLastBlock: return ZSTDnit_lastBlock; case ZSTDds_checkChecksum: return ZSTDnit_checksum; case ZSTDds_decodeSkippableHeader: ZSTD_FALLTHROUGH; case ZSTDds_skipFrame: return ZSTDnit_skippableFrame; } } static int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skipFrame; } /* ZSTD_decompressContinue() : * srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress()) * @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity) * or an error code, which can be tested using ZSTD_isError() */ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) { DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (unsigned)srcSize); /* Sanity check */ RETURN_ERROR_IF(srcSize != ZSTD_nextSrcSizeToDecompressWithInputSize(dctx, srcSize), srcSize_wrong, "not allowed"); ZSTD_checkContinuity(dctx, dst, dstCapacity); dctx->processedCSize += srcSize; switch (dctx->stage) { case ZSTDds_getFrameHeaderSize : assert(src != NULL); if (dctx->format == ZSTD_f_zstd1) { /* allows header */ assert(srcSize >= ZSTD_FRAMEIDSIZE); /* to read skippable magic number */ if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */ ZSTD_memcpy(dctx->headerBuffer, src, srcSize); dctx->expected = ZSTD_SKIPPABLEHEADERSIZE - srcSize; /* remaining to load to get full skippable frame header */ dctx->stage = ZSTDds_decodeSkippableHeader; return 0; } } dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format); if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize; ZSTD_memcpy(dctx->headerBuffer, src, srcSize); dctx->expected = dctx->headerSize - srcSize; dctx->stage = ZSTDds_decodeFrameHeader; return 0; case ZSTDds_decodeFrameHeader: assert(src != NULL); ZSTD_memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize); FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize), ""); dctx->expected = ZSTD_blockHeaderSize; dctx->stage = ZSTDds_decodeBlockHeader; return 0; case ZSTDds_decodeBlockHeader: { blockProperties_t bp; size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp); if (ZSTD_isError(cBlockSize)) return cBlockSize; RETURN_ERROR_IF(cBlockSize > dctx->fParams.blockSizeMax, corruption_detected, "Block Size Exceeds Maximum"); dctx->expected = cBlockSize; dctx->bType = bp.blockType; dctx->rleSize = bp.origSize; if (cBlockSize) { dctx->stage = bp.lastBlock ? 
ZSTDds_decompressLastBlock : ZSTDds_decompressBlock; return 0; } /* empty block */ if (bp.lastBlock) { if (dctx->fParams.checksumFlag) { dctx->expected = 4; dctx->stage = ZSTDds_checkChecksum; } else { dctx->expected = 0; /* end of frame */ dctx->stage = ZSTDds_getFrameHeaderSize; } } else { dctx->expected = ZSTD_blockHeaderSize; /* jump to next header */ dctx->stage = ZSTDds_decodeBlockHeader; } return 0; } case ZSTDds_decompressLastBlock: case ZSTDds_decompressBlock: DEBUGLOG(5, "ZSTD_decompressContinue: case ZSTDds_decompressBlock"); { size_t rSize; switch(dctx->bType) { case bt_compressed: DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed"); assert(dctx->isFrameDecompression == 1); rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, is_streaming); dctx->expected = 0; /* Streaming not supported */ break; case bt_raw : assert(srcSize <= dctx->expected); rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize); FORWARD_IF_ERROR(rSize, "ZSTD_copyRawBlock failed"); assert(rSize == srcSize); dctx->expected -= rSize; break; case bt_rle : rSize = ZSTD_setRleBlock(dst, dstCapacity, *(const BYTE*)src, dctx->rleSize); dctx->expected = 0; /* Streaming not supported */ break; case bt_reserved : /* should never happen */ default: RETURN_ERROR(corruption_detected, "invalid block type"); } FORWARD_IF_ERROR(rSize, ""); RETURN_ERROR_IF(rSize > dctx->fParams.blockSizeMax, corruption_detected, "Decompressed Block Size Exceeds Maximum"); DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (unsigned)rSize); dctx->decodedSize += rSize; if (dctx->validateChecksum) xxh64_update(&dctx->xxhState, dst, rSize); dctx->previousDstEnd = (char*)dst + rSize; /* Stay on the same stage until we are finished streaming the block. */ if (dctx->expected > 0) { return rSize; } if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */ DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (unsigned)dctx->decodedSize); RETURN_ERROR_IF( dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN && dctx->decodedSize != dctx->fParams.frameContentSize, corruption_detected, ""); if (dctx->fParams.checksumFlag) { /* another round for frame checksum */ dctx->expected = 4; dctx->stage = ZSTDds_checkChecksum; } else { ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, /* streaming */ 1); dctx->expected = 0; /* ends here */ dctx->stage = ZSTDds_getFrameHeaderSize; } } else { dctx->stage = ZSTDds_decodeBlockHeader; dctx->expected = ZSTD_blockHeaderSize; } return rSize; } case ZSTDds_checkChecksum: assert(srcSize == 4); /* guaranteed by dctx->expected */ { if (dctx->validateChecksum) { U32 const h32 = (U32)xxh64_digest(&dctx->xxhState); U32 const check32 = MEM_readLE32(src); DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32); RETURN_ERROR_IF(check32 != h32, checksum_wrong, ""); } ZSTD_DCtx_trace_end(dctx, dctx->decodedSize, dctx->processedCSize, /* streaming */ 1); dctx->expected = 0; dctx->stage = ZSTDds_getFrameHeaderSize; return 0; } case ZSTDds_decodeSkippableHeader: assert(src != NULL); assert(srcSize <= ZSTD_SKIPPABLEHEADERSIZE); assert(dctx->format != ZSTD_f_zstd1_magicless); ZSTD_memcpy(dctx->headerBuffer + (ZSTD_SKIPPABLEHEADERSIZE - srcSize), src, srcSize); /* complete skippable header */ dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_FRAMEIDSIZE); /* note : dctx->expected can grow seriously large, beyond local buffer size */ dctx->stage = ZSTDds_skipFrame; return 0; case 
ZSTDds_skipFrame: dctx->expected = 0; dctx->stage = ZSTDds_getFrameHeaderSize; return 0; default: assert(0); /* impossible */ RETURN_ERROR(GENERIC, "impossible to reach"); /* some compilers require default to do something */ } } static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) { dctx->dictEnd = dctx->previousDstEnd; dctx->virtualStart = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart)); dctx->prefixStart = dict; dctx->previousDstEnd = (const char*)dict + dictSize; #ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION dctx->dictContentBeginForFuzzing = dctx->prefixStart; dctx->dictContentEndForFuzzing = dctx->previousDstEnd; #endif return 0; } /*! ZSTD_loadDEntropy() : * dict : must point at beginning of a valid zstd dictionary. * @return : size of entropy tables read */ size_t ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy, const void* const dict, size_t const dictSize) { const BYTE* dictPtr = (const BYTE*)dict; const BYTE* const dictEnd = dictPtr + dictSize; RETURN_ERROR_IF(dictSize <= 8, dictionary_corrupted, "dict is too small"); assert(MEM_readLE32(dict) == ZSTD_MAGIC_DICTIONARY); /* dict must be valid */ dictPtr += 8; /* skip header = magic + dictID */ ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, OFTable) == offsetof(ZSTD_entropyDTables_t, LLTable) + sizeof(entropy->LLTable)); ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, MLTable) == offsetof(ZSTD_entropyDTables_t, OFTable) + sizeof(entropy->OFTable)); ZSTD_STATIC_ASSERT(sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable) >= HUF_DECOMPRESS_WORKSPACE_SIZE); { void* const workspace = &entropy->LLTable; /* use fse tables as temporary workspace; implies fse tables are grouped together */ size_t const workspaceSize = sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable); #ifdef HUF_FORCE_DECOMPRESS_X1 /* in minimal huffman, we always use X1 variants */ size_t const hSize = HUF_readDTableX1_wksp(entropy->hufTable, dictPtr, dictEnd - dictPtr, workspace, workspaceSize, /* flags */ 0); #else size_t const hSize = HUF_readDTableX2_wksp(entropy->hufTable, dictPtr, (size_t)(dictEnd - dictPtr), workspace, workspaceSize, /* flags */ 0); #endif RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted, ""); dictPtr += hSize; } { short offcodeNCount[MaxOff+1]; unsigned offcodeMaxValue = MaxOff, offcodeLog; size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, (size_t)(dictEnd-dictPtr)); RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, ""); RETURN_ERROR_IF(offcodeMaxValue > MaxOff, dictionary_corrupted, ""); RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, ""); ZSTD_buildFSETable( entropy->OFTable, offcodeNCount, offcodeMaxValue, OF_base, OF_bits, offcodeLog, entropy->workspace, sizeof(entropy->workspace), /* bmi2 */0); dictPtr += offcodeHeaderSize; } { short matchlengthNCount[MaxML+1]; unsigned matchlengthMaxValue = MaxML, matchlengthLog; size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, (size_t)(dictEnd-dictPtr)); RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, ""); RETURN_ERROR_IF(matchlengthMaxValue > MaxML, dictionary_corrupted, ""); RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, ""); ZSTD_buildFSETable( entropy->MLTable, matchlengthNCount, matchlengthMaxValue, ML_base, ML_bits, matchlengthLog, entropy->workspace, 
sizeof(entropy->workspace), /* bmi2 */ 0); dictPtr += matchlengthHeaderSize; } { short litlengthNCount[MaxLL+1]; unsigned litlengthMaxValue = MaxLL, litlengthLog; size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, (size_t)(dictEnd-dictPtr)); RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, ""); RETURN_ERROR_IF(litlengthMaxValue > MaxLL, dictionary_corrupted, ""); RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, ""); ZSTD_buildFSETable( entropy->LLTable, litlengthNCount, litlengthMaxValue, LL_base, LL_bits, litlengthLog, entropy->workspace, sizeof(entropy->workspace), /* bmi2 */ 0); dictPtr += litlengthHeaderSize; } RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, ""); { int i; size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12)); for (i=0; i<3; i++) { U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4; RETURN_ERROR_IF(rep==0 || rep > dictContentSize, dictionary_corrupted, ""); entropy->rep[i] = rep; } } return (size_t)(dictPtr - (const BYTE*)dict); } static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) { if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize); { U32 const magic = MEM_readLE32(dict); if (magic != ZSTD_MAGIC_DICTIONARY) { return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */ } } dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE); /* load entropy tables */ { size_t const eSize = ZSTD_loadDEntropy(&dctx->entropy, dict, dictSize); RETURN_ERROR_IF(ZSTD_isError(eSize), dictionary_corrupted, ""); dict = (const char*)dict + eSize; dictSize -= eSize; } dctx->litEntropy = dctx->fseEntropy = 1; /* reference dictionary content */ return ZSTD_refDictContent(dctx, dict, dictSize); } size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx) { assert(dctx != NULL); dctx->expected = ZSTD_startingInputLength(dctx->format); /* dctx->format must be properly set */ dctx->stage = ZSTDds_getFrameHeaderSize; dctx->processedCSize = 0; dctx->decodedSize = 0; dctx->previousDstEnd = NULL; dctx->prefixStart = NULL; dctx->virtualStart = NULL; dctx->dictEnd = NULL; dctx->entropy.hufTable[0] = (HUF_DTable)((ZSTD_HUFFDTABLE_CAPACITY_LOG)*0x1000001); /* cover both little and big endian */ dctx->litEntropy = dctx->fseEntropy = 0; dctx->dictID = 0; dctx->bType = bt_reserved; dctx->isFrameDecompression = 1; ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue)); ZSTD_memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */ dctx->LLTptr = dctx->entropy.LLTable; dctx->MLTptr = dctx->entropy.MLTable; dctx->OFTptr = dctx->entropy.OFTable; dctx->HUFptr = dctx->entropy.hufTable; return 0; } size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) { FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , ""); if (dict && dictSize) RETURN_ERROR_IF( ZSTD_isError(ZSTD_decompress_insertDictionary(dctx, dict, dictSize)), dictionary_corrupted, ""); return 0; } /* ====== ZSTD_DDict ====== */ size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict) { DEBUGLOG(4, "ZSTD_decompressBegin_usingDDict"); assert(dctx != NULL); if (ddict) { const char* const dictStart = (const char*)ZSTD_DDict_dictContent(ddict); size_t const dictSize = ZSTD_DDict_dictSize(ddict); const void* const dictEnd = dictStart + dictSize; dctx->ddictIsCold = (dctx->dictEnd != dictEnd); DEBUGLOG(4, "DDict is %s", dctx->ddictIsCold ? 
"~cold~" : "hot!"); } FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , ""); if (ddict) { /* NULL ddict is equivalent to no dictionary */ ZSTD_copyDDictParameters(dctx, ddict); } return 0; } /*! ZSTD_getDictID_fromDict() : * Provides the dictID stored within dictionary. * if @return == 0, the dictionary is not conformant with Zstandard specification. * It can still be loaded, but as a content-only dictionary. */ unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize) { if (dictSize < 8) return 0; if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) return 0; return MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE); } /*! ZSTD_getDictID_fromFrame() : * Provides the dictID required to decompress frame stored within `src`. * If @return == 0, the dictID could not be decoded. * This could for one of the following reasons : * - The frame does not require a dictionary (most common case). * - The frame was built with dictID intentionally removed. * Needed dictionary is a hidden piece of information. * Note : this use case also happens when using a non-conformant dictionary. * - `srcSize` is too small, and as a result, frame header could not be decoded. * Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`. * - This is not a Zstandard frame. * When identifying the exact failure cause, it's possible to use * ZSTD_getFrameHeader(), which will provide a more precise error code. */ unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize) { ZSTD_FrameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0, 0, 0 }; size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize); if (ZSTD_isError(hError)) return 0; return zfp.dictID; } /*! ZSTD_decompress_usingDDict() : * Decompression using a pre-digested Dictionary * Use dictionary without significant overhead. 
*/ size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const ZSTD_DDict* ddict) { /* pass content and size in case legacy frames are encountered */ return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, NULL, 0, ddict); } /*===================================== * Streaming decompression *====================================*/ ZSTD_DStream* ZSTD_createDStream(void) { DEBUGLOG(3, "ZSTD_createDStream"); return ZSTD_createDCtx_internal(ZSTD_defaultCMem); } ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize) { return ZSTD_initStaticDCtx(workspace, workspaceSize); } ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem) { return ZSTD_createDCtx_internal(customMem); } size_t ZSTD_freeDStream(ZSTD_DStream* zds) { return ZSTD_freeDCtx(zds); } /* *** Initialization *** */ size_t ZSTD_DStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize; } size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_MAX; } size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType) { RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); ZSTD_clearDict(dctx); if (dict && dictSize != 0) { dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem); RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation, "NULL pointer!"); dctx->ddict = dctx->ddictLocal; dctx->dictUses = ZSTD_use_indefinitely; } return 0; } size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) { return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto); } size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) { return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto); } size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType) { FORWARD_IF_ERROR(ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType), ""); dctx->dictUses = ZSTD_use_once; return 0; } size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize) { return ZSTD_DCtx_refPrefix_advanced(dctx, prefix, prefixSize, ZSTD_dct_rawContent); } /* ZSTD_initDStream_usingDict() : * return : expected size, aka ZSTD_startingInputLength(). 
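 *
 * Illustrative streaming sketch (for illustration only; buffers and sizes are
 * hypothetical, error handling elided) :
 *   ZSTD_DStream* const zds = ZSTD_createDStream();
 *   ZSTD_initDStream_usingDict(zds, dict, dictSize);
 *   {   ZSTD_inBuffer  in  = { inBuf,  inSize,  0 };
 *       ZSTD_outBuffer out = { outBuf, outSize, 0 };
 *       while (in.pos < in.size) {
 *           size_t const ret = ZSTD_decompressStream(zds, &out, &in);
 *           if (ZSTD_isError(ret)) break;   // ret == 0 : current frame fully decoded
 *       }
 *   }
 *   ZSTD_freeDStream(zds);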
* this function cannot fail */ size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize) { DEBUGLOG(4, "ZSTD_initDStream_usingDict"); FORWARD_IF_ERROR( ZSTD_DCtx_reset(zds, ZSTD_reset_session_only) , ""); FORWARD_IF_ERROR( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) , ""); return ZSTD_startingInputLength(zds->format); } /* note : this variant can't fail */ size_t ZSTD_initDStream(ZSTD_DStream* zds) { DEBUGLOG(4, "ZSTD_initDStream"); FORWARD_IF_ERROR(ZSTD_DCtx_reset(zds, ZSTD_reset_session_only), ""); FORWARD_IF_ERROR(ZSTD_DCtx_refDDict(zds, NULL), ""); return ZSTD_startingInputLength(zds->format); } /* ZSTD_initDStream_usingDDict() : * ddict will just be referenced, and must outlive decompression session * this function cannot fail */ size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict) { DEBUGLOG(4, "ZSTD_initDStream_usingDDict"); FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) , ""); FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) , ""); return ZSTD_startingInputLength(dctx->format); } /* ZSTD_resetDStream() : * return : expected size, aka ZSTD_startingInputLength(). * this function cannot fail */ size_t ZSTD_resetDStream(ZSTD_DStream* dctx) { DEBUGLOG(4, "ZSTD_resetDStream"); FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only), ""); return ZSTD_startingInputLength(dctx->format); } size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict) { RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); ZSTD_clearDict(dctx); if (ddict) { dctx->ddict = ddict; dctx->dictUses = ZSTD_use_indefinitely; if (dctx->refMultipleDDicts == ZSTD_rmd_refMultipleDDicts) { if (dctx->ddictSet == NULL) { dctx->ddictSet = ZSTD_createDDictHashSet(dctx->customMem); if (!dctx->ddictSet) { RETURN_ERROR(memory_allocation, "Failed to allocate memory for hash set!"); } } assert(!dctx->staticSize); /* Impossible: ddictSet cannot have been allocated if static dctx */ FORWARD_IF_ERROR(ZSTD_DDictHashSet_addDDict(dctx->ddictSet, ddict, dctx->customMem), ""); } } return 0; } /* ZSTD_DCtx_setMaxWindowSize() : * note : no direct equivalence in ZSTD_DCtx_setParameter, * since this version sets windowSize, and the other sets windowLog */ size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize) { ZSTD_bounds const bounds = ZSTD_dParam_getBounds(ZSTD_d_windowLogMax); size_t const min = (size_t)1 << bounds.lowerBound; size_t const max = (size_t)1 << bounds.upperBound; RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); RETURN_ERROR_IF(maxWindowSize < min, parameter_outOfBound, ""); RETURN_ERROR_IF(maxWindowSize > max, parameter_outOfBound, ""); dctx->maxWindowSize = maxWindowSize; return 0; } size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format) { return ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, (int)format); } ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam) { ZSTD_bounds bounds = { 0, 0, 0 }; switch(dParam) { case ZSTD_d_windowLogMax: bounds.lowerBound = ZSTD_WINDOWLOG_ABSOLUTEMIN; bounds.upperBound = ZSTD_WINDOWLOG_MAX; return bounds; case ZSTD_d_format: bounds.lowerBound = (int)ZSTD_f_zstd1; bounds.upperBound = (int)ZSTD_f_zstd1_magicless; ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless); return bounds; case ZSTD_d_stableOutBuffer: bounds.lowerBound = (int)ZSTD_bm_buffered; bounds.upperBound = (int)ZSTD_bm_stable; return bounds; case ZSTD_d_forceIgnoreChecksum: bounds.lowerBound = (int)ZSTD_d_validateChecksum; bounds.upperBound = (int)ZSTD_d_ignoreChecksum; 
return bounds; case ZSTD_d_refMultipleDDicts: bounds.lowerBound = (int)ZSTD_rmd_refSingleDDict; bounds.upperBound = (int)ZSTD_rmd_refMultipleDDicts; return bounds; case ZSTD_d_disableHuffmanAssembly: bounds.lowerBound = 0; bounds.upperBound = 1; return bounds; case ZSTD_d_maxBlockSize: bounds.lowerBound = ZSTD_BLOCKSIZE_MAX_MIN; bounds.upperBound = ZSTD_BLOCKSIZE_MAX; return bounds; default:; } bounds.error = ERROR(parameter_unsupported); return bounds; } /* ZSTD_dParam_withinBounds: * @return 1 if value is within dParam bounds, * 0 otherwise */ static int ZSTD_dParam_withinBounds(ZSTD_dParameter dParam, int value) { ZSTD_bounds const bounds = ZSTD_dParam_getBounds(dParam); if (ZSTD_isError(bounds.error)) return 0; if (value < bounds.lowerBound) return 0; if (value > bounds.upperBound) return 0; return 1; } #define CHECK_DBOUNDS(p,v) { \ RETURN_ERROR_IF(!ZSTD_dParam_withinBounds(p, v), parameter_outOfBound, ""); \ } size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value) { switch (param) { case ZSTD_d_windowLogMax: *value = (int)ZSTD_highbit32((U32)dctx->maxWindowSize); return 0; case ZSTD_d_format: *value = (int)dctx->format; return 0; case ZSTD_d_stableOutBuffer: *value = (int)dctx->outBufferMode; return 0; case ZSTD_d_forceIgnoreChecksum: *value = (int)dctx->forceIgnoreChecksum; return 0; case ZSTD_d_refMultipleDDicts: *value = (int)dctx->refMultipleDDicts; return 0; case ZSTD_d_disableHuffmanAssembly: *value = (int)dctx->disableHufAsm; return 0; case ZSTD_d_maxBlockSize: *value = dctx->maxBlockSizeParam; return 0; default:; } RETURN_ERROR(parameter_unsupported, ""); } size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value) { RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); switch(dParam) { case ZSTD_d_windowLogMax: if (value == 0) value = ZSTD_WINDOWLOG_LIMIT_DEFAULT; CHECK_DBOUNDS(ZSTD_d_windowLogMax, value); dctx->maxWindowSize = ((size_t)1) << value; return 0; case ZSTD_d_format: CHECK_DBOUNDS(ZSTD_d_format, value); dctx->format = (ZSTD_format_e)value; return 0; case ZSTD_d_stableOutBuffer: CHECK_DBOUNDS(ZSTD_d_stableOutBuffer, value); dctx->outBufferMode = (ZSTD_bufferMode_e)value; return 0; case ZSTD_d_forceIgnoreChecksum: CHECK_DBOUNDS(ZSTD_d_forceIgnoreChecksum, value); dctx->forceIgnoreChecksum = (ZSTD_forceIgnoreChecksum_e)value; return 0; case ZSTD_d_refMultipleDDicts: CHECK_DBOUNDS(ZSTD_d_refMultipleDDicts, value); if (dctx->staticSize != 0) { RETURN_ERROR(parameter_unsupported, "Static dctx does not support multiple DDicts!"); } dctx->refMultipleDDicts = (ZSTD_refMultipleDDicts_e)value; return 0; case ZSTD_d_disableHuffmanAssembly: CHECK_DBOUNDS(ZSTD_d_disableHuffmanAssembly, value); dctx->disableHufAsm = value != 0; return 0; case ZSTD_d_maxBlockSize: if (value != 0) CHECK_DBOUNDS(ZSTD_d_maxBlockSize, value); dctx->maxBlockSizeParam = value; return 0; default:; } RETURN_ERROR(parameter_unsupported, ""); } size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset) { if ( (reset == ZSTD_reset_session_only) || (reset == ZSTD_reset_session_and_parameters) ) { dctx->streamStage = zdss_init; dctx->noForwardProgress = 0; dctx->isFrameDecompression = 1; } if ( (reset == ZSTD_reset_parameters) || (reset == ZSTD_reset_session_and_parameters) ) { RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); ZSTD_clearDict(dctx); ZSTD_DCtx_resetParameters(dctx); } return 0; } size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx) { return ZSTD_sizeof_DCtx(dctx); } static size_t 
ZSTD_decodingBufferSize_internal(unsigned long long windowSize, unsigned long long frameContentSize, size_t blockSizeMax) { size_t const blockSize = MIN((size_t)MIN(windowSize, ZSTD_BLOCKSIZE_MAX), blockSizeMax); /* We need blockSize + WILDCOPY_OVERLENGTH worth of buffer so that if a block * ends at windowSize + WILDCOPY_OVERLENGTH + 1 bytes, we can start writing * the block at the beginning of the output buffer, and maintain a full window. * * We need another blockSize worth of buffer so that we can store split * literals at the end of the block without overwriting the extDict window. */ unsigned long long const neededRBSize = windowSize + (blockSize * 2) + (WILDCOPY_OVERLENGTH * 2); unsigned long long const neededSize = MIN(frameContentSize, neededRBSize); size_t const minRBSize = (size_t) neededSize; RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize, frameParameter_windowTooLarge, ""); return minRBSize; } size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize) { return ZSTD_decodingBufferSize_internal(windowSize, frameContentSize, ZSTD_BLOCKSIZE_MAX); } size_t ZSTD_estimateDStreamSize(size_t windowSize) { size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX); size_t const inBuffSize = blockSize; /* no block can be larger */ size_t const outBuffSize = ZSTD_decodingBufferSize_min(windowSize, ZSTD_CONTENTSIZE_UNKNOWN); return ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize; } size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize) { U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; /* note : should be user-selectable, but requires an additional parameter (or a dctx) */ ZSTD_FrameHeader zfh; size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize); if (ZSTD_isError(err)) return err; RETURN_ERROR_IF(err>0, srcSize_wrong, ""); RETURN_ERROR_IF(zfh.windowSize > windowSizeMax, frameParameter_windowTooLarge, ""); return ZSTD_estimateDStreamSize((size_t)zfh.windowSize); } /* ***** Decompression ***** */ static int ZSTD_DCtx_isOverflow(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize) { return (zds->inBuffSize + zds->outBuffSize) >= (neededInBuffSize + neededOutBuffSize) * ZSTD_WORKSPACETOOLARGE_FACTOR; } static void ZSTD_DCtx_updateOversizedDuration(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize) { if (ZSTD_DCtx_isOverflow(zds, neededInBuffSize, neededOutBuffSize)) zds->oversizedDuration++; else zds->oversizedDuration = 0; } static int ZSTD_DCtx_isOversizedTooLong(ZSTD_DStream* zds) { return zds->oversizedDuration >= ZSTD_WORKSPACETOOLARGE_MAXDURATION; } /* Checks that the output buffer hasn't changed if ZSTD_obm_stable is used. */ static size_t ZSTD_checkOutBuffer(ZSTD_DStream const* zds, ZSTD_outBuffer const* output) { ZSTD_outBuffer const expect = zds->expectedOutBuffer; /* No requirement when ZSTD_obm_stable is not enabled. */ if (zds->outBufferMode != ZSTD_bm_stable) return 0; /* Any buffer is allowed in zdss_init, this must be the same for every other call until * the context is reset. */ if (zds->streamStage == zdss_init) return 0; /* The buffer must match our expectation exactly. */ if (expect.dst == output->dst && expect.pos == output->pos && expect.size == output->size) return 0; RETURN_ERROR(dstBuffer_wrong, "ZSTD_d_stableOutBuffer enabled but output differs!"); } /* Calls ZSTD_decompressContinue() with the right parameters for ZSTD_decompressStream() * and updates the stage and the output buffer state. 
This call is extracted so it can be * used both when reading directly from the ZSTD_inBuffer, and in buffered input mode. * NOTE: You must break after calling this function since the streamStage is modified. */ static size_t ZSTD_decompressContinueStream( ZSTD_DStream* zds, char** op, char* oend, void const* src, size_t srcSize) { int const isSkipFrame = ZSTD_isSkipFrame(zds); if (zds->outBufferMode == ZSTD_bm_buffered) { size_t const dstSize = isSkipFrame ? 0 : zds->outBuffSize - zds->outStart; size_t const decodedSize = ZSTD_decompressContinue(zds, zds->outBuff + zds->outStart, dstSize, src, srcSize); FORWARD_IF_ERROR(decodedSize, ""); if (!decodedSize && !isSkipFrame) { zds->streamStage = zdss_read; } else { zds->outEnd = zds->outStart + decodedSize; zds->streamStage = zdss_flush; } } else { /* Write directly into the output buffer */ size_t const dstSize = isSkipFrame ? 0 : (size_t)(oend - *op); size_t const decodedSize = ZSTD_decompressContinue(zds, *op, dstSize, src, srcSize); FORWARD_IF_ERROR(decodedSize, ""); *op += decodedSize; /* Flushing is not needed. */ zds->streamStage = zdss_read; assert(*op <= oend); assert(zds->outBufferMode == ZSTD_bm_stable); } return 0; } size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input) { const char* const src = (const char*)input->src; const char* const istart = input->pos != 0 ? src + input->pos : src; const char* const iend = input->size != 0 ? src + input->size : src; const char* ip = istart; char* const dst = (char*)output->dst; char* const ostart = output->pos != 0 ? dst + output->pos : dst; char* const oend = output->size != 0 ? dst + output->size : dst; char* op = ostart; U32 someMoreWork = 1; DEBUGLOG(5, "ZSTD_decompressStream"); assert(zds != NULL); RETURN_ERROR_IF( input->pos > input->size, srcSize_wrong, "forbidden. in: pos: %u vs size: %u", (U32)input->pos, (U32)input->size); RETURN_ERROR_IF( output->pos > output->size, dstSize_tooSmall, "forbidden. 
out: pos: %u vs size: %u", (U32)output->pos, (U32)output->size); DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos)); FORWARD_IF_ERROR(ZSTD_checkOutBuffer(zds, output), ""); while (someMoreWork) { switch(zds->streamStage) { case zdss_init : DEBUGLOG(5, "stage zdss_init => transparent reset "); zds->streamStage = zdss_loadHeader; zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0; zds->hostageByte = 0; zds->expectedOutBuffer = *output; ZSTD_FALLTHROUGH; case zdss_loadHeader : DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip)); { size_t const hSize = ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format); if (zds->refMultipleDDicts && zds->ddictSet) { ZSTD_DCtx_selectFrameDDict(zds); } if (ZSTD_isError(hSize)) { return hSize; /* error */ } if (hSize != 0) { /* need more input */ size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */ size_t const remainingInput = (size_t)(iend-ip); assert(iend >= ip); if (toLoad > remainingInput) { /* not enough input to load full header */ if (remainingInput > 0) { ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, remainingInput); zds->lhSize += remainingInput; } input->pos = input->size; /* check first few bytes */ FORWARD_IF_ERROR( ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format), "First few bytes detected incorrect" ); /* return hint input size */ return (MAX((size_t)ZSTD_FRAMEHEADERSIZE_MIN(zds->format), hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */ } assert(ip != NULL); ZSTD_memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad; break; } } /* check for single-pass mode opportunity */ if (zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN && zds->fParams.frameType != ZSTD_skippableFrame && (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) { size_t const cSize = ZSTD_findFrameCompressedSize_advanced(istart, (size_t)(iend-istart), zds->format); if (cSize <= (size_t)(iend-istart)) { /* shortcut : using single-pass mode */ size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, (size_t)(oend-op), istart, cSize, ZSTD_getDDict(zds)); if (ZSTD_isError(decompressedSize)) return decompressedSize; DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()"); assert(istart != NULL); ip = istart + cSize; op = op ? op + decompressedSize : op; /* can occur if frameContentSize = 0 (empty frame) */ zds->expected = 0; zds->streamStage = zdss_init; someMoreWork = 0; break; } } /* Check output buffer is large enough for ZSTD_odm_stable. 
*/ if (zds->outBufferMode == ZSTD_bm_stable && zds->fParams.frameType != ZSTD_skippableFrame && zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN && (U64)(size_t)(oend-op) < zds->fParams.frameContentSize) { RETURN_ERROR(dstSize_tooSmall, "ZSTD_obm_stable passed but ZSTD_outBuffer is too small"); } /* Consume header (see ZSTDds_decodeFrameHeader) */ DEBUGLOG(4, "Consume header"); FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)), ""); if (zds->format == ZSTD_f_zstd1 && (MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */ zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE); zds->stage = ZSTDds_skipFrame; } else { FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize), ""); zds->expected = ZSTD_blockHeaderSize; zds->stage = ZSTDds_decodeBlockHeader; } /* control buffer memory usage */ DEBUGLOG(4, "Control max memory usage (%u KB <= max %u KB)", (U32)(zds->fParams.windowSize >>10), (U32)(zds->maxWindowSize >> 10) ); zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN); RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize, frameParameter_windowTooLarge, ""); if (zds->maxBlockSizeParam != 0) zds->fParams.blockSizeMax = MIN(zds->fParams.blockSizeMax, (unsigned)zds->maxBlockSizeParam); /* Adapt buffer sizes to frame header instructions */ { size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */); size_t const neededOutBuffSize = zds->outBufferMode == ZSTD_bm_buffered ? ZSTD_decodingBufferSize_internal(zds->fParams.windowSize, zds->fParams.frameContentSize, zds->fParams.blockSizeMax) : 0; ZSTD_DCtx_updateOversizedDuration(zds, neededInBuffSize, neededOutBuffSize); { int const tooSmall = (zds->inBuffSize < neededInBuffSize) || (zds->outBuffSize < neededOutBuffSize); int const tooLarge = ZSTD_DCtx_isOversizedTooLong(zds); if (tooSmall || tooLarge) { size_t const bufferSize = neededInBuffSize + neededOutBuffSize; DEBUGLOG(4, "inBuff : from %u to %u", (U32)zds->inBuffSize, (U32)neededInBuffSize); DEBUGLOG(4, "outBuff : from %u to %u", (U32)zds->outBuffSize, (U32)neededOutBuffSize); if (zds->staticSize) { /* static DCtx */ DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize); assert(zds->staticSize >= sizeof(ZSTD_DCtx)); /* controlled at init */ RETURN_ERROR_IF( bufferSize > zds->staticSize - sizeof(ZSTD_DCtx), memory_allocation, ""); } else { ZSTD_customFree(zds->inBuff, zds->customMem); zds->inBuffSize = 0; zds->outBuffSize = 0; zds->inBuff = (char*)ZSTD_customMalloc(bufferSize, zds->customMem); RETURN_ERROR_IF(zds->inBuff == NULL, memory_allocation, ""); } zds->inBuffSize = neededInBuffSize; zds->outBuff = zds->inBuff + zds->inBuffSize; zds->outBuffSize = neededOutBuffSize; } } } zds->streamStage = zdss_read; ZSTD_FALLTHROUGH; case zdss_read: DEBUGLOG(5, "stage zdss_read"); { size_t const neededInSize = ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (size_t)(iend - ip)); DEBUGLOG(5, "neededInSize = %u", (U32)neededInSize); if (neededInSize==0) { /* end of frame */ zds->streamStage = zdss_init; someMoreWork = 0; break; } if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */ FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, ip, neededInSize), ""); assert(ip != NULL); ip += neededInSize; /* Function modifies the stage so we must break */ break; } } if (ip==iend) { someMoreWork = 0; break; } /* no more input */ zds->streamStage = zdss_load; ZSTD_FALLTHROUGH; case 
zdss_load: { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds); size_t const toLoad = neededInSize - zds->inPos; int const isSkipFrame = ZSTD_isSkipFrame(zds); size_t loadedSize; /* At this point we shouldn't be decompressing a block that we can stream. */ assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, (size_t)(iend - ip))); if (isSkipFrame) { loadedSize = MIN(toLoad, (size_t)(iend-ip)); } else { RETURN_ERROR_IF(toLoad > zds->inBuffSize - zds->inPos, corruption_detected, "should never happen"); loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, (size_t)(iend-ip)); } if (loadedSize != 0) { /* ip may be NULL */ ip += loadedSize; zds->inPos += loadedSize; } if (loadedSize < toLoad) { someMoreWork = 0; break; } /* not enough input, wait for more */ /* decode loaded input */ zds->inPos = 0; /* input is consumed */ FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, zds->inBuff, neededInSize), ""); /* Function modifies the stage so we must break */ break; } case zdss_flush: { size_t const toFlushSize = zds->outEnd - zds->outStart; size_t const flushedSize = ZSTD_limitCopy(op, (size_t)(oend-op), zds->outBuff + zds->outStart, toFlushSize); op = op ? op + flushedSize : op; zds->outStart += flushedSize; if (flushedSize == toFlushSize) { /* flush completed */ zds->streamStage = zdss_read; if ( (zds->outBuffSize < zds->fParams.frameContentSize) && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) { DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)", (int)(zds->outBuffSize - zds->outStart), (U32)zds->fParams.blockSizeMax); zds->outStart = zds->outEnd = 0; } break; } } /* cannot complete flush */ someMoreWork = 0; break; default: assert(0); /* impossible */ RETURN_ERROR(GENERIC, "impossible to reach"); /* some compilers require default to do something */ } } /* result */ input->pos = (size_t)(ip - (const char*)(input->src)); output->pos = (size_t)(op - (char*)(output->dst)); /* Update the expected output buffer for ZSTD_obm_stable. 
*/ zds->expectedOutBuffer = *output; if ((ip==istart) && (op==ostart)) { /* no forward progress */ zds->noForwardProgress ++; if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) { RETURN_ERROR_IF(op==oend, noForwardProgress_destFull, ""); RETURN_ERROR_IF(ip==iend, noForwardProgress_inputEmpty, ""); assert(0); } } else { zds->noForwardProgress = 0; } { size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds); if (!nextSrcSizeHint) { /* frame fully decoded */ if (zds->outEnd == zds->outStart) { /* output fully flushed */ if (zds->hostageByte) { if (input->pos >= input->size) { /* can't release hostage (not present) */ zds->streamStage = zdss_read; return 1; } input->pos++; /* release hostage */ } /* zds->hostageByte */ return 0; } /* zds->outEnd == zds->outStart */ if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */ input->pos--; /* note : pos > 0, otherwise, impossible to finish reading last block */ zds->hostageByte=1; } return 1; } /* nextSrcSizeHint==0 */ nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds) == ZSTDnit_block); /* preload header of next block */ assert(zds->inPos <= nextSrcSizeHint); nextSrcSizeHint -= zds->inPos; /* part already loaded*/ return nextSrcSizeHint; } } size_t ZSTD_decompressStream_simpleArgs ( ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, size_t* dstPos, const void* src, size_t srcSize, size_t* srcPos) { ZSTD_outBuffer output; ZSTD_inBuffer input; output.dst = dst; output.size = dstCapacity; output.pos = *dstPos; input.src = src; input.size = srcSize; input.pos = *srcPos; { size_t const cErr = ZSTD_decompressStream(dctx, &output, &input); *dstPos = output.pos; *srcPos = input.pos; return cErr; } }
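The streaming decoder above is driven through the public ZSTD_inBuffer/ZSTD_outBuffer contract: a return value of 0 means a frame was fully decoded and flushed, and any other non-error value is a size hint for the next call. The following is a minimal, hedged caller sketch assuming only the standard upstream <zstd.h> API (ZSTD_createDStream, ZSTD_DStreamInSize/ZSTD_DStreamOutSize, ZSTD_decompressStream, ZSTD_isError); it is an illustration of the loop contract, not part of the library source.

/*
 * Hedged usage sketch: drive ZSTD_decompressStream() from a plain C
 * program, assuming the standard public zstd streaming API.
 */
#include <stdio.h>
#include <stdlib.h>
#include <zstd.h>

static int decompress_stream(FILE *fin, FILE *fout)
{
    size_t const in_cap  = ZSTD_DStreamInSize();   /* recommended input chunk */
    size_t const out_cap = ZSTD_DStreamOutSize();  /* recommended output chunk */
    void *in_buf  = malloc(in_cap);
    void *out_buf = malloc(out_cap);
    ZSTD_DStream *dstream = ZSTD_createDStream();
    size_t ret = 0;
    int err = -1;

    if (!in_buf || !out_buf || !dstream)
        goto out;

    for (;;) {
        size_t n = fread(in_buf, 1, in_cap, fin);
        ZSTD_inBuffer input = { in_buf, n, 0 };

        if (n == 0)
            break;                      /* no more input */
        while (input.pos < input.size) {
            ZSTD_outBuffer output = { out_buf, out_cap, 0 };

            /* 0 => frame fully decoded and flushed; other non-error
             * values are a hint for the next input size. */
            ret = ZSTD_decompressStream(dstream, &output, &input);
            if (ZSTD_isError(ret))
                goto out;               /* corrupt or unsupported data */
            fwrite(out_buf, 1, output.pos, fout);
        }
    }
    err = (ret == 0) ? 0 : -1;          /* non-zero here means truncated frame */
out:
    ZSTD_freeDStream(dstream);
    free(in_buf);
    free(out_buf);
    return err;
}

int main(void)
{
    return decompress_stream(stdin, stdout) ? EXIT_FAILURE : EXIT_SUCCESS;
}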
// SPDX-License-Identifier: GPL-2.0 /* * linux/mm/mlock.c * * (C) Copyright 1995 Linus Torvalds * (C) Copyright 2002 Christoph Hellwig */ #include <linux/capability.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/sched/user.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/pagemap.h> #include <linux/pagevec.h> #include <linux/pagewalk.h> #include <linux/mempolicy.h> #include <linux/syscalls.h> #include <linux/sched.h> #include <linux/export.h> #include <linux/rmap.h> #include <linux/mmzone.h> #include <linux/hugetlb.h> #include <linux/memcontrol.h> #include <linux/mm_inline.h> #include <linux/secretmem.h> #include "internal.h" struct mlock_fbatch { local_lock_t lock; struct folio_batch fbatch; }; static DEFINE_PER_CPU(struct mlock_fbatch, mlock_fbatch) = { .lock = INIT_LOCAL_LOCK(lock), }; bool can_do_mlock(void) { if (rlimit(RLIMIT_MEMLOCK) != 0) return true; if (capable(CAP_IPC_LOCK)) return true; return false; } EXPORT_SYMBOL(can_do_mlock); /* * Mlocked folios are marked with the PG_mlocked flag for efficient testing * in vmscan and, possibly, the fault path; and to support semi-accurate * statistics. * * An mlocked folio [folio_test_mlocked(folio)] is unevictable. As such, it * will be ostensibly placed on the LRU "unevictable" list (actually no such * list exists), rather than the [in]active lists. PG_unevictable is set to * indicate the unevictable state. */ static struct lruvec *__mlock_folio(struct folio *folio, struct lruvec *lruvec) { /* There is nothing more we can do while it's off LRU */ if (!folio_test_clear_lru(folio)) return lruvec; lruvec = folio_lruvec_relock_irq(folio, lruvec); if (unlikely(folio_evictable(folio))) { /* * This is a little surprising, but quite possible: PG_mlocked * must have got cleared already by another CPU. Could this * folio be unevictable? I'm not sure, but move it now if so.
*/ if (folio_test_unevictable(folio)) { lruvec_del_folio(lruvec, folio); folio_clear_unevictable(folio); lruvec_add_folio(lruvec, folio); __count_vm_events(UNEVICTABLE_PGRESCUED, folio_nr_pages(folio)); } goto out; } if (folio_test_unevictable(folio)) { if (folio_test_mlocked(folio)) folio->mlock_count++; goto out; } lruvec_del_folio(lruvec, folio); folio_clear_active(folio); folio_set_unevictable(folio); folio->mlock_count = !!folio_test_mlocked(folio); lruvec_add_folio(lruvec, folio); __count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio)); out: folio_set_lru(folio); return lruvec; } static struct lruvec *__mlock_new_folio(struct folio *folio, struct lruvec *lruvec) { VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); lruvec = folio_lruvec_relock_irq(folio, lruvec); /* As above, this is a little surprising, but possible */ if (unlikely(folio_evictable(folio))) goto out; folio_set_unevictable(folio); folio->mlock_count = !!folio_test_mlocked(folio); __count_vm_events(UNEVICTABLE_PGCULLED, folio_nr_pages(folio)); out: lruvec_add_folio(lruvec, folio); folio_set_lru(folio); return lruvec; } static struct lruvec *__munlock_folio(struct folio *folio, struct lruvec *lruvec) { int nr_pages = folio_nr_pages(folio); bool isolated = false; if (!folio_test_clear_lru(folio)) goto munlock; isolated = true; lruvec = folio_lruvec_relock_irq(folio, lruvec); if (folio_test_unevictable(folio)) { /* Then mlock_count is maintained, but might undercount */ if (folio->mlock_count) folio->mlock_count--; if (folio->mlock_count) goto out; } /* else assume that was the last mlock: reclaim will fix it if not */ munlock: if (folio_test_clear_mlocked(folio)) { __zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages); if (isolated || !folio_test_unevictable(folio)) __count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages); else __count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages); } /* folio_evictable() has to be checked *after* clearing Mlocked */ if (isolated && folio_test_unevictable(folio) && folio_evictable(folio)) { lruvec_del_folio(lruvec, folio); folio_clear_unevictable(folio); lruvec_add_folio(lruvec, folio); __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages); } out: if (isolated) folio_set_lru(folio); return lruvec; } /* * Flags held in the low bits of a struct folio pointer on the mlock_fbatch. */ #define LRU_FOLIO 0x1 #define NEW_FOLIO 0x2 static inline struct folio *mlock_lru(struct folio *folio) { return (struct folio *)((unsigned long)folio + LRU_FOLIO); } static inline struct folio *mlock_new(struct folio *folio) { return (struct folio *)((unsigned long)folio + NEW_FOLIO); } /* * mlock_folio_batch() is derived from folio_batch_move_lru(): perhaps that can * make use of such folio pointer flags in future, but for now just keep it for * mlock. We could use three separate folio batches instead, but one feels * better (munlocking a full folio batch does not need to drain mlocking folio * batches first). 
*/ static void mlock_folio_batch(struct folio_batch *fbatch) { struct lruvec *lruvec = NULL; unsigned long mlock; struct folio *folio; int i; for (i = 0; i < folio_batch_count(fbatch); i++) { folio = fbatch->folios[i]; mlock = (unsigned long)folio & (LRU_FOLIO | NEW_FOLIO); folio = (struct folio *)((unsigned long)folio - mlock); fbatch->folios[i] = folio; if (mlock & LRU_FOLIO) lruvec = __mlock_folio(folio, lruvec); else if (mlock & NEW_FOLIO) lruvec = __mlock_new_folio(folio, lruvec); else lruvec = __munlock_folio(folio, lruvec); } if (lruvec) unlock_page_lruvec_irq(lruvec); folios_put(fbatch); } void mlock_drain_local(void) { struct folio_batch *fbatch; local_lock(&mlock_fbatch.lock); fbatch = this_cpu_ptr(&mlock_fbatch.fbatch); if (folio_batch_count(fbatch)) mlock_folio_batch(fbatch); local_unlock(&mlock_fbatch.lock); } void mlock_drain_remote(int cpu) { struct folio_batch *fbatch; WARN_ON_ONCE(cpu_online(cpu)); fbatch = &per_cpu(mlock_fbatch.fbatch, cpu); if (folio_batch_count(fbatch)) mlock_folio_batch(fbatch); } bool need_mlock_drain(int cpu) { return folio_batch_count(&per_cpu(mlock_fbatch.fbatch, cpu)); } /** * mlock_folio - mlock a folio already on (or temporarily off) LRU * @folio: folio to be mlocked. */ void mlock_folio(struct folio *folio) { struct folio_batch *fbatch; local_lock(&mlock_fbatch.lock); fbatch = this_cpu_ptr(&mlock_fbatch.fbatch); if (!folio_test_set_mlocked(folio)) { int nr_pages = folio_nr_pages(folio); zone_stat_mod_folio(folio, NR_MLOCK, nr_pages); __count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages); } folio_get(folio); if (!folio_batch_add(fbatch, mlock_lru(folio)) || folio_test_large(folio) || lru_cache_disabled()) mlock_folio_batch(fbatch); local_unlock(&mlock_fbatch.lock); } /** * mlock_new_folio - mlock a newly allocated folio not yet on LRU * @folio: folio to be mlocked, either normal or a THP head. */ void mlock_new_folio(struct folio *folio) { struct folio_batch *fbatch; int nr_pages = folio_nr_pages(folio); local_lock(&mlock_fbatch.lock); fbatch = this_cpu_ptr(&mlock_fbatch.fbatch); folio_set_mlocked(folio); zone_stat_mod_folio(folio, NR_MLOCK, nr_pages); __count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages); folio_get(folio); if (!folio_batch_add(fbatch, mlock_new(folio)) || folio_test_large(folio) || lru_cache_disabled()) mlock_folio_batch(fbatch); local_unlock(&mlock_fbatch.lock); } /** * munlock_folio - munlock a folio * @folio: folio to be munlocked, either normal or a THP head. */ void munlock_folio(struct folio *folio) { struct folio_batch *fbatch; local_lock(&mlock_fbatch.lock); fbatch = this_cpu_ptr(&mlock_fbatch.fbatch); /* * folio_test_clear_mlocked(folio) must be left to __munlock_folio(), * which will check whether the folio is multiply mlocked. */ folio_get(folio); if (!folio_batch_add(fbatch, folio) || folio_test_large(folio) || lru_cache_disabled()) mlock_folio_batch(fbatch); local_unlock(&mlock_fbatch.lock); } static inline unsigned int folio_mlock_step(struct folio *folio, pte_t *pte, unsigned long addr, unsigned long end) { const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY; unsigned int count = (end - addr) >> PAGE_SHIFT; pte_t ptent = ptep_get(pte); if (!folio_test_large(folio)) return 1; return folio_pte_batch(folio, addr, pte, ptent, count, fpb_flags, NULL, NULL, NULL); } static inline bool allow_mlock_munlock(struct folio *folio, struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned int step) { /* * For unlock, allow munlock large folio which is partially * mapped to VMA. 
As it's possible that large folio is * mlocked and VMA is split later. * * During memory pressure, such kind of large folio can * be split. And the pages are not in VM_LOCKed VMA * can be reclaimed. */ if (!(vma->vm_flags & VM_LOCKED)) return true; /* folio_within_range() cannot take KSM, but any small folio is OK */ if (!folio_test_large(folio)) return true; /* folio not in range [start, end), skip mlock */ if (!folio_within_range(folio, vma, start, end)) return false; /* folio is not fully mapped, skip mlock */ if (step != folio_nr_pages(folio)) return false; return true; } static int mlock_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct vm_area_struct *vma = walk->vma; spinlock_t *ptl; pte_t *start_pte, *pte; pte_t ptent; struct folio *folio; unsigned int step = 1; unsigned long start = addr; ptl = pmd_trans_huge_lock(pmd, vma); if (ptl) { if (!pmd_present(*pmd)) goto out; if (is_huge_zero_pmd(*pmd)) goto out; folio = pmd_folio(*pmd); if (folio_is_zone_device(folio)) goto out; if (vma->vm_flags & VM_LOCKED) mlock_folio(folio); else munlock_folio(folio); goto out; } start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); if (!start_pte) { walk->action = ACTION_AGAIN; return 0; } for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) { ptent = ptep_get(pte); if (!pte_present(ptent)) continue; folio = vm_normal_folio(vma, addr, ptent); if (!folio || folio_is_zone_device(folio)) continue; step = folio_mlock_step(folio, pte, addr, end); if (!allow_mlock_munlock(folio, vma, start, end, step)) goto next_entry; if (vma->vm_flags & VM_LOCKED) mlock_folio(folio); else munlock_folio(folio); next_entry: pte += step - 1; addr += (step - 1) << PAGE_SHIFT; } pte_unmap(start_pte); out: spin_unlock(ptl); cond_resched(); return 0; } /* * mlock_vma_pages_range() - mlock any pages already in the range, * or munlock all pages in the range. * @vma - vma containing range to be mlock()ed or munlock()ed * @start - start address in @vma of the range * @end - end of range in @vma * @newflags - the new set of flags for @vma. * * Called for mlock(), mlock2() and mlockall(), to set @vma VM_LOCKED; * called for munlock() and munlockall(), to clear VM_LOCKED from @vma. */ static void mlock_vma_pages_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, vm_flags_t newflags) { static const struct mm_walk_ops mlock_walk_ops = { .pmd_entry = mlock_pte_range, .walk_lock = PGWALK_WRLOCK_VERIFY, }; /* * There is a slight chance that concurrent page migration, * or page reclaim finding a page of this now-VM_LOCKED vma, * will call mlock_vma_folio() and raise page's mlock_count: * double counting, leaving the page unevictable indefinitely. * Communicate this danger to mlock_vma_folio() with VM_IO, * which is a VM_SPECIAL flag not allowed on VM_LOCKED vmas. * mmap_lock is held in write mode here, so this weird * combination should not be visible to other mmap_lock users; * but WRITE_ONCE so rmap walkers must see VM_IO if VM_LOCKED. */ if (newflags & VM_LOCKED) newflags |= VM_IO; vma_start_write(vma); vm_flags_reset_once(vma, newflags); lru_add_drain(); walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL); lru_add_drain(); if (newflags & VM_IO) { newflags &= ~VM_IO; vm_flags_reset_once(vma, newflags); } } /* * mlock_fixup - handle mlock[all]/munlock[all] requests. * * Filters out "special" vmas -- VM_LOCKED never gets set for these, and * munlock is a no-op. However, for some special vmas, we go ahead and * populate the ptes. 
* * For vmas that pass the filters, merge/split as appropriate. */ static int mlock_fixup(struct vma_iterator *vmi, struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, vm_flags_t newflags) { struct mm_struct *mm = vma->vm_mm; int nr_pages; int ret = 0; vm_flags_t oldflags = vma->vm_flags; if (newflags == oldflags || (oldflags & VM_SPECIAL) || is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm) || vma_is_dax(vma) || vma_is_secretmem(vma) || (oldflags & VM_DROPPABLE)) /* don't set VM_LOCKED or VM_LOCKONFAULT and don't count */ goto out; vma = vma_modify_flags(vmi, *prev, vma, start, end, newflags); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto out; } /* * Keep track of amount of locked VM. */ nr_pages = (end - start) >> PAGE_SHIFT; if (!(newflags & VM_LOCKED)) nr_pages = -nr_pages; else if (oldflags & VM_LOCKED) nr_pages = 0; mm->locked_vm += nr_pages; /* * vm_flags is protected by the mmap_lock held in write mode. * It's okay if try_to_unmap_one unmaps a page just after we * set VM_LOCKED, populate_vma_page_range will bring it back. */ if ((newflags & VM_LOCKED) && (oldflags & VM_LOCKED)) { /* No work to do, and mlocking twice would be wrong */ vma_start_write(vma); vm_flags_reset(vma, newflags); } else { mlock_vma_pages_range(vma, start, end, newflags); } out: *prev = vma; return ret; } static int apply_vma_lock_flags(unsigned long start, size_t len, vm_flags_t flags) { unsigned long nstart, end, tmp; struct vm_area_struct *vma, *prev; VMA_ITERATOR(vmi, current->mm, start); VM_BUG_ON(offset_in_page(start)); VM_BUG_ON(len != PAGE_ALIGN(len)); end = start + len; if (end < start) return -EINVAL; if (end == start) return 0; vma = vma_iter_load(&vmi); if (!vma) return -ENOMEM; prev = vma_prev(&vmi); if (start > vma->vm_start) prev = vma; nstart = start; tmp = vma->vm_start; for_each_vma_range(vmi, vma, end) { int error; vm_flags_t newflags; if (vma->vm_start != tmp) return -ENOMEM; newflags = vma->vm_flags & ~VM_LOCKED_MASK; newflags |= flags; /* Here we know that vma->vm_start <= nstart < vma->vm_end. */ tmp = vma->vm_end; if (tmp > end) tmp = end; error = mlock_fixup(&vmi, vma, &prev, nstart, tmp, newflags); if (error) return error; tmp = vma_iter_end(&vmi); nstart = tmp; } if (tmp < end) return -ENOMEM; return 0; } /* * Go through vma areas and sum size of mlocked * vma pages, as return value. * Note deferred memory locking case(mlock2(,,MLOCK_ONFAULT) * is also counted. 
* Return value: previously mlocked page counts */ static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm, unsigned long start, size_t len) { struct vm_area_struct *vma; unsigned long count = 0; unsigned long end; VMA_ITERATOR(vmi, mm, start); /* Don't overflow past ULONG_MAX */ if (unlikely(ULONG_MAX - len < start)) end = ULONG_MAX; else end = start + len; for_each_vma_range(vmi, vma, end) { if (vma->vm_flags & VM_LOCKED) { if (start > vma->vm_start) count -= (start - vma->vm_start); if (end < vma->vm_end) { count += end - vma->vm_start; break; } count += vma->vm_end - vma->vm_start; } } return count >> PAGE_SHIFT; } /* * convert get_user_pages() return value to posix mlock() error */ static int __mlock_posix_error_return(long retval) { if (retval == -EFAULT) retval = -ENOMEM; else if (retval == -ENOMEM) retval = -EAGAIN; return retval; } static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags) { unsigned long locked; unsigned long lock_limit; int error = -ENOMEM; start = untagged_addr(start); if (!can_do_mlock()) return -EPERM; len = PAGE_ALIGN(len + (offset_in_page(start))); start &= PAGE_MASK; lock_limit = rlimit(RLIMIT_MEMLOCK); lock_limit >>= PAGE_SHIFT; locked = len >> PAGE_SHIFT; if (mmap_write_lock_killable(current->mm)) return -EINTR; locked += current->mm->locked_vm; if ((locked > lock_limit) && (!capable(CAP_IPC_LOCK))) { /* * It is possible that the regions requested intersect with * previously mlocked areas, that part area in "mm->locked_vm" * should not be counted to new mlock increment count. So check * and adjust locked count if necessary. */ locked -= count_mm_mlocked_page_nr(current->mm, start, len); } /* check against resource limits */ if ((locked <= lock_limit) || capable(CAP_IPC_LOCK)) error = apply_vma_lock_flags(start, len, flags); mmap_write_unlock(current->mm); if (error) return error; error = __mm_populate(start, len, 0); if (error) return __mlock_posix_error_return(error); return 0; } SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len) { return do_mlock(start, len, VM_LOCKED); } SYSCALL_DEFINE3(mlock2, unsigned long, start, size_t, len, int, flags) { vm_flags_t vm_flags = VM_LOCKED; if (flags & ~MLOCK_ONFAULT) return -EINVAL; if (flags & MLOCK_ONFAULT) vm_flags |= VM_LOCKONFAULT; return do_mlock(start, len, vm_flags); } SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len) { int ret; start = untagged_addr(start); len = PAGE_ALIGN(len + (offset_in_page(start))); start &= PAGE_MASK; if (mmap_write_lock_killable(current->mm)) return -EINTR; ret = apply_vma_lock_flags(start, len, 0); mmap_write_unlock(current->mm); return ret; } /* * Take the MCL_* flags passed into mlockall (or 0 if called from munlockall) * and translate into the appropriate modifications to mm->def_flags and/or the * flags for all current VMAs. * * There are a couple of subtleties with this. If mlockall() is called multiple * times with different flags, the values do not necessarily stack. If mlockall * is called once including the MCL_FUTURE flag and then a second time without * it, VM_LOCKED and VM_LOCKONFAULT will be cleared from mm->def_flags. 
*/ static int apply_mlockall_flags(int flags) { VMA_ITERATOR(vmi, current->mm, 0); struct vm_area_struct *vma, *prev = NULL; vm_flags_t to_add = 0; current->mm->def_flags &= ~VM_LOCKED_MASK; if (flags & MCL_FUTURE) { current->mm->def_flags |= VM_LOCKED; if (flags & MCL_ONFAULT) current->mm->def_flags |= VM_LOCKONFAULT; if (!(flags & MCL_CURRENT)) goto out; } if (flags & MCL_CURRENT) { to_add |= VM_LOCKED; if (flags & MCL_ONFAULT) to_add |= VM_LOCKONFAULT; } for_each_vma(vmi, vma) { int error; vm_flags_t newflags; newflags = vma->vm_flags & ~VM_LOCKED_MASK; newflags |= to_add; error = mlock_fixup(&vmi, vma, &prev, vma->vm_start, vma->vm_end, newflags); /* Ignore errors, but prev needs fixing up. */ if (error) prev = vma; cond_resched(); } out: return 0; } SYSCALL_DEFINE1(mlockall, int, flags) { unsigned long lock_limit; int ret; if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)) || flags == MCL_ONFAULT) return -EINVAL; if (!can_do_mlock()) return -EPERM; lock_limit = rlimit(RLIMIT_MEMLOCK); lock_limit >>= PAGE_SHIFT; if (mmap_write_lock_killable(current->mm)) return -EINTR; ret = -ENOMEM; if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) || capable(CAP_IPC_LOCK)) ret = apply_mlockall_flags(flags); mmap_write_unlock(current->mm); if (!ret && (flags & MCL_CURRENT)) mm_populate(0, TASK_SIZE); return ret; } SYSCALL_DEFINE0(munlockall) { int ret; if (mmap_write_lock_killable(current->mm)) return -EINTR; ret = apply_mlockall_flags(0); mmap_write_unlock(current->mm); return ret; } /* * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB * shm segments) get accounted against the user_struct instead. */ static DEFINE_SPINLOCK(shmlock_user_lock); int user_shm_lock(size_t size, struct ucounts *ucounts) { unsigned long lock_limit, locked; long memlock; int allowed = 0; locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; lock_limit = rlimit(RLIMIT_MEMLOCK); if (lock_limit != RLIM_INFINITY) lock_limit >>= PAGE_SHIFT; spin_lock(&shmlock_user_lock); memlock = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked); if ((memlock == LONG_MAX || memlock > lock_limit) && !capable(CAP_IPC_LOCK)) { dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked); goto out; } if (!get_ucounts(ucounts)) { dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, locked); allowed = 0; goto out; } allowed = 1; out: spin_unlock(&shmlock_user_lock); return allowed; } void user_shm_unlock(size_t size, struct ucounts *ucounts) { spin_lock(&shmlock_user_lock); dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_MEMLOCK, (size + PAGE_SIZE - 1) >> PAGE_SHIFT); spin_unlock(&shmlock_user_lock); put_ucounts(ucounts); }
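The syscalls implemented above (mlock, mlock2, munlock, mlockall, munlockall) are easiest to see from the user side. Below is a short, hedged userspace sketch; it assumes the glibc wrappers and the MLOCK_ONFAULT/MCL_* constants from <sys/mman.h> (mlock2() needs a reasonably recent glibc), and is subject to the RLIMIT_MEMLOCK / CAP_IPC_LOCK checks enforced in do_mlock() above.

/*
 * Hedged userspace sketch, not part of mm/mlock.c: exercises the syscalls
 * implemented above. mlock2(..., MLOCK_ONFAULT) corresponds to
 * VM_LOCKONFAULT (pages are locked as they are faulted in, not populated
 * up front); munlockall() matches apply_mlockall_flags(0).
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4096 * 16;
    void *buf = malloc(len);

    if (!buf)
        return 1;
    memset(buf, 0, len);

    /* Lock and populate the range now (VM_LOCKED). */
    if (mlock(buf, len) != 0)
        perror("mlock");

    /* Alternatively: lock lazily, populating on first fault (VM_LOCKONFAULT). */
    if (mlock2(buf, len, MLOCK_ONFAULT) != 0)
        perror("mlock2");

    /* Lock everything mapped now and in the future. */
    if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0)
        perror("mlockall");

    /* Undo: per-range and process-wide unlock. */
    munlock(buf, len);
    munlockall();
    free(buf);
    return 0;
}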
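The x86 PAT code that follows programs the IA32_CR_PAT MSR and translates its eight 8-bit slots into page cache modes (see pat_get_cache_mode() and init_cache_modes() below). As a companion, here is a hedged standalone sketch that decodes a PAT value the same way; the slot encodings (0=UC, 1=WC, 4=WT, 5=WP, 6=WB, 7=UC-) and the sample BIOS-default value are assumptions stated in the comments, not taken from the kernel code below.

/*
 * Hedged standalone sketch (not kernel code): decode the eight 8-bit PAT
 * slots of an IA32_CR_PAT value into memory-type names, mirroring what
 * pat_get_cache_mode()/init_cache_modes() below do when filling the
 * kernel's cachemode tables. Assumed SDM encodings: 0=UC, 1=WC, 4=WT,
 * 5=WP, 6=WB, 7=UC- (2 and 3 are reserved).
 */
#include <stdio.h>
#include <stdint.h>

static const char *pat_type_name(unsigned int v)
{
    switch (v) {
    case 0: return "UC";
    case 1: return "WC";
    case 4: return "WT";
    case 5: return "WP";
    case 6: return "WB";
    case 7: return "UC-";
    default: return "reserved";
    }
}

int main(void)
{
    /* Sample: the no-PAT/BIOS-default layout emulated by pat_bp_init()
     * below (WB, WT, UC-, UC repeated), which encodes to this value
     * under the assumptions above. */
    uint64_t pat = 0x0007040600070406ULL;
    int i;

    for (i = 0; i < 8; i++)
        printf("PAT slot %d: %s\n", i,
               pat_type_name((unsigned int)((pat >> (i * 8)) & 0x7)));
    return 0;
}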
// SPDX-License-Identifier: GPL-2.0-only /* * Page Attribute Table (PAT) support: handle memory caching attributes in page tables. * * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> * Suresh B Siddha <suresh.b.siddha@intel.com> * * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen. * * Basic principles: * * PAT is a CPU feature supported by all modern x86 CPUs, to allow the firmware and * the kernel to set one of a handful of 'caching type' attributes for physical * memory ranges: uncached, write-combining, write-through, write-protected, * and the most commonly used and default attribute: write-back caching. * * PAT support supersedes and augments MTRR support in a compatible fashion: MTRR is * a hardware interface to enumerate a limited number of physical memory ranges * and set their caching attributes explicitly, programmed into the CPU via MSRs. * Even modern CPUs have MTRRs enabled - but these are typically not touched * by the kernel or by user-space (such as the X server), we rely on PAT for any * additional cache attribute logic. * * PAT doesn't work via explicit memory ranges, but uses page table entries to add * cache attribute information to the mapped memory range: there's 3 bits used, * (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT), with the 8 possible values mapped by the * CPU to actual cache attributes via an MSR loaded into the CPU (MSR_IA32_CR_PAT). * * ( There's a metric ton of finer details, such as compatibility with CPU quirks * that only support 4 types of PAT entries, and interaction with MTRRs, see * below for details.
) */ #include <linux/seq_file.h> #include <linux/memblock.h> #include <linux/debugfs.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/pfn_t.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/fs.h> #include <linux/rbtree.h> #include <asm/cpu_device_id.h> #include <asm/cacheflush.h> #include <asm/cacheinfo.h> #include <asm/processor.h> #include <asm/tlbflush.h> #include <asm/x86_init.h> #include <asm/fcntl.h> #include <asm/e820/api.h> #include <asm/mtrr.h> #include <asm/page.h> #include <asm/msr.h> #include <asm/memtype.h> #include <asm/io.h> #include "memtype.h" #include "../mm_internal.h" #undef pr_fmt #define pr_fmt(fmt) "" fmt static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT); static u64 __ro_after_init pat_msr_val; /* * PAT support is enabled by default, but can be disabled for * various user-requested or hardware-forced reasons: */ static void __init pat_disable(const char *msg_reason) { if (pat_disabled) return; pat_disabled = true; pr_info("x86/PAT: %s\n", msg_reason); memory_caching_control &= ~CACHE_PAT; } static int __init nopat(char *str) { pat_disable("PAT support disabled via boot option."); return 0; } early_param("nopat", nopat); bool pat_enabled(void) { return !pat_disabled; } EXPORT_SYMBOL_GPL(pat_enabled); int pat_debug_enable; static int __init pat_debug_setup(char *str) { pat_debug_enable = 1; return 1; } __setup("debugpat", pat_debug_setup); #ifdef CONFIG_X86_PAT /* * X86 PAT uses page flags arch_1 and arch_2 together to keep track of * memory type of pages that have backing page struct. * * X86 PAT supports 4 different memory types: * - _PAGE_CACHE_MODE_WB * - _PAGE_CACHE_MODE_WC * - _PAGE_CACHE_MODE_UC_MINUS * - _PAGE_CACHE_MODE_WT * * _PAGE_CACHE_MODE_WB is the default type. 
*/ #define _PGMT_WB 0 #define _PGMT_WC (1UL << PG_arch_1) #define _PGMT_UC_MINUS (1UL << PG_arch_2) #define _PGMT_WT (1UL << PG_arch_2 | 1UL << PG_arch_1) #define _PGMT_MASK (1UL << PG_arch_2 | 1UL << PG_arch_1) #define _PGMT_CLEAR_MASK (~_PGMT_MASK) static inline enum page_cache_mode get_page_memtype(struct page *pg) { unsigned long pg_flags = pg->flags & _PGMT_MASK; if (pg_flags == _PGMT_WB) return _PAGE_CACHE_MODE_WB; else if (pg_flags == _PGMT_WC) return _PAGE_CACHE_MODE_WC; else if (pg_flags == _PGMT_UC_MINUS) return _PAGE_CACHE_MODE_UC_MINUS; else return _PAGE_CACHE_MODE_WT; } static inline void set_page_memtype(struct page *pg, enum page_cache_mode memtype) { unsigned long memtype_flags; unsigned long old_flags; unsigned long new_flags; switch (memtype) { case _PAGE_CACHE_MODE_WC: memtype_flags = _PGMT_WC; break; case _PAGE_CACHE_MODE_UC_MINUS: memtype_flags = _PGMT_UC_MINUS; break; case _PAGE_CACHE_MODE_WT: memtype_flags = _PGMT_WT; break; case _PAGE_CACHE_MODE_WB: default: memtype_flags = _PGMT_WB; break; } old_flags = READ_ONCE(pg->flags); do { new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags; } while (!try_cmpxchg(&pg->flags, &old_flags, new_flags)); } #else static inline enum page_cache_mode get_page_memtype(struct page *pg) { return -1; } static inline void set_page_memtype(struct page *pg, enum page_cache_mode memtype) { } #endif #define CM(c) (_PAGE_CACHE_MODE_ ## c) static enum page_cache_mode __init pat_get_cache_mode(unsigned int pat_val, char *msg) { enum page_cache_mode cache; char *cache_mode; switch (pat_val) { case X86_MEMTYPE_UC: cache = CM(UC); cache_mode = "UC "; break; case X86_MEMTYPE_WC: cache = CM(WC); cache_mode = "WC "; break; case X86_MEMTYPE_WT: cache = CM(WT); cache_mode = "WT "; break; case X86_MEMTYPE_WP: cache = CM(WP); cache_mode = "WP "; break; case X86_MEMTYPE_WB: cache = CM(WB); cache_mode = "WB "; break; case X86_MEMTYPE_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break; default: cache = CM(WB); cache_mode = "WB "; break; } memcpy(msg, cache_mode, 4); return cache; } #undef CM /* * Update the cache mode to pgprot translation tables according to PAT * configuration. * Using lower indices is preferred, so we start with highest index. */ static void __init init_cache_modes(u64 pat) { enum page_cache_mode cache; char pat_msg[33]; int i; pat_msg[32] = 0; for (i = 7; i >= 0; i--) { cache = pat_get_cache_mode((pat >> (i * 8)) & 7, pat_msg + 4 * i); update_cache_mode_entry(i, cache); } pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg); } void pat_cpu_init(void) { if (!boot_cpu_has(X86_FEATURE_PAT)) { /* * If this happens we are on a secondary CPU, but switched to * PAT on the boot CPU. We have no way to undo PAT. */ panic("x86/PAT: PAT enabled, but not supported by secondary CPU\n"); } wrmsrl(MSR_IA32_CR_PAT, pat_msr_val); __flush_tlb_all(); } /** * pat_bp_init - Initialize the PAT MSR value and PAT table * * This function initializes PAT MSR value and PAT table with an OS-defined * value to enable additional cache attributes, WC, WT and WP. * * This function prepares the calls of pat_cpu_init() via cache_cpu_init() * on all CPUs. 
*/ void __init pat_bp_init(void) { struct cpuinfo_x86 *c = &boot_cpu_data; if (!IS_ENABLED(CONFIG_X86_PAT)) pr_info_once("x86/PAT: PAT support disabled because CONFIG_X86_PAT is disabled in the kernel.\n"); if (!cpu_feature_enabled(X86_FEATURE_PAT)) pat_disable("PAT not supported by the CPU."); else rdmsrl(MSR_IA32_CR_PAT, pat_msr_val); if (!pat_msr_val) { pat_disable("PAT support disabled by the firmware."); /* * No PAT. Emulate the PAT table that corresponds to the two * cache bits, PWT (Write Through) and PCD (Cache Disable). * This setup is also the same as the BIOS default setup. * * PTE encoding: * * PCD * |PWT PAT * || slot * 00 0 WB : _PAGE_CACHE_MODE_WB * 01 1 WT : _PAGE_CACHE_MODE_WT * 10 2 UC-: _PAGE_CACHE_MODE_UC_MINUS * 11 3 UC : _PAGE_CACHE_MODE_UC * * NOTE: When WC or WP is used, it is redirected to UC- per * the default setup in __cachemode2pte_tbl[]. */ pat_msr_val = PAT_VALUE(WB, WT, UC_MINUS, UC, WB, WT, UC_MINUS, UC); } /* * Xen PV doesn't allow to set PAT MSR, but all cache modes are * supported. */ if (pat_disabled || cpu_feature_enabled(X86_FEATURE_XENPV)) { init_cache_modes(pat_msr_val); return; } if ((c->x86_vfm >= INTEL_PENTIUM_PRO && c->x86_vfm <= INTEL_PENTIUM_M_DOTHAN) || (c->x86_vfm >= INTEL_P4_WILLAMETTE && c->x86_vfm <= INTEL_P4_CEDARMILL)) { /* * PAT support with the lower four entries. Intel Pentium 2, * 3, M, and 4 are affected by PAT errata, which makes the * upper four entries unusable. To be on the safe side, we don't * use those. * * PTE encoding: * PAT * |PCD * ||PWT PAT * ||| slot * 000 0 WB : _PAGE_CACHE_MODE_WB * 001 1 WC : _PAGE_CACHE_MODE_WC * 010 2 UC-: _PAGE_CACHE_MODE_UC_MINUS * 011 3 UC : _PAGE_CACHE_MODE_UC * PAT bit unused * * NOTE: When WT or WP is used, it is redirected to UC- per * the default setup in __cachemode2pte_tbl[]. */ pat_msr_val = PAT_VALUE(WB, WC, UC_MINUS, UC, WB, WC, UC_MINUS, UC); } else { /* * Full PAT support. We put WT in slot 7 to improve * robustness in the presence of errata that might cause * the high PAT bit to be ignored. This way, a buggy slot 7 * access will hit slot 3, and slot 3 is UC, so at worst * we lose performance without causing a correctness issue. * Pentium 4 erratum N46 is an example for such an erratum, * although we try not to use PAT at all on affected CPUs. * * PTE encoding: * PAT * |PCD * ||PWT PAT * ||| slot * 000 0 WB : _PAGE_CACHE_MODE_WB * 001 1 WC : _PAGE_CACHE_MODE_WC * 010 2 UC-: _PAGE_CACHE_MODE_UC_MINUS * 011 3 UC : _PAGE_CACHE_MODE_UC * 100 4 WB : Reserved * 101 5 WP : _PAGE_CACHE_MODE_WP * 110 6 UC-: Reserved * 111 7 WT : _PAGE_CACHE_MODE_WT * * The reserved slots are unused, but mapped to their * corresponding types in the presence of PAT errata. */ pat_msr_val = PAT_VALUE(WB, WC, UC_MINUS, UC, WB, WP, UC_MINUS, WT); } memory_caching_control |= CACHE_PAT; init_cache_modes(pat_msr_val); } static DEFINE_SPINLOCK(memtype_lock); /* protects memtype accesses */ /* * Does intersection of PAT memory type and MTRR memory type and returns * the resulting memory type as PAT understands it. * (Type in pat and mtrr will not have same value) * The intersection is based on "Effective Memory Type" tables in IA-32 * SDM vol 3a */ static unsigned long pat_x_mtrr_type(u64 start, u64 end, enum page_cache_mode req_type) { /* * Look for MTRR hint to get the effective type in case where PAT * request is for WB. 
*/ if (req_type == _PAGE_CACHE_MODE_WB) { u8 mtrr_type, uniform; mtrr_type = mtrr_type_lookup(start, end, &uniform); if (mtrr_type != MTRR_TYPE_WRBACK) return _PAGE_CACHE_MODE_UC_MINUS; return _PAGE_CACHE_MODE_WB; } return req_type; } struct pagerange_state { unsigned long cur_pfn; int ram; int not_ram; }; static int pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg) { struct pagerange_state *state = arg; state->not_ram |= initial_pfn > state->cur_pfn; state->ram |= total_nr_pages > 0; state->cur_pfn = initial_pfn + total_nr_pages; return state->ram && state->not_ram; } static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end) { int ret = 0; unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; struct pagerange_state state = {start_pfn, 0, 0}; /* * For legacy reasons, physical address range in the legacy ISA * region is tracked as non-RAM. This will allow users of * /dev/mem to map portions of legacy ISA region, even when * some of those portions are listed(or not even listed) with * different e820 types(RAM/reserved/..) */ if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT) start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT; if (start_pfn < end_pfn) { ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &state, pagerange_is_ram_callback); } return (ret > 0) ? -1 : (state.ram ? 1 : 0); } /* * For RAM pages, we use page flags to mark the pages with appropriate type. * The page flags are limited to four types, WB (default), WC, WT and UC-. * WP request fails with -EINVAL, and UC gets redirected to UC-. Setting * a new memory type is only allowed for a page mapped with the default WB * type. * * Here we do two passes: * - Find the memtype of all the pages in the range, look for any conflicts. * - In case of no conflicts, set the new memtype for pages in the range. */ static int reserve_ram_pages_type(u64 start, u64 end, enum page_cache_mode req_type, enum page_cache_mode *new_type) { struct page *page; u64 pfn; if (req_type == _PAGE_CACHE_MODE_WP) { if (new_type) *new_type = _PAGE_CACHE_MODE_UC_MINUS; return -EINVAL; } if (req_type == _PAGE_CACHE_MODE_UC) { /* We do not support strong UC */ WARN_ON_ONCE(1); req_type = _PAGE_CACHE_MODE_UC_MINUS; } for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { enum page_cache_mode type; page = pfn_to_page(pfn); type = get_page_memtype(page); if (type != _PAGE_CACHE_MODE_WB) { pr_info("x86/PAT: reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n", start, end - 1, type, req_type); if (new_type) *new_type = type; return -EBUSY; } } if (new_type) *new_type = req_type; for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { page = pfn_to_page(pfn); set_page_memtype(page, req_type); } return 0; } static int free_ram_pages_type(u64 start, u64 end) { struct page *page; u64 pfn; for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) { page = pfn_to_page(pfn); set_page_memtype(page, _PAGE_CACHE_MODE_WB); } return 0; } static u64 sanitize_phys(u64 address) { /* * When changing the memtype for pages containing poison allow * for a "decoy" virtual address (bit 63 clear) passed to * set_memory_X(). __pa() on a "decoy" address results in a * physical address with bit 63 set. * * Decoy addresses are not present for 32-bit builds, see * set_mce_nospec(). 
*/ if (IS_ENABLED(CONFIG_X86_64)) return address & __PHYSICAL_MASK; return address; } /* * req_type typically has one of the: * - _PAGE_CACHE_MODE_WB * - _PAGE_CACHE_MODE_WC * - _PAGE_CACHE_MODE_UC_MINUS * - _PAGE_CACHE_MODE_UC * - _PAGE_CACHE_MODE_WT * * If new_type is NULL, function will return an error if it cannot reserve the * region with req_type. If new_type is non-NULL, function will return * available type in new_type in case of no error. In case of any error * it will return a negative return value. */ int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type, enum page_cache_mode *new_type) { struct memtype *entry_new; enum page_cache_mode actual_type; int is_range_ram; int err = 0; start = sanitize_phys(start); /* * The end address passed into this function is exclusive, but * sanitize_phys() expects an inclusive address. */ end = sanitize_phys(end - 1) + 1; if (start >= end) { WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__, start, end - 1, cattr_name(req_type)); return -EINVAL; } if (!pat_enabled()) { /* This is identical to page table setting without PAT */ if (new_type) *new_type = req_type; return 0; } /* Low ISA region is always mapped WB in page table. No need to track */ if (x86_platform.is_untracked_pat_range(start, end)) { if (new_type) *new_type = _PAGE_CACHE_MODE_WB; return 0; } /* * Call mtrr_lookup to get the type hint. This is an * optimization for /dev/mem mmap'ers into WB memory (BIOS * tools and ACPI tools). Use WB request for WB memory and use * UC_MINUS otherwise. */ actual_type = pat_x_mtrr_type(start, end, req_type); if (new_type) *new_type = actual_type; is_range_ram = pat_pagerange_is_ram(start, end); if (is_range_ram == 1) { err = reserve_ram_pages_type(start, end, req_type, new_type); return err; } else if (is_range_ram < 0) { return -EINVAL; } entry_new = kzalloc(sizeof(struct memtype), GFP_KERNEL); if (!entry_new) return -ENOMEM; entry_new->start = start; entry_new->end = end; entry_new->type = actual_type; spin_lock(&memtype_lock); err = memtype_check_insert(entry_new, new_type); if (err) { pr_info("x86/PAT: memtype_reserve failed [mem %#010Lx-%#010Lx], track %s, req %s\n", start, end - 1, cattr_name(entry_new->type), cattr_name(req_type)); kfree(entry_new); spin_unlock(&memtype_lock); return err; } spin_unlock(&memtype_lock); dprintk("memtype_reserve added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n", start, end - 1, cattr_name(entry_new->type), cattr_name(req_type), new_type ? cattr_name(*new_type) : "-"); return err; } int memtype_free(u64 start, u64 end) { int is_range_ram; struct memtype *entry_old; if (!pat_enabled()) return 0; start = sanitize_phys(start); end = sanitize_phys(end); /* Low ISA region is always mapped WB. 
No need to track */ if (x86_platform.is_untracked_pat_range(start, end)) return 0; is_range_ram = pat_pagerange_is_ram(start, end); if (is_range_ram == 1) return free_ram_pages_type(start, end); if (is_range_ram < 0) return -EINVAL; spin_lock(&memtype_lock); entry_old = memtype_erase(start, end); spin_unlock(&memtype_lock); if (IS_ERR(entry_old)) { pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n", current->comm, current->pid, start, end - 1); return -EINVAL; } kfree(entry_old); dprintk("memtype_free request [mem %#010Lx-%#010Lx]\n", start, end - 1); return 0; } /** * lookup_memtype - Looks up the memory type for a physical address * @paddr: physical address of which memory type needs to be looked up * * Only to be called when PAT is enabled * * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS * or _PAGE_CACHE_MODE_WT. */ static enum page_cache_mode lookup_memtype(u64 paddr) { enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB; struct memtype *entry; if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE)) return rettype; if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) { struct page *page; page = pfn_to_page(paddr >> PAGE_SHIFT); return get_page_memtype(page); } spin_lock(&memtype_lock); entry = memtype_lookup(paddr); if (entry != NULL) rettype = entry->type; else rettype = _PAGE_CACHE_MODE_UC_MINUS; spin_unlock(&memtype_lock); return rettype; } /** * pat_pfn_immune_to_uc_mtrr - Check whether the PAT memory type * of @pfn cannot be overridden by UC MTRR memory type. * * Only to be called when PAT is enabled. * * Returns true, if the PAT memory type of @pfn is UC, UC-, or WC. * Returns false in other cases. */ bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn) { enum page_cache_mode cm = lookup_memtype(PFN_PHYS(pfn)); return cm == _PAGE_CACHE_MODE_UC || cm == _PAGE_CACHE_MODE_UC_MINUS || cm == _PAGE_CACHE_MODE_WC; } EXPORT_SYMBOL_GPL(pat_pfn_immune_to_uc_mtrr); /** * memtype_reserve_io - Request a memory type mapping for a region of memory * @start: start (physical address) of the region * @end: end (physical address) of the region * @type: A pointer to memtype, with requested type. 
On success, requested * or any other compatible type that was available for the region is returned * * On success, returns 0 * On failure, returns non-zero */ int memtype_reserve_io(resource_size_t start, resource_size_t end, enum page_cache_mode *type) { resource_size_t size = end - start; enum page_cache_mode req_type = *type; enum page_cache_mode new_type; int ret; WARN_ON_ONCE(iomem_map_sanity_check(start, size)); ret = memtype_reserve(start, end, req_type, &new_type); if (ret) goto out_err; if (!is_new_memtype_allowed(start, size, req_type, new_type)) goto out_free; if (memtype_kernel_map_sync(start, size, new_type) < 0) goto out_free; *type = new_type; return 0; out_free: memtype_free(start, end); ret = -EBUSY; out_err: return ret; } /** * memtype_free_io - Release a memory type mapping for a region of memory * @start: start (physical address) of the region * @end: end (physical address) of the region */ void memtype_free_io(resource_size_t start, resource_size_t end) { memtype_free(start, end); } #ifdef CONFIG_X86_PAT int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size) { enum page_cache_mode type = _PAGE_CACHE_MODE_WC; return memtype_reserve_io(start, start + size, &type); } EXPORT_SYMBOL(arch_io_reserve_memtype_wc); void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size) { memtype_free_io(start, start + size); } EXPORT_SYMBOL(arch_io_free_memtype_wc); #endif pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size, pgprot_t vma_prot) { if (!phys_mem_access_encrypted(pfn << PAGE_SHIFT, size)) vma_prot = pgprot_decrypted(vma_prot); return vma_prot; } #ifdef CONFIG_STRICT_DEVMEM /* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */ static inline int range_is_allowed(unsigned long pfn, unsigned long size) { return 1; } #else /* This check is needed to avoid cache aliasing when PAT is enabled */ static inline int range_is_allowed(unsigned long pfn, unsigned long size) { u64 from = ((u64)pfn) << PAGE_SHIFT; u64 to = from + size; u64 cursor = from; if (!pat_enabled()) return 1; while (cursor < to) { if (!devmem_is_allowed(pfn)) return 0; cursor += PAGE_SIZE; pfn++; } return 1; } #endif /* CONFIG_STRICT_DEVMEM */ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, unsigned long size, pgprot_t *vma_prot) { enum page_cache_mode pcm = _PAGE_CACHE_MODE_WB; if (!range_is_allowed(pfn, size)) return 0; if (file->f_flags & O_DSYNC) pcm = _PAGE_CACHE_MODE_UC_MINUS; *vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) | cachemode2protval(pcm)); return 1; } /* * Change the memory type for the physical address range in kernel identity * mapping space if that range is a part of identity map. */ int memtype_kernel_map_sync(u64 base, unsigned long size, enum page_cache_mode pcm) { unsigned long id_sz; if (base > __pa(high_memory-1)) return 0; /* * Some areas in the middle of the kernel identity range * are not mapped, for example the PCI space. */ if (!page_is_ram(base >> PAGE_SHIFT)) return 0; id_sz = (__pa(high_memory-1) <= base + size) ? __pa(high_memory) - base : size; if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) { pr_info("x86/PAT: %s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n", current->comm, current->pid, cattr_name(pcm), base, (unsigned long long)(base + size-1)); return -EINVAL; } return 0; } /* * Internal interface to reserve a range of physical memory with prot. 
* Reserved non RAM regions only and after successful memtype_reserve, * this func also keeps identity mapping (if any) in sync with this new prot. */ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot, int strict_prot) { int is_ram = 0; int ret; enum page_cache_mode want_pcm = pgprot2cachemode(*vma_prot); enum page_cache_mode pcm = want_pcm; is_ram = pat_pagerange_is_ram(paddr, paddr + size); /* * reserve_pfn_range() for RAM pages. We do not refcount to keep * track of number of mappings of RAM pages. We can assert that * the type requested matches the type of first page in the range. */ if (is_ram) { if (!pat_enabled()) return 0; pcm = lookup_memtype(paddr); if (want_pcm != pcm) { pr_warn("x86/PAT: %s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n", current->comm, current->pid, cattr_name(want_pcm), (unsigned long long)paddr, (unsigned long long)(paddr + size - 1), cattr_name(pcm)); *vma_prot = __pgprot((pgprot_val(*vma_prot) & (~_PAGE_CACHE_MASK)) | cachemode2protval(pcm)); } return 0; } ret = memtype_reserve(paddr, paddr + size, want_pcm, &pcm); if (ret) return ret; if (pcm != want_pcm) { if (strict_prot || !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) { memtype_free(paddr, paddr + size); pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n", current->comm, current->pid, cattr_name(want_pcm), (unsigned long long)paddr, (unsigned long long)(paddr + size - 1), cattr_name(pcm)); return -EINVAL; } /* * We allow returning different type than the one requested in * non strict case. */ *vma_prot = __pgprot((pgprot_val(*vma_prot) & (~_PAGE_CACHE_MASK)) | cachemode2protval(pcm)); } if (memtype_kernel_map_sync(paddr, size, pcm) < 0) { memtype_free(paddr, paddr + size); return -EINVAL; } return 0; } /* * Internal interface to free a range of physical memory. * Frees non RAM regions only. */ static void free_pfn_range(u64 paddr, unsigned long size) { int is_ram; is_ram = pat_pagerange_is_ram(paddr, paddr + size); if (is_ram == 0) memtype_free(paddr, paddr + size); } static int follow_phys(struct vm_area_struct *vma, unsigned long *prot, resource_size_t *phys) { struct follow_pfnmap_args args = { .vma = vma, .address = vma->vm_start }; if (follow_pfnmap_start(&args)) return -EINVAL; /* Never return PFNs of anon folios in COW mappings. */ if (!args.special) { follow_pfnmap_end(&args); return -EINVAL; } *prot = pgprot_val(args.pgprot); *phys = (resource_size_t)args.pfn << PAGE_SHIFT; follow_pfnmap_end(&args); return 0; } static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr, pgprot_t *pgprot) { unsigned long prot; VM_WARN_ON_ONCE(!(vma->vm_flags & VM_PAT)); /* * We need the starting PFN and cachemode used for track_pfn_remap() * that covered the whole VMA. For most mappings, we can obtain that * information from the page tables. For COW mappings, we might now * suddenly have anon folios mapped and follow_phys() will fail. * * Fallback to using vma->vm_pgoff, see remap_pfn_range_notrack(), to * detect the PFN. If we need the cachemode as well, we're out of luck * for now and have to fail fork(). 
*/ if (!follow_phys(vma, &prot, paddr)) { if (pgprot) *pgprot = __pgprot(prot); return 0; } if (is_cow_mapping(vma->vm_flags)) { if (pgprot) return -EINVAL; *paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; return 0; } WARN_ON_ONCE(1); return -EINVAL; } int track_pfn_copy(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, unsigned long *pfn) { const unsigned long vma_size = src_vma->vm_end - src_vma->vm_start; resource_size_t paddr; pgprot_t pgprot; int rc; if (!(src_vma->vm_flags & VM_PAT)) return 0; /* * Duplicate the PAT information for the dst VMA based on the src * VMA. */ if (get_pat_info(src_vma, &paddr, &pgprot)) return -EINVAL; rc = reserve_pfn_range(paddr, vma_size, &pgprot, 1); if (rc) return rc; /* Reservation for the destination VMA succeeded. */ vm_flags_set(dst_vma, VM_PAT); *pfn = PHYS_PFN(paddr); return 0; } void untrack_pfn_copy(struct vm_area_struct *dst_vma, unsigned long pfn) { untrack_pfn(dst_vma, pfn, dst_vma->vm_end - dst_vma->vm_start, true); /* * Reservation was freed, any copied page tables will get cleaned * up later, but without getting PAT involved again. */ } /* * prot is passed in as a parameter for the new mapping. If the vma has * a linear pfn mapping for the entire range, or no vma is provided, * reserve the entire pfn + size range with single reserve_pfn_range * call. */ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, unsigned long pfn, unsigned long addr, unsigned long size) { resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT; enum page_cache_mode pcm; /* reserve the whole chunk starting from paddr */ if (!vma || (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start))) { int ret; ret = reserve_pfn_range(paddr, size, prot, 0); if (ret == 0 && vma) vm_flags_set(vma, VM_PAT); return ret; } if (!pat_enabled()) return 0; /* * For anything smaller than the vma size we set prot based on the * lookup. */ pcm = lookup_memtype(paddr); /* Check memtype for the remaining pages */ while (size > PAGE_SIZE) { size -= PAGE_SIZE; paddr += PAGE_SIZE; if (pcm != lookup_memtype(paddr)) return -EINVAL; } *prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) | cachemode2protval(pcm)); return 0; } void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn) { enum page_cache_mode pcm; if (!pat_enabled()) return; /* Set prot based on lookup */ pcm = lookup_memtype(pfn_t_to_phys(pfn)); *prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) | cachemode2protval(pcm)); } /* * untrack_pfn is called while unmapping a pfnmap for a region. * untrack can be called for a specific region indicated by pfn and size or * can be for the entire vma (in which case pfn, size are zero). 
*/ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, unsigned long size, bool mm_wr_locked) { resource_size_t paddr; if (vma && !(vma->vm_flags & VM_PAT)) return; /* free the chunk starting from pfn or the whole chunk */ paddr = (resource_size_t)pfn << PAGE_SHIFT; if (!paddr && !size) { if (get_pat_info(vma, &paddr, NULL)) return; size = vma->vm_end - vma->vm_start; } free_pfn_range(paddr, size); if (vma) { if (mm_wr_locked) vm_flags_clear(vma, VM_PAT); else __vm_flags_mod(vma, 0, VM_PAT); } } void untrack_pfn_clear(struct vm_area_struct *vma) { vm_flags_clear(vma, VM_PAT); } pgprot_t pgprot_writecombine(pgprot_t prot) { return __pgprot(pgprot_val(prot) | cachemode2protval(_PAGE_CACHE_MODE_WC)); } EXPORT_SYMBOL_GPL(pgprot_writecombine); pgprot_t pgprot_writethrough(pgprot_t prot) { return __pgprot(pgprot_val(prot) | cachemode2protval(_PAGE_CACHE_MODE_WT)); } EXPORT_SYMBOL_GPL(pgprot_writethrough); #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT) /* * We are allocating a temporary printout-entry to be passed * between seq_start()/next() and seq_show(): */ static struct memtype *memtype_get_idx(loff_t pos) { struct memtype *entry_print; int ret; entry_print = kzalloc(sizeof(struct memtype), GFP_KERNEL); if (!entry_print) return NULL; spin_lock(&memtype_lock); ret = memtype_copy_nth_element(entry_print, pos); spin_unlock(&memtype_lock); /* Free it on error: */ if (ret) { kfree(entry_print); return NULL; } return entry_print; } static void *memtype_seq_start(struct seq_file *seq, loff_t *pos) { if (*pos == 0) { ++*pos; seq_puts(seq, "PAT memtype list:\n"); } return memtype_get_idx(*pos); } static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos) { kfree(v); ++*pos; return memtype_get_idx(*pos); } static void memtype_seq_stop(struct seq_file *seq, void *v) { kfree(v); } static int memtype_seq_show(struct seq_file *seq, void *v) { struct memtype *entry_print = (struct memtype *)v; seq_printf(seq, "PAT: [mem 0x%016Lx-0x%016Lx] %s\n", entry_print->start, entry_print->end, cattr_name(entry_print->type)); return 0; } static const struct seq_operations memtype_seq_ops = { .start = memtype_seq_start, .next = memtype_seq_next, .stop = memtype_seq_stop, .show = memtype_seq_show, }; static int memtype_seq_open(struct inode *inode, struct file *file) { return seq_open(file, &memtype_seq_ops); } static const struct file_operations memtype_fops = { .open = memtype_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int __init pat_memtype_list_init(void) { if (pat_enabled()) { debugfs_create_file("pat_memtype_list", S_IRUSR, arch_debugfs_dir, NULL, &memtype_fops); } return 0; } late_initcall(pat_memtype_list_init); #endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */
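The arch_io_reserve_memtype_wc()/arch_io_free_memtype_wc() pair exported above is the interface a driver uses to record a write-combining memtype for an MMIO range before mapping it, so later mappings of the same range cannot alias it with a conflicting cache mode. A minimal usage sketch follows; it is not part of memtype.c, and the PCI BAR handling and the ioremap_wc() pairing are illustrative assumptions, not taken from this file.

/*
 * Illustrative sketch only (not part of memtype.c): a plausible way for a
 * PCI driver to pair the exported PAT helpers above with a write-combining
 * MMIO mapping. The BAR-based resource handling is an assumption.
 */
#include <linux/io.h>
#include <linux/pci.h>

static void __iomem *example_map_bar_wc(struct pci_dev *pdev, int bar)
{
	resource_size_t start = pci_resource_start(pdev, bar);
	resource_size_t size = pci_resource_len(pdev, bar);
	void __iomem *regs;

	/* Record the WC memtype so other mappings cannot alias it as UC/WB. */
	if (arch_io_reserve_memtype_wc(start, size))
		return NULL;

	regs = ioremap_wc(start, size);
	if (!regs)
		arch_io_free_memtype_wc(start, size);	/* undo the reservation */

	return regs;
}

static void example_unmap_bar_wc(struct pci_dev *pdev, int bar,
				 void __iomem *regs)
{
	iounmap(regs);
	arch_io_free_memtype_wc(pci_resource_start(pdev, bar),
				pci_resource_len(pdev, bar));
}

This mirrors the pattern GPU and framebuffer drivers typically follow before mapping an aperture write-combined.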
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2007 Oracle. All rights reserved.
*/ #include <linux/blkdev.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/highmem.h> #include <linux/time.h> #include <linux/init.h> #include <linux/seq_file.h> #include <linux/string.h> #include <linux/backing-dev.h> #include <linux/mount.h> #include <linux/writeback.h> #include <linux/statfs.h> #include <linux/compat.h> #include <linux/parser.h> #include <linux/ctype.h> #include <linux/namei.h> #include <linux/miscdevice.h> #include <linux/magic.h> #include <linux/slab.h> #include <linux/ratelimit.h> #include <linux/crc32c.h> #include <linux/btrfs.h> #include <linux/security.h> #include <linux/fs_parser.h> #include "messages.h" #include "delayed-inode.h" #include "ctree.h" #include "disk-io.h" #include "transaction.h" #include "btrfs_inode.h" #include "direct-io.h" #include "props.h" #include "xattr.h" #include "bio.h" #include "export.h" #include "compression.h" #include "dev-replace.h" #include "free-space-cache.h" #include "backref.h" #include "space-info.h" #include "sysfs.h" #include "zoned.h" #include "tests/btrfs-tests.h" #include "block-group.h" #include "discard.h" #include "qgroup.h" #include "raid56.h" #include "fs.h" #include "accessors.h" #include "defrag.h" #include "dir-item.h" #include "ioctl.h" #include "scrub.h" #include "verity.h" #include "super.h" #include "extent-tree.h" #define CREATE_TRACE_POINTS #include <trace/events/btrfs.h> static const struct super_operations btrfs_super_ops; static struct file_system_type btrfs_fs_type; static void btrfs_put_super(struct super_block *sb) { struct btrfs_fs_info *fs_info = btrfs_sb(sb); btrfs_info(fs_info, "last unmount of filesystem %pU", fs_info->fs_devices->fsid); close_ctree(fs_info); } /* Store the mount options related information. */ struct btrfs_fs_context { char *subvol_name; u64 subvol_objectid; u64 max_inline; u32 commit_interval; u32 metadata_ratio; u32 thread_pool_size; unsigned long long mount_opt; unsigned long compress_type:4; int compress_level; refcount_t refs; }; enum { Opt_acl, Opt_clear_cache, Opt_commit_interval, Opt_compress, Opt_compress_force, Opt_compress_force_type, Opt_compress_type, Opt_degraded, Opt_device, Opt_fatal_errors, Opt_flushoncommit, Opt_max_inline, Opt_barrier, Opt_datacow, Opt_datasum, Opt_defrag, Opt_discard, Opt_discard_mode, Opt_ratio, Opt_rescan_uuid_tree, Opt_skip_balance, Opt_space_cache, Opt_space_cache_version, Opt_ssd, Opt_ssd_spread, Opt_subvol, Opt_subvol_empty, Opt_subvolid, Opt_thread_pool, Opt_treelog, Opt_user_subvol_rm_allowed, Opt_norecovery, /* Rescue options */ Opt_rescue, Opt_usebackuproot, Opt_nologreplay, /* Debugging options */ Opt_enospc_debug, #ifdef CONFIG_BTRFS_DEBUG Opt_fragment, Opt_fragment_data, Opt_fragment_metadata, Opt_fragment_all, #endif #ifdef CONFIG_BTRFS_FS_REF_VERIFY Opt_ref_verify, #endif Opt_err, }; enum { Opt_fatal_errors_panic, Opt_fatal_errors_bug, }; static const struct constant_table btrfs_parameter_fatal_errors[] = { { "panic", Opt_fatal_errors_panic }, { "bug", Opt_fatal_errors_bug }, {} }; enum { Opt_discard_sync, Opt_discard_async, }; static const struct constant_table btrfs_parameter_discard[] = { { "sync", Opt_discard_sync }, { "async", Opt_discard_async }, {} }; enum { Opt_space_cache_v1, Opt_space_cache_v2, }; static const struct constant_table btrfs_parameter_space_cache[] = { { "v1", Opt_space_cache_v1 }, { "v2", Opt_space_cache_v2 }, {} }; enum { Opt_rescue_usebackuproot, Opt_rescue_nologreplay, Opt_rescue_ignorebadroots, Opt_rescue_ignoredatacsums, Opt_rescue_ignoremetacsums, 
Opt_rescue_ignoresuperflags, Opt_rescue_parameter_all, }; static const struct constant_table btrfs_parameter_rescue[] = { { "usebackuproot", Opt_rescue_usebackuproot }, { "nologreplay", Opt_rescue_nologreplay }, { "ignorebadroots", Opt_rescue_ignorebadroots }, { "ibadroots", Opt_rescue_ignorebadroots }, { "ignoredatacsums", Opt_rescue_ignoredatacsums }, { "ignoremetacsums", Opt_rescue_ignoremetacsums}, { "ignoresuperflags", Opt_rescue_ignoresuperflags}, { "idatacsums", Opt_rescue_ignoredatacsums }, { "imetacsums", Opt_rescue_ignoremetacsums}, { "isuperflags", Opt_rescue_ignoresuperflags}, { "all", Opt_rescue_parameter_all }, {} }; #ifdef CONFIG_BTRFS_DEBUG enum { Opt_fragment_parameter_data, Opt_fragment_parameter_metadata, Opt_fragment_parameter_all, }; static const struct constant_table btrfs_parameter_fragment[] = { { "data", Opt_fragment_parameter_data }, { "metadata", Opt_fragment_parameter_metadata }, { "all", Opt_fragment_parameter_all }, {} }; #endif static const struct fs_parameter_spec btrfs_fs_parameters[] = { fsparam_flag_no("acl", Opt_acl), fsparam_flag_no("autodefrag", Opt_defrag), fsparam_flag_no("barrier", Opt_barrier), fsparam_flag("clear_cache", Opt_clear_cache), fsparam_u32("commit", Opt_commit_interval), fsparam_flag("compress", Opt_compress), fsparam_string("compress", Opt_compress_type), fsparam_flag("compress-force", Opt_compress_force), fsparam_string("compress-force", Opt_compress_force_type), fsparam_flag_no("datacow", Opt_datacow), fsparam_flag_no("datasum", Opt_datasum), fsparam_flag("degraded", Opt_degraded), fsparam_string("device", Opt_device), fsparam_flag_no("discard", Opt_discard), fsparam_enum("discard", Opt_discard_mode, btrfs_parameter_discard), fsparam_enum("fatal_errors", Opt_fatal_errors, btrfs_parameter_fatal_errors), fsparam_flag_no("flushoncommit", Opt_flushoncommit), fsparam_string("max_inline", Opt_max_inline), fsparam_u32("metadata_ratio", Opt_ratio), fsparam_flag("rescan_uuid_tree", Opt_rescan_uuid_tree), fsparam_flag("skip_balance", Opt_skip_balance), fsparam_flag_no("space_cache", Opt_space_cache), fsparam_enum("space_cache", Opt_space_cache_version, btrfs_parameter_space_cache), fsparam_flag_no("ssd", Opt_ssd), fsparam_flag_no("ssd_spread", Opt_ssd_spread), fsparam_string("subvol", Opt_subvol), fsparam_flag("subvol=", Opt_subvol_empty), fsparam_u64("subvolid", Opt_subvolid), fsparam_u32("thread_pool", Opt_thread_pool), fsparam_flag_no("treelog", Opt_treelog), fsparam_flag("user_subvol_rm_allowed", Opt_user_subvol_rm_allowed), /* Rescue options. */ fsparam_enum("rescue", Opt_rescue, btrfs_parameter_rescue), /* Deprecated, with alias rescue=nologreplay */ __fsparam(NULL, "nologreplay", Opt_nologreplay, fs_param_deprecated, NULL), /* Deprecated, with alias rescue=usebackuproot */ __fsparam(NULL, "usebackuproot", Opt_usebackuproot, fs_param_deprecated, NULL), /* For compatibility only, alias for "rescue=nologreplay". */ fsparam_flag("norecovery", Opt_norecovery), /* Debugging options. */ fsparam_flag_no("enospc_debug", Opt_enospc_debug), #ifdef CONFIG_BTRFS_DEBUG fsparam_enum("fragment", Opt_fragment, btrfs_parameter_fragment), #endif #ifdef CONFIG_BTRFS_FS_REF_VERIFY fsparam_flag("ref_verify", Opt_ref_verify), #endif {} }; /* No support for restricting writes to btrfs devices yet... 
*/ static inline blk_mode_t btrfs_open_mode(struct fs_context *fc) { return sb_open_mode(fc->sb_flags) & ~BLK_OPEN_RESTRICT_WRITES; } static int btrfs_parse_param(struct fs_context *fc, struct fs_parameter *param) { struct btrfs_fs_context *ctx = fc->fs_private; struct fs_parse_result result; int opt; opt = fs_parse(fc, btrfs_fs_parameters, param, &result); if (opt < 0) return opt; switch (opt) { case Opt_degraded: btrfs_set_opt(ctx->mount_opt, DEGRADED); break; case Opt_subvol_empty: /* * This exists because we used to allow it on accident, so we're * keeping it to maintain ABI. See 37becec95ac3 ("Btrfs: allow * empty subvol= again"). */ break; case Opt_subvol: kfree(ctx->subvol_name); ctx->subvol_name = kstrdup(param->string, GFP_KERNEL); if (!ctx->subvol_name) return -ENOMEM; break; case Opt_subvolid: ctx->subvol_objectid = result.uint_64; /* subvolid=0 means give me the original fs_tree. */ if (!ctx->subvol_objectid) ctx->subvol_objectid = BTRFS_FS_TREE_OBJECTID; break; case Opt_device: { struct btrfs_device *device; blk_mode_t mode = btrfs_open_mode(fc); mutex_lock(&uuid_mutex); device = btrfs_scan_one_device(param->string, mode, false); mutex_unlock(&uuid_mutex); if (IS_ERR(device)) return PTR_ERR(device); break; } case Opt_datasum: if (result.negated) { btrfs_set_opt(ctx->mount_opt, NODATASUM); } else { btrfs_clear_opt(ctx->mount_opt, NODATACOW); btrfs_clear_opt(ctx->mount_opt, NODATASUM); } break; case Opt_datacow: if (result.negated) { btrfs_clear_opt(ctx->mount_opt, COMPRESS); btrfs_clear_opt(ctx->mount_opt, FORCE_COMPRESS); btrfs_set_opt(ctx->mount_opt, NODATACOW); btrfs_set_opt(ctx->mount_opt, NODATASUM); } else { btrfs_clear_opt(ctx->mount_opt, NODATACOW); } break; case Opt_compress_force: case Opt_compress_force_type: btrfs_set_opt(ctx->mount_opt, FORCE_COMPRESS); fallthrough; case Opt_compress: case Opt_compress_type: /* * Provide the same semantics as older kernels that don't use fs * context, specifying the "compress" option clears * "force-compress" without the need to pass * "compress-force=[no|none]" before specifying "compress". 
*/ if (opt != Opt_compress_force && opt != Opt_compress_force_type) btrfs_clear_opt(ctx->mount_opt, FORCE_COMPRESS); if (opt == Opt_compress || opt == Opt_compress_force) { ctx->compress_type = BTRFS_COMPRESS_ZLIB; ctx->compress_level = BTRFS_ZLIB_DEFAULT_LEVEL; btrfs_set_opt(ctx->mount_opt, COMPRESS); btrfs_clear_opt(ctx->mount_opt, NODATACOW); btrfs_clear_opt(ctx->mount_opt, NODATASUM); } else if (strncmp(param->string, "zlib", 4) == 0) { ctx->compress_type = BTRFS_COMPRESS_ZLIB; ctx->compress_level = btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB, param->string + 4); btrfs_set_opt(ctx->mount_opt, COMPRESS); btrfs_clear_opt(ctx->mount_opt, NODATACOW); btrfs_clear_opt(ctx->mount_opt, NODATASUM); } else if (strncmp(param->string, "lzo", 3) == 0) { ctx->compress_type = BTRFS_COMPRESS_LZO; ctx->compress_level = 0; btrfs_set_opt(ctx->mount_opt, COMPRESS); btrfs_clear_opt(ctx->mount_opt, NODATACOW); btrfs_clear_opt(ctx->mount_opt, NODATASUM); } else if (strncmp(param->string, "zstd", 4) == 0) { ctx->compress_type = BTRFS_COMPRESS_ZSTD; ctx->compress_level = btrfs_compress_str2level(BTRFS_COMPRESS_ZSTD, param->string + 4); btrfs_set_opt(ctx->mount_opt, COMPRESS); btrfs_clear_opt(ctx->mount_opt, NODATACOW); btrfs_clear_opt(ctx->mount_opt, NODATASUM); } else if (strncmp(param->string, "no", 2) == 0) { ctx->compress_level = 0; ctx->compress_type = 0; btrfs_clear_opt(ctx->mount_opt, COMPRESS); btrfs_clear_opt(ctx->mount_opt, FORCE_COMPRESS); } else { btrfs_err(NULL, "unrecognized compression value %s", param->string); return -EINVAL; } break; case Opt_ssd: if (result.negated) { btrfs_set_opt(ctx->mount_opt, NOSSD); btrfs_clear_opt(ctx->mount_opt, SSD); btrfs_clear_opt(ctx->mount_opt, SSD_SPREAD); } else { btrfs_set_opt(ctx->mount_opt, SSD); btrfs_clear_opt(ctx->mount_opt, NOSSD); } break; case Opt_ssd_spread: if (result.negated) { btrfs_clear_opt(ctx->mount_opt, SSD_SPREAD); } else { btrfs_set_opt(ctx->mount_opt, SSD); btrfs_set_opt(ctx->mount_opt, SSD_SPREAD); btrfs_clear_opt(ctx->mount_opt, NOSSD); } break; case Opt_barrier: if (result.negated) btrfs_set_opt(ctx->mount_opt, NOBARRIER); else btrfs_clear_opt(ctx->mount_opt, NOBARRIER); break; case Opt_thread_pool: if (result.uint_32 == 0) { btrfs_err(NULL, "invalid value 0 for thread_pool"); return -EINVAL; } ctx->thread_pool_size = result.uint_32; break; case Opt_max_inline: ctx->max_inline = memparse(param->string, NULL); break; case Opt_acl: if (result.negated) { fc->sb_flags &= ~SB_POSIXACL; } else { #ifdef CONFIG_BTRFS_FS_POSIX_ACL fc->sb_flags |= SB_POSIXACL; #else btrfs_err(NULL, "support for ACL not compiled in"); return -EINVAL; #endif } /* * VFS limits the ability to toggle ACL on and off via remount, * despite every file system allowing this. This seems to be * an oversight since we all do, but it'll fail if we're * remounting. So don't set the mask here, we'll check it in * btrfs_reconfigure and do the toggling ourselves. 
*/ if (fc->purpose != FS_CONTEXT_FOR_RECONFIGURE) fc->sb_flags_mask |= SB_POSIXACL; break; case Opt_treelog: if (result.negated) btrfs_set_opt(ctx->mount_opt, NOTREELOG); else btrfs_clear_opt(ctx->mount_opt, NOTREELOG); break; case Opt_nologreplay: btrfs_warn(NULL, "'nologreplay' is deprecated, use 'rescue=nologreplay' instead"); btrfs_set_opt(ctx->mount_opt, NOLOGREPLAY); break; case Opt_norecovery: btrfs_info(NULL, "'norecovery' is for compatibility only, recommended to use 'rescue=nologreplay'"); btrfs_set_opt(ctx->mount_opt, NOLOGREPLAY); break; case Opt_flushoncommit: if (result.negated) btrfs_clear_opt(ctx->mount_opt, FLUSHONCOMMIT); else btrfs_set_opt(ctx->mount_opt, FLUSHONCOMMIT); break; case Opt_ratio: ctx->metadata_ratio = result.uint_32; break; case Opt_discard: if (result.negated) { btrfs_clear_opt(ctx->mount_opt, DISCARD_SYNC); btrfs_clear_opt(ctx->mount_opt, DISCARD_ASYNC); btrfs_set_opt(ctx->mount_opt, NODISCARD); } else { btrfs_set_opt(ctx->mount_opt, DISCARD_SYNC); btrfs_clear_opt(ctx->mount_opt, DISCARD_ASYNC); } break; case Opt_discard_mode: switch (result.uint_32) { case Opt_discard_sync: btrfs_clear_opt(ctx->mount_opt, DISCARD_ASYNC); btrfs_set_opt(ctx->mount_opt, DISCARD_SYNC); break; case Opt_discard_async: btrfs_clear_opt(ctx->mount_opt, DISCARD_SYNC); btrfs_set_opt(ctx->mount_opt, DISCARD_ASYNC); break; default: btrfs_err(NULL, "unrecognized discard mode value %s", param->key); return -EINVAL; } btrfs_clear_opt(ctx->mount_opt, NODISCARD); break; case Opt_space_cache: if (result.negated) { btrfs_set_opt(ctx->mount_opt, NOSPACECACHE); btrfs_clear_opt(ctx->mount_opt, SPACE_CACHE); btrfs_clear_opt(ctx->mount_opt, FREE_SPACE_TREE); } else { btrfs_clear_opt(ctx->mount_opt, FREE_SPACE_TREE); btrfs_set_opt(ctx->mount_opt, SPACE_CACHE); } break; case Opt_space_cache_version: switch (result.uint_32) { case Opt_space_cache_v1: btrfs_set_opt(ctx->mount_opt, SPACE_CACHE); btrfs_clear_opt(ctx->mount_opt, FREE_SPACE_TREE); break; case Opt_space_cache_v2: btrfs_clear_opt(ctx->mount_opt, SPACE_CACHE); btrfs_set_opt(ctx->mount_opt, FREE_SPACE_TREE); break; default: btrfs_err(NULL, "unrecognized space_cache value %s", param->key); return -EINVAL; } break; case Opt_rescan_uuid_tree: btrfs_set_opt(ctx->mount_opt, RESCAN_UUID_TREE); break; case Opt_clear_cache: btrfs_set_opt(ctx->mount_opt, CLEAR_CACHE); break; case Opt_user_subvol_rm_allowed: btrfs_set_opt(ctx->mount_opt, USER_SUBVOL_RM_ALLOWED); break; case Opt_enospc_debug: if (result.negated) btrfs_clear_opt(ctx->mount_opt, ENOSPC_DEBUG); else btrfs_set_opt(ctx->mount_opt, ENOSPC_DEBUG); break; case Opt_defrag: if (result.negated) btrfs_clear_opt(ctx->mount_opt, AUTO_DEFRAG); else btrfs_set_opt(ctx->mount_opt, AUTO_DEFRAG); break; case Opt_usebackuproot: btrfs_warn(NULL, "'usebackuproot' is deprecated, use 'rescue=usebackuproot' instead"); btrfs_set_opt(ctx->mount_opt, USEBACKUPROOT); /* If we're loading the backup roots we can't trust the space cache. 
*/ btrfs_set_opt(ctx->mount_opt, CLEAR_CACHE); break; case Opt_skip_balance: btrfs_set_opt(ctx->mount_opt, SKIP_BALANCE); break; case Opt_fatal_errors: switch (result.uint_32) { case Opt_fatal_errors_panic: btrfs_set_opt(ctx->mount_opt, PANIC_ON_FATAL_ERROR); break; case Opt_fatal_errors_bug: btrfs_clear_opt(ctx->mount_opt, PANIC_ON_FATAL_ERROR); break; default: btrfs_err(NULL, "unrecognized fatal_errors value %s", param->key); return -EINVAL; } break; case Opt_commit_interval: ctx->commit_interval = result.uint_32; if (ctx->commit_interval > BTRFS_WARNING_COMMIT_INTERVAL) { btrfs_warn(NULL, "excessive commit interval %u, use with care", ctx->commit_interval); } if (ctx->commit_interval == 0) ctx->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL; break; case Opt_rescue: switch (result.uint_32) { case Opt_rescue_usebackuproot: btrfs_set_opt(ctx->mount_opt, USEBACKUPROOT); break; case Opt_rescue_nologreplay: btrfs_set_opt(ctx->mount_opt, NOLOGREPLAY); break; case Opt_rescue_ignorebadroots: btrfs_set_opt(ctx->mount_opt, IGNOREBADROOTS); break; case Opt_rescue_ignoredatacsums: btrfs_set_opt(ctx->mount_opt, IGNOREDATACSUMS); break; case Opt_rescue_ignoremetacsums: btrfs_set_opt(ctx->mount_opt, IGNOREMETACSUMS); break; case Opt_rescue_ignoresuperflags: btrfs_set_opt(ctx->mount_opt, IGNORESUPERFLAGS); break; case Opt_rescue_parameter_all: btrfs_set_opt(ctx->mount_opt, IGNOREDATACSUMS); btrfs_set_opt(ctx->mount_opt, IGNOREMETACSUMS); btrfs_set_opt(ctx->mount_opt, IGNORESUPERFLAGS); btrfs_set_opt(ctx->mount_opt, IGNOREBADROOTS); btrfs_set_opt(ctx->mount_opt, NOLOGREPLAY); break; default: btrfs_info(NULL, "unrecognized rescue option '%s'", param->key); return -EINVAL; } break; #ifdef CONFIG_BTRFS_DEBUG case Opt_fragment: switch (result.uint_32) { case Opt_fragment_parameter_all: btrfs_set_opt(ctx->mount_opt, FRAGMENT_DATA); btrfs_set_opt(ctx->mount_opt, FRAGMENT_METADATA); break; case Opt_fragment_parameter_metadata: btrfs_set_opt(ctx->mount_opt, FRAGMENT_METADATA); break; case Opt_fragment_parameter_data: btrfs_set_opt(ctx->mount_opt, FRAGMENT_DATA); break; default: btrfs_info(NULL, "unrecognized fragment option '%s'", param->key); return -EINVAL; } break; #endif #ifdef CONFIG_BTRFS_FS_REF_VERIFY case Opt_ref_verify: btrfs_set_opt(ctx->mount_opt, REF_VERIFY); break; #endif default: btrfs_err(NULL, "unrecognized mount option '%s'", param->key); return -EINVAL; } return 0; } /* * Some options only have meaning at mount time and shouldn't persist across * remounts, or be displayed. Clear these at the end of mount and remount code * paths. 
*/ static void btrfs_clear_oneshot_options(struct btrfs_fs_info *fs_info) { btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT); btrfs_clear_opt(fs_info->mount_opt, CLEAR_CACHE); btrfs_clear_opt(fs_info->mount_opt, NOSPACECACHE); } static bool check_ro_option(const struct btrfs_fs_info *fs_info, unsigned long long mount_opt, unsigned long long opt, const char *opt_name) { if (mount_opt & opt) { btrfs_err(fs_info, "%s must be used with ro mount option", opt_name); return true; } return false; } bool btrfs_check_options(const struct btrfs_fs_info *info, unsigned long long *mount_opt, unsigned long flags) { bool ret = true; if (!(flags & SB_RDONLY) && (check_ro_option(info, *mount_opt, BTRFS_MOUNT_NOLOGREPLAY, "nologreplay") || check_ro_option(info, *mount_opt, BTRFS_MOUNT_IGNOREBADROOTS, "ignorebadroots") || check_ro_option(info, *mount_opt, BTRFS_MOUNT_IGNOREDATACSUMS, "ignoredatacsums") || check_ro_option(info, *mount_opt, BTRFS_MOUNT_IGNOREMETACSUMS, "ignoremetacsums") || check_ro_option(info, *mount_opt, BTRFS_MOUNT_IGNORESUPERFLAGS, "ignoresuperflags"))) ret = false; if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE) && !btrfs_raw_test_opt(*mount_opt, FREE_SPACE_TREE) && !btrfs_raw_test_opt(*mount_opt, CLEAR_CACHE)) { btrfs_err(info, "cannot disable free-space-tree"); ret = false; } if (btrfs_fs_compat_ro(info, BLOCK_GROUP_TREE) && !btrfs_raw_test_opt(*mount_opt, FREE_SPACE_TREE)) { btrfs_err(info, "cannot disable free-space-tree with block-group-tree feature"); ret = false; } if (btrfs_check_mountopts_zoned(info, mount_opt)) ret = false; if (!test_bit(BTRFS_FS_STATE_REMOUNTING, &info->fs_state)) { if (btrfs_raw_test_opt(*mount_opt, SPACE_CACHE)) { btrfs_info(info, "disk space caching is enabled"); btrfs_warn(info, "space cache v1 is being deprecated and will be removed in a future release, please use -o space_cache=v2"); } if (btrfs_raw_test_opt(*mount_opt, FREE_SPACE_TREE)) btrfs_info(info, "using free-space-tree"); } return ret; } /* * This is subtle, we only call this during open_ctree(). We need to pre-load * the mount options with the on-disk settings. Before the new mount API took * effect we would do this on mount and remount. With the new mount API we'll * only do this on the initial mount. * * This isn't a change in behavior, because we're using the current state of the * file system to set the current mount options. If you mounted with special * options to disable these features and then remounted we wouldn't revert the * settings, because mounting without these features cleared the on-disk * settings, so this being called on re-mount is not needed. */ void btrfs_set_free_space_cache_settings(struct btrfs_fs_info *fs_info) { if (fs_info->sectorsize < PAGE_SIZE) { btrfs_clear_opt(fs_info->mount_opt, SPACE_CACHE); if (!btrfs_test_opt(fs_info, FREE_SPACE_TREE)) { btrfs_info(fs_info, "forcing free space tree for sector size %u with page size %lu", fs_info->sectorsize, PAGE_SIZE); btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE); } } /* * At this point our mount options are populated, so we only mess with * these settings if we don't have any settings already. 
*/ if (btrfs_test_opt(fs_info, FREE_SPACE_TREE)) return; if (btrfs_is_zoned(fs_info) && btrfs_free_space_cache_v1_active(fs_info)) { btrfs_info(fs_info, "zoned: clearing existing space cache"); btrfs_set_super_cache_generation(fs_info->super_copy, 0); return; } if (btrfs_test_opt(fs_info, SPACE_CACHE)) return; if (btrfs_test_opt(fs_info, NOSPACECACHE)) return; /* * At this point we don't have explicit options set by the user, set * them ourselves based on the state of the file system. */ if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE); else if (btrfs_free_space_cache_v1_active(fs_info)) btrfs_set_opt(fs_info->mount_opt, SPACE_CACHE); } static void set_device_specific_options(struct btrfs_fs_info *fs_info) { if (!btrfs_test_opt(fs_info, NOSSD) && !fs_info->fs_devices->rotating) btrfs_set_opt(fs_info->mount_opt, SSD); /* * For devices supporting discard turn on discard=async automatically, * unless it's already set or disabled. This could be turned off by * nodiscard for the same mount. * * The zoned mode piggy backs on the discard functionality for * resetting a zone. There is no reason to delay the zone reset as it is * fast enough. So, do not enable async discard for zoned mode. */ if (!(btrfs_test_opt(fs_info, DISCARD_SYNC) || btrfs_test_opt(fs_info, DISCARD_ASYNC) || btrfs_test_opt(fs_info, NODISCARD)) && fs_info->fs_devices->discardable && !btrfs_is_zoned(fs_info)) btrfs_set_opt(fs_info->mount_opt, DISCARD_ASYNC); } char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info, u64 subvol_objectid) { struct btrfs_root *root = fs_info->tree_root; struct btrfs_root *fs_root = NULL; struct btrfs_root_ref *root_ref; struct btrfs_inode_ref *inode_ref; struct btrfs_key key; struct btrfs_path *path = NULL; char *name = NULL, *ptr; u64 dirid; int len; int ret; path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; goto err; } name = kmalloc(PATH_MAX, GFP_KERNEL); if (!name) { ret = -ENOMEM; goto err; } ptr = name + PATH_MAX - 1; ptr[0] = '\0'; /* * Walk up the subvolume trees in the tree of tree roots by root * backrefs until we hit the top-level subvolume. */ while (subvol_objectid != BTRFS_FS_TREE_OBJECTID) { key.objectid = subvol_objectid; key.type = BTRFS_ROOT_BACKREF_KEY; key.offset = (u64)-1; ret = btrfs_search_backwards(root, &key, path); if (ret < 0) { goto err; } else if (ret > 0) { ret = -ENOENT; goto err; } subvol_objectid = key.offset; root_ref = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_root_ref); len = btrfs_root_ref_name_len(path->nodes[0], root_ref); ptr -= len + 1; if (ptr < name) { ret = -ENAMETOOLONG; goto err; } read_extent_buffer(path->nodes[0], ptr + 1, (unsigned long)(root_ref + 1), len); ptr[0] = '/'; dirid = btrfs_root_ref_dirid(path->nodes[0], root_ref); btrfs_release_path(path); fs_root = btrfs_get_fs_root(fs_info, subvol_objectid, true); if (IS_ERR(fs_root)) { ret = PTR_ERR(fs_root); fs_root = NULL; goto err; } /* * Walk up the filesystem tree by inode refs until we hit the * root directory. 
*/ while (dirid != BTRFS_FIRST_FREE_OBJECTID) { key.objectid = dirid; key.type = BTRFS_INODE_REF_KEY; key.offset = (u64)-1; ret = btrfs_search_backwards(fs_root, &key, path); if (ret < 0) { goto err; } else if (ret > 0) { ret = -ENOENT; goto err; } dirid = key.offset; inode_ref = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_inode_ref); len = btrfs_inode_ref_name_len(path->nodes[0], inode_ref); ptr -= len + 1; if (ptr < name) { ret = -ENAMETOOLONG; goto err; } read_extent_buffer(path->nodes[0], ptr + 1, (unsigned long)(inode_ref + 1), len); ptr[0] = '/'; btrfs_release_path(path); } btrfs_put_root(fs_root); fs_root = NULL; } btrfs_free_path(path); if (ptr == name + PATH_MAX - 1) { name[0] = '/'; name[1] = '\0'; } else { memmove(name, ptr, name + PATH_MAX - ptr); } return name; err: btrfs_put_root(fs_root); btrfs_free_path(path); kfree(name); return ERR_PTR(ret); } static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objectid) { struct btrfs_root *root = fs_info->tree_root; struct btrfs_dir_item *di; struct btrfs_path *path; struct btrfs_key location; struct fscrypt_str name = FSTR_INIT("default", 7); u64 dir_id; path = btrfs_alloc_path(); if (!path) return -ENOMEM; /* * Find the "default" dir item which points to the root item that we * will mount by default if we haven't been given a specific subvolume * to mount. */ dir_id = btrfs_super_root_dir(fs_info->super_copy); di = btrfs_lookup_dir_item(NULL, root, path, dir_id, &name, 0); if (IS_ERR(di)) { btrfs_free_path(path); return PTR_ERR(di); } if (!di) { /* * Ok the default dir item isn't there. This is weird since * it's always been there, but don't freak out, just try and * mount the top-level subvolume. */ btrfs_free_path(path); *objectid = BTRFS_FS_TREE_OBJECTID; return 0; } btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location); btrfs_free_path(path); *objectid = location.objectid; return 0; } static int btrfs_fill_super(struct super_block *sb, struct btrfs_fs_devices *fs_devices) { struct btrfs_inode *inode; struct btrfs_fs_info *fs_info = btrfs_sb(sb); int err; sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_magic = BTRFS_SUPER_MAGIC; sb->s_op = &btrfs_super_ops; sb->s_d_op = &btrfs_dentry_operations; sb->s_export_op = &btrfs_export_ops; #ifdef CONFIG_FS_VERITY sb->s_vop = &btrfs_verityops; #endif sb->s_xattr = btrfs_xattr_handlers; sb->s_time_gran = 1; sb->s_iflags |= SB_I_CGROUPWB | SB_I_ALLOW_HSM; err = super_setup_bdi(sb); if (err) { btrfs_err(fs_info, "super_setup_bdi failed"); return err; } err = open_ctree(sb, fs_devices); if (err) { btrfs_err(fs_info, "open_ctree failed: %d", err); return err; } inode = btrfs_iget(BTRFS_FIRST_FREE_OBJECTID, fs_info->fs_root); if (IS_ERR(inode)) { err = PTR_ERR(inode); btrfs_handle_fs_error(fs_info, err, NULL); goto fail_close; } sb->s_root = d_make_root(&inode->vfs_inode); if (!sb->s_root) { err = -ENOMEM; goto fail_close; } sb->s_flags |= SB_ACTIVE; return 0; fail_close: close_ctree(fs_info); return err; } int btrfs_sync_fs(struct super_block *sb, int wait) { struct btrfs_trans_handle *trans; struct btrfs_fs_info *fs_info = btrfs_sb(sb); struct btrfs_root *root = fs_info->tree_root; trace_btrfs_sync_fs(fs_info, wait); if (!wait) { filemap_flush(fs_info->btree_inode->i_mapping); return 0; } btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL); trans = btrfs_attach_transaction_barrier(root); if (IS_ERR(trans)) { /* no transaction, don't bother */ if (PTR_ERR(trans) == -ENOENT) { /* * Exit unless we have some pending changes * that need to go through commit */ if 
(!test_bit(BTRFS_FS_NEED_TRANS_COMMIT, &fs_info->flags)) return 0; /* * A non-blocking test if the fs is frozen. We must not * start a new transaction here otherwise a deadlock * happens. The pending operations are delayed to the * next commit after thawing. */ if (sb_start_write_trylock(sb)) sb_end_write(sb); else return 0; trans = btrfs_start_transaction(root, 0); } if (IS_ERR(trans)) return PTR_ERR(trans); } return btrfs_commit_transaction(trans); } static void print_rescue_option(struct seq_file *seq, const char *s, bool *printed) { seq_printf(seq, "%s%s", (*printed) ? ":" : ",rescue=", s); *printed = true; } static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) { struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb); const char *compress_type; const char *subvol_name; bool printed = false; if (btrfs_test_opt(info, DEGRADED)) seq_puts(seq, ",degraded"); if (btrfs_test_opt(info, NODATASUM)) seq_puts(seq, ",nodatasum"); if (btrfs_test_opt(info, NODATACOW)) seq_puts(seq, ",nodatacow"); if (btrfs_test_opt(info, NOBARRIER)) seq_puts(seq, ",nobarrier"); if (info->max_inline != BTRFS_DEFAULT_MAX_INLINE) seq_printf(seq, ",max_inline=%llu", info->max_inline); if (info->thread_pool_size != min_t(unsigned long, num_online_cpus() + 2, 8)) seq_printf(seq, ",thread_pool=%u", info->thread_pool_size); if (btrfs_test_opt(info, COMPRESS)) { compress_type = btrfs_compress_type2str(info->compress_type); if (btrfs_test_opt(info, FORCE_COMPRESS)) seq_printf(seq, ",compress-force=%s", compress_type); else seq_printf(seq, ",compress=%s", compress_type); if (info->compress_level) seq_printf(seq, ":%d", info->compress_level); } if (btrfs_test_opt(info, NOSSD)) seq_puts(seq, ",nossd"); if (btrfs_test_opt(info, SSD_SPREAD)) seq_puts(seq, ",ssd_spread"); else if (btrfs_test_opt(info, SSD)) seq_puts(seq, ",ssd"); if (btrfs_test_opt(info, NOTREELOG)) seq_puts(seq, ",notreelog"); if (btrfs_test_opt(info, NOLOGREPLAY)) print_rescue_option(seq, "nologreplay", &printed); if (btrfs_test_opt(info, USEBACKUPROOT)) print_rescue_option(seq, "usebackuproot", &printed); if (btrfs_test_opt(info, IGNOREBADROOTS)) print_rescue_option(seq, "ignorebadroots", &printed); if (btrfs_test_opt(info, IGNOREDATACSUMS)) print_rescue_option(seq, "ignoredatacsums", &printed); if (btrfs_test_opt(info, IGNOREMETACSUMS)) print_rescue_option(seq, "ignoremetacsums", &printed); if (btrfs_test_opt(info, IGNORESUPERFLAGS)) print_rescue_option(seq, "ignoresuperflags", &printed); if (btrfs_test_opt(info, FLUSHONCOMMIT)) seq_puts(seq, ",flushoncommit"); if (btrfs_test_opt(info, DISCARD_SYNC)) seq_puts(seq, ",discard"); if (btrfs_test_opt(info, DISCARD_ASYNC)) seq_puts(seq, ",discard=async"); if (!(info->sb->s_flags & SB_POSIXACL)) seq_puts(seq, ",noacl"); if (btrfs_free_space_cache_v1_active(info)) seq_puts(seq, ",space_cache"); else if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE)) seq_puts(seq, ",space_cache=v2"); else seq_puts(seq, ",nospace_cache"); if (btrfs_test_opt(info, RESCAN_UUID_TREE)) seq_puts(seq, ",rescan_uuid_tree"); if (btrfs_test_opt(info, CLEAR_CACHE)) seq_puts(seq, ",clear_cache"); if (btrfs_test_opt(info, USER_SUBVOL_RM_ALLOWED)) seq_puts(seq, ",user_subvol_rm_allowed"); if (btrfs_test_opt(info, ENOSPC_DEBUG)) seq_puts(seq, ",enospc_debug"); if (btrfs_test_opt(info, AUTO_DEFRAG)) seq_puts(seq, ",autodefrag"); if (btrfs_test_opt(info, SKIP_BALANCE)) seq_puts(seq, ",skip_balance"); if (info->metadata_ratio) seq_printf(seq, ",metadata_ratio=%u", info->metadata_ratio); if (btrfs_test_opt(info, PANIC_ON_FATAL_ERROR)) 
seq_puts(seq, ",fatal_errors=panic"); if (info->commit_interval != BTRFS_DEFAULT_COMMIT_INTERVAL) seq_printf(seq, ",commit=%u", info->commit_interval); #ifdef CONFIG_BTRFS_DEBUG if (btrfs_test_opt(info, FRAGMENT_DATA)) seq_puts(seq, ",fragment=data"); if (btrfs_test_opt(info, FRAGMENT_METADATA)) seq_puts(seq, ",fragment=metadata"); #endif if (btrfs_test_opt(info, REF_VERIFY)) seq_puts(seq, ",ref_verify"); seq_printf(seq, ",subvolid=%llu", btrfs_root_id(BTRFS_I(d_inode(dentry))->root)); subvol_name = btrfs_get_subvol_name_from_objectid(info, btrfs_root_id(BTRFS_I(d_inode(dentry))->root)); if (!IS_ERR(subvol_name)) { seq_show_option(seq, "subvol", subvol_name); kfree(subvol_name); } return 0; } /* * subvolumes are identified by ino 256 */ static inline int is_subvolume_inode(struct inode *inode) { if (inode && inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) return 1; return 0; } static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid, struct vfsmount *mnt) { struct dentry *root; int ret; if (!subvol_name) { if (!subvol_objectid) { ret = get_default_subvol_objectid(btrfs_sb(mnt->mnt_sb), &subvol_objectid); if (ret) { root = ERR_PTR(ret); goto out; } } subvol_name = btrfs_get_subvol_name_from_objectid( btrfs_sb(mnt->mnt_sb), subvol_objectid); if (IS_ERR(subvol_name)) { root = ERR_CAST(subvol_name); subvol_name = NULL; goto out; } } root = mount_subtree(mnt, subvol_name); /* mount_subtree() drops our reference on the vfsmount. */ mnt = NULL; if (!IS_ERR(root)) { struct super_block *s = root->d_sb; struct btrfs_fs_info *fs_info = btrfs_sb(s); struct inode *root_inode = d_inode(root); u64 root_objectid = btrfs_root_id(BTRFS_I(root_inode)->root); ret = 0; if (!is_subvolume_inode(root_inode)) { btrfs_err(fs_info, "'%s' is not a valid subvolume", subvol_name); ret = -EINVAL; } if (subvol_objectid && root_objectid != subvol_objectid) { /* * This will also catch a race condition where a * subvolume which was passed by ID is renamed and * another subvolume is renamed over the old location. 
*/ btrfs_err(fs_info, "subvol '%s' does not match subvolid %llu", subvol_name, subvol_objectid); ret = -EINVAL; } if (ret) { dput(root); root = ERR_PTR(ret); deactivate_locked_super(s); } } out: mntput(mnt); kfree(subvol_name); return root; } static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info, u32 new_pool_size, u32 old_pool_size) { if (new_pool_size == old_pool_size) return; fs_info->thread_pool_size = new_pool_size; btrfs_info(fs_info, "resize thread pool %d -> %d", old_pool_size, new_pool_size); btrfs_workqueue_set_max(fs_info->workers, new_pool_size); btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size); btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size); workqueue_set_max_active(fs_info->endio_workers, new_pool_size); workqueue_set_max_active(fs_info->endio_meta_workers, new_pool_size); btrfs_workqueue_set_max(fs_info->endio_write_workers, new_pool_size); btrfs_workqueue_set_max(fs_info->endio_freespace_worker, new_pool_size); btrfs_workqueue_set_max(fs_info->delayed_workers, new_pool_size); } static inline void btrfs_remount_begin(struct btrfs_fs_info *fs_info, unsigned long long old_opts, int flags) { if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) && (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) || (flags & SB_RDONLY))) { /* wait for any defraggers to finish */ wait_event(fs_info->transaction_wait, (atomic_read(&fs_info->defrag_running) == 0)); if (flags & SB_RDONLY) sync_filesystem(fs_info->sb); } } static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info, unsigned long long old_opts) { const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE); /* * We need to cleanup all defragable inodes if the autodefragment is * close or the filesystem is read only. */ if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) && (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) || sb_rdonly(fs_info->sb))) { btrfs_cleanup_defrag_inodes(fs_info); } /* If we toggled discard async */ if (!btrfs_raw_test_opt(old_opts, DISCARD_ASYNC) && btrfs_test_opt(fs_info, DISCARD_ASYNC)) btrfs_discard_resume(fs_info); else if (btrfs_raw_test_opt(old_opts, DISCARD_ASYNC) && !btrfs_test_opt(fs_info, DISCARD_ASYNC)) btrfs_discard_cleanup(fs_info); /* If we toggled space cache */ if (cache_opt != btrfs_free_space_cache_v1_active(fs_info)) btrfs_set_free_space_cache_v1_active(fs_info, cache_opt); } static int btrfs_remount_rw(struct btrfs_fs_info *fs_info) { int ret; if (BTRFS_FS_ERROR(fs_info)) { btrfs_err(fs_info, "remounting read-write after error is not allowed"); return -EINVAL; } if (fs_info->fs_devices->rw_devices == 0) return -EACCES; if (!btrfs_check_rw_degradable(fs_info, NULL)) { btrfs_warn(fs_info, "too many missing devices, writable remount is not allowed"); return -EACCES; } if (btrfs_super_log_root(fs_info->super_copy) != 0) { btrfs_warn(fs_info, "mount required to replay tree-log, cannot remount read-write"); return -EINVAL; } /* * NOTE: when remounting with a change that does writes, don't put it * anywhere above this point, as we are not sure to be safe to write * until we pass the above checks. */ ret = btrfs_start_pre_rw_mount(fs_info); if (ret) return ret; btrfs_clear_sb_rdonly(fs_info->sb); set_bit(BTRFS_FS_OPEN, &fs_info->flags); /* * If we've gone from readonly -> read-write, we need to get our * sync/async discard lists in the right state. */ btrfs_discard_resume(fs_info); return 0; } static int btrfs_remount_ro(struct btrfs_fs_info *fs_info) { /* * This also happens on 'umount -rf' or on shutdown, when the * filesystem is busy. 
*/ cancel_work_sync(&fs_info->async_reclaim_work); cancel_work_sync(&fs_info->async_data_reclaim_work); btrfs_discard_cleanup(fs_info); /* Wait for the uuid_scan task to finish */ down(&fs_info->uuid_tree_rescan_sem); /* Avoid complains from lockdep et al. */ up(&fs_info->uuid_tree_rescan_sem); btrfs_set_sb_rdonly(fs_info->sb); /* * Setting SB_RDONLY will put the cleaner thread to sleep at the next * loop if it's already active. If it's already asleep, we'll leave * unused block groups on disk until we're mounted read-write again * unless we clean them up here. */ btrfs_delete_unused_bgs(fs_info); /* * The cleaner task could be already running before we set the flag * BTRFS_FS_STATE_RO (and SB_RDONLY in the superblock). We must make * sure that after we finish the remount, i.e. after we call * btrfs_commit_super(), the cleaner can no longer start a transaction * - either because it was dropping a dead root, running delayed iputs * or deleting an unused block group (the cleaner picked a block * group from the list of unused block groups before we were able to * in the previous call to btrfs_delete_unused_bgs()). */ wait_on_bit(&fs_info->flags, BTRFS_FS_CLEANER_RUNNING, TASK_UNINTERRUPTIBLE); /* * We've set the superblock to RO mode, so we might have made the * cleaner task sleep without running all pending delayed iputs. Go * through all the delayed iputs here, so that if an unmount happens * without remounting RW we don't end up at finishing close_ctree() * with a non-empty list of delayed iputs. */ btrfs_run_delayed_iputs(fs_info); btrfs_dev_replace_suspend_for_unmount(fs_info); btrfs_scrub_cancel(fs_info); btrfs_pause_balance(fs_info); /* * Pause the qgroup rescan worker if it is running. We don't want it to * be still running after we are in RO mode, as after that, by the time * we unmount, it might have left a transaction open, so we would leak * the transaction and/or crash. */ btrfs_qgroup_wait_for_completion(fs_info, false); return btrfs_commit_super(fs_info); } static void btrfs_ctx_to_info(struct btrfs_fs_info *fs_info, struct btrfs_fs_context *ctx) { fs_info->max_inline = ctx->max_inline; fs_info->commit_interval = ctx->commit_interval; fs_info->metadata_ratio = ctx->metadata_ratio; fs_info->thread_pool_size = ctx->thread_pool_size; fs_info->mount_opt = ctx->mount_opt; fs_info->compress_type = ctx->compress_type; fs_info->compress_level = ctx->compress_level; } static void btrfs_info_to_ctx(struct btrfs_fs_info *fs_info, struct btrfs_fs_context *ctx) { ctx->max_inline = fs_info->max_inline; ctx->commit_interval = fs_info->commit_interval; ctx->metadata_ratio = fs_info->metadata_ratio; ctx->thread_pool_size = fs_info->thread_pool_size; ctx->mount_opt = fs_info->mount_opt; ctx->compress_type = fs_info->compress_type; ctx->compress_level = fs_info->compress_level; } #define btrfs_info_if_set(fs_info, old_ctx, opt, fmt, args...) \ do { \ if ((!old_ctx || !btrfs_raw_test_opt(old_ctx->mount_opt, opt)) && \ btrfs_raw_test_opt(fs_info->mount_opt, opt)) \ btrfs_info(fs_info, fmt, ##args); \ } while (0) #define btrfs_info_if_unset(fs_info, old_ctx, opt, fmt, args...) 
									\
do {									\
	if ((old_ctx && btrfs_raw_test_opt(old_ctx->mount_opt, opt)) &&	\
	    !btrfs_raw_test_opt(fs_info->mount_opt, opt))		\
		btrfs_info(fs_info, fmt, ##args);			\
} while (0)

static void btrfs_emit_options(struct btrfs_fs_info *info,
			       struct btrfs_fs_context *old)
{
	btrfs_info_if_set(info, old, NODATASUM, "setting nodatasum");
	btrfs_info_if_set(info, old, DEGRADED, "allowing degraded mounts");
	btrfs_info_if_set(info, old, NODATACOW, "setting nodatacow");
	btrfs_info_if_set(info, old, SSD, "enabling ssd optimizations");
	btrfs_info_if_set(info, old, SSD_SPREAD, "using spread ssd allocation scheme");
	btrfs_info_if_set(info, old, NOBARRIER, "turning off barriers");
	btrfs_info_if_set(info, old, NOTREELOG, "disabling tree log");
	btrfs_info_if_set(info, old, NOLOGREPLAY, "disabling log replay at mount time");
	btrfs_info_if_set(info, old, FLUSHONCOMMIT, "turning on flush-on-commit");
	btrfs_info_if_set(info, old, DISCARD_SYNC, "turning on sync discard");
	btrfs_info_if_set(info, old, DISCARD_ASYNC, "turning on async discard");
	btrfs_info_if_set(info, old, FREE_SPACE_TREE, "enabling free space tree");
	btrfs_info_if_set(info, old, SPACE_CACHE, "enabling disk space caching");
	btrfs_info_if_set(info, old, CLEAR_CACHE, "force clearing of disk cache");
	btrfs_info_if_set(info, old, AUTO_DEFRAG, "enabling auto defrag");
	btrfs_info_if_set(info, old, FRAGMENT_DATA, "fragmenting data");
	btrfs_info_if_set(info, old, FRAGMENT_METADATA, "fragmenting metadata");
	btrfs_info_if_set(info, old, REF_VERIFY, "doing ref verification");
	btrfs_info_if_set(info, old, USEBACKUPROOT, "trying to use backup root at mount time");
	btrfs_info_if_set(info, old, IGNOREBADROOTS, "ignoring bad roots");
	btrfs_info_if_set(info, old, IGNOREDATACSUMS, "ignoring data csums");
	btrfs_info_if_set(info, old, IGNOREMETACSUMS, "ignoring meta csums");
	btrfs_info_if_set(info, old, IGNORESUPERFLAGS, "ignoring unknown super block flags");

	btrfs_info_if_unset(info, old, NODATACOW, "setting datacow");
	btrfs_info_if_unset(info, old, SSD, "not using ssd optimizations");
	btrfs_info_if_unset(info, old, SSD_SPREAD, "not using spread ssd allocation scheme");
	btrfs_info_if_unset(info, old, NOBARRIER, "turning on barriers");
	btrfs_info_if_unset(info, old, NOTREELOG, "enabling tree log");
	btrfs_info_if_unset(info, old, SPACE_CACHE, "disabling disk space caching");
	btrfs_info_if_unset(info, old, FREE_SPACE_TREE, "disabling free space tree");
	btrfs_info_if_unset(info, old, AUTO_DEFRAG, "disabling auto defrag");
	btrfs_info_if_unset(info, old, COMPRESS, "use no compression");

	/* Did the compression settings change? */
	if (btrfs_test_opt(info, COMPRESS) &&
	    (!old ||
	     old->compress_type != info->compress_type ||
	     old->compress_level != info->compress_level ||
	     (!btrfs_raw_test_opt(old->mount_opt, FORCE_COMPRESS) &&
	      btrfs_raw_test_opt(info->mount_opt, FORCE_COMPRESS)))) {
		const char *compress_type = btrfs_compress_type2str(info->compress_type);

		btrfs_info(info, "%s %s compression, level %d",
			   btrfs_test_opt(info, FORCE_COMPRESS) ?
"force" : "use", compress_type, info->compress_level); } if (info->max_inline != BTRFS_DEFAULT_MAX_INLINE) btrfs_info(info, "max_inline set to %llu", info->max_inline); } static int btrfs_reconfigure(struct fs_context *fc) { struct super_block *sb = fc->root->d_sb; struct btrfs_fs_info *fs_info = btrfs_sb(sb); struct btrfs_fs_context *ctx = fc->fs_private; struct btrfs_fs_context old_ctx; int ret = 0; bool mount_reconfigure = (fc->s_fs_info != NULL); btrfs_info_to_ctx(fs_info, &old_ctx); /* * This is our "bind mount" trick, we don't want to allow the user to do * anything other than mount a different ro/rw and a different subvol, * all of the mount options should be maintained. */ if (mount_reconfigure) ctx->mount_opt = old_ctx.mount_opt; sync_filesystem(sb); set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state); if (!btrfs_check_options(fs_info, &ctx->mount_opt, fc->sb_flags)) return -EINVAL; ret = btrfs_check_features(fs_info, !(fc->sb_flags & SB_RDONLY)); if (ret < 0) return ret; btrfs_ctx_to_info(fs_info, ctx); btrfs_remount_begin(fs_info, old_ctx.mount_opt, fc->sb_flags); btrfs_resize_thread_pool(fs_info, fs_info->thread_pool_size, old_ctx.thread_pool_size); if ((bool)btrfs_test_opt(fs_info, FREE_SPACE_TREE) != (bool)btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) && (!sb_rdonly(sb) || (fc->sb_flags & SB_RDONLY))) { btrfs_warn(fs_info, "remount supports changing free space tree only from RO to RW"); /* Make sure free space cache options match the state on disk. */ if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { btrfs_set_opt(fs_info->mount_opt, FREE_SPACE_TREE); btrfs_clear_opt(fs_info->mount_opt, SPACE_CACHE); } if (btrfs_free_space_cache_v1_active(fs_info)) { btrfs_clear_opt(fs_info->mount_opt, FREE_SPACE_TREE); btrfs_set_opt(fs_info->mount_opt, SPACE_CACHE); } } ret = 0; if (!sb_rdonly(sb) && (fc->sb_flags & SB_RDONLY)) ret = btrfs_remount_ro(fs_info); else if (sb_rdonly(sb) && !(fc->sb_flags & SB_RDONLY)) ret = btrfs_remount_rw(fs_info); if (ret) goto restore; /* * If we set the mask during the parameter parsing VFS would reject the * remount. Here we can set the mask and the value will be updated * appropriately. */ if ((fc->sb_flags & SB_POSIXACL) != (sb->s_flags & SB_POSIXACL)) fc->sb_flags_mask |= SB_POSIXACL; btrfs_emit_options(fs_info, &old_ctx); wake_up_process(fs_info->transaction_kthread); btrfs_remount_cleanup(fs_info, old_ctx.mount_opt); btrfs_clear_oneshot_options(fs_info); clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state); return 0; restore: btrfs_ctx_to_info(fs_info, &old_ctx); btrfs_remount_cleanup(fs_info, old_ctx.mount_opt); clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state); return ret; } /* Used to sort the devices by max_avail(descending sort) */ static int btrfs_cmp_device_free_bytes(const void *a, const void *b) { const struct btrfs_device_info *dev_info1 = a; const struct btrfs_device_info *dev_info2 = b; if (dev_info1->max_avail > dev_info2->max_avail) return -1; else if (dev_info1->max_avail < dev_info2->max_avail) return 1; return 0; } /* * sort the devices by max_avail, in which max free extent size of each device * is stored.(Descending Sort) */ static inline void btrfs_descending_sort_devices( struct btrfs_device_info *devices, size_t nr_devices) { sort(devices, nr_devices, sizeof(struct btrfs_device_info), btrfs_cmp_device_free_bytes, NULL); } /* * The helper to calc the free space on the devices that can be used to store * file data. 
*/ static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info, u64 *free_bytes) { struct btrfs_device_info *devices_info; struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; struct btrfs_device *device; u64 type; u64 avail_space; u64 min_stripe_size; int num_stripes = 1; int i = 0, nr_devices; const struct btrfs_raid_attr *rattr; /* * We aren't under the device list lock, so this is racy-ish, but good * enough for our purposes. */ nr_devices = fs_info->fs_devices->open_devices; if (!nr_devices) { smp_mb(); nr_devices = fs_info->fs_devices->open_devices; ASSERT(nr_devices); if (!nr_devices) { *free_bytes = 0; return 0; } } devices_info = kmalloc_array(nr_devices, sizeof(*devices_info), GFP_KERNEL); if (!devices_info) return -ENOMEM; /* calc min stripe number for data space allocation */ type = btrfs_data_alloc_profile(fs_info); rattr = &btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)]; if (type & BTRFS_BLOCK_GROUP_RAID0) num_stripes = nr_devices; else if (type & BTRFS_BLOCK_GROUP_RAID1_MASK) num_stripes = rattr->ncopies; else if (type & BTRFS_BLOCK_GROUP_RAID10) num_stripes = 4; /* Adjust for more than 1 stripe per device */ min_stripe_size = rattr->dev_stripes * BTRFS_STRIPE_LEN; rcu_read_lock(); list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) { if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) || !device->bdev || test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) continue; if (i >= nr_devices) break; avail_space = device->total_bytes - device->bytes_used; /* align with stripe_len */ avail_space = rounddown(avail_space, BTRFS_STRIPE_LEN); /* * Ensure we have at least min_stripe_size on top of the * reserved space on the device. */ if (avail_space <= BTRFS_DEVICE_RANGE_RESERVED + min_stripe_size) continue; avail_space -= BTRFS_DEVICE_RANGE_RESERVED; devices_info[i].dev = device; devices_info[i].max_avail = avail_space; i++; } rcu_read_unlock(); nr_devices = i; btrfs_descending_sort_devices(devices_info, nr_devices); i = nr_devices - 1; avail_space = 0; while (nr_devices >= rattr->devs_min) { num_stripes = min(num_stripes, nr_devices); if (devices_info[i].max_avail >= min_stripe_size) { int j; u64 alloc_size; avail_space += devices_info[i].max_avail * num_stripes; alloc_size = devices_info[i].max_avail; for (j = i + 1 - num_stripes; j <= i; j++) devices_info[j].max_avail -= alloc_size; } i--; nr_devices--; } kfree(devices_info); *free_bytes = avail_space; return 0; } /* * Calculate numbers for 'df', pessimistic in case of mixed raid profiles. * * If there's a redundant raid level at DATA block groups, use the respective * multiplier to scale the sizes. * * Unused device space usage is based on simulating the chunk allocator * algorithm that respects the device sizes and order of allocations. This is * a close approximation of the actual use but there are other factors that may * change the result (like a new metadata chunk). * * If metadata is exhausted, f_bavail will be 0. 
*/ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb); struct btrfs_super_block *disk_super = fs_info->super_copy; struct btrfs_space_info *found; u64 total_used = 0; u64 total_free_data = 0; u64 total_free_meta = 0; u32 bits = fs_info->sectorsize_bits; __be32 *fsid = (__be32 *)fs_info->fs_devices->fsid; unsigned factor = 1; struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv; int ret; u64 thresh = 0; int mixed = 0; list_for_each_entry(found, &fs_info->space_info, list) { if (found->flags & BTRFS_BLOCK_GROUP_DATA) { int i; total_free_data += found->disk_total - found->disk_used; total_free_data -= btrfs_account_ro_block_groups_free_space(found); for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { if (!list_empty(&found->block_groups[i])) factor = btrfs_bg_type_to_factor( btrfs_raid_array[i].bg_flag); } } /* * Metadata in mixed block group profiles are accounted in data */ if (!mixed && found->flags & BTRFS_BLOCK_GROUP_METADATA) { if (found->flags & BTRFS_BLOCK_GROUP_DATA) mixed = 1; else total_free_meta += found->disk_total - found->disk_used; } total_used += found->disk_used; } buf->f_blocks = div_u64(btrfs_super_total_bytes(disk_super), factor); buf->f_blocks >>= bits; buf->f_bfree = buf->f_blocks - (div_u64(total_used, factor) >> bits); /* Account global block reserve as used, it's in logical size already */ spin_lock(&block_rsv->lock); /* Mixed block groups accounting is not byte-accurate, avoid overflow */ if (buf->f_bfree >= block_rsv->size >> bits) buf->f_bfree -= block_rsv->size >> bits; else buf->f_bfree = 0; spin_unlock(&block_rsv->lock); buf->f_bavail = div_u64(total_free_data, factor); ret = btrfs_calc_avail_data_space(fs_info, &total_free_data); if (ret) return ret; buf->f_bavail += div_u64(total_free_data, factor); buf->f_bavail = buf->f_bavail >> bits; /* * We calculate the remaining metadata space minus global reserve. If * this is (supposedly) smaller than zero, there's no space. But this * does not hold in practice, the exhausted state happens where's still * some positive delta. So we apply some guesswork and compare the * delta to a 4M threshold. (Practically observed delta was ~2M.) * * We probably cannot calculate the exact threshold value because this * depends on the internal reservations requested by various * operations, so some operations that consume a few metadata will * succeed even if the Avail is zero. But this is better than the other * way around. */ thresh = SZ_4M; /* * We only want to claim there's no available space if we can no longer * allocate chunks for our metadata profile and our global reserve will * not fit in the free metadata space. If we aren't ->full then we * still can allocate chunks and thus are fine using the currently * calculated f_bavail. 
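 *
 * As a concrete illustration of the check below: with thresh = 4M, a
 * global reserve of 8M and 10M of free metadata space on a full metadata
 * space_info, 10M - 4M = 6M is smaller than the reserve, so f_bavail is
 * reported as 0 even though some metadata space is technically still free.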
*/ if (!mixed && block_rsv->space_info->full && (total_free_meta < thresh || total_free_meta - thresh < block_rsv->size)) buf->f_bavail = 0; buf->f_type = BTRFS_SUPER_MAGIC; buf->f_bsize = fs_info->sectorsize; buf->f_namelen = BTRFS_NAME_LEN; /* We treat it as constant endianness (it doesn't matter _which_) because we want the fsid to come out the same whether mounted on a big-endian or little-endian host */ buf->f_fsid.val[0] = be32_to_cpu(fsid[0]) ^ be32_to_cpu(fsid[2]); buf->f_fsid.val[1] = be32_to_cpu(fsid[1]) ^ be32_to_cpu(fsid[3]); /* Mask in the root object ID too, to disambiguate subvols */ buf->f_fsid.val[0] ^= btrfs_root_id(BTRFS_I(d_inode(dentry))->root) >> 32; buf->f_fsid.val[1] ^= btrfs_root_id(BTRFS_I(d_inode(dentry))->root); return 0; } static int btrfs_fc_test_super(struct super_block *sb, struct fs_context *fc) { struct btrfs_fs_info *p = fc->s_fs_info; struct btrfs_fs_info *fs_info = btrfs_sb(sb); return fs_info->fs_devices == p->fs_devices; } static int btrfs_get_tree_super(struct fs_context *fc) { struct btrfs_fs_info *fs_info = fc->s_fs_info; struct btrfs_fs_context *ctx = fc->fs_private; struct btrfs_fs_devices *fs_devices = NULL; struct block_device *bdev; struct btrfs_device *device; struct super_block *sb; blk_mode_t mode = btrfs_open_mode(fc); int ret; btrfs_ctx_to_info(fs_info, ctx); mutex_lock(&uuid_mutex); /* * With 'true' passed to btrfs_scan_one_device() (mount time) we expect * either a valid device or an error. */ device = btrfs_scan_one_device(fc->source, mode, true); ASSERT(device != NULL); if (IS_ERR(device)) { mutex_unlock(&uuid_mutex); return PTR_ERR(device); } fs_devices = device->fs_devices; fs_info->fs_devices = fs_devices; ret = btrfs_open_devices(fs_devices, mode, &btrfs_fs_type); mutex_unlock(&uuid_mutex); if (ret) return ret; if (!(fc->sb_flags & SB_RDONLY) && fs_devices->rw_devices == 0) { ret = -EACCES; goto error; } bdev = fs_devices->latest_dev->bdev; /* * From now on the error handling is not straightforward. * * If successful, this will transfer the fs_info into the super block, * and fc->s_fs_info will be NULL. However if there's an existing * super, we'll still have fc->s_fs_info populated. If we error * completely out it'll be cleaned up when we drop the fs_context, * otherwise it's tied to the lifetime of the super_block. */ sb = sget_fc(fc, btrfs_fc_test_super, set_anon_super_fc); if (IS_ERR(sb)) { ret = PTR_ERR(sb); goto error; } set_device_specific_options(fs_info); if (sb->s_root) { btrfs_close_devices(fs_devices); /* * At this stage we may have RO flag mismatch between * fc->sb_flags and sb->s_flags. Caller should detect such * mismatch and reconfigure with sb->s_umount rwsem held if * needed. */ } else { snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev); shrinker_debugfs_rename(sb->s_shrink, "sb-btrfs:%s", sb->s_id); btrfs_sb(sb)->bdev_holder = &btrfs_fs_type; ret = btrfs_fill_super(sb, fs_devices); if (ret) { deactivate_locked_super(sb); return ret; } } btrfs_clear_oneshot_options(fs_info); fc->root = dget(sb->s_root); return 0; error: btrfs_close_devices(fs_devices); return ret; } /* * Ever since commit 0723a0473fb4 ("btrfs: allow mounting btrfs subvolumes * with different ro/rw options") the following works: * * (i) mount /dev/sda3 -o subvol=foo,ro /mnt/foo * (ii) mount /dev/sda3 -o subvol=bar,rw /mnt/bar * * which looks nice and innocent but is actually pretty intricate and deserves * a long comment. 
* * On another filesystem a subvolume mount is close to something like: * * (iii) # create rw superblock + initial mount * mount -t xfs /dev/sdb /opt/ * * # create ro bind mount * mount --bind -o ro /opt/foo /mnt/foo * * # unmount initial mount * umount /opt * * Of course, there's some special subvolume sauce and there's the fact that the * sb->s_root dentry is really swapped after mount_subtree(). But conceptually * it's very close and will help us understand the issue. * * The old mount API didn't cleanly distinguish between a mount being made ro * and a superblock being made ro. The only way to change the ro state of * either object was by passing ms_rdonly. If a new mount was created via * mount(2) such as: * * mount("/dev/sdb", "/mnt", "xfs", ms_rdonly, null); * * the MS_RDONLY flag being specified had two effects: * * (1) MNT_READONLY was raised -> the resulting mount got * @mnt->mnt_flags |= MNT_READONLY raised. * * (2) MS_RDONLY was passed to the filesystem's mount method and the filesystems * made the superblock ro. Note, how SB_RDONLY has the same value as * ms_rdonly and is raised whenever MS_RDONLY is passed through mount(2). * * Creating a subtree mount via (iii) ends up leaving a rw superblock with a * subtree mounted ro. * * But consider the effect on the old mount API on btrfs subvolume mounting * which combines the distinct step in (iii) into a single step. * * By issuing (i) both the mount and the superblock are turned ro. Now when (ii) * is issued the superblock is ro and thus even if the mount created for (ii) is * rw it wouldn't help. Hence, btrfs needed to transition the superblock from ro * to rw for (ii) which it did using an internal remount call. * * IOW, subvolume mounting was inherently complicated due to the ambiguity of * MS_RDONLY in mount(2). Note, this ambiguity has mount(8) always translate * "ro" to MS_RDONLY. IOW, in both (i) and (ii) "ro" becomes MS_RDONLY when * passed by mount(8) to mount(2). * * Enter the new mount API. The new mount API disambiguates making a mount ro * and making a superblock ro. * * (3) To turn a mount ro the MOUNT_ATTR_ONLY flag can be used with either * fsmount() or mount_setattr() this is a pure VFS level change for a * specific mount or mount tree that is never seen by the filesystem itself. * * (4) To turn a superblock ro the "ro" flag must be used with * fsconfig(FSCONFIG_SET_FLAG, "ro"). This option is seen by the filesystem * in fc->sb_flags. * * But, currently the util-linux mount command already utilizes the new mount * API and is still setting fsconfig(FSCONFIG_SET_FLAG, "ro") no matter if it's * btrfs or not, setting the whole super block RO. To make per-subvolume mounting * work with different options work we need to keep backward compatibility. */ static int btrfs_reconfigure_for_mount(struct fs_context *fc, struct vfsmount *mnt) { int ret = 0; if (fc->sb_flags & SB_RDONLY) return ret; down_write(&mnt->mnt_sb->s_umount); if (!(fc->sb_flags & SB_RDONLY) && (mnt->mnt_sb->s_flags & SB_RDONLY)) ret = btrfs_reconfigure(fc); up_write(&mnt->mnt_sb->s_umount); return ret; } static int btrfs_get_tree_subvol(struct fs_context *fc) { struct btrfs_fs_info *fs_info = NULL; struct btrfs_fs_context *ctx = fc->fs_private; struct fs_context *dup_fc; struct dentry *dentry; struct vfsmount *mnt; int ret = 0; /* * Setup a dummy root and fs_info for test/set super. This is because * we don't actually fill this stuff out until open_ctree, but we need * then open_ctree will properly initialize the file system specific * settings later. 
btrfs_init_fs_info initializes the static elements * of the fs_info (locks and such) to make cleanup easier if we find a * superblock with our given fs_devices later on at sget() time. */ fs_info = kvzalloc(sizeof(struct btrfs_fs_info), GFP_KERNEL); if (!fs_info) return -ENOMEM; fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL); fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_KERNEL); if (!fs_info->super_copy || !fs_info->super_for_commit) { btrfs_free_fs_info(fs_info); return -ENOMEM; } btrfs_init_fs_info(fs_info); dup_fc = vfs_dup_fs_context(fc); if (IS_ERR(dup_fc)) { btrfs_free_fs_info(fs_info); return PTR_ERR(dup_fc); } /* * When we do the sget_fc this gets transferred to the sb, so we only * need to set it on the dup_fc as this is what creates the super block. */ dup_fc->s_fs_info = fs_info; /* * We'll do the security settings in our btrfs_get_tree_super() mount * loop, they were duplicated into dup_fc, we can drop the originals * here. */ security_free_mnt_opts(&fc->security); fc->security = NULL; mnt = fc_mount(dup_fc); if (IS_ERR(mnt)) { put_fs_context(dup_fc); return PTR_ERR(mnt); } ret = btrfs_reconfigure_for_mount(dup_fc, mnt); put_fs_context(dup_fc); if (ret) { mntput(mnt); return ret; } /* * This free's ->subvol_name, because if it isn't set we have to * allocate a buffer to hold the subvol_name, so we just drop our * reference to it here. */ dentry = mount_subvol(ctx->subvol_name, ctx->subvol_objectid, mnt); ctx->subvol_name = NULL; if (IS_ERR(dentry)) return PTR_ERR(dentry); fc->root = dentry; return 0; } static int btrfs_get_tree(struct fs_context *fc) { /* * Since we use mount_subtree to mount the default/specified subvol, we * have to do mounts in two steps. * * First pass through we call btrfs_get_tree_subvol(), this is just a * wrapper around fc_mount() to call back into here again, and this time * we'll call btrfs_get_tree_super(). This will do the open_ctree() and * everything to open the devices and file system. Then we return back * with a fully constructed vfsmount in btrfs_get_tree_subvol(), and * from there we can do our mount_subvol() call, which will lookup * whichever subvol we're mounting and setup this fc with the * appropriate dentry for the subvol. */ if (fc->s_fs_info) return btrfs_get_tree_super(fc); return btrfs_get_tree_subvol(fc); } static void btrfs_kill_super(struct super_block *sb) { struct btrfs_fs_info *fs_info = btrfs_sb(sb); kill_anon_super(sb); btrfs_free_fs_info(fs_info); } static void btrfs_free_fs_context(struct fs_context *fc) { struct btrfs_fs_context *ctx = fc->fs_private; struct btrfs_fs_info *fs_info = fc->s_fs_info; if (fs_info) btrfs_free_fs_info(fs_info); if (ctx && refcount_dec_and_test(&ctx->refs)) { kfree(ctx->subvol_name); kfree(ctx); } } static int btrfs_dup_fs_context(struct fs_context *fc, struct fs_context *src_fc) { struct btrfs_fs_context *ctx = src_fc->fs_private; /* * Give a ref to our ctx to this dup, as we want to keep it around for * our original fc so we can have the subvolume name or objectid. * * We unset ->source in the original fc because the dup needs it for * mounting, and then once we free the dup it'll free ->source, so we * need to make sure we're only pointing to it in one fc. 
*/ refcount_inc(&ctx->refs); fc->fs_private = ctx; fc->source = src_fc->source; src_fc->source = NULL; return 0; } static const struct fs_context_operations btrfs_fs_context_ops = { .parse_param = btrfs_parse_param, .reconfigure = btrfs_reconfigure, .get_tree = btrfs_get_tree, .dup = btrfs_dup_fs_context, .free = btrfs_free_fs_context, }; static int btrfs_init_fs_context(struct fs_context *fc) { struct btrfs_fs_context *ctx; ctx = kzalloc(sizeof(struct btrfs_fs_context), GFP_KERNEL); if (!ctx) return -ENOMEM; refcount_set(&ctx->refs, 1); fc->fs_private = ctx; fc->ops = &btrfs_fs_context_ops; if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) { btrfs_info_to_ctx(btrfs_sb(fc->root->d_sb), ctx); } else { ctx->thread_pool_size = min_t(unsigned long, num_online_cpus() + 2, 8); ctx->max_inline = BTRFS_DEFAULT_MAX_INLINE; ctx->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL; } #ifdef CONFIG_BTRFS_FS_POSIX_ACL fc->sb_flags |= SB_POSIXACL; #endif fc->sb_flags |= SB_I_VERSION; return 0; } static struct file_system_type btrfs_fs_type = { .owner = THIS_MODULE, .name = "btrfs", .init_fs_context = btrfs_init_fs_context, .parameters = btrfs_fs_parameters, .kill_sb = btrfs_kill_super, .fs_flags = FS_REQUIRES_DEV | FS_BINARY_MOUNTDATA | FS_ALLOW_IDMAP | FS_MGTIME, }; MODULE_ALIAS_FS("btrfs"); static int btrfs_control_open(struct inode *inode, struct file *file) { /* * The control file's private_data is used to hold the * transaction when it is started and is used to keep * track of whether a transaction is already in progress. */ file->private_data = NULL; return 0; } /* * Used by /dev/btrfs-control for devices ioctls. */ static long btrfs_control_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct btrfs_ioctl_vol_args *vol; struct btrfs_device *device = NULL; dev_t devt = 0; int ret = -ENOTTY; if (!capable(CAP_SYS_ADMIN)) return -EPERM; vol = memdup_user((void __user *)arg, sizeof(*vol)); if (IS_ERR(vol)) return PTR_ERR(vol); ret = btrfs_check_ioctl_vol_args_path(vol); if (ret < 0) goto out; switch (cmd) { case BTRFS_IOC_SCAN_DEV: mutex_lock(&uuid_mutex); /* * Scanning outside of mount can return NULL which would turn * into 0 error code. */ device = btrfs_scan_one_device(vol->name, BLK_OPEN_READ, false); ret = PTR_ERR_OR_ZERO(device); mutex_unlock(&uuid_mutex); break; case BTRFS_IOC_FORGET_DEV: if (vol->name[0] != 0) { ret = lookup_bdev(vol->name, &devt); if (ret) break; } ret = btrfs_forget_devices(devt); break; case BTRFS_IOC_DEVICES_READY: mutex_lock(&uuid_mutex); /* * Scanning outside of mount can return NULL which would turn * into 0 error code. */ device = btrfs_scan_one_device(vol->name, BLK_OPEN_READ, false); if (IS_ERR_OR_NULL(device)) { mutex_unlock(&uuid_mutex); if (IS_ERR(device)) ret = PTR_ERR(device); else ret = 0; break; } ret = !(device->fs_devices->num_devices == device->fs_devices->total_devices); mutex_unlock(&uuid_mutex); break; case BTRFS_IOC_GET_SUPPORTED_FEATURES: ret = btrfs_ioctl_get_supported_features((void __user*)arg); break; } out: kfree(vol); return ret; } static int btrfs_freeze(struct super_block *sb) { struct btrfs_fs_info *fs_info = btrfs_sb(sb); set_bit(BTRFS_FS_FROZEN, &fs_info->flags); /* * We don't need a barrier here, we'll wait for any transaction that * could be in progress on other threads (and do delayed iputs that * we want to avoid on a frozen filesystem), or do the commit * ourselves. 
*/ return btrfs_commit_current_transaction(fs_info->tree_root); } static int check_dev_super(struct btrfs_device *dev) { struct btrfs_fs_info *fs_info = dev->fs_info; struct btrfs_super_block *sb; u64 last_trans; u16 csum_type; int ret = 0; /* This should be called with fs still frozen. */ ASSERT(test_bit(BTRFS_FS_FROZEN, &fs_info->flags)); /* Missing dev, no need to check. */ if (!dev->bdev) return 0; /* Only need to check the primary super block. */ sb = btrfs_read_dev_one_super(dev->bdev, 0, true); if (IS_ERR(sb)) return PTR_ERR(sb); /* Verify the checksum. */ csum_type = btrfs_super_csum_type(sb); if (csum_type != btrfs_super_csum_type(fs_info->super_copy)) { btrfs_err(fs_info, "csum type changed, has %u expect %u", csum_type, btrfs_super_csum_type(fs_info->super_copy)); ret = -EUCLEAN; goto out; } if (btrfs_check_super_csum(fs_info, sb)) { btrfs_err(fs_info, "csum for on-disk super block no longer matches"); ret = -EUCLEAN; goto out; } /* Btrfs_validate_super() includes fsid check against super->fsid. */ ret = btrfs_validate_super(fs_info, sb, 0); if (ret < 0) goto out; last_trans = btrfs_get_last_trans_committed(fs_info); if (btrfs_super_generation(sb) != last_trans) { btrfs_err(fs_info, "transid mismatch, has %llu expect %llu", btrfs_super_generation(sb), last_trans); ret = -EUCLEAN; goto out; } out: btrfs_release_disk_super(sb); return ret; } static int btrfs_unfreeze(struct super_block *sb) { struct btrfs_fs_info *fs_info = btrfs_sb(sb); struct btrfs_device *device; int ret = 0; /* * Make sure the fs is not changed by accident (like hibernation then * modified by other OS). * If we found anything wrong, we mark the fs error immediately. * * And since the fs is frozen, no one can modify the fs yet, thus * we don't need to hold device_list_mutex. */ list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) { ret = check_dev_super(device); if (ret < 0) { btrfs_handle_fs_error(fs_info, ret, "super block on devid %llu got modified unexpectedly", device->devid); break; } } clear_bit(BTRFS_FS_FROZEN, &fs_info->flags); /* * We still return 0, to allow VFS layer to unfreeze the fs even the * above checks failed. Since the fs is either fine or read-only, we're * safe to continue, without causing further damage. */ return 0; } static int btrfs_show_devname(struct seq_file *m, struct dentry *root) { struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb); /* * There should be always a valid pointer in latest_dev, it may be stale * for a short moment in case it's being deleted but still valid until * the end of RCU grace period. */ rcu_read_lock(); seq_escape(m, btrfs_dev_name(fs_info->fs_devices->latest_dev), " \t\n\\"); rcu_read_unlock(); return 0; } static long btrfs_nr_cached_objects(struct super_block *sb, struct shrink_control *sc) { struct btrfs_fs_info *fs_info = btrfs_sb(sb); const s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps); trace_btrfs_extent_map_shrinker_count(fs_info, nr); return nr; } static long btrfs_free_cached_objects(struct super_block *sb, struct shrink_control *sc) { const long nr_to_scan = min_t(unsigned long, LONG_MAX, sc->nr_to_scan); struct btrfs_fs_info *fs_info = btrfs_sb(sb); btrfs_free_extent_maps(fs_info, nr_to_scan); /* The extent map shrinker runs asynchronously, so always return 0. 
*/ return 0; } static const struct super_operations btrfs_super_ops = { .drop_inode = btrfs_drop_inode, .evict_inode = btrfs_evict_inode, .put_super = btrfs_put_super, .sync_fs = btrfs_sync_fs, .show_options = btrfs_show_options, .show_devname = btrfs_show_devname, .alloc_inode = btrfs_alloc_inode, .destroy_inode = btrfs_destroy_inode, .free_inode = btrfs_free_inode, .statfs = btrfs_statfs, .freeze_fs = btrfs_freeze, .unfreeze_fs = btrfs_unfreeze, .nr_cached_objects = btrfs_nr_cached_objects, .free_cached_objects = btrfs_free_cached_objects, }; static const struct file_operations btrfs_ctl_fops = { .open = btrfs_control_open, .unlocked_ioctl = btrfs_control_ioctl, .compat_ioctl = compat_ptr_ioctl, .owner = THIS_MODULE, .llseek = noop_llseek, }; static struct miscdevice btrfs_misc = { .minor = BTRFS_MINOR, .name = "btrfs-control", .fops = &btrfs_ctl_fops }; MODULE_ALIAS_MISCDEV(BTRFS_MINOR); MODULE_ALIAS("devname:btrfs-control"); static int __init btrfs_interface_init(void) { return misc_register(&btrfs_misc); } static __cold void btrfs_interface_exit(void) { misc_deregister(&btrfs_misc); } static int __init btrfs_print_mod_info(void) { static const char options[] = "" #ifdef CONFIG_BTRFS_EXPERIMENTAL ", experimental=on" #endif #ifdef CONFIG_BTRFS_DEBUG ", debug=on" #endif #ifdef CONFIG_BTRFS_ASSERT ", assert=on" #endif #ifdef CONFIG_BTRFS_FS_REF_VERIFY ", ref-verify=on" #endif #ifdef CONFIG_BLK_DEV_ZONED ", zoned=yes" #else ", zoned=no" #endif #ifdef CONFIG_FS_VERITY ", fsverity=yes" #else ", fsverity=no" #endif ; #ifdef CONFIG_BTRFS_EXPERIMENTAL if (btrfs_get_mod_read_policy() == NULL) pr_info("Btrfs loaded%s\n", options); else pr_info("Btrfs loaded%s, read_policy=%s\n", options, btrfs_get_mod_read_policy()); #else pr_info("Btrfs loaded%s\n", options); #endif return 0; } static int register_btrfs(void) { return register_filesystem(&btrfs_fs_type); } static void unregister_btrfs(void) { unregister_filesystem(&btrfs_fs_type); } /* Helper structure for long init/exit functions. */ struct init_sequence { int (*init_func)(void); /* Can be NULL if the init_func doesn't need cleanup. 
*/ void (*exit_func)(void); }; static const struct init_sequence mod_init_seq[] = { { .init_func = btrfs_props_init, .exit_func = NULL, }, { .init_func = btrfs_init_sysfs, .exit_func = btrfs_exit_sysfs, }, { .init_func = btrfs_init_compress, .exit_func = btrfs_exit_compress, }, { .init_func = btrfs_init_cachep, .exit_func = btrfs_destroy_cachep, }, { .init_func = btrfs_init_dio, .exit_func = btrfs_destroy_dio, }, { .init_func = btrfs_transaction_init, .exit_func = btrfs_transaction_exit, }, { .init_func = btrfs_ctree_init, .exit_func = btrfs_ctree_exit, }, { .init_func = btrfs_free_space_init, .exit_func = btrfs_free_space_exit, }, { .init_func = extent_state_init_cachep, .exit_func = extent_state_free_cachep, }, { .init_func = extent_buffer_init_cachep, .exit_func = extent_buffer_free_cachep, }, { .init_func = btrfs_bioset_init, .exit_func = btrfs_bioset_exit, }, { .init_func = extent_map_init, .exit_func = extent_map_exit, #ifdef CONFIG_BTRFS_EXPERIMENTAL }, { .init_func = btrfs_read_policy_init, .exit_func = NULL, #endif }, { .init_func = ordered_data_init, .exit_func = ordered_data_exit, }, { .init_func = btrfs_delayed_inode_init, .exit_func = btrfs_delayed_inode_exit, }, { .init_func = btrfs_auto_defrag_init, .exit_func = btrfs_auto_defrag_exit, }, { .init_func = btrfs_delayed_ref_init, .exit_func = btrfs_delayed_ref_exit, }, { .init_func = btrfs_prelim_ref_init, .exit_func = btrfs_prelim_ref_exit, }, { .init_func = btrfs_interface_init, .exit_func = btrfs_interface_exit, }, { .init_func = btrfs_print_mod_info, .exit_func = NULL, }, { .init_func = btrfs_run_sanity_tests, .exit_func = NULL, }, { .init_func = register_btrfs, .exit_func = unregister_btrfs, } }; static bool mod_init_result[ARRAY_SIZE(mod_init_seq)]; static __always_inline void btrfs_exit_btrfs_fs(void) { int i; for (i = ARRAY_SIZE(mod_init_seq) - 1; i >= 0; i--) { if (!mod_init_result[i]) continue; if (mod_init_seq[i].exit_func) mod_init_seq[i].exit_func(); mod_init_result[i] = false; } } static void __exit exit_btrfs_fs(void) { btrfs_exit_btrfs_fs(); btrfs_cleanup_fs_uuids(); } static int __init init_btrfs_fs(void) { int ret; int i; for (i = 0; i < ARRAY_SIZE(mod_init_seq); i++) { ASSERT(!mod_init_result[i]); ret = mod_init_seq[i].init_func(); if (ret < 0) { btrfs_exit_btrfs_fs(); return ret; } mod_init_result[i] = true; } return 0; } late_initcall(init_btrfs_fs); module_exit(exit_btrfs_fs) MODULE_DESCRIPTION("B-Tree File System (BTRFS)"); MODULE_LICENSE("GPL"); MODULE_SOFTDEP("pre: crc32c"); MODULE_SOFTDEP("pre: xxhash64"); MODULE_SOFTDEP("pre: sha256"); MODULE_SOFTDEP("pre: blake2b-256");
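/*
 * The mod_init_seq[] table above pairs every init_func with an optional
 * exit_func, and init_btrfs_fs()/btrfs_exit_btrfs_fs() walk it forward on
 * load and backward on failure or unload.  The stand-alone sketch below
 * (hypothetical stage_* names, plain userspace C, not kernel code)
 * illustrates that same table-driven rollback pattern.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct demo_init_sequence {
	int (*init_func)(void);
	void (*exit_func)(void);	/* NULL when no cleanup is needed */
};

static int stage_a_init(void) { puts("A init"); return 0; }
static void stage_a_exit(void) { puts("A exit"); }
static int stage_b_init(void) { puts("B init"); return 0; }
static void stage_b_exit(void) { puts("B exit"); }
static int stage_c_init(void) { puts("C init fails"); return -1; }

static const struct demo_init_sequence demo_seq[] = {
	{ .init_func = stage_a_init, .exit_func = stage_a_exit },
	{ .init_func = stage_b_init, .exit_func = stage_b_exit },
	{ .init_func = stage_c_init, .exit_func = NULL },
};

#define DEMO_NR (sizeof(demo_seq) / sizeof(demo_seq[0]))

static bool demo_result[DEMO_NR];

/* Undo every stage that succeeded, newest first. */
static void demo_rollback(void)
{
	for (int i = (int)DEMO_NR - 1; i >= 0; i--) {
		if (!demo_result[i])
			continue;
		if (demo_seq[i].exit_func)
			demo_seq[i].exit_func();
		demo_result[i] = false;
	}
}

int main(void)
{
	for (size_t i = 0; i < DEMO_NR; i++) {
		if (demo_seq[i].init_func() < 0) {
			demo_rollback();	/* C fails: prints "B exit", "A exit" */
			return 1;
		}
		demo_result[i] = true;
	}
	demo_rollback();
	return 0;
}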
// SPDX-License-Identifier: GPL-2.0-only
/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2006-2010 Patrick McHardy <kaber@trash.net>
 */

#include <linux/types.h>
#include <linux/timer.h>
#include <linux/netfilter.h>
#include <linux/in.h>
#include <linux/icmp.h>
#include <linux/seq_file.h>
#include <net/ip.h>
#include <net/checksum.h>
#include <linux/netfilter_ipv4.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_log.h>

#include "nf_internals.h"

static const unsigned int nf_ct_icmp_timeout = 30*HZ;

bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
		       struct net *net, struct nf_conntrack_tuple *tuple)
{
	const struct icmphdr *hp;
	struct icmphdr _hdr;

	hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
	if (hp == NULL)
		return false;

	tuple->dst.u.icmp.type = hp->type;
	tuple->src.u.icmp.id = hp->un.echo.id;
	tuple->dst.u.icmp.code = hp->code;

	return true;
}

/* Add 1; spaces filled with 0. */
static const u_int8_t invmap[] = {
	[ICMP_ECHO] = ICMP_ECHOREPLY + 1,
	[ICMP_ECHOREPLY] = ICMP_ECHO + 1,
	[ICMP_TIMESTAMP] = ICMP_TIMESTAMPREPLY + 1,
	[ICMP_TIMESTAMPREPLY] = ICMP_TIMESTAMP + 1,
	[ICMP_INFO_REQUEST] = ICMP_INFO_REPLY + 1,
	[ICMP_INFO_REPLY] = ICMP_INFO_REQUEST + 1,
	[ICMP_ADDRESS] = ICMP_ADDRESSREPLY + 1,
	[ICMP_ADDRESSREPLY] = ICMP_ADDRESS + 1
};

bool nf_conntrack_invert_icmp_tuple(struct nf_conntrack_tuple *tuple,
				    const struct nf_conntrack_tuple *orig)
{
	if (orig->dst.u.icmp.type >= sizeof(invmap) ||
	    !invmap[orig->dst.u.icmp.type])
		return false;

	tuple->src.u.icmp.id = orig->src.u.icmp.id;
	tuple->dst.u.icmp.type = invmap[orig->dst.u.icmp.type] - 1;
	tuple->dst.u.icmp.code = orig->dst.u.icmp.code;

	return true;
}

/* Returns verdict for packet, or -1 for invalid.
*/ int nf_conntrack_icmp_packet(struct nf_conn *ct, struct sk_buff *skb, enum ip_conntrack_info ctinfo, const struct nf_hook_state *state) { /* Do not immediately delete the connection after the first successful reply to avoid excessive conntrackd traffic and also to handle correctly ICMP echo reply duplicates. */ unsigned int *timeout = nf_ct_timeout_lookup(ct); static const u_int8_t valid_new[] = { [ICMP_ECHO] = 1, [ICMP_TIMESTAMP] = 1, [ICMP_INFO_REQUEST] = 1, [ICMP_ADDRESS] = 1 }; if (state->pf != NFPROTO_IPV4) return -NF_ACCEPT; if (ct->tuplehash[0].tuple.dst.u.icmp.type >= sizeof(valid_new) || !valid_new[ct->tuplehash[0].tuple.dst.u.icmp.type]) { /* Can't create a new ICMP `conn' with this. */ pr_debug("icmp: can't create new conn with type %u\n", ct->tuplehash[0].tuple.dst.u.icmp.type); nf_ct_dump_tuple_ip(&ct->tuplehash[0].tuple); return -NF_ACCEPT; } if (!timeout) timeout = &nf_icmp_pernet(nf_ct_net(ct))->timeout; nf_ct_refresh_acct(ct, ctinfo, skb, *timeout); return NF_ACCEPT; } /* Check inner header is related to any of the existing connections */ int nf_conntrack_inet_error(struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, const struct nf_hook_state *state, u8 l4proto, union nf_inet_addr *outer_daddr) { struct nf_conntrack_tuple innertuple, origtuple; const struct nf_conntrack_tuple_hash *h; const struct nf_conntrack_zone *zone; enum ip_conntrack_info ctinfo; struct nf_conntrack_zone tmp; union nf_inet_addr *ct_daddr; enum ip_conntrack_dir dir; struct nf_conn *ct; WARN_ON(skb_nfct(skb)); zone = nf_ct_zone_tmpl(tmpl, skb, &tmp); /* Are they talking about one of our connections? */ if (!nf_ct_get_tuplepr(skb, dataoff, state->pf, state->net, &origtuple)) return -NF_ACCEPT; /* Ordinarily, we'd expect the inverted tupleproto, but it's been preserved inside the ICMP. */ if (!nf_ct_invert_tuple(&innertuple, &origtuple)) return -NF_ACCEPT; h = nf_conntrack_find_get(state->net, zone, &innertuple); if (!h) return -NF_ACCEPT; /* Consider: A -> T (=This machine) -> B * Conntrack entry will look like this: * Original: A->B * Reply: B->T (SNAT case) OR A * * When this function runs, we got packet that looks like this: * iphdr|icmphdr|inner_iphdr|l4header (tcp, udp, ..). * * Above nf_conntrack_find_get() makes lookup based on inner_hdr, * so we should expect that destination of the found connection * matches outer header destination address. * * In above example, we can consider these two cases: * 1. Error coming in reply direction from B or M (middle box) to * T (SNAT case) or A. * Inner saddr will be B, dst will be T or A. * The found conntrack will be reply tuple (B->T/A). * 2. Error coming in original direction from A or M to B. * Inner saddr will be A, inner daddr will be B. * The found conntrack will be original tuple (A->B). * * In both cases, conntrack[dir].dst == inner.dst. * * A bogus packet could look like this: * Inner: B->T * Outer: B->X (other machine reachable by T). * * In this case, lookup yields connection A->B and will * set packet from B->X as *RELATED*, even though no connection * from X was ever seen. 
*/ ct = nf_ct_tuplehash_to_ctrack(h); dir = NF_CT_DIRECTION(h); ct_daddr = &ct->tuplehash[dir].tuple.dst.u3; if (!nf_inet_addr_cmp(outer_daddr, ct_daddr)) { if (state->pf == AF_INET) { nf_l4proto_log_invalid(skb, state, l4proto, "outer daddr %pI4 != inner %pI4", &outer_daddr->ip, &ct_daddr->ip); } else if (state->pf == AF_INET6) { nf_l4proto_log_invalid(skb, state, l4proto, "outer daddr %pI6 != inner %pI6", &outer_daddr->ip6, &ct_daddr->ip6); } nf_ct_put(ct); return -NF_ACCEPT; } ctinfo = IP_CT_RELATED; if (dir == IP_CT_DIR_REPLY) ctinfo += IP_CT_IS_REPLY; /* Update skb to refer to this connection */ nf_ct_set(skb, ct, ctinfo); return NF_ACCEPT; } static void icmp_error_log(const struct sk_buff *skb, const struct nf_hook_state *state, const char *msg) { nf_l4proto_log_invalid(skb, state, IPPROTO_ICMP, "%s", msg); } /* Small and modified version of icmp_rcv */ int nf_conntrack_icmpv4_error(struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, const struct nf_hook_state *state) { union nf_inet_addr outer_daddr; const struct icmphdr *icmph; struct icmphdr _ih; /* Not enough header? */ icmph = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih); if (icmph == NULL) { icmp_error_log(skb, state, "short packet"); return -NF_ACCEPT; } /* See nf_conntrack_proto_tcp.c */ if (state->net->ct.sysctl_checksum && state->hook == NF_INET_PRE_ROUTING && nf_ip_checksum(skb, state->hook, dataoff, IPPROTO_ICMP)) { icmp_error_log(skb, state, "bad hw icmp checksum"); return -NF_ACCEPT; } /* * 18 is the highest 'known' ICMP type. Anything else is a mystery * * RFC 1122: 3.2.2 Unknown ICMP messages types MUST be silently * discarded. */ if (icmph->type > NR_ICMP_TYPES) { icmp_error_log(skb, state, "invalid icmp type"); return -NF_ACCEPT; } /* Need to track icmp error message? 
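 * Only ICMP error messages (destination unreachable, source quench,
 * redirect, time exceeded, parameter problem) embed the header of the
 * packet that triggered them, so only those are matched against existing
 * connections below; any other type that got this far is simply accepted.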
*/ if (!icmp_is_err(icmph->type)) return NF_ACCEPT; memset(&outer_daddr, 0, sizeof(outer_daddr)); outer_daddr.ip = ip_hdr(skb)->daddr; dataoff += sizeof(*icmph); return nf_conntrack_inet_error(tmpl, skb, dataoff, state, IPPROTO_ICMP, &outer_daddr); } #if IS_ENABLED(CONFIG_NF_CT_NETLINK) #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/nfnetlink_conntrack.h> static int icmp_tuple_to_nlattr(struct sk_buff *skb, const struct nf_conntrack_tuple *t) { if (nla_put_be16(skb, CTA_PROTO_ICMP_ID, t->src.u.icmp.id) || nla_put_u8(skb, CTA_PROTO_ICMP_TYPE, t->dst.u.icmp.type) || nla_put_u8(skb, CTA_PROTO_ICMP_CODE, t->dst.u.icmp.code)) goto nla_put_failure; return 0; nla_put_failure: return -1; } static const struct nla_policy icmp_nla_policy[CTA_PROTO_MAX+1] = { [CTA_PROTO_ICMP_TYPE] = { .type = NLA_U8 }, [CTA_PROTO_ICMP_CODE] = { .type = NLA_U8 }, [CTA_PROTO_ICMP_ID] = { .type = NLA_U16 }, }; static int icmp_nlattr_to_tuple(struct nlattr *tb[], struct nf_conntrack_tuple *tuple, u_int32_t flags) { if (flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_TYPE)) { if (!tb[CTA_PROTO_ICMP_TYPE]) return -EINVAL; tuple->dst.u.icmp.type = nla_get_u8(tb[CTA_PROTO_ICMP_TYPE]); if (tuple->dst.u.icmp.type >= sizeof(invmap) || !invmap[tuple->dst.u.icmp.type]) return -EINVAL; } if (flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_CODE)) { if (!tb[CTA_PROTO_ICMP_CODE]) return -EINVAL; tuple->dst.u.icmp.code = nla_get_u8(tb[CTA_PROTO_ICMP_CODE]); } if (flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_ID)) { if (!tb[CTA_PROTO_ICMP_ID]) return -EINVAL; tuple->src.u.icmp.id = nla_get_be16(tb[CTA_PROTO_ICMP_ID]); } return 0; } static unsigned int icmp_nlattr_tuple_size(void) { static unsigned int size __read_mostly; if (!size) size = nla_policy_len(icmp_nla_policy, CTA_PROTO_MAX + 1); return size; } #endif #ifdef CONFIG_NF_CONNTRACK_TIMEOUT #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/nfnetlink_cttimeout.h> static int icmp_timeout_nlattr_to_obj(struct nlattr *tb[], struct net *net, void *data) { unsigned int *timeout = data; struct nf_icmp_net *in = nf_icmp_pernet(net); if (tb[CTA_TIMEOUT_ICMP_TIMEOUT]) { if (!timeout) timeout = &in->timeout; *timeout = ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMP_TIMEOUT])) * HZ; } else if (timeout) { /* Set default ICMP timeout. */ *timeout = in->timeout; } return 0; } static int icmp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) { const unsigned int *timeout = data; if (nla_put_be32(skb, CTA_TIMEOUT_ICMP_TIMEOUT, htonl(*timeout / HZ))) goto nla_put_failure; return 0; nla_put_failure: return -ENOSPC; } static const struct nla_policy icmp_timeout_nla_policy[CTA_TIMEOUT_ICMP_MAX+1] = { [CTA_TIMEOUT_ICMP_TIMEOUT] = { .type = NLA_U32 }, }; #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ void nf_conntrack_icmp_init_net(struct net *net) { struct nf_icmp_net *in = nf_icmp_pernet(net); in->timeout = nf_ct_icmp_timeout; } const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp = { .l4proto = IPPROTO_ICMP, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) .tuple_to_nlattr = icmp_tuple_to_nlattr, .nlattr_tuple_size = icmp_nlattr_tuple_size, .nlattr_to_tuple = icmp_nlattr_to_tuple, .nla_policy = icmp_nla_policy, #endif #ifdef CONFIG_NF_CONNTRACK_TIMEOUT .ctnl_timeout = { .nlattr_to_obj = icmp_timeout_nlattr_to_obj, .obj_to_nlattr = icmp_timeout_obj_to_nlattr, .nlattr_max = CTA_TIMEOUT_ICMP_MAX, .obj_size = sizeof(unsigned int), .nla_policy = icmp_timeout_nla_policy, }, #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ };
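/*
 * nf_conntrack_invert_icmp_tuple() above relies on invmap[] storing
 * "inverse type + 1" so that a zero entry means "no inverse / cannot open
 * a new conntrack for this type".  The stand-alone sketch below (demo_*
 * names are hypothetical; the numeric values are the standard ICMP type
 * numbers) shows the same encoding outside the kernel.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_ICMP_ECHOREPLY		0
#define DEMO_ICMP_ECHO			8
#define DEMO_ICMP_TIMESTAMP		13
#define DEMO_ICMP_TIMESTAMPREPLY	14

/* invmap-style table: inverse type + 1, 0 == no inverse known. */
static const uint8_t demo_invmap[] = {
	[DEMO_ICMP_ECHO]		= DEMO_ICMP_ECHOREPLY + 1,
	[DEMO_ICMP_ECHOREPLY]		= DEMO_ICMP_ECHO + 1,
	[DEMO_ICMP_TIMESTAMP]		= DEMO_ICMP_TIMESTAMPREPLY + 1,
	[DEMO_ICMP_TIMESTAMPREPLY]	= DEMO_ICMP_TIMESTAMP + 1,
};

/* Returns true and writes the inverse type when one is defined. */
static bool demo_invert_type(uint8_t type, uint8_t *inv)
{
	if (type >= sizeof(demo_invmap) || !demo_invmap[type])
		return false;
	*inv = demo_invmap[type] - 1;
	return true;
}

int main(void)
{
	uint8_t inv;

	if (demo_invert_type(DEMO_ICMP_ECHO, &inv))
		printf("echo (8) inverts to type %u\n", inv);	/* 0: echo reply */
	if (!demo_invert_type(3, &inv))		/* destination unreachable */
		puts("type 3 has no inverse entry");
	return 0;
}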
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BKEY_BUF_H
#define _BCACHEFS_BKEY_BUF_H

#include "bcachefs.h"
#include "bkey.h"

struct bkey_buf {
	struct bkey_i	*k;
	u64		onstack[12];
};

static inline void bch2_bkey_buf_realloc(struct bkey_buf *s,
					 struct bch_fs *c, unsigned u64s)
{
	if (s->k == (void *) s->onstack &&
	    u64s > ARRAY_SIZE(s->onstack)) {
		s->k = mempool_alloc(&c->large_bkey_pool, GFP_NOFS);
		memcpy(s->k, s->onstack, sizeof(s->onstack));
	}
}

static inline void bch2_bkey_buf_reassemble(struct bkey_buf *s,
					    struct bch_fs *c,
					    struct bkey_s_c k)
{
	bch2_bkey_buf_realloc(s, c, k.k->u64s);
	bkey_reassemble(s->k, k);
}

static inline void bch2_bkey_buf_copy(struct bkey_buf *s,
				      struct bch_fs *c,
				      struct bkey_i *src)
{
	bch2_bkey_buf_realloc(s, c, src->k.u64s);
	bkey_copy(s->k, src);
}

static inline void bch2_bkey_buf_unpack(struct bkey_buf *s,
					struct bch_fs *c,
					struct btree *b,
					struct bkey_packed *src)
{
	bch2_bkey_buf_realloc(s, c, BKEY_U64s +
			      bkeyp_val_u64s(&b->format, src));
	bch2_bkey_unpack(b, s->k, src);
}

static inline void bch2_bkey_buf_init(struct bkey_buf *s)
{
	s->k = (void *) s->onstack;
}

static inline void bch2_bkey_buf_exit(struct bkey_buf *s, struct bch_fs *c)
{
	if (s->k != (void *) s->onstack)
		mempool_free(s->k, &c->large_bkey_pool);
	s->k = NULL;
}

#endif /* _BCACHEFS_BKEY_BUF_H */
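/*
 * bch2_bkey_buf_realloc() above is a small-buffer optimization: a key
 * starts in the caller's on-stack array and is promoted to a mempool
 * allocation only once it outgrows it, and bch2_bkey_buf_exit() frees only
 * the heap case.  A stand-alone sketch of the same idea (hypothetical
 * small_buf_* names, plain malloc instead of a mempool):
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ONSTACK_U64S 12

/* Grows from the embedded buffer to the heap only when needed. */
struct small_buf {
	unsigned long long *p;
	unsigned long long onstack[ONSTACK_U64S];
};

static void small_buf_init(struct small_buf *s)
{
	s->p = s->onstack;
}

static int small_buf_realloc(struct small_buf *s, size_t u64s)
{
	if (s->p == s->onstack && u64s > ONSTACK_U64S) {
		unsigned long long *n = malloc(u64s * sizeof(*n));

		if (!n)
			return -1;
		/* Preserve whatever was already staged on the stack. */
		memcpy(n, s->onstack, sizeof(s->onstack));
		s->p = n;
	}
	return 0;
}

static void small_buf_exit(struct small_buf *s)
{
	if (s->p != s->onstack)
		free(s->p);
	s->p = NULL;
}

int main(void)
{
	struct small_buf buf;

	small_buf_init(&buf);
	buf.onstack[0] = 42;
	if (small_buf_realloc(&buf, 64) == 0)	/* too big for the embedded part */
		printf("first u64 survived the move: %llu\n", buf.p[0]);
	small_buf_exit(&buf);
	return 0;
}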
// SPDX-License-Identifier: GPL-2.0
/*
 * IPv6 Address Label subsystem
 * for the IPv6 "Default" Source Address Selection
 *
 * Copyright (C)2007 USAGI/WIDE Project
 */
/*
 * Author:
 *	YOSHIFUJI Hideaki @ USAGI/WIDE Project <yoshfuji@linux-ipv6.org>
 */

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/in6.h>
#include <linux/slab.h>
#include <net/addrconf.h>
#include <linux/if_addrlabel.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

#if 0
#define ADDRLABEL(x...) printk(x)
#else
#define ADDRLABEL(x...)
do { ; } while (0) #endif /* * Policy Table */ struct ip6addrlbl_entry { struct in6_addr prefix; int prefixlen; int ifindex; int addrtype; u32 label; struct hlist_node list; struct rcu_head rcu; }; /* * Default policy table (RFC6724 + extensions) * * prefix addr_type label * ------------------------------------------------------------------------- * ::1/128 LOOPBACK 0 * ::/0 N/A 1 * 2002::/16 N/A 2 * ::/96 COMPATv4 3 * ::ffff:0:0/96 V4MAPPED 4 * fc00::/7 N/A 5 ULA (RFC 4193) * 2001::/32 N/A 6 Teredo (RFC 4380) * 2001:10::/28 N/A 7 ORCHID (RFC 4843) * fec0::/10 N/A 11 Site-local * (deprecated by RFC3879) * 3ffe::/16 N/A 12 6bone * * Note: 0xffffffff is used if we do not have any policies. * Note: Labels for ULA and 6to4 are different from labels listed in RFC6724. */ #define IPV6_ADDR_LABEL_DEFAULT 0xffffffffUL static const __net_initconst struct ip6addrlbl_init_table { const struct in6_addr *prefix; int prefixlen; u32 label; } ip6addrlbl_init_table[] = { { /* ::/0 */ .prefix = &in6addr_any, .label = 1, }, { /* fc00::/7 */ .prefix = &(struct in6_addr){ { { 0xfc } } } , .prefixlen = 7, .label = 5, }, { /* fec0::/10 */ .prefix = &(struct in6_addr){ { { 0xfe, 0xc0 } } }, .prefixlen = 10, .label = 11, }, { /* 2002::/16 */ .prefix = &(struct in6_addr){ { { 0x20, 0x02 } } }, .prefixlen = 16, .label = 2, }, { /* 3ffe::/16 */ .prefix = &(struct in6_addr){ { { 0x3f, 0xfe } } }, .prefixlen = 16, .label = 12, }, { /* 2001::/32 */ .prefix = &(struct in6_addr){ { { 0x20, 0x01 } } }, .prefixlen = 32, .label = 6, }, { /* 2001:10::/28 */ .prefix = &(struct in6_addr){ { { 0x20, 0x01, 0x00, 0x10 } } }, .prefixlen = 28, .label = 7, }, { /* ::ffff:0:0 */ .prefix = &(struct in6_addr){ { { [10] = 0xff, [11] = 0xff } } }, .prefixlen = 96, .label = 4, }, { /* ::/96 */ .prefix = &in6addr_any, .prefixlen = 96, .label = 3, }, { /* ::1/128 */ .prefix = &in6addr_loopback, .prefixlen = 128, .label = 0, } }; /* Find label */ static bool __ip6addrlbl_match(const struct ip6addrlbl_entry *p, const struct in6_addr *addr, int addrtype, int ifindex) { if (p->ifindex && p->ifindex != ifindex) return false; if (p->addrtype && p->addrtype != addrtype) return false; if (!ipv6_prefix_equal(addr, &p->prefix, p->prefixlen)) return false; return true; } static struct ip6addrlbl_entry *__ipv6_addr_label(struct net *net, const struct in6_addr *addr, int type, int ifindex) { struct ip6addrlbl_entry *p; hlist_for_each_entry_rcu(p, &net->ipv6.ip6addrlbl_table.head, list) { if (__ip6addrlbl_match(p, addr, type, ifindex)) return p; } return NULL; } u32 ipv6_addr_label(struct net *net, const struct in6_addr *addr, int type, int ifindex) { u32 label; struct ip6addrlbl_entry *p; type &= IPV6_ADDR_MAPPED | IPV6_ADDR_COMPATv4 | IPV6_ADDR_LOOPBACK; rcu_read_lock(); p = __ipv6_addr_label(net, addr, type, ifindex); label = p ? 
p->label : IPV6_ADDR_LABEL_DEFAULT; rcu_read_unlock(); ADDRLABEL(KERN_DEBUG "%s(addr=%pI6, type=%d, ifindex=%d) => %08x\n", __func__, addr, type, ifindex, label); return label; } /* allocate one entry */ static struct ip6addrlbl_entry *ip6addrlbl_alloc(const struct in6_addr *prefix, int prefixlen, int ifindex, u32 label) { struct ip6addrlbl_entry *newp; int addrtype; ADDRLABEL(KERN_DEBUG "%s(prefix=%pI6, prefixlen=%d, ifindex=%d, label=%u)\n", __func__, prefix, prefixlen, ifindex, (unsigned int)label); addrtype = ipv6_addr_type(prefix) & (IPV6_ADDR_MAPPED | IPV6_ADDR_COMPATv4 | IPV6_ADDR_LOOPBACK); switch (addrtype) { case IPV6_ADDR_MAPPED: if (prefixlen > 96) return ERR_PTR(-EINVAL); if (prefixlen < 96) addrtype = 0; break; case IPV6_ADDR_COMPATv4: if (prefixlen != 96) addrtype = 0; break; case IPV6_ADDR_LOOPBACK: if (prefixlen != 128) addrtype = 0; break; } newp = kmalloc(sizeof(*newp), GFP_KERNEL); if (!newp) return ERR_PTR(-ENOMEM); ipv6_addr_prefix(&newp->prefix, prefix, prefixlen); newp->prefixlen = prefixlen; newp->ifindex = ifindex; newp->addrtype = addrtype; newp->label = label; INIT_HLIST_NODE(&newp->list); return newp; } /* add a label */ static int __ip6addrlbl_add(struct net *net, struct ip6addrlbl_entry *newp, int replace) { struct ip6addrlbl_entry *last = NULL, *p = NULL; struct hlist_node *n; int ret = 0; ADDRLABEL(KERN_DEBUG "%s(newp=%p, replace=%d)\n", __func__, newp, replace); hlist_for_each_entry_safe(p, n, &net->ipv6.ip6addrlbl_table.head, list) { if (p->prefixlen == newp->prefixlen && p->ifindex == newp->ifindex && ipv6_addr_equal(&p->prefix, &newp->prefix)) { if (!replace) { ret = -EEXIST; goto out; } hlist_replace_rcu(&p->list, &newp->list); kfree_rcu(p, rcu); goto out; } else if ((p->prefixlen == newp->prefixlen && !p->ifindex) || (p->prefixlen < newp->prefixlen)) { hlist_add_before_rcu(&newp->list, &p->list); goto out; } last = p; } if (last) hlist_add_behind_rcu(&newp->list, &last->list); else hlist_add_head_rcu(&newp->list, &net->ipv6.ip6addrlbl_table.head); out: if (!ret) WRITE_ONCE(net->ipv6.ip6addrlbl_table.seq, net->ipv6.ip6addrlbl_table.seq + 1); return ret; } /* add a label */ static int ip6addrlbl_add(struct net *net, const struct in6_addr *prefix, int prefixlen, int ifindex, u32 label, int replace) { struct ip6addrlbl_entry *newp; int ret = 0; ADDRLABEL(KERN_DEBUG "%s(prefix=%pI6, prefixlen=%d, ifindex=%d, label=%u, replace=%d)\n", __func__, prefix, prefixlen, ifindex, (unsigned int)label, replace); newp = ip6addrlbl_alloc(prefix, prefixlen, ifindex, label); if (IS_ERR(newp)) return PTR_ERR(newp); spin_lock(&net->ipv6.ip6addrlbl_table.lock); ret = __ip6addrlbl_add(net, newp, replace); spin_unlock(&net->ipv6.ip6addrlbl_table.lock); if (ret) kfree(newp); return ret; } /* remove a label */ static int __ip6addrlbl_del(struct net *net, const struct in6_addr *prefix, int prefixlen, int ifindex) { struct ip6addrlbl_entry *p = NULL; struct hlist_node *n; int ret = -ESRCH; ADDRLABEL(KERN_DEBUG "%s(prefix=%pI6, prefixlen=%d, ifindex=%d)\n", __func__, prefix, prefixlen, ifindex); hlist_for_each_entry_safe(p, n, &net->ipv6.ip6addrlbl_table.head, list) { if (p->prefixlen == prefixlen && p->ifindex == ifindex && ipv6_addr_equal(&p->prefix, prefix)) { hlist_del_rcu(&p->list); kfree_rcu(p, rcu); ret = 0; break; } } return ret; } static int ip6addrlbl_del(struct net *net, const struct in6_addr *prefix, int prefixlen, int ifindex) { struct in6_addr prefix_buf; int ret; ADDRLABEL(KERN_DEBUG "%s(prefix=%pI6, prefixlen=%d, ifindex=%d)\n", __func__, prefix, prefixlen, 
ifindex); ipv6_addr_prefix(&prefix_buf, prefix, prefixlen); spin_lock(&net->ipv6.ip6addrlbl_table.lock); ret = __ip6addrlbl_del(net, &prefix_buf, prefixlen, ifindex); spin_unlock(&net->ipv6.ip6addrlbl_table.lock); return ret; } /* add default label */ static int __net_init ip6addrlbl_net_init(struct net *net) { struct ip6addrlbl_entry *p = NULL; struct hlist_node *n; int err; int i; ADDRLABEL(KERN_DEBUG "%s\n", __func__); spin_lock_init(&net->ipv6.ip6addrlbl_table.lock); INIT_HLIST_HEAD(&net->ipv6.ip6addrlbl_table.head); for (i = 0; i < ARRAY_SIZE(ip6addrlbl_init_table); i++) { err = ip6addrlbl_add(net, ip6addrlbl_init_table[i].prefix, ip6addrlbl_init_table[i].prefixlen, 0, ip6addrlbl_init_table[i].label, 0); if (err) goto err_ip6addrlbl_add; } return 0; err_ip6addrlbl_add: hlist_for_each_entry_safe(p, n, &net->ipv6.ip6addrlbl_table.head, list) { hlist_del_rcu(&p->list); kfree_rcu(p, rcu); } return err; } static void __net_exit ip6addrlbl_net_exit(struct net *net) { struct ip6addrlbl_entry *p = NULL; struct hlist_node *n; /* Remove all labels belonging to the exiting net */ spin_lock(&net->ipv6.ip6addrlbl_table.lock); hlist_for_each_entry_safe(p, n, &net->ipv6.ip6addrlbl_table.head, list) { hlist_del_rcu(&p->list); kfree_rcu(p, rcu); } spin_unlock(&net->ipv6.ip6addrlbl_table.lock); } static struct pernet_operations ipv6_addr_label_ops = { .init = ip6addrlbl_net_init, .exit = ip6addrlbl_net_exit, }; int __init ipv6_addr_label_init(void) { return register_pernet_subsys(&ipv6_addr_label_ops); } void ipv6_addr_label_cleanup(void) { unregister_pernet_subsys(&ipv6_addr_label_ops); } static const struct nla_policy ifal_policy[IFAL_MAX+1] = { [IFAL_ADDRESS] = { .len = sizeof(struct in6_addr), }, [IFAL_LABEL] = { .len = sizeof(u32), }, }; static bool addrlbl_ifindex_exists(struct net *net, int ifindex) { struct net_device *dev; rcu_read_lock(); dev = dev_get_by_index_rcu(net, ifindex); rcu_read_unlock(); return dev != NULL; } static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct ifaddrlblmsg *ifal; struct nlattr *tb[IFAL_MAX+1]; struct in6_addr *pfx; u32 label; int err = 0; err = nlmsg_parse_deprecated(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy, extack); if (err < 0) return err; ifal = nlmsg_data(nlh); if (ifal->ifal_family != AF_INET6 || ifal->ifal_prefixlen > 128) return -EINVAL; if (!tb[IFAL_ADDRESS]) return -EINVAL; pfx = nla_data(tb[IFAL_ADDRESS]); if (!tb[IFAL_LABEL]) return -EINVAL; label = nla_get_u32(tb[IFAL_LABEL]); if (label == IPV6_ADDR_LABEL_DEFAULT) return -EINVAL; switch (nlh->nlmsg_type) { case RTM_NEWADDRLABEL: if (ifal->ifal_index && !addrlbl_ifindex_exists(net, ifal->ifal_index)) return -EINVAL; err = ip6addrlbl_add(net, pfx, ifal->ifal_prefixlen, ifal->ifal_index, label, nlh->nlmsg_flags & NLM_F_REPLACE); break; case RTM_DELADDRLABEL: err = ip6addrlbl_del(net, pfx, ifal->ifal_prefixlen, ifal->ifal_index); break; default: err = -EOPNOTSUPP; } return err; } static void ip6addrlbl_putmsg(struct nlmsghdr *nlh, int prefixlen, int ifindex, u32 lseq) { struct ifaddrlblmsg *ifal = nlmsg_data(nlh); ifal->ifal_family = AF_INET6; ifal->__ifal_reserved = 0; ifal->ifal_prefixlen = prefixlen; ifal->ifal_flags = 0; ifal->ifal_index = ifindex; ifal->ifal_seq = lseq; }; static int ip6addrlbl_fill(struct sk_buff *skb, const struct ip6addrlbl_entry *p, u32 lseq, u32 portid, u32 seq, int event, unsigned int flags) { struct nlmsghdr *nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrlblmsg), 
flags); if (!nlh) return -EMSGSIZE; ip6addrlbl_putmsg(nlh, p->prefixlen, p->ifindex, lseq); if (nla_put_in6_addr(skb, IFAL_ADDRESS, &p->prefix) < 0 || nla_put_u32(skb, IFAL_LABEL, p->label) < 0) { nlmsg_cancel(skb, nlh); return -EMSGSIZE; } nlmsg_end(skb, nlh); return 0; } static int ip6addrlbl_valid_dump_req(const struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct ifaddrlblmsg *ifal; if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifal))) { NL_SET_ERR_MSG_MOD(extack, "Invalid header for address label dump request"); return -EINVAL; } ifal = nlmsg_data(nlh); if (ifal->__ifal_reserved || ifal->ifal_prefixlen || ifal->ifal_flags || ifal->ifal_index || ifal->ifal_seq) { NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for address label dump request"); return -EINVAL; } if (nlmsg_attrlen(nlh, sizeof(*ifal))) { NL_SET_ERR_MSG_MOD(extack, "Invalid data after header for address label dump request"); return -EINVAL; } return 0; } static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb) { const struct nlmsghdr *nlh = cb->nlh; struct net *net = sock_net(skb->sk); struct ip6addrlbl_entry *p; int idx = 0, s_idx = cb->args[0]; int err = 0; u32 lseq; if (cb->strict_check) { err = ip6addrlbl_valid_dump_req(nlh, cb->extack); if (err < 0) return err; } rcu_read_lock(); lseq = READ_ONCE(net->ipv6.ip6addrlbl_table.seq); hlist_for_each_entry_rcu(p, &net->ipv6.ip6addrlbl_table.head, list) { if (idx >= s_idx) { err = ip6addrlbl_fill(skb, p, lseq, NETLINK_CB(cb->skb).portid, nlh->nlmsg_seq, RTM_NEWADDRLABEL, NLM_F_MULTI); if (err < 0) break; } idx++; } rcu_read_unlock(); cb->args[0] = idx; return err; } static inline int ip6addrlbl_msgsize(void) { return NLMSG_ALIGN(sizeof(struct ifaddrlblmsg)) + nla_total_size(16) /* IFAL_ADDRESS */ + nla_total_size(4); /* IFAL_LABEL */ } static int ip6addrlbl_valid_get_req(struct sk_buff *skb, const struct nlmsghdr *nlh, struct nlattr **tb, struct netlink_ext_ack *extack) { struct ifaddrlblmsg *ifal; int i, err; if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifal))) { NL_SET_ERR_MSG_MOD(extack, "Invalid header for addrlabel get request"); return -EINVAL; } if (!netlink_strict_get_check(skb)) return nlmsg_parse_deprecated(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy, extack); ifal = nlmsg_data(nlh); if (ifal->__ifal_reserved || ifal->ifal_flags || ifal->ifal_seq) { NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for addrlabel get request"); return -EINVAL; } err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifal), tb, IFAL_MAX, ifal_policy, extack); if (err) return err; for (i = 0; i <= IFAL_MAX; i++) { if (!tb[i]) continue; switch (i) { case IFAL_ADDRESS: break; default: NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in addrlabel get request"); return -EINVAL; } } return 0; } static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct net *net = sock_net(in_skb->sk); struct ifaddrlblmsg *ifal; struct nlattr *tb[IFAL_MAX+1]; struct in6_addr *addr; u32 lseq; int err = 0; struct ip6addrlbl_entry *p; struct sk_buff *skb; err = ip6addrlbl_valid_get_req(in_skb, nlh, tb, extack); if (err < 0) return err; ifal = nlmsg_data(nlh); if (ifal->ifal_family != AF_INET6 || ifal->ifal_prefixlen != 128) return -EINVAL; if (ifal->ifal_index && !addrlbl_ifindex_exists(net, ifal->ifal_index)) return -EINVAL; if (!tb[IFAL_ADDRESS]) return -EINVAL; addr = nla_data(tb[IFAL_ADDRESS]); skb = nlmsg_new(ip6addrlbl_msgsize(), GFP_KERNEL); if (!skb) return -ENOBUFS; err = -ESRCH; rcu_read_lock(); p = 
__ipv6_addr_label(net, addr, ipv6_addr_type(addr), ifal->ifal_index); lseq = READ_ONCE(net->ipv6.ip6addrlbl_table.seq); if (p) err = ip6addrlbl_fill(skb, p, lseq, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, RTM_NEWADDRLABEL, 0); rcu_read_unlock(); if (err < 0) { WARN_ON(err == -EMSGSIZE); kfree_skb(skb); } else { err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid); } return err; } static const struct rtnl_msg_handler ipv6_adddr_label_rtnl_msg_handlers[] __initconst_or_module = { {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_NEWADDRLABEL, .doit = ip6addrlbl_newdel, .flags = RTNL_FLAG_DOIT_UNLOCKED}, {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_DELADDRLABEL, .doit = ip6addrlbl_newdel, .flags = RTNL_FLAG_DOIT_UNLOCKED}, {.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETADDRLABEL, .doit = ip6addrlbl_get, .dumpit = ip6addrlbl_dump, .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED}, }; int __init ipv6_addr_label_rtnl_register(void) { return rtnl_register_many(ipv6_adddr_label_rtnl_msg_handlers); }
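/*
 * Standalone userspace sketch, not part of net/ipv6/addrlabel.c: it only
 * illustrates the bitwise prefix test that __ip6addrlbl_match() delegates to
 * ipv6_prefix_equal() when picking a label from the policy table above.  The
 * helper name prefix_equal() and the sample addresses are illustrative
 * assumptions.
 */
#include <arpa/inet.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool prefix_equal(const struct in6_addr *addr,
                         const struct in6_addr *prefix, int prefixlen)
{
        int bytes = prefixlen / 8, bits = prefixlen % 8;

        /* Compare whole bytes first, then the remaining high-order bits. */
        if (memcmp(addr, prefix, bytes))
                return false;
        if (bits) {
                unsigned char mask = 0xff << (8 - bits);

                if ((addr->s6_addr[bytes] ^ prefix->s6_addr[bytes]) & mask)
                        return false;
        }
        return true;
}

int main(void)
{
        struct in6_addr addr, ula;

        inet_pton(AF_INET6, "fd12:3456::1", &addr);     /* a ULA address */
        inet_pton(AF_INET6, "fc00::", &ula);

        /* fd12:3456::1 falls under fc00::/7, the label-5 entry above. */
        printf("matches fc00::/7: %d\n", prefix_equal(&addr, &ula, 7));
        return 0;
}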
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/minix/dir.c
 *
 *  Copyright (C) 1991, 1992 Linus Torvalds
 *
 *  minix directory handling functions
 *
 *  Updated to filesystem version 3 by Daniel Aragones
 */

#include "minix.h"
#include <linux/buffer_head.h>
#include <linux/highmem.h>
#include <linux/swap.h>

typedef struct minix_dir_entry minix_dirent;
typedef struct minix3_dir_entry minix3_dirent;

static int minix_readdir(struct file *, struct dir_context *);

const struct file_operations minix_dir_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
        .iterate_shared = minix_readdir,
        .fsync          = generic_file_fsync,
};

/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
*/ static unsigned minix_last_byte(struct inode *inode, unsigned long page_nr) { unsigned last_byte = PAGE_SIZE; if (page_nr == (inode->i_size >> PAGE_SHIFT)) last_byte = inode->i_size & (PAGE_SIZE - 1); return last_byte; } static void dir_commit_chunk(struct folio *folio, loff_t pos, unsigned len) { struct address_space *mapping = folio->mapping; struct inode *dir = mapping->host; block_write_end(NULL, mapping, pos, len, len, folio, NULL); if (pos+len > dir->i_size) { i_size_write(dir, pos+len); mark_inode_dirty(dir); } folio_unlock(folio); } static int minix_handle_dirsync(struct inode *dir) { int err; err = filemap_write_and_wait(dir->i_mapping); if (!err) err = sync_inode_metadata(dir, 1); return err; } static void *dir_get_folio(struct inode *dir, unsigned long n, struct folio **foliop) { struct folio *folio = read_mapping_folio(dir->i_mapping, n, NULL); if (IS_ERR(folio)) return ERR_CAST(folio); *foliop = folio; return kmap_local_folio(folio, 0); } static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi) { return (void*)((char*)de + sbi->s_dirsize); } static int minix_readdir(struct file *file, struct dir_context *ctx) { struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; struct minix_sb_info *sbi = minix_sb(sb); unsigned chunk_size = sbi->s_dirsize; unsigned long npages = dir_pages(inode); unsigned long pos = ctx->pos; unsigned offset; unsigned long n; ctx->pos = pos = ALIGN(pos, chunk_size); if (pos >= inode->i_size) return 0; offset = pos & ~PAGE_MASK; n = pos >> PAGE_SHIFT; for ( ; n < npages; n++, offset = 0) { char *p, *kaddr, *limit; struct folio *folio; kaddr = dir_get_folio(inode, n, &folio); if (IS_ERR(kaddr)) continue; p = kaddr+offset; limit = kaddr + minix_last_byte(inode, n) - chunk_size; for ( ; p <= limit; p = minix_next_entry(p, sbi)) { const char *name; __u32 inumber; if (sbi->s_version == MINIX_V3) { minix3_dirent *de3 = (minix3_dirent *)p; name = de3->name; inumber = de3->inode; } else { minix_dirent *de = (minix_dirent *)p; name = de->name; inumber = de->inode; } if (inumber) { unsigned l = strnlen(name, sbi->s_namelen); if (!dir_emit(ctx, name, l, inumber, DT_UNKNOWN)) { folio_release_kmap(folio, p); return 0; } } ctx->pos += chunk_size; } folio_release_kmap(folio, kaddr); } return 0; } static inline int namecompare(int len, int maxlen, const char * name, const char * buffer) { if (len < maxlen && buffer[len]) return 0; return !memcmp(name, buffer, len); } /* * minix_find_entry() * * finds an entry in the specified directory with the wanted name. * It does NOT read the inode of the * entry - you'll have to do that yourself if you want to. * * On Success folio_release_kmap() should be called on *foliop. 
*/ minix_dirent *minix_find_entry(struct dentry *dentry, struct folio **foliop) { const char * name = dentry->d_name.name; int namelen = dentry->d_name.len; struct inode * dir = d_inode(dentry->d_parent); struct super_block * sb = dir->i_sb; struct minix_sb_info * sbi = minix_sb(sb); unsigned long n; unsigned long npages = dir_pages(dir); char *p; char *namx; __u32 inumber; for (n = 0; n < npages; n++) { char *kaddr, *limit; kaddr = dir_get_folio(dir, n, foliop); if (IS_ERR(kaddr)) continue; limit = kaddr + minix_last_byte(dir, n) - sbi->s_dirsize; for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) { if (sbi->s_version == MINIX_V3) { minix3_dirent *de3 = (minix3_dirent *)p; namx = de3->name; inumber = de3->inode; } else { minix_dirent *de = (minix_dirent *)p; namx = de->name; inumber = de->inode; } if (!inumber) continue; if (namecompare(namelen, sbi->s_namelen, name, namx)) goto found; } folio_release_kmap(*foliop, kaddr); } return NULL; found: return (minix_dirent *)p; } int minix_add_link(struct dentry *dentry, struct inode *inode) { struct inode *dir = d_inode(dentry->d_parent); const char * name = dentry->d_name.name; int namelen = dentry->d_name.len; struct super_block * sb = dir->i_sb; struct minix_sb_info * sbi = minix_sb(sb); struct folio *folio = NULL; unsigned long npages = dir_pages(dir); unsigned long n; char *kaddr, *p; minix_dirent *de; minix3_dirent *de3; loff_t pos; int err; char *namx = NULL; __u32 inumber; /* * We take care of directory expansion in the same loop * This code plays outside i_size, so it locks the page * to protect that region. */ for (n = 0; n <= npages; n++) { char *limit, *dir_end; kaddr = dir_get_folio(dir, n, &folio); if (IS_ERR(kaddr)) return PTR_ERR(kaddr); folio_lock(folio); dir_end = kaddr + minix_last_byte(dir, n); limit = kaddr + PAGE_SIZE - sbi->s_dirsize; for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) { de = (minix_dirent *)p; de3 = (minix3_dirent *)p; if (sbi->s_version == MINIX_V3) { namx = de3->name; inumber = de3->inode; } else { namx = de->name; inumber = de->inode; } if (p == dir_end) { /* We hit i_size */ if (sbi->s_version == MINIX_V3) de3->inode = 0; else de->inode = 0; goto got_it; } if (!inumber) goto got_it; err = -EEXIST; if (namecompare(namelen, sbi->s_namelen, name, namx)) goto out_unlock; } folio_unlock(folio); folio_release_kmap(folio, kaddr); } BUG(); return -EINVAL; got_it: pos = folio_pos(folio) + offset_in_folio(folio, p); err = minix_prepare_chunk(folio, pos, sbi->s_dirsize); if (err) goto out_unlock; memcpy (namx, name, namelen); if (sbi->s_version == MINIX_V3) { memset (namx + namelen, 0, sbi->s_dirsize - namelen - 4); de3->inode = inode->i_ino; } else { memset (namx + namelen, 0, sbi->s_dirsize - namelen - 2); de->inode = inode->i_ino; } dir_commit_chunk(folio, pos, sbi->s_dirsize); inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); mark_inode_dirty(dir); err = minix_handle_dirsync(dir); out_put: folio_release_kmap(folio, kaddr); return err; out_unlock: folio_unlock(folio); goto out_put; } int minix_delete_entry(struct minix_dir_entry *de, struct folio *folio) { struct inode *inode = folio->mapping->host; loff_t pos = folio_pos(folio) + offset_in_folio(folio, de); struct minix_sb_info *sbi = minix_sb(inode->i_sb); unsigned len = sbi->s_dirsize; int err; folio_lock(folio); err = minix_prepare_chunk(folio, pos, len); if (err) { folio_unlock(folio); return err; } if (sbi->s_version == MINIX_V3) ((minix3_dirent *)de)->inode = 0; else de->inode = 0; dir_commit_chunk(folio, pos, len); 
inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); mark_inode_dirty(inode); return minix_handle_dirsync(inode); } int minix_make_empty(struct inode *inode, struct inode *dir) { struct folio *folio = filemap_grab_folio(inode->i_mapping, 0); struct minix_sb_info *sbi = minix_sb(inode->i_sb); char *kaddr; int err; if (IS_ERR(folio)) return PTR_ERR(folio); err = minix_prepare_chunk(folio, 0, 2 * sbi->s_dirsize); if (err) { folio_unlock(folio); goto fail; } kaddr = kmap_local_folio(folio, 0); memset(kaddr, 0, folio_size(folio)); if (sbi->s_version == MINIX_V3) { minix3_dirent *de3 = (minix3_dirent *)kaddr; de3->inode = inode->i_ino; strcpy(de3->name, "."); de3 = minix_next_entry(de3, sbi); de3->inode = dir->i_ino; strcpy(de3->name, ".."); } else { minix_dirent *de = (minix_dirent *)kaddr; de->inode = inode->i_ino; strcpy(de->name, "."); de = minix_next_entry(de, sbi); de->inode = dir->i_ino; strcpy(de->name, ".."); } kunmap_local(kaddr); dir_commit_chunk(folio, 0, 2 * sbi->s_dirsize); err = minix_handle_dirsync(inode); fail: folio_put(folio); return err; } /* * routine to check that the specified directory is empty (for rmdir) */ int minix_empty_dir(struct inode * inode) { struct folio *folio = NULL; unsigned long i, npages = dir_pages(inode); struct minix_sb_info *sbi = minix_sb(inode->i_sb); char *name, *kaddr; __u32 inumber; for (i = 0; i < npages; i++) { char *p, *limit; kaddr = dir_get_folio(inode, i, &folio); if (IS_ERR(kaddr)) continue; limit = kaddr + minix_last_byte(inode, i) - sbi->s_dirsize; for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) { if (sbi->s_version == MINIX_V3) { minix3_dirent *de3 = (minix3_dirent *)p; name = de3->name; inumber = de3->inode; } else { minix_dirent *de = (minix_dirent *)p; name = de->name; inumber = de->inode; } if (inumber != 0) { /* check for . and .. */ if (name[0] != '.') goto not_empty; if (!name[1]) { if (inumber != inode->i_ino) goto not_empty; } else if (name[1] != '.') goto not_empty; else if (name[2]) goto not_empty; } } folio_release_kmap(folio, kaddr); } return 1; not_empty: folio_release_kmap(folio, kaddr); return 0; } /* Releases the page */ int minix_set_link(struct minix_dir_entry *de, struct folio *folio, struct inode *inode) { struct inode *dir = folio->mapping->host; struct minix_sb_info *sbi = minix_sb(dir->i_sb); loff_t pos = folio_pos(folio) + offset_in_folio(folio, de); int err; folio_lock(folio); err = minix_prepare_chunk(folio, pos, sbi->s_dirsize); if (err) { folio_unlock(folio); return err; } if (sbi->s_version == MINIX_V3) ((minix3_dirent *)de)->inode = inode->i_ino; else de->inode = inode->i_ino; dir_commit_chunk(folio, pos, sbi->s_dirsize); inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); mark_inode_dirty(dir); return minix_handle_dirsync(dir); } struct minix_dir_entry *minix_dotdot(struct inode *dir, struct folio **foliop) { struct minix_sb_info *sbi = minix_sb(dir->i_sb); struct minix_dir_entry *de = dir_get_folio(dir, 0, foliop); if (!IS_ERR(de)) return minix_next_entry(de, sbi); return NULL; } ino_t minix_inode_by_name(struct dentry *dentry) { struct folio *folio; struct minix_dir_entry *de = minix_find_entry(dentry, &folio); ino_t res = 0; if (de) { struct inode *inode = folio->mapping->host; struct minix_sb_info *sbi = minix_sb(inode->i_sb); if (sbi->s_version == MINIX_V3) res = ((minix3_dirent *) de)->inode; else res = de->inode; folio_release_kmap(folio, de); } return res; }
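/*
 * Standalone userspace sketch, not part of fs/minix/dir.c: it restates the
 * namecompare() rule used by minix_find_entry() and minix_add_link() above.
 * A candidate of length len matches a fixed-width name slot only when the
 * slot is NUL-terminated right after it (or the name fills s_namelen
 * exactly) and the first len bytes agree.  The 14-byte slot is an
 * illustrative MINIX V1/V2-style s_namelen.
 */
#include <stdio.h>
#include <string.h>

static int namecompare(int len, int maxlen, const char *name,
                       const char *buffer)
{
        if (len < maxlen && buffer[len])
                return 0;       /* slot continues past the candidate */
        return !memcmp(name, buffer, len);
}

int main(void)
{
        const char slot[14] = "file";   /* "file" padded with NUL bytes */

        printf("%d\n", namecompare(4, 14, "file", slot));      /* 1: match */
        printf("%d\n", namecompare(3, 14, "fil", slot));       /* 0: slot longer */
        return 0;
}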
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KDEV_T_H
#define _LINUX_KDEV_T_H

#include <uapi/linux/kdev_t.h>

#define MINORBITS       20
#define MINORMASK       ((1U << MINORBITS) - 1)

#define MAJOR(dev)      ((unsigned int) ((dev) >> MINORBITS))
#define MINOR(dev)      ((unsigned int) ((dev) & MINORMASK))
#define MKDEV(ma,mi)    (((ma) << MINORBITS) | (mi))

#define print_dev_t(buffer, dev)                                        \
        sprintf((buffer), "%u:%u\n", MAJOR(dev), MINOR(dev))

#define format_dev_t(buffer, dev)                                       \
        ({                                                              \
                sprintf(buffer, "%u:%u", MAJOR(dev), MINOR(dev));       \
                buffer;                                                 \
        })

/* acceptable for old filesystems */
static __always_inline bool old_valid_dev(dev_t dev)
{
        return MAJOR(dev) < 256 && MINOR(dev) < 256;
}

static __always_inline u16 old_encode_dev(dev_t dev)
{
        return (MAJOR(dev) << 8) | MINOR(dev);
}

static __always_inline dev_t old_decode_dev(u16 val)
{
        return MKDEV((val >> 8) & 255, val & 255);
}

static __always_inline u32 new_encode_dev(dev_t dev)
{
        unsigned major = MAJOR(dev);
        unsigned minor = MINOR(dev);
        return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
}

static __always_inline dev_t new_decode_dev(u32 dev)
{
        unsigned major = (dev & 0xfff00) >> 8;
        unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);
        return MKDEV(major, minor);
}

static __always_inline u64 huge_encode_dev(dev_t dev)
{
        return new_encode_dev(dev);
}

static __always_inline dev_t huge_decode_dev(u64 dev)
{
        return new_decode_dev(dev);
}

static __always_inline int sysv_valid_dev(dev_t dev)
{
        return MAJOR(dev) < (1<<14) && MINOR(dev) < (1<<18);
}

static __always_inline u32 sysv_encode_dev(dev_t dev)
{
        return MINOR(dev) | (MAJOR(dev) << 18);
}

static __always_inline unsigned sysv_major(u32 dev)
{
        return (dev >> 18) & 0x3fff;
}

static __always_inline unsigned sysv_minor(u32 dev)
{
        return dev & 0x3ffff;
}

#endif
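/*
 * Standalone userspace sketch, not kernel code: it repeats the 12-bit major /
 * 20-bit minor layout from the header above and shows that new_encode_dev()
 * and new_decode_dev() round-trip.  The SK_ macro names and sample numbers
 * are illustrative only.
 */
#include <stdio.h>

#define SK_MINORBITS    20
#define SK_MINORMASK    ((1U << SK_MINORBITS) - 1)
#define SK_MAJOR(dev)   ((unsigned int)((dev) >> SK_MINORBITS))
#define SK_MINOR(dev)   ((unsigned int)((dev) & SK_MINORMASK))
#define SK_MKDEV(ma, mi) (((ma) << SK_MINORBITS) | (mi))

static unsigned int encode(unsigned int dev)
{
        unsigned int major = SK_MAJOR(dev), minor = SK_MINOR(dev);

        /* Low 8 minor bits stay put, major in bits 8-19, rest of minor above. */
        return (minor & 0xff) | (major << 8) | ((minor & ~0xffu) << 12);
}

static unsigned int decode(unsigned int dev)
{
        unsigned int major = (dev & 0xfff00) >> 8;
        unsigned int minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);

        return SK_MKDEV(major, minor);
}

int main(void)
{
        unsigned int dev = SK_MKDEV(259u, 0x12345u);
        unsigned int enc = encode(dev);

        printf("dev=%#x enc=%#x dec=%#x roundtrip=%d\n",
               dev, enc, decode(enc), decode(enc) == dev);
        return 0;
}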
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OSS compatible sequencer driver
 *
 * registration of device and proc
 *
 * Copyright (C) 1998,99 Takashi Iwai <tiwai@suse.de>
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/compat.h>
#include <sound/core.h>
#include <sound/minors.h>
#include <sound/initval.h>
#include "seq_oss_device.h"
#include "seq_oss_synth.h"

/*
 * module option
 */
MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
MODULE_DESCRIPTION("OSS-compatible sequencer module");
MODULE_LICENSE("GPL");

/* Takashi says this is really only for sound-service-0-, but this is OK.
*/ MODULE_ALIAS_SNDRV_MINOR(SNDRV_MINOR_OSS_SEQUENCER); MODULE_ALIAS_SNDRV_MINOR(SNDRV_MINOR_OSS_MUSIC); /* * prototypes */ static int register_device(void); static void unregister_device(void); #ifdef CONFIG_SND_PROC_FS static int register_proc(void); static void unregister_proc(void); #else static inline int register_proc(void) { return 0; } static inline void unregister_proc(void) {} #endif static int odev_open(struct inode *inode, struct file *file); static int odev_release(struct inode *inode, struct file *file); static ssize_t odev_read(struct file *file, char __user *buf, size_t count, loff_t *offset); static ssize_t odev_write(struct file *file, const char __user *buf, size_t count, loff_t *offset); static long odev_ioctl(struct file *file, unsigned int cmd, unsigned long arg); static __poll_t odev_poll(struct file *file, poll_table * wait); /* * module interface */ static struct snd_seq_driver seq_oss_synth_driver = { .driver = { .name = KBUILD_MODNAME, .probe = snd_seq_oss_synth_probe, .remove = snd_seq_oss_synth_remove, }, .id = SNDRV_SEQ_DEV_ID_OSS, .argsize = sizeof(struct snd_seq_oss_reg), }; static int __init alsa_seq_oss_init(void) { int rc; rc = register_device(); if (rc < 0) goto error; rc = register_proc(); if (rc < 0) { unregister_device(); goto error; } rc = snd_seq_oss_create_client(); if (rc < 0) { unregister_proc(); unregister_device(); goto error; } rc = snd_seq_driver_register(&seq_oss_synth_driver); if (rc < 0) { snd_seq_oss_delete_client(); unregister_proc(); unregister_device(); goto error; } /* success */ snd_seq_oss_synth_init(); error: return rc; } static void __exit alsa_seq_oss_exit(void) { snd_seq_driver_unregister(&seq_oss_synth_driver); snd_seq_oss_delete_client(); unregister_proc(); unregister_device(); } module_init(alsa_seq_oss_init) module_exit(alsa_seq_oss_exit) /* * ALSA minor device interface */ static DEFINE_MUTEX(register_mutex); static int odev_open(struct inode *inode, struct file *file) { int level, rc; if (iminor(inode) == SNDRV_MINOR_OSS_MUSIC) level = SNDRV_SEQ_OSS_MODE_MUSIC; else level = SNDRV_SEQ_OSS_MODE_SYNTH; mutex_lock(&register_mutex); rc = snd_seq_oss_open(file, level); mutex_unlock(&register_mutex); return rc; } static int odev_release(struct inode *inode, struct file *file) { struct seq_oss_devinfo *dp; dp = file->private_data; if (!dp) return 0; mutex_lock(&register_mutex); snd_seq_oss_release(dp); mutex_unlock(&register_mutex); return 0; } static ssize_t odev_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { struct seq_oss_devinfo *dp; dp = file->private_data; if (snd_BUG_ON(!dp)) return -ENXIO; return snd_seq_oss_read(dp, buf, count); } static ssize_t odev_write(struct file *file, const char __user *buf, size_t count, loff_t *offset) { struct seq_oss_devinfo *dp; dp = file->private_data; if (snd_BUG_ON(!dp)) return -ENXIO; return snd_seq_oss_write(dp, buf, count, file); } static long odev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct seq_oss_devinfo *dp; long rc; dp = file->private_data; if (snd_BUG_ON(!dp)) return -ENXIO; if (cmd != SNDCTL_SEQ_SYNC && mutex_lock_interruptible(&register_mutex)) return -ERESTARTSYS; rc = snd_seq_oss_ioctl(dp, cmd, arg); if (cmd != SNDCTL_SEQ_SYNC) mutex_unlock(&register_mutex); return rc; } #ifdef CONFIG_COMPAT static long odev_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) { return odev_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); } #else #define odev_ioctl_compat NULL #endif static __poll_t odev_poll(struct file 
*file, poll_table * wait) { struct seq_oss_devinfo *dp; dp = file->private_data; if (snd_BUG_ON(!dp)) return EPOLLERR; return snd_seq_oss_poll(dp, file, wait); } /* * registration of sequencer minor device */ static const struct file_operations seq_oss_f_ops = { .owner = THIS_MODULE, .read = odev_read, .write = odev_write, .open = odev_open, .release = odev_release, .poll = odev_poll, .unlocked_ioctl = odev_ioctl, .compat_ioctl = odev_ioctl_compat, .llseek = noop_llseek, }; static int __init register_device(void) { int rc; mutex_lock(&register_mutex); rc = snd_register_oss_device(SNDRV_OSS_DEVICE_TYPE_SEQUENCER, NULL, 0, &seq_oss_f_ops, NULL); if (rc < 0) { pr_err("ALSA: seq_oss: can't register device seq\n"); mutex_unlock(&register_mutex); return rc; } rc = snd_register_oss_device(SNDRV_OSS_DEVICE_TYPE_MUSIC, NULL, 0, &seq_oss_f_ops, NULL); if (rc < 0) { pr_err("ALSA: seq_oss: can't register device music\n"); snd_unregister_oss_device(SNDRV_OSS_DEVICE_TYPE_SEQUENCER, NULL, 0); mutex_unlock(&register_mutex); return rc; } mutex_unlock(&register_mutex); return 0; } static void unregister_device(void) { mutex_lock(&register_mutex); if (snd_unregister_oss_device(SNDRV_OSS_DEVICE_TYPE_MUSIC, NULL, 0) < 0) pr_err("ALSA: seq_oss: error unregister device music\n"); if (snd_unregister_oss_device(SNDRV_OSS_DEVICE_TYPE_SEQUENCER, NULL, 0) < 0) pr_err("ALSA: seq_oss: error unregister device seq\n"); mutex_unlock(&register_mutex); } /* * /proc interface */ #ifdef CONFIG_SND_PROC_FS static struct snd_info_entry *info_entry; static void info_read(struct snd_info_entry *entry, struct snd_info_buffer *buf) { mutex_lock(&register_mutex); snd_iprintf(buf, "OSS sequencer emulation version %s\n", SNDRV_SEQ_OSS_VERSION_STR); snd_seq_oss_system_info_read(buf); snd_seq_oss_synth_info_read(buf); snd_seq_oss_midi_info_read(buf); mutex_unlock(&register_mutex); } static int __init register_proc(void) { struct snd_info_entry *entry; entry = snd_info_create_module_entry(THIS_MODULE, SNDRV_SEQ_OSS_PROCNAME, snd_seq_root); if (entry == NULL) return -ENOMEM; entry->content = SNDRV_INFO_CONTENT_TEXT; entry->private_data = NULL; entry->c.text.read = info_read; if (snd_info_register(entry) < 0) { snd_info_free_entry(entry); return -ENOMEM; } info_entry = entry; return 0; } static void unregister_proc(void) { snd_info_free_entry(info_entry); info_entry = NULL; } #endif /* CONFIG_SND_PROC_FS */
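/*
 * Standalone userspace sketch, not part of sound/core/seq/oss/seq_oss.c: it
 * only shows how the two OSS minors registered above are typically reached
 * from user space.  The device node names /dev/sequencer and /dev/music are
 * the conventional OSS paths and may differ depending on how the OSS
 * emulation is set up on a given system.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/sequencer", O_WRONLY);      /* synth-mode minor */

        if (fd < 0) {
                perror("open /dev/sequencer");
                return 1;
        }
        /* Classic SNDCTL_SEQ_* ioctls on this fd end up in odev_ioctl(). */
        close(fd);
        return 0;
}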
859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 // SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2011 Intel Corporation. All rights reserved. */ #define pr_fmt(fmt) "llcp: %s: " fmt, __func__ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/nfc.h> #include <linux/sched/signal.h> #include "nfc.h" #include "llcp.h" static int sock_wait_state(struct sock *sk, int state, unsigned long timeo) { DECLARE_WAITQUEUE(wait, current); int err = 0; pr_debug("sk %p", sk); add_wait_queue(sk_sleep(sk), &wait); set_current_state(TASK_INTERRUPTIBLE); while (sk->sk_state != state) { if (!timeo) { err = -EINPROGRESS; break; } if (signal_pending(current)) { err = sock_intr_errno(timeo); break; } release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); set_current_state(TASK_INTERRUPTIBLE); err = sock_error(sk); if (err) break; } __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); return err; } static struct proto llcp_sock_proto = { .name = "NFC_LLCP", .owner = THIS_MODULE, .obj_size = sizeof(struct nfc_llcp_sock), }; static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) { struct sock *sk = sock->sk; struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); struct nfc_llcp_local *local; struct nfc_dev *dev; struct sockaddr_nfc_llcp llcp_addr; int len, ret = 0; if (!addr || alen < offsetofend(struct sockaddr, sa_family) || addr->sa_family != AF_NFC) return -EINVAL; pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family); memset(&llcp_addr, 0, sizeof(llcp_addr)); len = min_t(unsigned int, sizeof(llcp_addr), alen); memcpy(&llcp_addr, addr, len); /* This is going to be a listening socket, dsap must be 0 */ if (llcp_addr.dsap != 0) return -EINVAL; lock_sock(sk); if (sk->sk_state != LLCP_CLOSED) { ret = -EBADFD; goto error; } dev = nfc_get_device(llcp_addr.dev_idx); if (dev == NULL) { ret = -ENODEV; goto error; } local = nfc_llcp_find_local(dev); if (local == NULL) { ret = -ENODEV; goto put_dev; } llcp_sock->dev = dev; llcp_sock->local = local; llcp_sock->nfc_protocol = llcp_addr.nfc_protocol; llcp_sock->service_name_len = min_t(unsigned int, llcp_addr.service_name_len, NFC_LLCP_MAX_SERVICE_NAME); llcp_sock->service_name = kmemdup(llcp_addr.service_name, llcp_sock->service_name_len, GFP_KERNEL); if (!llcp_sock->service_name) { ret = -ENOMEM; goto sock_llcp_put_local; } llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock); if (llcp_sock->ssap == LLCP_SAP_MAX) { ret = -EADDRINUSE; goto free_service_name; } llcp_sock->reserved_ssap = llcp_sock->ssap; nfc_llcp_sock_link(&local->sockets, sk); pr_debug("Socket bound to SAP %d\n", llcp_sock->ssap); sk->sk_state = 
LLCP_BOUND; nfc_put_device(dev); release_sock(sk); return 0; free_service_name: kfree(llcp_sock->service_name); llcp_sock->service_name = NULL; sock_llcp_put_local: nfc_llcp_local_put(llcp_sock->local); llcp_sock->local = NULL; llcp_sock->dev = NULL; put_dev: nfc_put_device(dev); error: release_sock(sk); return ret; } static int llcp_raw_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) { struct sock *sk = sock->sk; struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); struct nfc_llcp_local *local; struct nfc_dev *dev; struct sockaddr_nfc_llcp llcp_addr; int len, ret = 0; if (!addr || alen < offsetofend(struct sockaddr, sa_family) || addr->sa_family != AF_NFC) return -EINVAL; pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family); memset(&llcp_addr, 0, sizeof(llcp_addr)); len = min_t(unsigned int, sizeof(llcp_addr), alen); memcpy(&llcp_addr, addr, len); lock_sock(sk); if (sk->sk_state != LLCP_CLOSED) { ret = -EBADFD; goto error; } dev = nfc_get_device(llcp_addr.dev_idx); if (dev == NULL) { ret = -ENODEV; goto error; } local = nfc_llcp_find_local(dev); if (local == NULL) { ret = -ENODEV; goto put_dev; } llcp_sock->dev = dev; llcp_sock->local = local; llcp_sock->nfc_protocol = llcp_addr.nfc_protocol; nfc_llcp_sock_link(&local->raw_sockets, sk); sk->sk_state = LLCP_BOUND; put_dev: nfc_put_device(dev); error: release_sock(sk); return ret; } static int llcp_sock_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; int ret = 0; pr_debug("sk %p backlog %d\n", sk, backlog); lock_sock(sk); if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM) || sk->sk_state != LLCP_BOUND) { ret = -EBADFD; goto error; } sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; pr_debug("Socket listening\n"); sk->sk_state = LLCP_LISTEN; error: release_sock(sk); return ret; } static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); u32 opt; int err = 0; pr_debug("%p optname %d\n", sk, optname); if (level != SOL_NFC) return -ENOPROTOOPT; lock_sock(sk); switch (optname) { case NFC_LLCP_RW: if (sk->sk_state == LLCP_CONNECTED || sk->sk_state == LLCP_BOUND || sk->sk_state == LLCP_LISTEN) { err = -EINVAL; break; } err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen); if (err) break; if (opt > LLCP_MAX_RW) { err = -EINVAL; break; } llcp_sock->rw = (u8) opt; break; case NFC_LLCP_MIUX: if (sk->sk_state == LLCP_CONNECTED || sk->sk_state == LLCP_BOUND || sk->sk_state == LLCP_LISTEN) { err = -EINVAL; break; } err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen); if (err) break; if (opt > LLCP_MAX_MIUX) { err = -EINVAL; break; } llcp_sock->miux = cpu_to_be16((u16) opt); break; default: err = -ENOPROTOOPT; break; } release_sock(sk); pr_debug("%p rw %d miux %d\n", llcp_sock, llcp_sock->rw, llcp_sock->miux); return err; } static int nfc_llcp_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct nfc_llcp_local *local; struct sock *sk = sock->sk; struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); int len, err = 0; u16 miux, remote_miu; u8 rw; pr_debug("%p optname %d\n", sk, optname); if (level != SOL_NFC) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; local = llcp_sock->local; if (!local) return -ENODEV; len = min_t(u32, len, sizeof(u32)); lock_sock(sk); switch (optname) { case NFC_LLCP_RW: rw = llcp_sock->rw > LLCP_MAX_RW ? 
local->rw : llcp_sock->rw; if (put_user(rw, (u32 __user *) optval)) err = -EFAULT; break; case NFC_LLCP_MIUX: miux = be16_to_cpu(llcp_sock->miux) > LLCP_MAX_MIUX ? be16_to_cpu(local->miux) : be16_to_cpu(llcp_sock->miux); if (put_user(miux, (u32 __user *) optval)) err = -EFAULT; break; case NFC_LLCP_REMOTE_MIU: remote_miu = llcp_sock->remote_miu > LLCP_MAX_MIU ? local->remote_miu : llcp_sock->remote_miu; if (put_user(remote_miu, (u32 __user *) optval)) err = -EFAULT; break; case NFC_LLCP_REMOTE_LTO: if (put_user(local->remote_lto / 10, (u32 __user *) optval)) err = -EFAULT; break; case NFC_LLCP_REMOTE_RW: if (put_user(llcp_sock->remote_rw, (u32 __user *) optval)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); if (put_user(len, optlen)) return -EFAULT; return err; } void nfc_llcp_accept_unlink(struct sock *sk) { struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); pr_debug("state %d\n", sk->sk_state); list_del_init(&llcp_sock->accept_queue); sk_acceptq_removed(llcp_sock->parent); llcp_sock->parent = NULL; sock_put(sk); } void nfc_llcp_accept_enqueue(struct sock *parent, struct sock *sk) { struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); struct nfc_llcp_sock *llcp_sock_parent = nfc_llcp_sock(parent); /* Lock will be free from unlink */ sock_hold(sk); list_add_tail(&llcp_sock->accept_queue, &llcp_sock_parent->accept_queue); llcp_sock->parent = parent; sk_acceptq_added(parent); } struct sock *nfc_llcp_accept_dequeue(struct sock *parent, struct socket *newsock) { struct nfc_llcp_sock *lsk, *n, *llcp_parent; struct sock *sk; llcp_parent = nfc_llcp_sock(parent); list_for_each_entry_safe(lsk, n, &llcp_parent->accept_queue, accept_queue) { sk = &lsk->sk; lock_sock(sk); if (sk->sk_state == LLCP_CLOSED) { release_sock(sk); nfc_llcp_accept_unlink(sk); continue; } if (sk->sk_state == LLCP_CONNECTED || !newsock) { list_del_init(&lsk->accept_queue); sock_put(sk); if (newsock) sock_graft(sk, newsock); release_sock(sk); pr_debug("Returning sk state %d\n", sk->sk_state); sk_acceptq_removed(parent); return sk; } release_sock(sk); } return NULL; } static int llcp_sock_accept(struct socket *sock, struct socket *newsock, struct proto_accept_arg *arg) { DECLARE_WAITQUEUE(wait, current); struct sock *sk = sock->sk, *new_sk; long timeo; int ret = 0; pr_debug("parent %p\n", sk); lock_sock_nested(sk, SINGLE_DEPTH_NESTING); if (sk->sk_state != LLCP_LISTEN) { ret = -EBADFD; goto error; } timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK); /* Wait for an incoming connection. 
*/ add_wait_queue_exclusive(sk_sleep(sk), &wait); while (!(new_sk = nfc_llcp_accept_dequeue(sk, newsock))) { set_current_state(TASK_INTERRUPTIBLE); if (!timeo) { ret = -EAGAIN; break; } if (signal_pending(current)) { ret = sock_intr_errno(timeo); break; } release_sock(sk); timeo = schedule_timeout(timeo); lock_sock_nested(sk, SINGLE_DEPTH_NESTING); } __set_current_state(TASK_RUNNING); remove_wait_queue(sk_sleep(sk), &wait); if (ret) goto error; newsock->state = SS_CONNECTED; pr_debug("new socket %p\n", new_sk); error: release_sock(sk); return ret; } static int llcp_sock_getname(struct socket *sock, struct sockaddr *uaddr, int peer) { struct sock *sk = sock->sk; struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); DECLARE_SOCKADDR(struct sockaddr_nfc_llcp *, llcp_addr, uaddr); if (llcp_sock == NULL || llcp_sock->dev == NULL) return -EBADFD; pr_debug("%p %d %d %d\n", sk, llcp_sock->target_idx, llcp_sock->dsap, llcp_sock->ssap); memset(llcp_addr, 0, sizeof(*llcp_addr)); lock_sock(sk); if (!llcp_sock->dev) { release_sock(sk); return -EBADFD; } llcp_addr->sa_family = AF_NFC; llcp_addr->dev_idx = llcp_sock->dev->idx; llcp_addr->target_idx = llcp_sock->target_idx; llcp_addr->nfc_protocol = llcp_sock->nfc_protocol; llcp_addr->dsap = llcp_sock->dsap; llcp_addr->ssap = llcp_sock->ssap; llcp_addr->service_name_len = llcp_sock->service_name_len; memcpy(llcp_addr->service_name, llcp_sock->service_name, llcp_addr->service_name_len); release_sock(sk); return sizeof(struct sockaddr_nfc_llcp); } static inline __poll_t llcp_accept_poll(struct sock *parent) { struct nfc_llcp_sock *llcp_sock, *parent_sock; struct sock *sk; parent_sock = nfc_llcp_sock(parent); list_for_each_entry(llcp_sock, &parent_sock->accept_queue, accept_queue) { sk = &llcp_sock->sk; if (sk->sk_state == LLCP_CONNECTED) return EPOLLIN | EPOLLRDNORM; } return 0; } static __poll_t llcp_sock_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; __poll_t mask = 0; pr_debug("%p\n", sk); sock_poll_wait(file, sock, wait); if (sk->sk_state == LLCP_LISTEN) return llcp_accept_poll(sk); if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue)) mask |= EPOLLERR | (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? 
EPOLLPRI : 0); if (!skb_queue_empty_lockless(&sk->sk_receive_queue)) mask |= EPOLLIN | EPOLLRDNORM; if (sk->sk_state == LLCP_CLOSED) mask |= EPOLLHUP; if (sk->sk_shutdown & RCV_SHUTDOWN) mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; if (sk->sk_shutdown == SHUTDOWN_MASK) mask |= EPOLLHUP; if (sock_writeable(sk) && sk->sk_state == LLCP_CONNECTED) mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; else sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); pr_debug("mask 0x%x\n", mask); return mask; } static int llcp_sock_release(struct socket *sock) { struct sock *sk = sock->sk; struct nfc_llcp_local *local; struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); int err = 0; if (!sk) return 0; pr_debug("%p\n", sk); local = llcp_sock->local; if (local == NULL) { err = -ENODEV; goto out; } lock_sock(sk); /* Send a DISC */ if (sk->sk_state == LLCP_CONNECTED) nfc_llcp_send_disconnect(llcp_sock); if (sk->sk_state == LLCP_LISTEN) { struct nfc_llcp_sock *lsk, *n; struct sock *accept_sk; list_for_each_entry_safe(lsk, n, &llcp_sock->accept_queue, accept_queue) { accept_sk = &lsk->sk; lock_sock(accept_sk); nfc_llcp_send_disconnect(lsk); nfc_llcp_accept_unlink(accept_sk); release_sock(accept_sk); } } if (sock->type == SOCK_RAW) nfc_llcp_sock_unlink(&local->raw_sockets, sk); else nfc_llcp_sock_unlink(&local->sockets, sk); if (llcp_sock->reserved_ssap < LLCP_SAP_MAX) nfc_llcp_put_ssap(llcp_sock->local, llcp_sock->ssap); release_sock(sk); out: sock_orphan(sk); sock_put(sk); return err; } static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr, int len, int flags) { struct sock *sk = sock->sk; struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); struct sockaddr_nfc_llcp *addr = (struct sockaddr_nfc_llcp *)_addr; struct nfc_dev *dev; struct nfc_llcp_local *local; int ret = 0; pr_debug("sock %p sk %p flags 0x%x\n", sock, sk, flags); if (!addr || len < sizeof(*addr) || addr->sa_family != AF_NFC) return -EINVAL; if (addr->service_name_len == 0 && addr->dsap == 0) return -EINVAL; pr_debug("addr dev_idx=%u target_idx=%u protocol=%u\n", addr->dev_idx, addr->target_idx, addr->nfc_protocol); lock_sock(sk); if (sk->sk_state == LLCP_CONNECTED) { ret = -EISCONN; goto error; } if (sk->sk_state == LLCP_CONNECTING) { ret = -EINPROGRESS; goto error; } dev = nfc_get_device(addr->dev_idx); if (dev == NULL) { ret = -ENODEV; goto error; } local = nfc_llcp_find_local(dev); if (local == NULL) { ret = -ENODEV; goto put_dev; } device_lock(&dev->dev); if (dev->dep_link_up == false) { ret = -ENOLINK; device_unlock(&dev->dev); goto sock_llcp_put_local; } device_unlock(&dev->dev); if (local->rf_mode == NFC_RF_INITIATOR && addr->target_idx != local->target_idx) { ret = -ENOLINK; goto sock_llcp_put_local; } llcp_sock->dev = dev; llcp_sock->local = local; llcp_sock->ssap = nfc_llcp_get_local_ssap(local); if (llcp_sock->ssap == LLCP_SAP_MAX) { ret = -ENOMEM; goto sock_llcp_nullify; } llcp_sock->reserved_ssap = llcp_sock->ssap; if (addr->service_name_len == 0) llcp_sock->dsap = addr->dsap; else llcp_sock->dsap = LLCP_SAP_SDP; llcp_sock->nfc_protocol = addr->nfc_protocol; llcp_sock->service_name_len = min_t(unsigned int, addr->service_name_len, NFC_LLCP_MAX_SERVICE_NAME); llcp_sock->service_name = kmemdup(addr->service_name, llcp_sock->service_name_len, GFP_KERNEL); if (!llcp_sock->service_name) { ret = -ENOMEM; goto sock_llcp_release; } nfc_llcp_sock_link(&local->connecting_sockets, sk); ret = nfc_llcp_send_connect(llcp_sock); if (ret) goto sock_unlink; sk->sk_state = LLCP_CONNECTING; ret = sock_wait_state(sk, LLCP_CONNECTED, sock_sndtimeo(sk, 
flags & O_NONBLOCK)); if (ret && ret != -EINPROGRESS) goto sock_unlink; release_sock(sk); return ret; sock_unlink: nfc_llcp_sock_unlink(&local->connecting_sockets, sk); kfree(llcp_sock->service_name); llcp_sock->service_name = NULL; sock_llcp_release: nfc_llcp_put_ssap(local, llcp_sock->ssap); sock_llcp_nullify: llcp_sock->local = NULL; llcp_sock->dev = NULL; sock_llcp_put_local: nfc_llcp_local_put(local); put_dev: nfc_put_device(dev); error: release_sock(sk); return ret; } static int llcp_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); int ret; pr_debug("sock %p sk %p", sock, sk); ret = sock_error(sk); if (ret) return ret; if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; lock_sock(sk); if (!llcp_sock->local) { release_sock(sk); return -ENODEV; } if (sk->sk_type == SOCK_DGRAM) { if (sk->sk_state != LLCP_BOUND) { release_sock(sk); return -ENOTCONN; } DECLARE_SOCKADDR(struct sockaddr_nfc_llcp *, addr, msg->msg_name); if (msg->msg_namelen < sizeof(*addr)) { release_sock(sk); return -EINVAL; } release_sock(sk); return nfc_llcp_send_ui_frame(llcp_sock, addr->dsap, addr->ssap, msg, len); } if (sk->sk_state != LLCP_CONNECTED) { release_sock(sk); return -ENOTCONN; } release_sock(sk); return nfc_llcp_send_i_frame(llcp_sock, msg, len); } static int llcp_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; unsigned int copied, rlen; struct sk_buff *skb, *cskb; int err = 0; pr_debug("%p %zu\n", sk, len); lock_sock(sk); if (sk->sk_state == LLCP_CLOSED && skb_queue_empty(&sk->sk_receive_queue)) { release_sock(sk); return 0; } release_sock(sk); if (flags & (MSG_OOB)) return -EOPNOTSUPP; skb = skb_recv_datagram(sk, flags, &err); if (!skb) { pr_err("Recv datagram failed state %d %d %d", sk->sk_state, err, sock_error(sk)); if (sk->sk_shutdown & RCV_SHUTDOWN) return 0; return err; } rlen = skb->len; /* real length of skb */ copied = min_t(unsigned int, rlen, len); cskb = skb; if (skb_copy_datagram_msg(cskb, 0, msg, copied)) { if (!(flags & MSG_PEEK)) skb_queue_head(&sk->sk_receive_queue, skb); return -EFAULT; } sock_recv_timestamp(msg, sk, skb); if (sk->sk_type == SOCK_DGRAM && msg->msg_name) { struct nfc_llcp_ui_cb *ui_cb = nfc_llcp_ui_skb_cb(skb); DECLARE_SOCKADDR(struct sockaddr_nfc_llcp *, sockaddr, msg->msg_name); msg->msg_namelen = sizeof(struct sockaddr_nfc_llcp); pr_debug("Datagram socket %d %d\n", ui_cb->dsap, ui_cb->ssap); memset(sockaddr, 0, sizeof(*sockaddr)); sockaddr->sa_family = AF_NFC; sockaddr->nfc_protocol = NFC_PROTO_NFC_DEP; sockaddr->dsap = ui_cb->dsap; sockaddr->ssap = ui_cb->ssap; } /* Mark read part of skb as used */ if (!(flags & MSG_PEEK)) { /* SOCK_STREAM: re-queue skb if it contains unreceived data */ if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) { skb_pull(skb, copied); if (skb->len) { skb_queue_head(&sk->sk_receive_queue, skb); goto done; } } kfree_skb(skb); } /* XXX Queue backlogged skbs */ done: /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */ if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC)) copied = rlen; return copied; } static const struct proto_ops llcp_sock_ops = { .family = PF_NFC, .owner = THIS_MODULE, .bind = llcp_sock_bind, .connect = llcp_sock_connect, .release = llcp_sock_release, .socketpair = sock_no_socketpair, .accept = llcp_sock_accept, .getname = llcp_sock_getname, .poll = llcp_sock_poll, .ioctl = sock_no_ioctl, .listen = llcp_sock_listen, 
.shutdown = sock_no_shutdown, .setsockopt = nfc_llcp_setsockopt, .getsockopt = nfc_llcp_getsockopt, .sendmsg = llcp_sock_sendmsg, .recvmsg = llcp_sock_recvmsg, .mmap = sock_no_mmap, }; static const struct proto_ops llcp_rawsock_ops = { .family = PF_NFC, .owner = THIS_MODULE, .bind = llcp_raw_sock_bind, .connect = sock_no_connect, .release = llcp_sock_release, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = llcp_sock_getname, .poll = llcp_sock_poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .sendmsg = sock_no_sendmsg, .recvmsg = llcp_sock_recvmsg, .mmap = sock_no_mmap, }; static void llcp_sock_destruct(struct sock *sk) { struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); pr_debug("%p\n", sk); if (sk->sk_state == LLCP_CONNECTED) nfc_put_device(llcp_sock->dev); skb_queue_purge(&sk->sk_receive_queue); nfc_llcp_sock_free(llcp_sock); if (!sock_flag(sk, SOCK_DEAD)) { pr_err("Freeing alive NFC LLCP socket %p\n", sk); return; } } struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp, int kern) { struct sock *sk; struct nfc_llcp_sock *llcp_sock; sk = sk_alloc(&init_net, PF_NFC, gfp, &llcp_sock_proto, kern); if (!sk) return NULL; llcp_sock = nfc_llcp_sock(sk); sock_init_data(sock, sk); sk->sk_state = LLCP_CLOSED; sk->sk_protocol = NFC_SOCKPROTO_LLCP; sk->sk_type = type; sk->sk_destruct = llcp_sock_destruct; llcp_sock->ssap = 0; llcp_sock->dsap = LLCP_SAP_SDP; llcp_sock->rw = LLCP_MAX_RW + 1; llcp_sock->miux = cpu_to_be16(LLCP_MAX_MIUX + 1); llcp_sock->send_n = llcp_sock->send_ack_n = 0; llcp_sock->recv_n = llcp_sock->recv_ack_n = 0; llcp_sock->remote_ready = 1; llcp_sock->reserved_ssap = LLCP_SAP_MAX; nfc_llcp_socket_remote_param_init(llcp_sock); skb_queue_head_init(&llcp_sock->tx_queue); skb_queue_head_init(&llcp_sock->tx_pending_queue); INIT_LIST_HEAD(&llcp_sock->accept_queue); if (sock != NULL) sock->state = SS_UNCONNECTED; return sk; } void nfc_llcp_sock_free(struct nfc_llcp_sock *sock) { kfree(sock->service_name); skb_queue_purge(&sock->tx_queue); skb_queue_purge(&sock->tx_pending_queue); list_del_init(&sock->accept_queue); sock->parent = NULL; nfc_llcp_local_put(sock->local); } static int llcp_sock_create(struct net *net, struct socket *sock, const struct nfc_protocol *nfc_proto, int kern) { struct sock *sk; pr_debug("%p\n", sock); if (sock->type != SOCK_STREAM && sock->type != SOCK_DGRAM && sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; if (sock->type == SOCK_RAW) { if (!capable(CAP_NET_RAW)) return -EPERM; sock->ops = &llcp_rawsock_ops; } else { sock->ops = &llcp_sock_ops; } sk = nfc_llcp_sock_alloc(sock, sock->type, GFP_ATOMIC, kern); if (sk == NULL) return -ENOMEM; return 0; } static const struct nfc_protocol llcp_nfc_proto = { .id = NFC_SOCKPROTO_LLCP, .proto = &llcp_sock_proto, .owner = THIS_MODULE, .create = llcp_sock_create }; int __init nfc_llcp_sock_init(void) { return nfc_proto_register(&llcp_nfc_proto); } void nfc_llcp_sock_exit(void) { nfc_proto_unregister(&llcp_nfc_proto); }
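/*
 * Standalone userspace sketch, not part of net/nfc/llcp_sock.c: it exercises
 * llcp_sock_bind() above from user space.  It assumes <linux/nfc.h> is
 * available, that NFC adapter index 0 exists, and that the service name is
 * an arbitrary example; without an adapter, bind() fails with ENODEV just as
 * the kernel code returns it.
 */
#include <linux/nfc.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        struct sockaddr_nfc_llcp addr;
        int fd = socket(AF_NFC, SOCK_STREAM, NFC_SOCKPROTO_LLCP);

        if (fd < 0) {
                perror("socket(AF_NFC)");
                return 1;
        }

        memset(&addr, 0, sizeof(addr));
        addr.sa_family = AF_NFC;
        addr.dev_idx = 0;                       /* first NFC device */
        addr.nfc_protocol = NFC_PROTO_NFC_DEP;
        addr.dsap = 0;                          /* must be 0 for a listener */
        addr.service_name_len = snprintf(addr.service_name,
                                         sizeof(addr.service_name),
                                         "urn:nfc:sn:example");

        if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                perror("bind");
        else if (listen(fd, 1) < 0)
                perror("listen");

        close(fd);
        return 0;
}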
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <asm/byteorder.h>
#include <linux/swap.h>
#include <linux/mpage.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/mm.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "inode.h"
#include "journal.h"
#include "suballoc.h"
#include "super.h"
#include "symlink.h"
#include "refcounttree.h"
#include "ocfs2_trace.h"

#include "buffer_head_io.h"
#include "dir.h"
#include "namei.h"
#include "sysfile.h"

static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	int err = -EIO;
	int status;
	struct ocfs2_dinode *fe = NULL;
	struct buffer_head *bh = NULL;
	struct buffer_head *buffer_cache_bh = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	trace_ocfs2_symlink_get_block(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(unsigned long long)iblock, bh_result, create);

	BUG_ON(ocfs2_inode_is_fast_symlink(inode));

	if ((iblock << inode->i_sb->s_blocksize_bits) > PATH_MAX + 1) {
		mlog(ML_ERROR, "block offset > PATH_MAX: %llu",
		     (unsigned long long)iblock);
		goto bail;
	}

	status = ocfs2_read_inode_block(inode, &bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	fe = (struct ocfs2_dinode *) bh->b_data;

	if ((u64)iblock >= ocfs2_clusters_to_blocks(inode->i_sb,
						    le32_to_cpu(fe->i_clusters))) {
		err = -ENOMEM;
		mlog(ML_ERROR, "block offset is outside the allocated size: "
		     "%llu\n", (unsigned long long)iblock);
		goto bail;
	}

	/* We don't use the page cache to create symlink data, so if
	 * need be, copy it over from the buffer cache.
*/ if (!buffer_uptodate(bh_result) && ocfs2_inode_is_new(inode)) { u64 blkno = le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock; buffer_cache_bh = sb_getblk(osb->sb, blkno); if (!buffer_cache_bh) { err = -ENOMEM; mlog(ML_ERROR, "couldn't getblock for symlink!\n"); goto bail; } /* we haven't locked out transactions, so a commit * could've happened. Since we've got a reference on * the bh, even if it commits while we're doing the * copy, the data is still good. */ if (buffer_jbd(buffer_cache_bh) && ocfs2_inode_is_new(inode)) { memcpy_to_folio(bh_result->b_folio, bh_result->b_size * iblock, buffer_cache_bh->b_data, bh_result->b_size); set_buffer_uptodate(bh_result); } brelse(buffer_cache_bh); } map_bh(bh_result, inode->i_sb, le64_to_cpu(fe->id2.i_list.l_recs[0].e_blkno) + iblock); err = 0; bail: brelse(bh); return err; } static int ocfs2_lock_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { int ret = 0; struct ocfs2_inode_info *oi = OCFS2_I(inode); down_read(&oi->ip_alloc_sem); ret = ocfs2_get_block(inode, iblock, bh_result, create); up_read(&oi->ip_alloc_sem); return ret; } int ocfs2_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { int err = 0; unsigned int ext_flags; u64 max_blocks = bh_result->b_size >> inode->i_blkbits; u64 p_blkno, count, past_eof; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); trace_ocfs2_get_block((unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)iblock, bh_result, create); if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) mlog(ML_NOTICE, "get_block on system inode 0x%p (%lu)\n", inode, inode->i_ino); if (S_ISLNK(inode->i_mode)) { /* this always does I/O for some reason. */ err = ocfs2_symlink_get_block(inode, iblock, bh_result, create); goto bail; } err = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, &count, &ext_flags); if (err) { mlog(ML_ERROR, "get_blocks() failed, inode: 0x%p, " "block: %llu\n", inode, (unsigned long long)iblock); goto bail; } if (max_blocks < count) count = max_blocks; /* * ocfs2 never allocates in this function - the only time we * need to use BH_New is when we're extending i_size on a file * system which doesn't support holes, in which case BH_New * allows __block_write_begin() to zero. * * If we see this on a sparse file system, then a truncate has * raced us and removed the cluster. In this case, we clear * the buffers dirty and uptodate bits and let the buffer code * ignore it as a hole. */ if (create && p_blkno == 0 && ocfs2_sparse_alloc(osb)) { clear_buffer_dirty(bh_result); clear_buffer_uptodate(bh_result); goto bail; } /* Treat the unwritten extent as a hole for zeroing purposes. 
*/ if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN)) map_bh(bh_result, inode->i_sb, p_blkno); bh_result->b_size = count << inode->i_blkbits; if (!ocfs2_sparse_alloc(osb)) { if (p_blkno == 0) { err = -EIO; mlog(ML_ERROR, "iblock = %llu p_blkno = %llu blkno=(%llu)\n", (unsigned long long)iblock, (unsigned long long)p_blkno, (unsigned long long)OCFS2_I(inode)->ip_blkno); mlog(ML_ERROR, "Size %llu, clusters %u\n", (unsigned long long)i_size_read(inode), OCFS2_I(inode)->ip_clusters); dump_stack(); goto bail; } } past_eof = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode)); trace_ocfs2_get_block_end((unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)past_eof); if (create && (iblock >= past_eof)) set_buffer_new(bh_result); bail: if (err < 0) err = -EIO; return err; } int ocfs2_read_inline_data(struct inode *inode, struct folio *folio, struct buffer_head *di_bh) { loff_t size; struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; if (!(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL)) { ocfs2_error(inode->i_sb, "Inode %llu lost inline data flag\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); return -EROFS; } size = i_size_read(inode); if (size > folio_size(folio) || size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) { ocfs2_error(inode->i_sb, "Inode %llu has with inline data has bad size: %Lu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)size); return -EROFS; } folio_fill_tail(folio, 0, di->id2.i_data.id_data, size); folio_mark_uptodate(folio); return 0; } static int ocfs2_readpage_inline(struct inode *inode, struct folio *folio) { int ret; struct buffer_head *di_bh = NULL; BUG_ON(!folio_test_locked(folio)); BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)); ret = ocfs2_read_inode_block(inode, &di_bh); if (ret) { mlog_errno(ret); goto out; } ret = ocfs2_read_inline_data(inode, folio, di_bh); out: folio_unlock(folio); brelse(di_bh); return ret; } static int ocfs2_read_folio(struct file *file, struct folio *folio) { struct inode *inode = folio->mapping->host; struct ocfs2_inode_info *oi = OCFS2_I(inode); loff_t start = folio_pos(folio); int ret, unlock = 1; trace_ocfs2_readpage((unsigned long long)oi->ip_blkno, folio->index); ret = ocfs2_inode_lock_with_folio(inode, NULL, 0, folio); if (ret != 0) { if (ret == AOP_TRUNCATED_PAGE) unlock = 0; mlog_errno(ret); goto out; } if (down_read_trylock(&oi->ip_alloc_sem) == 0) { /* * Unlock the folio and cycle ip_alloc_sem so that we don't * busyloop waiting for ip_alloc_sem to unlock */ ret = AOP_TRUNCATED_PAGE; folio_unlock(folio); unlock = 0; down_read(&oi->ip_alloc_sem); up_read(&oi->ip_alloc_sem); goto out_inode_unlock; } /* * i_size might have just been updated as we grabbed the meta lock. We * might now be discovering a truncate that hit on another node. * block_read_full_folio->get_block freaks out if it is asked to read * beyond the end of a file, so we check here. Callers * (generic_file_read, vm_ops->fault) are clever enough to check i_size * and notice that the folio they just read isn't needed. * * XXX sys_readahead() seems to get that wrong? 
*/ if (start >= i_size_read(inode)) { folio_zero_segment(folio, 0, folio_size(folio)); folio_mark_uptodate(folio); ret = 0; goto out_alloc; } if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) ret = ocfs2_readpage_inline(inode, folio); else ret = block_read_full_folio(folio, ocfs2_get_block); unlock = 0; out_alloc: up_read(&oi->ip_alloc_sem); out_inode_unlock: ocfs2_inode_unlock(inode, 0); out: if (unlock) folio_unlock(folio); return ret; } /* * This is used only for read-ahead. Failures or difficult to handle * situations are safe to ignore. * * Right now, we don't bother with BH_Boundary - in-inode extent lists * are quite large (243 extents on 4k blocks), so most inodes don't * grow out to a tree. If need be, detecting boundary extents could * trivially be added in a future version of ocfs2_get_block(). */ static void ocfs2_readahead(struct readahead_control *rac) { int ret; struct inode *inode = rac->mapping->host; struct ocfs2_inode_info *oi = OCFS2_I(inode); /* * Use the nonblocking flag for the dlm code to avoid page * lock inversion, but don't bother with retrying. */ ret = ocfs2_inode_lock_full(inode, NULL, 0, OCFS2_LOCK_NONBLOCK); if (ret) return; if (down_read_trylock(&oi->ip_alloc_sem) == 0) goto out_unlock; /* * Don't bother with inline-data. There isn't anything * to read-ahead in that case anyway... */ if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) goto out_up; /* * Check whether a remote node truncated this file - we just * drop out in that case as it's not worth handling here. */ if (readahead_pos(rac) >= i_size_read(inode)) goto out_up; mpage_readahead(rac, ocfs2_get_block); out_up: up_read(&oi->ip_alloc_sem); out_unlock: ocfs2_inode_unlock(inode, 0); } /* Note: Because we don't support holes, our allocation has * already happened (allocation writes zeros to the file data) * so we don't have to worry about ordered writes in * ocfs2_writepages. * * ->writepages is called during the process of invalidating the page cache * during blocked lock processing. It can't block on any cluster locks * to during block mapping. It's relying on the fact that the block * mapping can't have disappeared under the dirty pages that it is * being asked to write back. */ static int ocfs2_writepages(struct address_space *mapping, struct writeback_control *wbc) { return mpage_writepages(mapping, wbc, ocfs2_get_block); } /* Taken from ext3. 
We don't necessarily need the full blown * functionality yet, but IMHO it's better to cut and paste the whole * thing so we can avoid introducing our own bugs (and easily pick up * their fixes when they happen) --Mark */ int walk_page_buffers( handle_t *handle, struct buffer_head *head, unsigned from, unsigned to, int *partial, int (*fn)( handle_t *handle, struct buffer_head *bh)) { struct buffer_head *bh; unsigned block_start, block_end; unsigned blocksize = head->b_size; int err, ret = 0; struct buffer_head *next; for ( bh = head, block_start = 0; ret == 0 && (bh != head || !block_start); block_start = block_end, bh = next) { next = bh->b_this_page; block_end = block_start + blocksize; if (block_end <= from || block_start >= to) { if (partial && !buffer_uptodate(bh)) *partial = 1; continue; } err = (*fn)(handle, bh); if (!ret) ret = err; } return ret; } static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block) { sector_t status; u64 p_blkno = 0; int err = 0; struct inode *inode = mapping->host; trace_ocfs2_bmap((unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)block); /* * The swap code (ab-)uses ->bmap to get a block mapping and then * bypasseѕ the file system for actual I/O. We really can't allow * that on refcounted inodes, so we have to skip out here. And yes, * 0 is the magic code for a bmap error.. */ if (ocfs2_is_refcount_inode(inode)) return 0; /* We don't need to lock journal system files, since they aren't * accessed concurrently from multiple nodes. */ if (!INODE_JOURNAL(inode)) { err = ocfs2_inode_lock(inode, NULL, 0); if (err) { if (err != -ENOENT) mlog_errno(err); goto bail; } down_read(&OCFS2_I(inode)->ip_alloc_sem); } if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)) err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL, NULL); if (!INODE_JOURNAL(inode)) { up_read(&OCFS2_I(inode)->ip_alloc_sem); ocfs2_inode_unlock(inode, 0); } if (err) { mlog(ML_ERROR, "get_blocks() failed, block = %llu\n", (unsigned long long)block); mlog_errno(err); goto bail; } bail: status = err ? 0 : p_blkno; return status; } static bool ocfs2_release_folio(struct folio *folio, gfp_t wait) { if (!folio_buffers(folio)) return false; return try_to_free_buffers(folio); } static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb, u32 cpos, unsigned int *start, unsigned int *end) { unsigned int cluster_start = 0, cluster_end = PAGE_SIZE; if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits)) { unsigned int cpp; cpp = 1 << (PAGE_SHIFT - osb->s_clustersize_bits); cluster_start = cpos % cpp; cluster_start = cluster_start << osb->s_clustersize_bits; cluster_end = cluster_start + osb->s_clustersize; } BUG_ON(cluster_start > PAGE_SIZE); BUG_ON(cluster_end > PAGE_SIZE); if (start) *start = cluster_start; if (end) *end = cluster_end; } /* * 'from' and 'to' are the region in the page to avoid zeroing. * * If pagesize > clustersize, this function will avoid zeroing outside * of the cluster boundary. 
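 *
 * As a rough worked example (the 64k page / 4k cluster figures are only
 * an illustration, not a requirement): with PAGE_SHIFT == 16 and
 * s_clustersize_bits == 12 there are 1 << (16 - 12) == 16 clusters per
 * page, so for cpos == 5 the helper above yields
 *
 *	cluster_start = (5 % 16) << 12 == 20480;
 *	cluster_end   = cluster_start + 4096 == 24576;
 *
 * i.e. only bytes [20480, 24576) of the page are candidates for zeroing.
 * When the page is no larger than a cluster, the whole page is used.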
* * from == to == 0 is code for "zero the entire cluster region" */ static void ocfs2_clear_folio_regions(struct folio *folio, struct ocfs2_super *osb, u32 cpos, unsigned from, unsigned to) { void *kaddr; unsigned int cluster_start, cluster_end; ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end); kaddr = kmap_local_folio(folio, 0); if (from || to) { if (from > cluster_start) memset(kaddr + cluster_start, 0, from - cluster_start); if (to < cluster_end) memset(kaddr + to, 0, cluster_end - to); } else { memset(kaddr + cluster_start, 0, cluster_end - cluster_start); } kunmap_local(kaddr); } /* * Nonsparse file systems fully allocate before we get to the write * code. This prevents ocfs2_write() from tagging the write as an * allocating one, which means ocfs2_map_folio_blocks() might try to * read-in the blocks at the tail of our file. Avoid reading them by * testing i_size against each block offset. */ static int ocfs2_should_read_blk(struct inode *inode, struct folio *folio, unsigned int block_start) { u64 offset = folio_pos(folio) + block_start; if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) return 1; if (i_size_read(inode) > offset) return 1; return 0; } /* * Some of this taken from __block_write_begin(). We already have our * mapping by now though, and the entire write will be allocating or * it won't, so not much need to use BH_New. * * This will also skip zeroing, which is handled externally. */ int ocfs2_map_folio_blocks(struct folio *folio, u64 *p_blkno, struct inode *inode, unsigned int from, unsigned int to, int new) { int ret = 0; struct buffer_head *head, *bh, *wait[2], **wait_bh = wait; unsigned int block_end, block_start; unsigned int bsize = i_blocksize(inode); head = folio_buffers(folio); if (!head) head = create_empty_buffers(folio, bsize, 0); for (bh = head, block_start = 0; bh != head || !block_start; bh = bh->b_this_page, block_start += bsize) { block_end = block_start + bsize; clear_buffer_new(bh); /* * Ignore blocks outside of our i/o range - * they may belong to unallocated clusters. */ if (block_start >= to || block_end <= from) { if (folio_test_uptodate(folio)) set_buffer_uptodate(bh); continue; } /* * For an allocating write with cluster size >= page * size, we always write the entire page. */ if (new) set_buffer_new(bh); if (!buffer_mapped(bh)) { map_bh(bh, inode->i_sb, *p_blkno); clean_bdev_bh_alias(bh); } if (folio_test_uptodate(folio)) { set_buffer_uptodate(bh); } else if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_new(bh) && ocfs2_should_read_blk(inode, folio, block_start) && (block_start < from || block_end > to)) { bh_read_nowait(bh, 0); *wait_bh++=bh; } *p_blkno = *p_blkno + 1; } /* * If we issued read requests - let them complete. */ while(wait_bh > wait) { wait_on_buffer(*--wait_bh); if (!buffer_uptodate(*wait_bh)) ret = -EIO; } if (ret == 0 || !new) return ret; /* * If we get -EIO above, zero out any newly allocated blocks * to avoid exposing stale data. 
*/ bh = head; block_start = 0; do { block_end = block_start + bsize; if (block_end <= from) goto next_bh; if (block_start >= to) break; folio_zero_range(folio, block_start, bh->b_size); set_buffer_uptodate(bh); mark_buffer_dirty(bh); next_bh: block_start = block_end; bh = bh->b_this_page; } while (bh != head); return ret; } #if (PAGE_SIZE >= OCFS2_MAX_CLUSTERSIZE) #define OCFS2_MAX_CTXT_PAGES 1 #else #define OCFS2_MAX_CTXT_PAGES (OCFS2_MAX_CLUSTERSIZE / PAGE_SIZE) #endif #define OCFS2_MAX_CLUSTERS_PER_PAGE (PAGE_SIZE / OCFS2_MIN_CLUSTERSIZE) struct ocfs2_unwritten_extent { struct list_head ue_node; struct list_head ue_ip_node; u32 ue_cpos; u32 ue_phys; }; /* * Describe the state of a single cluster to be written to. */ struct ocfs2_write_cluster_desc { u32 c_cpos; u32 c_phys; /* * Give this a unique field because c_phys eventually gets * filled. */ unsigned c_new; unsigned c_clear_unwritten; unsigned c_needs_zero; }; struct ocfs2_write_ctxt { /* Logical cluster position / len of write */ u32 w_cpos; u32 w_clen; /* First cluster allocated in a nonsparse extend */ u32 w_first_new_cpos; /* Type of caller. Must be one of buffer, mmap, direct. */ ocfs2_write_type_t w_type; struct ocfs2_write_cluster_desc w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE]; /* * This is true if page_size > cluster_size. * * It triggers a set of special cases during write which might * have to deal with allocating writes to partial pages. */ unsigned int w_large_pages; /* * Folios involved in this write. * * w_target_folio is the folio being written to by the user. * * w_folios is an array of folios which always contains * w_target_folio, and in the case of an allocating write with * page_size < cluster size, it will contain zero'd and mapped * pages adjacent to w_target_folio which need to be written * out in so that future reads from that region will get * zero's. */ unsigned int w_num_folios; struct folio *w_folios[OCFS2_MAX_CTXT_PAGES]; struct folio *w_target_folio; /* * w_target_locked is used for page_mkwrite path indicating no unlocking * against w_target_folio in ocfs2_write_end_nolock. */ unsigned int w_target_locked:1; /* * ocfs2_write_end() uses this to know what the real range to * write in the target should be. */ unsigned int w_target_from; unsigned int w_target_to; /* * We could use journal_current_handle() but this is cleaner, * IMHO -Mark */ handle_t *w_handle; struct buffer_head *w_di_bh; struct ocfs2_cached_dealloc_ctxt w_dealloc; struct list_head w_unwritten_list; unsigned int w_unwritten_count; }; void ocfs2_unlock_and_free_folios(struct folio **folios, int num_folios) { int i; for(i = 0; i < num_folios; i++) { if (!folios[i]) continue; folio_unlock(folios[i]); folio_mark_accessed(folios[i]); folio_put(folios[i]); } } static void ocfs2_unlock_folios(struct ocfs2_write_ctxt *wc) { int i; /* * w_target_locked is only set to true in the page_mkwrite() case. * The intent is to allow us to lock the target page from write_begin() * to write_end(). The caller must hold a ref on w_target_folio. 
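 *
 * A minimal sketch of the intended pairing (ordering only, not a
 * verbatim call sequence):
 *
 *	ocfs2_grab_folios_for_write():	folio_lock() + folio_get() on the
 *					mkwrite folio, w_target_locked = true;
 *	ocfs2_unlock_folios():		drop that extra reference, but leave
 *					the target folio locked for the
 *					page_mkwrite() caller to hand back.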
*/ if (wc->w_target_locked) { BUG_ON(!wc->w_target_folio); for (i = 0; i < wc->w_num_folios; i++) { if (wc->w_target_folio == wc->w_folios[i]) { wc->w_folios[i] = NULL; break; } } folio_mark_accessed(wc->w_target_folio); folio_put(wc->w_target_folio); } ocfs2_unlock_and_free_folios(wc->w_folios, wc->w_num_folios); } static void ocfs2_free_unwritten_list(struct inode *inode, struct list_head *head) { struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_unwritten_extent *ue = NULL, *tmp = NULL; list_for_each_entry_safe(ue, tmp, head, ue_node) { list_del(&ue->ue_node); spin_lock(&oi->ip_lock); list_del(&ue->ue_ip_node); spin_unlock(&oi->ip_lock); kfree(ue); } } static void ocfs2_free_write_ctxt(struct inode *inode, struct ocfs2_write_ctxt *wc) { ocfs2_free_unwritten_list(inode, &wc->w_unwritten_list); ocfs2_unlock_folios(wc); brelse(wc->w_di_bh); kfree(wc); } static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp, struct ocfs2_super *osb, loff_t pos, unsigned len, ocfs2_write_type_t type, struct buffer_head *di_bh) { u32 cend; struct ocfs2_write_ctxt *wc; wc = kzalloc(sizeof(struct ocfs2_write_ctxt), GFP_NOFS); if (!wc) return -ENOMEM; wc->w_cpos = pos >> osb->s_clustersize_bits; wc->w_first_new_cpos = UINT_MAX; cend = (pos + len - 1) >> osb->s_clustersize_bits; wc->w_clen = cend - wc->w_cpos + 1; get_bh(di_bh); wc->w_di_bh = di_bh; wc->w_type = type; if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits)) wc->w_large_pages = 1; else wc->w_large_pages = 0; ocfs2_init_dealloc_ctxt(&wc->w_dealloc); INIT_LIST_HEAD(&wc->w_unwritten_list); *wcp = wc; return 0; } /* * If a page has any new buffers, zero them out here, and mark them uptodate * and dirty so they'll be written out (in order to prevent uninitialised * block data from leaking). And clear the new bit. */ static void ocfs2_zero_new_buffers(struct folio *folio, size_t from, size_t to) { unsigned int block_start, block_end; struct buffer_head *head, *bh; BUG_ON(!folio_test_locked(folio)); head = folio_buffers(folio); if (!head) return; bh = head; block_start = 0; do { block_end = block_start + bh->b_size; if (buffer_new(bh)) { if (block_end > from && block_start < to) { if (!folio_test_uptodate(folio)) { unsigned start, end; start = max(from, block_start); end = min(to, block_end); folio_zero_segment(folio, start, end); set_buffer_uptodate(bh); } clear_buffer_new(bh); mark_buffer_dirty(bh); } } block_start = block_end; bh = bh->b_this_page; } while (bh != head); } /* * Only called when we have a failure during allocating write to write * zero's to the newly allocated region. 
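 *
 * The zeroing is done by ocfs2_zero_new_buffers() above, which only
 * touches buffers still marked new and clips to the written range
 * (and only zeroes when the folio is not already uptodate). For
 * example (1k blocks chosen purely for illustration), with
 * from == 1000 and to == 3000 the zeroed spans are
 *
 *	[1000, 1024), [1024, 2048) and [2048, 3000),
 *
 * while the fourth buffer of a 4k folio is left alone.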
*/ static void ocfs2_write_failure(struct inode *inode, struct ocfs2_write_ctxt *wc, loff_t user_pos, unsigned user_len) { int i; unsigned from = user_pos & (PAGE_SIZE - 1), to = user_pos + user_len; if (wc->w_target_folio) ocfs2_zero_new_buffers(wc->w_target_folio, from, to); for (i = 0; i < wc->w_num_folios; i++) { struct folio *folio = wc->w_folios[i]; if (folio && folio_buffers(folio)) { if (ocfs2_should_order_data(inode)) ocfs2_jbd2_inode_add_write(wc->w_handle, inode, user_pos, user_len); block_commit_write(folio, from, to); } } } static int ocfs2_prepare_folio_for_write(struct inode *inode, u64 *p_blkno, struct ocfs2_write_ctxt *wc, struct folio *folio, u32 cpos, loff_t user_pos, unsigned user_len, int new) { int ret; unsigned int map_from = 0, map_to = 0; unsigned int cluster_start, cluster_end; unsigned int user_data_from = 0, user_data_to = 0; ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos, &cluster_start, &cluster_end); /* treat the write as new if the a hole/lseek spanned across * the page boundary. */ new = new | ((i_size_read(inode) <= folio_pos(folio)) && (folio_pos(folio) <= user_pos)); if (folio == wc->w_target_folio) { map_from = user_pos & (PAGE_SIZE - 1); map_to = map_from + user_len; if (new) ret = ocfs2_map_folio_blocks(folio, p_blkno, inode, cluster_start, cluster_end, new); else ret = ocfs2_map_folio_blocks(folio, p_blkno, inode, map_from, map_to, new); if (ret) { mlog_errno(ret); goto out; } user_data_from = map_from; user_data_to = map_to; if (new) { map_from = cluster_start; map_to = cluster_end; } } else { /* * If we haven't allocated the new folio yet, we * shouldn't be writing it out without copying user * data. This is likely a math error from the caller. */ BUG_ON(!new); map_from = cluster_start; map_to = cluster_end; ret = ocfs2_map_folio_blocks(folio, p_blkno, inode, cluster_start, cluster_end, new); if (ret) { mlog_errno(ret); goto out; } } /* * Parts of newly allocated folios need to be zero'd. * * Above, we have also rewritten 'to' and 'from' - as far as * the rest of the function is concerned, the entire cluster * range inside of a folio needs to be written. * * We can skip this if the folio is uptodate - it's already * been zero'd from being read in as a hole. */ if (new && !folio_test_uptodate(folio)) ocfs2_clear_folio_regions(folio, OCFS2_SB(inode->i_sb), cpos, user_data_from, user_data_to); flush_dcache_folio(folio); out: return ret; } /* * This function will only grab one clusters worth of pages. */ static int ocfs2_grab_folios_for_write(struct address_space *mapping, struct ocfs2_write_ctxt *wc, u32 cpos, loff_t user_pos, unsigned user_len, int new, struct folio *mmap_folio) { int ret = 0, i; unsigned long start, target_index, end_index, index; struct inode *inode = mapping->host; loff_t last_byte; target_index = user_pos >> PAGE_SHIFT; /* * Figure out how many pages we'll be manipulating here. For * non allocating write, we just change the one * page. Otherwise, we'll need a whole clusters worth. If we're * writing past i_size, we only need enough pages to cover the * last page of the write. */ if (new) { wc->w_num_folios = ocfs2_pages_per_cluster(inode->i_sb); start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos); /* * We need the index *past* the last page we could possibly * touch. This is the page past the end of the write or * i_size, whichever is greater. 
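 *
 * For example (4k pages assumed just for the arithmetic): a write of
 * 2000 bytes at user_pos == 5000 into a 3000-byte file makes the
 * computation below come out as
 *
 *	last_byte = max(5000 + 2000, 3000) == 7000;
 *	end_index = ((7000 - 1) >> 12) + 1  == 2;
 *
 * so only page indices 0 and 1 can be touched, and w_num_folios is
 * clamped so that start + w_num_folios never exceeds 2.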
*/ last_byte = max(user_pos + user_len, i_size_read(inode)); BUG_ON(last_byte < 1); end_index = ((last_byte - 1) >> PAGE_SHIFT) + 1; if ((start + wc->w_num_folios) > end_index) wc->w_num_folios = end_index - start; } else { wc->w_num_folios = 1; start = target_index; } end_index = (user_pos + user_len - 1) >> PAGE_SHIFT; for(i = 0; i < wc->w_num_folios; i++) { index = start + i; if (index >= target_index && index <= end_index && wc->w_type == OCFS2_WRITE_MMAP) { /* * ocfs2_pagemkwrite() is a little different * and wants us to directly use the page * passed in. */ folio_lock(mmap_folio); /* Exit and let the caller retry */ if (mmap_folio->mapping != mapping) { WARN_ON(mmap_folio->mapping); folio_unlock(mmap_folio); ret = -EAGAIN; goto out; } folio_get(mmap_folio); wc->w_folios[i] = mmap_folio; wc->w_target_locked = true; } else if (index >= target_index && index <= end_index && wc->w_type == OCFS2_WRITE_DIRECT) { /* Direct write has no mapping page. */ wc->w_folios[i] = NULL; continue; } else { wc->w_folios[i] = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS); if (IS_ERR(wc->w_folios[i])) { ret = PTR_ERR(wc->w_folios[i]); mlog_errno(ret); goto out; } } folio_wait_stable(wc->w_folios[i]); if (index == target_index) wc->w_target_folio = wc->w_folios[i]; } out: if (ret) wc->w_target_locked = false; return ret; } /* * Prepare a single cluster for write one cluster into the file. */ static int ocfs2_write_cluster(struct address_space *mapping, u32 *phys, unsigned int new, unsigned int clear_unwritten, unsigned int should_zero, struct ocfs2_alloc_context *data_ac, struct ocfs2_alloc_context *meta_ac, struct ocfs2_write_ctxt *wc, u32 cpos, loff_t user_pos, unsigned user_len) { int ret, i; u64 p_blkno; struct inode *inode = mapping->host; struct ocfs2_extent_tree et; int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1); if (new) { u32 tmp_pos; /* * This is safe to call with the page locks - it won't take * any additional semaphores or cluster locks. */ tmp_pos = cpos; ret = ocfs2_add_inode_data(OCFS2_SB(inode->i_sb), inode, &tmp_pos, 1, !clear_unwritten, wc->w_di_bh, wc->w_handle, data_ac, meta_ac, NULL); /* * This shouldn't happen because we must have already * calculated the correct meta data allocation required. The * internal tree allocation code should know how to increase * transaction credits itself. * * If need be, we could handle -EAGAIN for a * RESTART_TRANS here. */ mlog_bug_on_msg(ret == -EAGAIN, "Inode %llu: EAGAIN return during allocation.\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); if (ret < 0) { mlog_errno(ret); goto out; } } else if (clear_unwritten) { ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), wc->w_di_bh); ret = ocfs2_mark_extent_written(inode, &et, wc->w_handle, cpos, 1, *phys, meta_ac, &wc->w_dealloc); if (ret < 0) { mlog_errno(ret); goto out; } } /* * The only reason this should fail is due to an inability to * find the extent added. */ ret = ocfs2_get_clusters(inode, cpos, phys, NULL, NULL); if (ret < 0) { mlog(ML_ERROR, "Get physical blkno failed for inode %llu, " "at logical cluster %u", (unsigned long long)OCFS2_I(inode)->ip_blkno, cpos); goto out; } BUG_ON(*phys == 0); p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *phys); if (!should_zero) p_blkno += (user_pos >> inode->i_sb->s_blocksize_bits) & (u64)(bpc - 1); for (i = 0; i < wc->w_num_folios; i++) { int tmpret; /* This is the direct io target page. 
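 *
 * There is no page-cache folio backing the direct i/o target, so the
 * block cursor below simply skips one page worth of blocks to keep the
 * remaining folios aligned; e.g. (values only illustrative) that is a
 * single block with 4k pages and 4k blocks, or 8 blocks with 512-byte
 * blocks.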
*/ if (wc->w_folios[i] == NULL) { p_blkno += (1 << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits)); continue; } tmpret = ocfs2_prepare_folio_for_write(inode, &p_blkno, wc, wc->w_folios[i], cpos, user_pos, user_len, should_zero); if (tmpret) { mlog_errno(tmpret); if (ret == 0) ret = tmpret; } } /* * We only have cleanup to do in case of allocating write. */ if (ret && new) ocfs2_write_failure(inode, wc, user_pos, user_len); out: return ret; } static int ocfs2_write_cluster_by_desc(struct address_space *mapping, struct ocfs2_alloc_context *data_ac, struct ocfs2_alloc_context *meta_ac, struct ocfs2_write_ctxt *wc, loff_t pos, unsigned len) { int ret, i; loff_t cluster_off; unsigned int local_len = len; struct ocfs2_write_cluster_desc *desc; struct ocfs2_super *osb = OCFS2_SB(mapping->host->i_sb); for (i = 0; i < wc->w_clen; i++) { desc = &wc->w_desc[i]; /* * We have to make sure that the total write passed in * doesn't extend past a single cluster. */ local_len = len; cluster_off = pos & (osb->s_clustersize - 1); if ((cluster_off + local_len) > osb->s_clustersize) local_len = osb->s_clustersize - cluster_off; ret = ocfs2_write_cluster(mapping, &desc->c_phys, desc->c_new, desc->c_clear_unwritten, desc->c_needs_zero, data_ac, meta_ac, wc, desc->c_cpos, pos, local_len); if (ret) { mlog_errno(ret); goto out; } len -= local_len; pos += local_len; } ret = 0; out: return ret; } /* * ocfs2_write_end() wants to know which parts of the target page it * should complete the write on. It's easiest to compute them ahead of * time when a more complete view of the write is available. */ static void ocfs2_set_target_boundaries(struct ocfs2_super *osb, struct ocfs2_write_ctxt *wc, loff_t pos, unsigned len, int alloc) { struct ocfs2_write_cluster_desc *desc; wc->w_target_from = pos & (PAGE_SIZE - 1); wc->w_target_to = wc->w_target_from + len; if (alloc == 0) return; /* * Allocating write - we may have different boundaries based * on page size and cluster size. * * NOTE: We can no longer compute one value from the other as * the actual write length and user provided length may be * different. */ if (wc->w_large_pages) { /* * We only care about the 1st and last cluster within * our range and whether they should be zero'd or not. Either * value may be extended out to the start/end of a * newly allocated cluster. */ desc = &wc->w_desc[0]; if (desc->c_needs_zero) ocfs2_figure_cluster_boundaries(osb, desc->c_cpos, &wc->w_target_from, NULL); desc = &wc->w_desc[wc->w_clen - 1]; if (desc->c_needs_zero) ocfs2_figure_cluster_boundaries(osb, desc->c_cpos, NULL, &wc->w_target_to); } else { wc->w_target_from = 0; wc->w_target_to = PAGE_SIZE; } } /* * Check if this extent is marked UNWRITTEN by direct io. If so, we need not to * do the zero work. And should not to clear UNWRITTEN since it will be cleared * by the direct io procedure. * If this is a new extent that allocated by direct io, we should mark it in * the ip_unwritten_list. */ static int ocfs2_unwritten_check(struct inode *inode, struct ocfs2_write_ctxt *wc, struct ocfs2_write_cluster_desc *desc) { struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_unwritten_extent *ue = NULL, *new = NULL; int ret = 0; if (!desc->c_needs_zero) return 0; retry: spin_lock(&oi->ip_lock); /* Needs not to zero no metter buffer or direct. The one who is zero * the cluster is doing zero. And he will clear unwritten after all * cluster io finished. 
*/ list_for_each_entry(ue, &oi->ip_unwritten_list, ue_ip_node) { if (desc->c_cpos == ue->ue_cpos) { BUG_ON(desc->c_new); desc->c_needs_zero = 0; desc->c_clear_unwritten = 0; goto unlock; } } if (wc->w_type != OCFS2_WRITE_DIRECT) goto unlock; if (new == NULL) { spin_unlock(&oi->ip_lock); new = kmalloc(sizeof(struct ocfs2_unwritten_extent), GFP_NOFS); if (new == NULL) { ret = -ENOMEM; goto out; } goto retry; } /* This direct write will doing zero. */ new->ue_cpos = desc->c_cpos; new->ue_phys = desc->c_phys; desc->c_clear_unwritten = 0; list_add_tail(&new->ue_ip_node, &oi->ip_unwritten_list); list_add_tail(&new->ue_node, &wc->w_unwritten_list); wc->w_unwritten_count++; new = NULL; unlock: spin_unlock(&oi->ip_lock); out: kfree(new); return ret; } /* * Populate each single-cluster write descriptor in the write context * with information about the i/o to be done. * * Returns the number of clusters that will have to be allocated, as * well as a worst case estimate of the number of extent records that * would have to be created during a write to an unwritten region. */ static int ocfs2_populate_write_desc(struct inode *inode, struct ocfs2_write_ctxt *wc, unsigned int *clusters_to_alloc, unsigned int *extents_to_split) { int ret; struct ocfs2_write_cluster_desc *desc; unsigned int num_clusters = 0; unsigned int ext_flags = 0; u32 phys = 0; int i; *clusters_to_alloc = 0; *extents_to_split = 0; for (i = 0; i < wc->w_clen; i++) { desc = &wc->w_desc[i]; desc->c_cpos = wc->w_cpos + i; if (num_clusters == 0) { /* * Need to look up the next extent record. */ ret = ocfs2_get_clusters(inode, desc->c_cpos, &phys, &num_clusters, &ext_flags); if (ret) { mlog_errno(ret); goto out; } /* We should already CoW the refcountd extent. */ BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED); /* * Assume worst case - that we're writing in * the middle of the extent. * * We can assume that the write proceeds from * left to right, in which case the extent * insert code is smart enough to coalesce the * next splits into the previous records created. */ if (ext_flags & OCFS2_EXT_UNWRITTEN) *extents_to_split = *extents_to_split + 2; } else if (phys) { /* * Only increment phys if it doesn't describe * a hole. */ phys++; } /* * If w_first_new_cpos is < UINT_MAX, we have a non-sparse * file that got extended. w_first_new_cpos tells us * where the newly allocated clusters are so we can * zero them. 
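 *
 * Worked example (the 8k cluster size and byte counts are only an
 * illustration): on a non-sparse volume with i_size == 10000,
 * ocfs2_expand_nonsparse_inode() sets
 *
 *	w_first_new_cpos = ocfs2_clusters_for_bytes(sb, 10000) == 2;
 *
 * cluster 1 already held the old EOF, so only descriptors with
 * c_cpos >= 2 describe freshly allocated clusters and get c_needs_zero.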
*/ if (desc->c_cpos >= wc->w_first_new_cpos) { BUG_ON(phys == 0); desc->c_needs_zero = 1; } desc->c_phys = phys; if (phys == 0) { desc->c_new = 1; desc->c_needs_zero = 1; desc->c_clear_unwritten = 1; *clusters_to_alloc = *clusters_to_alloc + 1; } if (ext_flags & OCFS2_EXT_UNWRITTEN) { desc->c_clear_unwritten = 1; desc->c_needs_zero = 1; } ret = ocfs2_unwritten_check(inode, wc, desc); if (ret) { mlog_errno(ret); goto out; } num_clusters--; } ret = 0; out: return ret; } static int ocfs2_write_begin_inline(struct address_space *mapping, struct inode *inode, struct ocfs2_write_ctxt *wc) { int ret; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct folio *folio; handle_t *handle; struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out; } folio = __filemap_get_folio(mapping, 0, FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS); if (IS_ERR(folio)) { ocfs2_commit_trans(osb, handle); ret = PTR_ERR(folio); mlog_errno(ret); goto out; } /* * If we don't set w_num_folios then this folio won't get unlocked * and freed on cleanup of the write context. */ wc->w_target_folio = folio; wc->w_folios[0] = folio; wc->w_num_folios = 1; ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { ocfs2_commit_trans(osb, handle); mlog_errno(ret); goto out; } if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)) ocfs2_set_inode_data_inline(inode, di); if (!folio_test_uptodate(folio)) { ret = ocfs2_read_inline_data(inode, folio, wc->w_di_bh); if (ret) { ocfs2_commit_trans(osb, handle); goto out; } } wc->w_handle = handle; out: return ret; } int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size) { struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; if (new_size <= le16_to_cpu(di->id2.i_data.id_count)) return 1; return 0; } static int ocfs2_try_to_write_inline_data(struct address_space *mapping, struct inode *inode, loff_t pos, size_t len, struct folio *mmap_folio, struct ocfs2_write_ctxt *wc) { int ret, written = 0; loff_t end = pos + len; struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_dinode *di = NULL; trace_ocfs2_try_to_write_inline_data((unsigned long long)oi->ip_blkno, len, (unsigned long long)pos, oi->ip_dyn_features); /* * Handle inodes which already have inline data 1st. */ if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) { if (mmap_folio == NULL && ocfs2_size_fits_inline_data(wc->w_di_bh, end)) goto do_inline_write; /* * The write won't fit - we have to give this inode an * inline extent list now. */ ret = ocfs2_convert_inline_data_to_extents(inode, wc->w_di_bh); if (ret) mlog_errno(ret); goto out; } /* * Check whether the inode can accept inline data. */ if (oi->ip_clusters != 0 || i_size_read(inode) != 0) return 0; /* * Check whether the write can fit. */ di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; if (mmap_folio || end > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) return 0; do_inline_write: ret = ocfs2_write_begin_inline(mapping, inode, wc); if (ret) { mlog_errno(ret); goto out; } /* * This signals to the caller that the data can be written * inline. */ written = 1; out: return written ? written : ret; } /* * This function only does anything for file systems which can't * handle sparse files. * * What we want to do here is fill in any hole between the current end * of allocation and the end of our write. 
That way the rest of the * write path can treat it as an non-allocating write, which has no * special case code for sparse/nonsparse files. */ static int ocfs2_expand_nonsparse_inode(struct inode *inode, struct buffer_head *di_bh, loff_t pos, unsigned len, struct ocfs2_write_ctxt *wc) { int ret; loff_t newsize = pos + len; BUG_ON(ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))); if (newsize <= i_size_read(inode)) return 0; ret = ocfs2_extend_no_holes(inode, di_bh, newsize, pos); if (ret) mlog_errno(ret); /* There is no wc if this is call from direct. */ if (wc) wc->w_first_new_cpos = ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode)); return ret; } static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh, loff_t pos) { int ret = 0; BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))); if (pos > i_size_read(inode)) ret = ocfs2_zero_extend(inode, di_bh, pos); return ret; } int ocfs2_write_begin_nolock(struct address_space *mapping, loff_t pos, unsigned len, ocfs2_write_type_t type, struct folio **foliop, void **fsdata, struct buffer_head *di_bh, struct folio *mmap_folio) { int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS; unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0; struct ocfs2_write_ctxt *wc; struct inode *inode = mapping->host; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_dinode *di; struct ocfs2_alloc_context *data_ac = NULL; struct ocfs2_alloc_context *meta_ac = NULL; handle_t *handle; struct ocfs2_extent_tree et; int try_free = 1, ret1; try_again: ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, type, di_bh); if (ret) { mlog_errno(ret); return ret; } if (ocfs2_supports_inline_data(osb)) { ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len, mmap_folio, wc); if (ret == 1) { ret = 0; goto success; } if (ret < 0) { mlog_errno(ret); goto out; } } /* Direct io change i_size late, should not zero tail here. */ if (type != OCFS2_WRITE_DIRECT) { if (ocfs2_sparse_alloc(osb)) ret = ocfs2_zero_tail(inode, di_bh, pos); else ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos, len, wc); if (ret) { mlog_errno(ret); goto out; } } ret = ocfs2_check_range_for_refcount(inode, pos, len); if (ret < 0) { mlog_errno(ret); goto out; } else if (ret == 1) { clusters_need = wc->w_clen; ret = ocfs2_refcount_cow(inode, di_bh, wc->w_cpos, wc->w_clen, UINT_MAX); if (ret) { mlog_errno(ret); goto out; } } ret = ocfs2_populate_write_desc(inode, wc, &clusters_to_alloc, &extents_to_split); if (ret) { mlog_errno(ret); goto out; } clusters_need += clusters_to_alloc; di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; trace_ocfs2_write_begin_nolock( (unsigned long long)OCFS2_I(inode)->ip_blkno, (long long)i_size_read(inode), le32_to_cpu(di->i_clusters), pos, len, type, mmap_folio, clusters_to_alloc, extents_to_split); /* * We set w_target_from, w_target_to here so that * ocfs2_write_end() knows which range in the target page to * write out. An allocation requires that we write the entire * cluster range. */ if (clusters_to_alloc || extents_to_split) { /* * XXX: We are stretching the limits of * ocfs2_lock_allocators(). It greatly over-estimates * the work to be done. 
*/ ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), wc->w_di_bh); ret = ocfs2_lock_allocators(inode, &et, clusters_to_alloc, extents_to_split, &data_ac, &meta_ac); if (ret) { mlog_errno(ret); goto out; } if (data_ac) data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv; credits = ocfs2_calc_extend_credits(inode->i_sb, &di->id2.i_list); } else if (type == OCFS2_WRITE_DIRECT) /* direct write needs not to start trans if no extents alloc. */ goto success; /* * We have to zero sparse allocated clusters, unwritten extent clusters, * and non-sparse clusters we just extended. For non-sparse writes, * we know zeros will only be needed in the first and/or last cluster. */ if (wc->w_clen && (wc->w_desc[0].c_needs_zero || wc->w_desc[wc->w_clen - 1].c_needs_zero)) cluster_of_pages = 1; else cluster_of_pages = 0; ocfs2_set_target_boundaries(osb, wc, pos, len, cluster_of_pages); handle = ocfs2_start_trans(osb, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto out; } wc->w_handle = handle; if (clusters_to_alloc) { ret = dquot_alloc_space_nodirty(inode, ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc)); if (ret) goto out_commit; } ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto out_quota; } /* * Fill our folio array first. That way we've grabbed enough so * that we can zero and flush if we error after adding the * extent. */ ret = ocfs2_grab_folios_for_write(mapping, wc, wc->w_cpos, pos, len, cluster_of_pages, mmap_folio); if (ret) { /* * ocfs2_grab_folios_for_write() returns -EAGAIN if it * could not lock the target folio. In this case, we exit * with no error and no target folio. This will trigger * the caller, page_mkwrite(), to re-try the operation. */ if (type == OCFS2_WRITE_MMAP && ret == -EAGAIN) { BUG_ON(wc->w_target_folio); ret = 0; goto out_quota; } mlog_errno(ret); goto out_quota; } ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos, len); if (ret) { mlog_errno(ret); goto out_quota; } if (data_ac) ocfs2_free_alloc_context(data_ac); if (meta_ac) ocfs2_free_alloc_context(meta_ac); success: if (foliop) *foliop = wc->w_target_folio; *fsdata = wc; return 0; out_quota: if (clusters_to_alloc) dquot_free_space(inode, ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc)); out_commit: ocfs2_commit_trans(osb, handle); out: /* * The mmapped page won't be unlocked in ocfs2_free_write_ctxt(), * even in case of error here like ENOSPC and ENOMEM. So, we need * to unlock the target page manually to prevent deadlocks when * retrying again on ENOSPC, or when returning non-VM_FAULT_LOCKED * to VM code. */ if (wc->w_target_locked) folio_unlock(mmap_folio); ocfs2_free_write_ctxt(inode, wc); if (data_ac) { ocfs2_free_alloc_context(data_ac); data_ac = NULL; } if (meta_ac) { ocfs2_free_alloc_context(meta_ac); meta_ac = NULL; } if (ret == -ENOSPC && try_free) { /* * Try to free some truncate log so that we can have enough * clusters to allocate. */ try_free = 0; ret1 = ocfs2_try_to_free_truncate_log(osb, clusters_need); if (ret1 == 1) goto try_again; if (ret1 < 0) mlog_errno(ret1); } return ret; } static int ocfs2_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, struct folio **foliop, void **fsdata) { int ret; struct buffer_head *di_bh = NULL; struct inode *inode = mapping->host; ret = ocfs2_inode_lock(inode, &di_bh, 1); if (ret) { mlog_errno(ret); return ret; } /* * Take alloc sem here to prevent concurrent lookups. 
That way * the mapping, zeroing and tree manipulation within * ocfs2_write() will be safe against ->read_folio(). This * should also serve to lock out allocation from a shared * writeable region. */ down_write(&OCFS2_I(inode)->ip_alloc_sem); ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_BUFFER, foliop, fsdata, di_bh, NULL); if (ret) { mlog_errno(ret); goto out_fail; } brelse(di_bh); return 0; out_fail: up_write(&OCFS2_I(inode)->ip_alloc_sem); brelse(di_bh); ocfs2_inode_unlock(inode, 1); return ret; } static void ocfs2_write_end_inline(struct inode *inode, loff_t pos, unsigned len, unsigned *copied, struct ocfs2_dinode *di, struct ocfs2_write_ctxt *wc) { if (unlikely(*copied < len)) { if (!folio_test_uptodate(wc->w_target_folio)) { *copied = 0; return; } } memcpy_from_folio(di->id2.i_data.id_data + pos, wc->w_target_folio, pos, *copied); trace_ocfs2_write_end_inline( (unsigned long long)OCFS2_I(inode)->ip_blkno, (unsigned long long)pos, *copied, le16_to_cpu(di->id2.i_data.id_count), le16_to_cpu(di->i_dyn_features)); } int ocfs2_write_end_nolock(struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, void *fsdata) { int i, ret; size_t from, to, start = pos & (PAGE_SIZE - 1); struct inode *inode = mapping->host; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_write_ctxt *wc = fsdata; struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; handle_t *handle = wc->w_handle; BUG_ON(!list_empty(&wc->w_unwritten_list)); if (handle) { ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { copied = ret; mlog_errno(ret); goto out; } } if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { ocfs2_write_end_inline(inode, pos, len, &copied, di, wc); goto out_write_size; } if (unlikely(copied < len) && wc->w_target_folio) { loff_t new_isize; if (!folio_test_uptodate(wc->w_target_folio)) copied = 0; new_isize = max_t(loff_t, i_size_read(inode), pos + copied); if (new_isize > folio_pos(wc->w_target_folio)) ocfs2_zero_new_buffers(wc->w_target_folio, start+copied, start+len); else { /* * When folio is fully beyond new isize (data copy * failed), do not bother zeroing the folio. Invalidate * it instead so that writeback does not get confused * put page & buffer dirty bits into inconsistent * state. */ block_invalidate_folio(wc->w_target_folio, 0, folio_size(wc->w_target_folio)); } } if (wc->w_target_folio) flush_dcache_folio(wc->w_target_folio); for (i = 0; i < wc->w_num_folios; i++) { struct folio *folio = wc->w_folios[i]; /* This is the direct io target folio */ if (folio == NULL) continue; if (folio == wc->w_target_folio) { from = wc->w_target_from; to = wc->w_target_to; BUG_ON(from > folio_size(folio) || to > folio_size(folio) || to < from); } else { /* * Pages adjacent to the target (if any) imply * a hole-filling write in which case we want * to flush their entire range. */ from = 0; to = folio_size(folio); } if (folio_buffers(folio)) { if (handle && ocfs2_should_order_data(inode)) { loff_t start_byte = folio_pos(folio) + from; loff_t length = to - from; ocfs2_jbd2_inode_add_write(handle, inode, start_byte, length); } block_commit_write(folio, from, to); } } out_write_size: /* Direct io do not update i_size here. 
*/ if (wc->w_type != OCFS2_WRITE_DIRECT) { pos += copied; if (pos > i_size_read(inode)) { i_size_write(inode, pos); mark_inode_dirty(inode); } inode->i_blocks = ocfs2_inode_sector_count(inode); di->i_size = cpu_to_le64((u64)i_size_read(inode)); inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); di->i_mtime = di->i_ctime = cpu_to_le64(inode_get_mtime_sec(inode)); di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode)); if (handle) ocfs2_update_inode_fsync_trans(handle, inode, 1); } if (handle) ocfs2_journal_dirty(handle, wc->w_di_bh); out: /* unlock pages before dealloc since it needs acquiring j_trans_barrier * lock, or it will cause a deadlock since journal commit threads holds * this lock and will ask for the page lock when flushing the data. * put it here to preserve the unlock order. */ ocfs2_unlock_folios(wc); if (handle) ocfs2_commit_trans(osb, handle); ocfs2_run_deallocs(osb, &wc->w_dealloc); brelse(wc->w_di_bh); kfree(wc); return copied; } static int ocfs2_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct folio *folio, void *fsdata) { int ret; struct inode *inode = mapping->host; ret = ocfs2_write_end_nolock(mapping, pos, len, copied, fsdata); up_write(&OCFS2_I(inode)->ip_alloc_sem); ocfs2_inode_unlock(inode, 1); return ret; } struct ocfs2_dio_write_ctxt { struct list_head dw_zero_list; unsigned dw_zero_count; int dw_orphaned; pid_t dw_writer_pid; }; static struct ocfs2_dio_write_ctxt * ocfs2_dio_alloc_write_ctx(struct buffer_head *bh, int *alloc) { struct ocfs2_dio_write_ctxt *dwc = NULL; if (bh->b_private) return bh->b_private; dwc = kmalloc(sizeof(struct ocfs2_dio_write_ctxt), GFP_NOFS); if (dwc == NULL) return NULL; INIT_LIST_HEAD(&dwc->dw_zero_list); dwc->dw_zero_count = 0; dwc->dw_orphaned = 0; dwc->dw_writer_pid = task_pid_nr(current); bh->b_private = dwc; *alloc = 1; return dwc; } static void ocfs2_dio_free_write_ctx(struct inode *inode, struct ocfs2_dio_write_ctxt *dwc) { ocfs2_free_unwritten_list(inode, &dwc->dw_zero_list); kfree(dwc); } /* * TODO: Make this into a generic get_blocks function. * * From do_direct_io in direct-io.c: * "So what we do is to permit the ->get_blocks function to populate * bh.b_size with the size of IO which is permitted at this offset and * this i_blkbits." * * This function is called directly from get_more_blocks in direct-io.c. * * called like this: dio->get_blocks(dio->inode, fs_startblk, * fs_count, map_bh, dio->rw == WRITE); */ static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_write_ctxt *wc; struct ocfs2_write_cluster_desc *desc = NULL; struct ocfs2_dio_write_ctxt *dwc = NULL; struct buffer_head *di_bh = NULL; u64 p_blkno; unsigned int i_blkbits = inode->i_sb->s_blocksize_bits; loff_t pos = iblock << i_blkbits; sector_t endblk = (i_size_read(inode) - 1) >> i_blkbits; unsigned len, total_len = bh_result->b_size; int ret = 0, first_get_block = 0; len = osb->s_clustersize - (pos & (osb->s_clustersize - 1)); len = min(total_len, len); /* * bh_result->b_size is count in get_more_blocks according to write * "pos" and "end", we need map twice to return different buffer state: * 1. area in file size, not set NEW; * 2. area out file size, set NEW. 
* * iblock endblk * |--------|---------|---------|--------- * |<-------area in file------->| */ if ((iblock <= endblk) && ((iblock + ((len - 1) >> i_blkbits)) > endblk)) len = (endblk - iblock + 1) << i_blkbits; mlog(0, "get block of %lu at %llu:%u req %u\n", inode->i_ino, pos, len, total_len); /* * Because we need to change file size in ocfs2_dio_end_io_write(), or * we may need to add it to orphan dir. So can not fall to fast path * while file size will be changed. */ if (pos + total_len <= i_size_read(inode)) { /* This is the fast path for re-write. */ ret = ocfs2_lock_get_block(inode, iblock, bh_result, create); if (buffer_mapped(bh_result) && !buffer_new(bh_result) && ret == 0) goto out; /* Clear state set by ocfs2_get_block. */ bh_result->b_state = 0; } dwc = ocfs2_dio_alloc_write_ctx(bh_result, &first_get_block); if (unlikely(dwc == NULL)) { ret = -ENOMEM; mlog_errno(ret); goto out; } if (ocfs2_clusters_for_bytes(inode->i_sb, pos + total_len) > ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode)) && !dwc->dw_orphaned) { /* * when we are going to alloc extents beyond file size, add the * inode to orphan dir, so we can recall those spaces when * system crashed during write. */ ret = ocfs2_add_inode_to_orphan(osb, inode); if (ret < 0) { mlog_errno(ret); goto out; } dwc->dw_orphaned = 1; } ret = ocfs2_inode_lock(inode, &di_bh, 1); if (ret) { mlog_errno(ret); goto out; } down_write(&oi->ip_alloc_sem); if (first_get_block) { if (ocfs2_sparse_alloc(osb)) ret = ocfs2_zero_tail(inode, di_bh, pos); else ret = ocfs2_expand_nonsparse_inode(inode, di_bh, pos, total_len, NULL); if (ret < 0) { mlog_errno(ret); goto unlock; } } ret = ocfs2_write_begin_nolock(inode->i_mapping, pos, len, OCFS2_WRITE_DIRECT, NULL, (void **)&wc, di_bh, NULL); if (ret) { mlog_errno(ret); goto unlock; } desc = &wc->w_desc[0]; p_blkno = ocfs2_clusters_to_blocks(inode->i_sb, desc->c_phys); BUG_ON(p_blkno == 0); p_blkno += iblock & (u64)(ocfs2_clusters_to_blocks(inode->i_sb, 1) - 1); map_bh(bh_result, inode->i_sb, p_blkno); bh_result->b_size = len; if (desc->c_needs_zero) set_buffer_new(bh_result); if (iblock > endblk) set_buffer_new(bh_result); /* May sleep in end_io. It should not happen in a irq context. So defer * it to dio work queue. */ set_buffer_defer_completion(bh_result); if (!list_empty(&wc->w_unwritten_list)) { struct ocfs2_unwritten_extent *ue = NULL; ue = list_first_entry(&wc->w_unwritten_list, struct ocfs2_unwritten_extent, ue_node); BUG_ON(ue->ue_cpos != desc->c_cpos); /* The physical address may be 0, fill it. */ ue->ue_phys = desc->c_phys; list_splice_tail_init(&wc->w_unwritten_list, &dwc->dw_zero_list); dwc->dw_zero_count += wc->w_unwritten_count; } ret = ocfs2_write_end_nolock(inode->i_mapping, pos, len, len, wc); BUG_ON(ret != len); ret = 0; unlock: up_write(&oi->ip_alloc_sem); ocfs2_inode_unlock(inode, 1); brelse(di_bh); out: return ret; } static int ocfs2_dio_end_io_write(struct inode *inode, struct ocfs2_dio_write_ctxt *dwc, loff_t offset, ssize_t bytes) { struct ocfs2_cached_dealloc_ctxt dealloc; struct ocfs2_extent_tree et; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_unwritten_extent *ue = NULL; struct buffer_head *di_bh = NULL; struct ocfs2_dinode *di; struct ocfs2_alloc_context *data_ac = NULL; struct ocfs2_alloc_context *meta_ac = NULL; handle_t *handle = NULL; loff_t end = offset + bytes; int ret = 0, credits = 0; ocfs2_init_dealloc_ctxt(&dealloc); /* We do clear unwritten, delete orphan, change i_size here. 
If neither * of these happen, we can skip all this. */ if (list_empty(&dwc->dw_zero_list) && end <= i_size_read(inode) && !dwc->dw_orphaned) goto out; ret = ocfs2_inode_lock(inode, &di_bh, 1); if (ret < 0) { mlog_errno(ret); goto out; } down_write(&oi->ip_alloc_sem); /* Delete orphan before acquire i_rwsem. */ if (dwc->dw_orphaned) { BUG_ON(dwc->dw_writer_pid != task_pid_nr(current)); end = end > i_size_read(inode) ? end : 0; ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh, !!end, end); if (ret < 0) mlog_errno(ret); } di = (struct ocfs2_dinode *)di_bh->b_data; ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh); /* Attach dealloc with extent tree in case that we may reuse extents * which are already unlinked from current extent tree due to extent * rotation and merging. */ et.et_dealloc = &dealloc; ret = ocfs2_lock_allocators(inode, &et, 0, dwc->dw_zero_count*2, &data_ac, &meta_ac); if (ret) { mlog_errno(ret); goto unlock; } credits = ocfs2_calc_extend_credits(inode->i_sb, &di->id2.i_list); handle = ocfs2_start_trans(osb, credits); if (IS_ERR(handle)) { ret = PTR_ERR(handle); mlog_errno(ret); goto unlock; } ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, OCFS2_JOURNAL_ACCESS_WRITE); if (ret) { mlog_errno(ret); goto commit; } list_for_each_entry(ue, &dwc->dw_zero_list, ue_node) { ret = ocfs2_assure_trans_credits(handle, credits); if (ret < 0) { mlog_errno(ret); break; } ret = ocfs2_mark_extent_written(inode, &et, handle, ue->ue_cpos, 1, ue->ue_phys, meta_ac, &dealloc); if (ret < 0) { mlog_errno(ret); break; } } if (end > i_size_read(inode)) { ret = ocfs2_set_inode_size(handle, inode, di_bh, end); if (ret < 0) mlog_errno(ret); } commit: ocfs2_commit_trans(osb, handle); unlock: up_write(&oi->ip_alloc_sem); ocfs2_inode_unlock(inode, 1); brelse(di_bh); out: if (data_ac) ocfs2_free_alloc_context(data_ac); if (meta_ac) ocfs2_free_alloc_context(meta_ac); ocfs2_run_deallocs(osb, &dealloc); ocfs2_dio_free_write_ctx(inode, dwc); return ret; } /* * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're * particularly interested in the aio/dio case. We use the rw_lock DLM lock * to protect io on one node from truncation on another. */ static int ocfs2_dio_end_io(struct kiocb *iocb, loff_t offset, ssize_t bytes, void *private) { struct inode *inode = file_inode(iocb->ki_filp); int level; int ret = 0; /* this io's submitter should not have unlocked this before we could */ BUG_ON(!ocfs2_iocb_is_rw_locked(iocb)); if (bytes <= 0) mlog_ratelimited(ML_ERROR, "Direct IO failed, bytes = %lld", (long long)bytes); if (private) { if (bytes > 0) ret = ocfs2_dio_end_io_write(inode, private, offset, bytes); else ocfs2_dio_free_write_ctx(inode, private); } ocfs2_iocb_clear_rw_locked(iocb); level = ocfs2_iocb_rw_locked_level(iocb); ocfs2_rw_unlock(inode, level); return ret; } static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); get_block_t *get_block; /* * Fallback to buffered I/O if we see an inode without * extents. */ if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) return 0; /* Fallback to buffered I/O if we do not support append dio. 
*/ if (iocb->ki_pos + iter->count > i_size_read(inode) && !ocfs2_supports_append_dio(osb)) return 0; if (iov_iter_rw(iter) == READ) get_block = ocfs2_lock_get_block; else get_block = ocfs2_dio_wr_get_block; return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter, get_block, ocfs2_dio_end_io, 0); } const struct address_space_operations ocfs2_aops = { .dirty_folio = block_dirty_folio, .read_folio = ocfs2_read_folio, .readahead = ocfs2_readahead, .writepages = ocfs2_writepages, .write_begin = ocfs2_write_begin, .write_end = ocfs2_write_end, .bmap = ocfs2_bmap, .direct_IO = ocfs2_direct_IO, .invalidate_folio = block_invalidate_folio, .release_folio = ocfs2_release_folio, .migrate_folio = buffer_migrate_folio, .is_partially_uptodate = block_is_partially_uptodate, .error_remove_folio = generic_error_remove_folio, };
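/*
 * Illustrative sketch (not part of the ocfs2 code above): the two-step length
 * clamping performed by ocfs2_dio_wr_get_block(), reproduced as a standalone
 * user-space program so the arithmetic can be checked in isolation. The block
 * size, cluster size, i_size and request values below are hypothetical
 * example numbers, not values taken from this file.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int i_blkbits = 12;               /* assumed 4K blocks */
	uint64_t clustersize = 1024 * 1024;        /* assumed 1M clusters */
	uint64_t i_size = 5 * 1024 * 1024 + 4096;  /* assumed file size */
	uint64_t iblock = 1280;                    /* block being mapped */
	uint64_t total_len = 256 * 1024;           /* requested b_size */

	uint64_t pos = iblock << i_blkbits;
	uint64_t endblk = (i_size - 1) >> i_blkbits;

	/* Step 1: never let one mapping cross a cluster boundary. */
	uint64_t len = clustersize - (pos & (clustersize - 1));
	if (len > total_len)
		len = total_len;

	/*
	 * Step 2: clamp to i_size so that blocks past EOF are mapped in a
	 * separate call and can be flagged new (buffer_new) by the caller.
	 */
	if (iblock <= endblk && iblock + ((len - 1) >> i_blkbits) > endblk)
		len = (endblk - iblock + 1) << i_blkbits;

	printf("pos=%llu endblk=%llu mapped len=%llu\n",
	       (unsigned long long)pos, (unsigned long long)endblk,
	       (unsigned long long)len);
	return 0;
}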
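/*
 * Illustrative sketch (a hedged example, not part of the kthread helper code
 * that follows): the typical caller-side pattern for the kthread API
 * implemented below. It assumes a kernel build context; the thread function,
 * its sleep interval and the "example" thread name are hypothetical, and only
 * the kthread_run()/kthread_should_stop()/kthread_stop() calls come from the
 * documented API.
 */
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static int example_thread_fn(void *data)
{
	/* Loop until kthread_stop() sets KTHREAD_SHOULD_STOP for this task. */
	while (!kthread_should_stop()) {
		/* ... perform one unit of work ... */
		msleep_interruptible(1000);
	}
	/* This value is handed back to the kthread_stop() caller. */
	return 0;
}

static int example_use(void)
{
	struct task_struct *task;

	/* kthread_run() is kthread_create() followed by wake_up_process(). */
	task = kthread_run(example_thread_fn, NULL, "example/%u", 0);
	if (IS_ERR(task))
		return PTR_ERR(task);

	/* ... later, when the thread is no longer needed ... */
	return kthread_stop(task);
}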
// SPDX-License-Identifier: GPL-2.0-only /* Kernel thread helper functions. * Copyright (C) 2004 IBM Corporation, Rusty Russell. * Copyright (C) 2009 Red Hat, Inc. * * Creation is done via kthreadd, so that we get a clean environment * even if we're invoked from userspace (think modprobe, hotplug cpu, * etc.). */ #include <uapi/linux/sched/types.h> #include <linux/mm.h> #include <linux/mmu_context.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/sched/task.h> #include <linux/kthread.h> #include <linux/completion.h> #include <linux/err.h> #include <linux/cgroup.h> #include <linux/cpuset.h> #include <linux/unistd.h> #include <linux/file.h> #include <linux/export.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/freezer.h> #include <linux/ptrace.h> #include <linux/uaccess.h> #include <linux/numa.h> #include <linux/sched/isolation.h> #include <trace/events/sched.h> static DEFINE_SPINLOCK(kthread_create_lock); static LIST_HEAD(kthread_create_list); struct task_struct *kthreadd_task; static LIST_HEAD(kthreads_hotplug); static DEFINE_MUTEX(kthreads_hotplug_lock); struct kthread_create_info { /* Information passed to kthread() from kthreadd. */ char *full_name; int (*threadfn)(void *data); void *data; int node; /* Result passed back to kthread_create() from kthreadd. */ struct task_struct *result; struct completion *done; struct list_head list; }; struct kthread { unsigned long flags; unsigned int cpu; unsigned int node; int started; int result; int (*threadfn)(void *); void *data; struct completion parked; struct completion exited; #ifdef CONFIG_BLK_CGROUP struct cgroup_subsys_state *blkcg_css; #endif /* To store the full name if task comm is truncated. */ char *full_name; struct task_struct *task; struct list_head hotplug_node; struct cpumask *preferred_affinity; }; enum KTHREAD_BITS { KTHREAD_IS_PER_CPU = 0, KTHREAD_SHOULD_STOP, KTHREAD_SHOULD_PARK, }; static inline struct kthread *to_kthread(struct task_struct *k) { WARN_ON(!(k->flags & PF_KTHREAD)); return k->worker_private; } /* * Variant of to_kthread() that doesn't assume @p is a kthread. * * Per construction; when: * * (p->flags & PF_KTHREAD) && p->worker_private * * the task is both a kthread and struct kthread is persistent. However * PF_KTHREAD on its own is not, kernel_thread() can exec() (See umh.c and * begin_new_exec()).
*/ static inline struct kthread *__to_kthread(struct task_struct *p) { void *kthread = p->worker_private; if (kthread && !(p->flags & PF_KTHREAD)) kthread = NULL; return kthread; } void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk) { struct kthread *kthread = to_kthread(tsk); if (!kthread || !kthread->full_name) { strscpy(buf, tsk->comm, buf_size); return; } strscpy_pad(buf, kthread->full_name, buf_size); } bool set_kthread_struct(struct task_struct *p) { struct kthread *kthread; if (WARN_ON_ONCE(to_kthread(p))) return false; kthread = kzalloc(sizeof(*kthread), GFP_KERNEL); if (!kthread) return false; init_completion(&kthread->exited); init_completion(&kthread->parked); INIT_LIST_HEAD(&kthread->hotplug_node); p->vfork_done = &kthread->exited; kthread->task = p; kthread->node = tsk_fork_get_node(current); p->worker_private = kthread; return true; } void free_kthread_struct(struct task_struct *k) { struct kthread *kthread; /* * Can be NULL if kmalloc() in set_kthread_struct() failed. */ kthread = to_kthread(k); if (!kthread) return; #ifdef CONFIG_BLK_CGROUP WARN_ON_ONCE(kthread->blkcg_css); #endif k->worker_private = NULL; kfree(kthread->full_name); kfree(kthread); } /** * kthread_should_stop - should this kthread return now? * * When someone calls kthread_stop() on your kthread, it will be woken * and this will return true. You should then return, and your return * value will be passed through to kthread_stop(). */ bool kthread_should_stop(void) { return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags); } EXPORT_SYMBOL(kthread_should_stop); static bool __kthread_should_park(struct task_struct *k) { return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags); } /** * kthread_should_park - should this kthread park now? * * When someone calls kthread_park() on your kthread, it will be woken * and this will return true. You should then do the necessary * cleanup and call kthread_parkme() * * Similar to kthread_should_stop(), but this keeps the thread alive * and in a park position. kthread_unpark() "restarts" the thread and * calls the thread function again. */ bool kthread_should_park(void) { return __kthread_should_park(current); } EXPORT_SYMBOL_GPL(kthread_should_park); bool kthread_should_stop_or_park(void) { struct kthread *kthread = __to_kthread(current); if (!kthread) return false; return kthread->flags & (BIT(KTHREAD_SHOULD_STOP) | BIT(KTHREAD_SHOULD_PARK)); } /** * kthread_freezable_should_stop - should this freezable kthread return now? * @was_frozen: optional out parameter, indicates whether %current was frozen * * kthread_should_stop() for freezable kthreads, which will enter * refrigerator if necessary. This function is safe from kthread_stop() / * freezer deadlock and freezable kthreads should use this function instead * of calling try_to_freeze() directly. */ bool kthread_freezable_should_stop(bool *was_frozen) { bool frozen = false; might_sleep(); if (unlikely(freezing(current))) frozen = __refrigerator(true); if (was_frozen) *was_frozen = frozen; return kthread_should_stop(); } EXPORT_SYMBOL_GPL(kthread_freezable_should_stop); /** * kthread_func - return the function specified on kthread creation * @task: kthread task in question * * Returns NULL if the task is not a kthread. 
*/ void *kthread_func(struct task_struct *task) { struct kthread *kthread = __to_kthread(task); if (kthread) return kthread->threadfn; return NULL; } EXPORT_SYMBOL_GPL(kthread_func); /** * kthread_data - return data value specified on kthread creation * @task: kthread task in question * * Return the data value specified when kthread @task was created. * The caller is responsible for ensuring the validity of @task when * calling this function. */ void *kthread_data(struct task_struct *task) { return to_kthread(task)->data; } EXPORT_SYMBOL_GPL(kthread_data); /** * kthread_probe_data - speculative version of kthread_data() * @task: possible kthread task in question * * @task could be a kthread task. Return the data value specified when it * was created if accessible. If @task isn't a kthread task or its data is * inaccessible for any reason, %NULL is returned. This function requires * that @task itself is safe to dereference. */ void *kthread_probe_data(struct task_struct *task) { struct kthread *kthread = __to_kthread(task); void *data = NULL; if (kthread) copy_from_kernel_nofault(&data, &kthread->data, sizeof(data)); return data; } static void __kthread_parkme(struct kthread *self) { for (;;) { /* * TASK_PARKED is a special state; we must serialize against * possible pending wakeups to avoid store-store collisions on * task->state. * * Such a collision might possibly result in the task state * changin from TASK_PARKED and us failing the * wait_task_inactive() in kthread_park(). */ set_special_state(TASK_PARKED); if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags)) break; /* * Thread is going to call schedule(), do not preempt it, * or the caller of kthread_park() may spend more time in * wait_task_inactive(). */ preempt_disable(); complete(&self->parked); schedule_preempt_disabled(); preempt_enable(); } __set_current_state(TASK_RUNNING); } void kthread_parkme(void) { __kthread_parkme(to_kthread(current)); } EXPORT_SYMBOL_GPL(kthread_parkme); /** * kthread_exit - Cause the current kthread return @result to kthread_stop(). * @result: The integer value to return to kthread_stop(). * * While kthread_exit can be called directly, it exists so that * functions which do some additional work in non-modular code such as * module_put_and_kthread_exit can be implemented. * * Does not return. */ void __noreturn kthread_exit(long result) { struct kthread *kthread = to_kthread(current); kthread->result = result; if (!list_empty(&kthread->hotplug_node)) { mutex_lock(&kthreads_hotplug_lock); list_del(&kthread->hotplug_node); mutex_unlock(&kthreads_hotplug_lock); if (kthread->preferred_affinity) { kfree(kthread->preferred_affinity); kthread->preferred_affinity = NULL; } } do_exit(0); } EXPORT_SYMBOL(kthread_exit); /** * kthread_complete_and_exit - Exit the current kthread. * @comp: Completion to complete * @code: The integer value to return to kthread_stop(). * * If present, complete @comp and then return code to kthread_stop(). * * A kernel thread whose module may be removed after the completion of * @comp can use this function to exit safely. * * Does not return. 
*/ void __noreturn kthread_complete_and_exit(struct completion *comp, long code) { if (comp) complete(comp); kthread_exit(code); } EXPORT_SYMBOL(kthread_complete_and_exit); static void kthread_fetch_affinity(struct kthread *kthread, struct cpumask *cpumask) { const struct cpumask *pref; if (kthread->preferred_affinity) { pref = kthread->preferred_affinity; } else { if (WARN_ON_ONCE(kthread->node == NUMA_NO_NODE)) return; pref = cpumask_of_node(kthread->node); } cpumask_and(cpumask, pref, housekeeping_cpumask(HK_TYPE_KTHREAD)); if (cpumask_empty(cpumask)) cpumask_copy(cpumask, housekeeping_cpumask(HK_TYPE_KTHREAD)); } static void kthread_affine_node(void) { struct kthread *kthread = to_kthread(current); cpumask_var_t affinity; WARN_ON_ONCE(kthread_is_per_cpu(current)); if (kthread->node == NUMA_NO_NODE) { housekeeping_affine(current, HK_TYPE_KTHREAD); } else { if (!zalloc_cpumask_var(&affinity, GFP_KERNEL)) { WARN_ON_ONCE(1); return; } mutex_lock(&kthreads_hotplug_lock); WARN_ON_ONCE(!list_empty(&kthread->hotplug_node)); list_add_tail(&kthread->hotplug_node, &kthreads_hotplug); /* * The node cpumask is racy when read from kthread() but: * - a racing CPU going down will either fail on the subsequent * call to set_cpus_allowed_ptr() or be migrated to housekeepers * afterwards by the scheduler. * - a racing CPU going up will be handled by kthreads_online_cpu() */ kthread_fetch_affinity(kthread, affinity); set_cpus_allowed_ptr(current, affinity); mutex_unlock(&kthreads_hotplug_lock); free_cpumask_var(affinity); } } static int kthread(void *_create) { static const struct sched_param param = { .sched_priority = 0 }; /* Copy data: it's on kthread's stack */ struct kthread_create_info *create = _create; int (*threadfn)(void *data) = create->threadfn; void *data = create->data; struct completion *done; struct kthread *self; int ret; self = to_kthread(current); /* Release the structure when caller killed by a fatal signal. */ done = xchg(&create->done, NULL); if (!done) { kfree(create->full_name); kfree(create); kthread_exit(-EINTR); } self->full_name = create->full_name; self->threadfn = threadfn; self->data = data; /* * The new thread inherited kthreadd's priority and CPU mask. Reset * back to default in case they have been changed. */ sched_setscheduler_nocheck(current, SCHED_NORMAL, &param); /* OK, tell user we're spawned, wait for stop or wakeup */ __set_current_state(TASK_UNINTERRUPTIBLE); create->result = current; /* * Thread is going to call schedule(), do not preempt it, * or the creator may spend more time in wait_task_inactive(). */ preempt_disable(); complete(done); schedule_preempt_disabled(); preempt_enable(); self->started = 1; if (!(current->flags & PF_NO_SETAFFINITY) && !self->preferred_affinity) kthread_affine_node(); ret = -EINTR; if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) { cgroup_kthread_ready(); __kthread_parkme(self); ret = threadfn(data); } kthread_exit(ret); } /* called from kernel_clone() to get node information for about to be created task */ int tsk_fork_get_node(struct task_struct *tsk) { #ifdef CONFIG_NUMA if (tsk == kthreadd_task) return tsk->pref_node_fork; #endif return NUMA_NO_NODE; } static void create_kthread(struct kthread_create_info *create) { int pid; #ifdef CONFIG_NUMA current->pref_node_fork = create->node; #endif /* We want our own signal handler (we take no signals by default). */ pid = kernel_thread(kthread, create, create->full_name, CLONE_FS | CLONE_FILES | SIGCHLD); if (pid < 0) { /* Release the structure when caller killed by a fatal signal. 
*/ struct completion *done = xchg(&create->done, NULL); kfree(create->full_name); if (!done) { kfree(create); return; } create->result = ERR_PTR(pid); complete(done); } } static __printf(4, 0) struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data), void *data, int node, const char namefmt[], va_list args) { DECLARE_COMPLETION_ONSTACK(done); struct task_struct *task; struct kthread_create_info *create = kmalloc(sizeof(*create), GFP_KERNEL); if (!create) return ERR_PTR(-ENOMEM); create->threadfn = threadfn; create->data = data; create->node = node; create->done = &done; create->full_name = kvasprintf(GFP_KERNEL, namefmt, args); if (!create->full_name) { task = ERR_PTR(-ENOMEM); goto free_create; } spin_lock(&kthread_create_lock); list_add_tail(&create->list, &kthread_create_list); spin_unlock(&kthread_create_lock); wake_up_process(kthreadd_task); /* * Wait for completion in killable state, for I might be chosen by * the OOM killer while kthreadd is trying to allocate memory for * new kernel thread. */ if (unlikely(wait_for_completion_killable(&done))) { /* * If I was killed by a fatal signal before kthreadd (or new * kernel thread) calls complete(), leave the cleanup of this * structure to that thread. */ if (xchg(&create->done, NULL)) return ERR_PTR(-EINTR); /* * kthreadd (or new kernel thread) will call complete() * shortly. */ wait_for_completion(&done); } task = create->result; free_create: kfree(create); return task; } /** * kthread_create_on_node - create a kthread. * @threadfn: the function to run until signal_pending(current). * @data: data ptr for @threadfn. * @node: task and thread structures for the thread are allocated on this node * @namefmt: printf-style name for the thread. * * Description: This helper function creates and names a kernel * thread. The thread will be stopped: use wake_up_process() to start * it. See also kthread_run(). The new thread has SCHED_NORMAL policy and * is affine to all CPUs. * * If thread is going to be bound on a particular cpu, give its node * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE. * When woken, the thread will run @threadfn() with @data as its * argument. @threadfn() can either return directly if it is a * standalone thread for which no one will call kthread_stop(), or * return when 'kthread_should_stop()' is true (which means * kthread_stop() has been called). The return value should be zero * or a negative error number; it will be passed to kthread_stop(). * * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR). */ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), void *data, int node, const char namefmt[], ...) { struct task_struct *task; va_list args; va_start(args, namefmt); task = __kthread_create_on_node(threadfn, data, node, namefmt, args); va_end(args); return task; } EXPORT_SYMBOL(kthread_create_on_node); static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state) { unsigned long flags; if (!wait_task_inactive(p, state)) { WARN_ON(1); return; } /* It's safe because the task is inactive. 
*/ raw_spin_lock_irqsave(&p->pi_lock, flags); do_set_cpus_allowed(p, mask); p->flags |= PF_NO_SETAFFINITY; raw_spin_unlock_irqrestore(&p->pi_lock, flags); } static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state) { __kthread_bind_mask(p, cpumask_of(cpu), state); } void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask) { struct kthread *kthread = to_kthread(p); __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE); WARN_ON_ONCE(kthread->started); } /** * kthread_bind - bind a just-created kthread to a cpu. * @p: thread created by kthread_create(). * @cpu: cpu (might not be online, must be possible) for @k to run on. * * Description: This function is equivalent to set_cpus_allowed(), * except that @cpu doesn't need to be online, and the thread must be * stopped (i.e., just returned from kthread_create()). */ void kthread_bind(struct task_struct *p, unsigned int cpu) { struct kthread *kthread = to_kthread(p); __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE); WARN_ON_ONCE(kthread->started); } EXPORT_SYMBOL(kthread_bind); /** * kthread_create_on_cpu - Create a cpu bound kthread * @threadfn: the function to run until signal_pending(current). * @data: data ptr for @threadfn. * @cpu: The cpu on which the thread should be bound, * @namefmt: printf-style name for the thread. Format is restricted * to "name.*%u". Code fills in cpu number. * * Description: This helper function creates and names a kernel thread */ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), void *data, unsigned int cpu, const char *namefmt) { struct task_struct *p; p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt, cpu); if (IS_ERR(p)) return p; kthread_bind(p, cpu); /* CPU hotplug need to bind once again when unparking the thread. */ to_kthread(p)->cpu = cpu; return p; } EXPORT_SYMBOL(kthread_create_on_cpu); void kthread_set_per_cpu(struct task_struct *k, int cpu) { struct kthread *kthread = to_kthread(k); if (!kthread) return; WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY)); if (cpu < 0) { clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags); return; } kthread->cpu = cpu; set_bit(KTHREAD_IS_PER_CPU, &kthread->flags); } bool kthread_is_per_cpu(struct task_struct *p) { struct kthread *kthread = __to_kthread(p); if (!kthread) return false; return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags); } /** * kthread_unpark - unpark a thread created by kthread_create(). * @k: thread created by kthread_create(). * * Sets kthread_should_park() for @k to return false, wakes it, and * waits for it to return. If the thread is marked percpu then its * bound to the cpu again. */ void kthread_unpark(struct task_struct *k) { struct kthread *kthread = to_kthread(k); if (!test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)) return; /* * Newly created kthread was parked when the CPU was offline. * The binding was lost and we need to set it again. */ if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags)) __kthread_bind(k, kthread->cpu, TASK_PARKED); clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); /* * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup. */ wake_up_state(k, TASK_PARKED); } EXPORT_SYMBOL_GPL(kthread_unpark); /** * kthread_park - park a thread created by kthread_create(). * @k: thread created by kthread_create(). * * Sets kthread_should_park() for @k to return true, wakes it, and * waits for it to return. This can also be called after kthread_create() * instead of calling wake_up_process(): the thread will park without * calling threadfn(). 
* * Returns 0 if the thread is parked, -ENOSYS if the thread exited. * If called by the kthread itself just the park bit is set. */ int kthread_park(struct task_struct *k) { struct kthread *kthread = to_kthread(k); if (WARN_ON(k->flags & PF_EXITING)) return -ENOSYS; if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags))) return -EBUSY; set_bit(KTHREAD_SHOULD_PARK, &kthread->flags); if (k != current) { wake_up_process(k); /* * Wait for __kthread_parkme() to complete(), this means we * _will_ have TASK_PARKED and are about to call schedule(). */ wait_for_completion(&kthread->parked); /* * Now wait for that schedule() to complete and the task to * get scheduled out. */ WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED)); } return 0; } EXPORT_SYMBOL_GPL(kthread_park); /** * kthread_stop - stop a thread created by kthread_create(). * @k: thread created by kthread_create(). * * Sets kthread_should_stop() for @k to return true, wakes it, and * waits for it to exit. This can also be called after kthread_create() * instead of calling wake_up_process(): the thread will exit without * calling threadfn(). * * If threadfn() may call kthread_exit() itself, the caller must ensure * task_struct can't go away. * * Returns the result of threadfn(), or %-EINTR if wake_up_process() * was never called. */ int kthread_stop(struct task_struct *k) { struct kthread *kthread; int ret; trace_sched_kthread_stop(k); get_task_struct(k); kthread = to_kthread(k); set_bit(KTHREAD_SHOULD_STOP, &kthread->flags); kthread_unpark(k); set_tsk_thread_flag(k, TIF_NOTIFY_SIGNAL); wake_up_process(k); wait_for_completion(&kthread->exited); ret = kthread->result; put_task_struct(k); trace_sched_kthread_stop_ret(ret); return ret; } EXPORT_SYMBOL(kthread_stop); /** * kthread_stop_put - stop a thread and put its task struct * @k: thread created by kthread_create(). * * Stops a thread created by kthread_create() and put its task_struct. * Only use when holding an extra task struct reference obtained by * calling get_task_struct(). */ int kthread_stop_put(struct task_struct *k) { int ret; ret = kthread_stop(k); put_task_struct(k); return ret; } EXPORT_SYMBOL(kthread_stop_put); int kthreadd(void *unused) { static const char comm[TASK_COMM_LEN] = "kthreadd"; struct task_struct *tsk = current; /* Setup a clean context for our children to inherit. 
*/ set_task_comm(tsk, comm); ignore_signals(tsk); set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_TYPE_KTHREAD)); set_mems_allowed(node_states[N_MEMORY]); current->flags |= PF_NOFREEZE; cgroup_init_kthreadd(); for (;;) { set_current_state(TASK_INTERRUPTIBLE); if (list_empty(&kthread_create_list)) schedule(); __set_current_state(TASK_RUNNING); spin_lock(&kthread_create_lock); while (!list_empty(&kthread_create_list)) { struct kthread_create_info *create; create = list_entry(kthread_create_list.next, struct kthread_create_info, list); list_del_init(&create->list); spin_unlock(&kthread_create_lock); create_kthread(create); spin_lock(&kthread_create_lock); } spin_unlock(&kthread_create_lock); } return 0; } int kthread_affine_preferred(struct task_struct *p, const struct cpumask *mask) { struct kthread *kthread = to_kthread(p); cpumask_var_t affinity; unsigned long flags; int ret = 0; if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE) || kthread->started) { WARN_ON(1); return -EINVAL; } WARN_ON_ONCE(kthread->preferred_affinity); if (!zalloc_cpumask_var(&affinity, GFP_KERNEL)) return -ENOMEM; kthread->preferred_affinity = kzalloc(sizeof(struct cpumask), GFP_KERNEL); if (!kthread->preferred_affinity) { ret = -ENOMEM; goto out; } mutex_lock(&kthreads_hotplug_lock); cpumask_copy(kthread->preferred_affinity, mask); WARN_ON_ONCE(!list_empty(&kthread->hotplug_node)); list_add_tail(&kthread->hotplug_node, &kthreads_hotplug); kthread_fetch_affinity(kthread, affinity); /* It's safe because the task is inactive. */ raw_spin_lock_irqsave(&p->pi_lock, flags); do_set_cpus_allowed(p, affinity); raw_spin_unlock_irqrestore(&p->pi_lock, flags); mutex_unlock(&kthreads_hotplug_lock); out: free_cpumask_var(affinity); return ret; } /* * Re-affine kthreads according to their preferences * and the newly online CPU. The CPU down part is handled * by select_fallback_rq() which default re-affines to * housekeepers from other nodes in case the preferred * affinity doesn't apply anymore. */ static int kthreads_online_cpu(unsigned int cpu) { cpumask_var_t affinity; struct kthread *k; int ret; guard(mutex)(&kthreads_hotplug_lock); if (list_empty(&kthreads_hotplug)) return 0; if (!zalloc_cpumask_var(&affinity, GFP_KERNEL)) return -ENOMEM; ret = 0; list_for_each_entry(k, &kthreads_hotplug, hotplug_node) { if (WARN_ON_ONCE((k->task->flags & PF_NO_SETAFFINITY) || kthread_is_per_cpu(k->task))) { ret = -EINVAL; continue; } kthread_fetch_affinity(k, affinity); set_cpus_allowed_ptr(k->task, affinity); } free_cpumask_var(affinity); return ret; } static int kthreads_init(void) { return cpuhp_setup_state(CPUHP_AP_KTHREADS_ONLINE, "kthreads:online", kthreads_online_cpu, NULL); } early_initcall(kthreads_init); void __kthread_init_worker(struct kthread_worker *worker, const char *name, struct lock_class_key *key) { memset(worker, 0, sizeof(struct kthread_worker)); raw_spin_lock_init(&worker->lock); lockdep_set_class_and_name(&worker->lock, key, name); INIT_LIST_HEAD(&worker->work_list); INIT_LIST_HEAD(&worker->delayed_work_list); } EXPORT_SYMBOL_GPL(__kthread_init_worker); /** * kthread_worker_fn - kthread function to process kthread_worker * @worker_ptr: pointer to initialized kthread_worker * * This function implements the main cycle of kthread worker. It processes * work_list until it is stopped with kthread_stop(). It sleeps when the queue * is empty. * * The works are not allowed to keep any locks, disable preemption or interrupts * when they finish. 
There is defined a safe point for freezing when one work * finishes and before a new one is started. * * Also the works must not be handled by more than one worker at the same time, * see also kthread_queue_work(). */ int kthread_worker_fn(void *worker_ptr) { struct kthread_worker *worker = worker_ptr; struct kthread_work *work; /* * FIXME: Update the check and remove the assignment when all kthread * worker users are created using kthread_create_worker*() functions. */ WARN_ON(worker->task && worker->task != current); worker->task = current; if (worker->flags & KTW_FREEZABLE) set_freezable(); repeat: set_current_state(TASK_INTERRUPTIBLE); /* mb paired w/ kthread_stop */ if (kthread_should_stop()) { __set_current_state(TASK_RUNNING); raw_spin_lock_irq(&worker->lock); worker->task = NULL; raw_spin_unlock_irq(&worker->lock); return 0; } work = NULL; raw_spin_lock_irq(&worker->lock); if (!list_empty(&worker->work_list)) { work = list_first_entry(&worker->work_list, struct kthread_work, node); list_del_init(&work->node); } worker->current_work = work; raw_spin_unlock_irq(&worker->lock); if (work) { kthread_work_func_t func = work->func; __set_current_state(TASK_RUNNING); trace_sched_kthread_work_execute_start(work); work->func(work); /* * Avoid dereferencing work after this point. The trace * event only cares about the address. */ trace_sched_kthread_work_execute_end(work, func); } else if (!freezing(current)) { schedule(); } else { /* * Handle the case where the current remains * TASK_INTERRUPTIBLE. try_to_freeze() expects * the current to be TASK_RUNNING. */ __set_current_state(TASK_RUNNING); } try_to_freeze(); cond_resched(); goto repeat; } EXPORT_SYMBOL_GPL(kthread_worker_fn); static __printf(3, 0) struct kthread_worker * __kthread_create_worker_on_node(unsigned int flags, int node, const char namefmt[], va_list args) { struct kthread_worker *worker; struct task_struct *task; worker = kzalloc(sizeof(*worker), GFP_KERNEL); if (!worker) return ERR_PTR(-ENOMEM); kthread_init_worker(worker); task = __kthread_create_on_node(kthread_worker_fn, worker, node, namefmt, args); if (IS_ERR(task)) goto fail_task; worker->flags = flags; worker->task = task; return worker; fail_task: kfree(worker); return ERR_CAST(task); } /** * kthread_create_worker_on_node - create a kthread worker * @flags: flags modifying the default behavior of the worker * @node: task structure for the thread is allocated on this node * @namefmt: printf-style name for the kthread worker (task). * * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM) * when the needed structures could not get allocated, and ERR_PTR(-EINTR) * when the caller was killed by a fatal signal. */ struct kthread_worker * kthread_create_worker_on_node(unsigned int flags, int node, const char namefmt[], ...) { struct kthread_worker *worker; va_list args; va_start(args, namefmt); worker = __kthread_create_worker_on_node(flags, node, namefmt, args); va_end(args); return worker; } EXPORT_SYMBOL(kthread_create_worker_on_node); /** * kthread_create_worker_on_cpu - create a kthread worker and bind it * to a given CPU and the associated NUMA node. * @cpu: CPU number * @flags: flags modifying the default behavior of the worker * @namefmt: printf-style name for the thread. Format is restricted * to "name.*%u". Code fills in cpu number. * * Use a valid CPU number if you want to bind the kthread worker * to the given CPU and the associated NUMA node. * * A good practice is to add the cpu number also into the worker name. 
* For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu). * * CPU hotplug: * The kthread worker API is simple and generic. It just provides a way * to create, use, and destroy workers. * * It is up to the API user how to handle CPU hotplug. They have to decide * how to handle pending work items, prevent queuing new ones, and * restore the functionality when the CPU goes off and on. There are a * few catches: * * - CPU affinity gets lost when it is scheduled on an offline CPU. * * - The worker might not exist when the CPU was off when the user * created the workers. * * Good practice is to implement two CPU hotplug callbacks and to * destroy/create the worker when the CPU goes down/up. * * Return: * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM) * when the needed structures could not get allocated, and ERR_PTR(-EINTR) * when the caller was killed by a fatal signal. */ struct kthread_worker * kthread_create_worker_on_cpu(int cpu, unsigned int flags, const char namefmt[]) { struct kthread_worker *worker; worker = kthread_create_worker_on_node(flags, cpu_to_node(cpu), namefmt, cpu); if (!IS_ERR(worker)) kthread_bind(worker->task, cpu); return worker; } EXPORT_SYMBOL(kthread_create_worker_on_cpu); /* * Returns true when the work could not be queued at the moment. * It happens when it is already pending in a worker list * or when it is being cancelled. */ static inline bool queuing_blocked(struct kthread_worker *worker, struct kthread_work *work) { lockdep_assert_held(&worker->lock); return !list_empty(&work->node) || work->canceling; } static void kthread_insert_work_sanity_check(struct kthread_worker *worker, struct kthread_work *work) { lockdep_assert_held(&worker->lock); WARN_ON_ONCE(!list_empty(&work->node)); /* Do not use a work with >1 worker, see kthread_queue_work() */ WARN_ON_ONCE(work->worker && work->worker != worker); } /* insert @work before @pos in @worker */ static void kthread_insert_work(struct kthread_worker *worker, struct kthread_work *work, struct list_head *pos) { kthread_insert_work_sanity_check(worker, work); trace_sched_kthread_work_queue_work(worker, work); list_add_tail(&work->node, pos); work->worker = worker; if (!worker->current_work && likely(worker->task))