// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * USB Synaptics device driver
 *
 * Copyright (c) 2002 Rob Miller (rob@inpharmatica . co . uk)
 * Copyright (c) 2003 Ron Lee (ron@debian.org)
 *	cPad driver for kernel 2.4
 *
 * Copyright (c) 2004 Jan Steinhoff (cpad@jan-steinhoff . de)
 * Copyright (c) 2004 Ron Lee (ron@debian.org)
 *	rewritten for kernel 2.6
 *
 * cPad display character device part is not included. It can be found at
 * http://jan-steinhoff.de/linux/synaptics-usb.html
 *
 * Based on:	usb_skeleton.c v2.2 by Greg Kroah-Hartman
 *		drivers/hid/usbhid/usbmouse.c by Vojtech Pavlik
 *		drivers/input/mouse/synaptics.c by Peter Osterlund
 *
 * Trademarks are the property of their respective owners.
 */

/*
 * There are three different types of Synaptics USB devices: Touchpads,
 * touchsticks (or trackpoints), and touchscreens. Touchpads are well
 * supported by this driver, touchstick support has not been tested much
 * yet, and touchscreens have not been tested at all.
 *
 * Up to three alternate settings are possible:
 *	setting 0: one int endpoint for relative movement (used by usbhid.ko)
 *	setting 1: one int endpoint for absolute finger position
 *	setting 2 (cPad only): one int endpoint for absolute finger position
 *		and two bulk endpoints for the display (in/out)
 * This driver uses setting 1.
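 *
 * Alternate setting 1 is selected in synusb_probe() below: the probe routine
 * clamps the requested setting to min(intf->num_altsetting, 1) and switches
 * to it with usb_set_interface() before looking up the interrupt-in endpoint.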
*/ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/usb.h> #include <linux/input.h> #include <linux/usb/input.h> #define USB_VENDOR_ID_SYNAPTICS 0x06cb #define USB_DEVICE_ID_SYNAPTICS_TP 0x0001 /* Synaptics USB TouchPad */ #define USB_DEVICE_ID_SYNAPTICS_INT_TP 0x0002 /* Integrated USB TouchPad */ #define USB_DEVICE_ID_SYNAPTICS_CPAD 0x0003 /* Synaptics cPad */ #define USB_DEVICE_ID_SYNAPTICS_TS 0x0006 /* Synaptics TouchScreen */ #define USB_DEVICE_ID_SYNAPTICS_STICK 0x0007 /* Synaptics USB Styk */ #define USB_DEVICE_ID_SYNAPTICS_WP 0x0008 /* Synaptics USB WheelPad */ #define USB_DEVICE_ID_SYNAPTICS_COMP_TP 0x0009 /* Composite USB TouchPad */ #define USB_DEVICE_ID_SYNAPTICS_WTP 0x0010 /* Wireless TouchPad */ #define USB_DEVICE_ID_SYNAPTICS_DPAD 0x0013 /* DisplayPad */ #define SYNUSB_TOUCHPAD (1 << 0) #define SYNUSB_STICK (1 << 1) #define SYNUSB_TOUCHSCREEN (1 << 2) #define SYNUSB_AUXDISPLAY (1 << 3) /* For cPad */ #define SYNUSB_COMBO (1 << 4) /* Composite device (TP + stick) */ #define SYNUSB_IO_ALWAYS (1 << 5) #define USB_DEVICE_SYNAPTICS(prod, kind) \ USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, \ USB_DEVICE_ID_SYNAPTICS_##prod), \ .driver_info = (kind), #define SYNUSB_RECV_SIZE 8 #define XMIN_NOMINAL 1472 #define XMAX_NOMINAL 5472 #define YMIN_NOMINAL 1408 #define YMAX_NOMINAL 4448 struct synusb { struct usb_device *udev; struct usb_interface *intf; struct urb *urb; unsigned char *data; /* serialize access to open/suspend */ struct mutex pm_mutex; bool is_open; /* input device related data structures */ struct input_dev *input; char name[128]; char phys[64]; /* characteristics of the device */ unsigned long flags; }; static void synusb_report_buttons(struct synusb *synusb) { struct input_dev *input_dev = synusb->input; input_report_key(input_dev, BTN_LEFT, synusb->data[1] & 0x04); input_report_key(input_dev, BTN_RIGHT, synusb->data[1] & 0x01); input_report_key(input_dev, BTN_MIDDLE, synusb->data[1] & 0x02); } static void synusb_report_stick(struct synusb *synusb) { struct input_dev *input_dev = synusb->input; int x, y; unsigned int pressure; pressure = synusb->data[6]; x = (s16)(be16_to_cpup((__be16 *)&synusb->data[2]) << 3) >> 7; y = (s16)(be16_to_cpup((__be16 *)&synusb->data[4]) << 3) >> 7; if (pressure > 0) { input_report_rel(input_dev, REL_X, x); input_report_rel(input_dev, REL_Y, -y); } input_report_abs(input_dev, ABS_PRESSURE, pressure); synusb_report_buttons(synusb); input_sync(input_dev); } static void synusb_report_touchpad(struct synusb *synusb) { struct input_dev *input_dev = synusb->input; unsigned int num_fingers, tool_width; unsigned int x, y; unsigned int pressure, w; pressure = synusb->data[6]; x = be16_to_cpup((__be16 *)&synusb->data[2]); y = be16_to_cpup((__be16 *)&synusb->data[4]); w = synusb->data[0] & 0x0f; if (pressure > 0) { num_fingers = 1; tool_width = 5; switch (w) { case 0 ... 1: num_fingers = 2 + w; break; case 2: /* pen, pretend its a finger */ break; case 4 ... 
15: tool_width = w; break; } } else { num_fingers = 0; tool_width = 0; } /* * Post events * BTN_TOUCH has to be first as mousedev relies on it when doing * absolute -> relative conversion */ if (pressure > 30) input_report_key(input_dev, BTN_TOUCH, 1); if (pressure < 25) input_report_key(input_dev, BTN_TOUCH, 0); if (num_fingers > 0) { input_report_abs(input_dev, ABS_X, x); input_report_abs(input_dev, ABS_Y, YMAX_NOMINAL + YMIN_NOMINAL - y); } input_report_abs(input_dev, ABS_PRESSURE, pressure); input_report_abs(input_dev, ABS_TOOL_WIDTH, tool_width); input_report_key(input_dev, BTN_TOOL_FINGER, num_fingers == 1); input_report_key(input_dev, BTN_TOOL_DOUBLETAP, num_fingers == 2); input_report_key(input_dev, BTN_TOOL_TRIPLETAP, num_fingers == 3); synusb_report_buttons(synusb); if (synusb->flags & SYNUSB_AUXDISPLAY) input_report_key(input_dev, BTN_MIDDLE, synusb->data[1] & 0x08); input_sync(input_dev); } static void synusb_irq(struct urb *urb) { struct synusb *synusb = urb->context; int error; /* Check our status in case we need to bail out early. */ switch (urb->status) { case 0: usb_mark_last_busy(synusb->udev); break; /* Device went away so don't keep trying to read from it. */ case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: return; default: goto resubmit; break; } if (synusb->flags & SYNUSB_STICK) synusb_report_stick(synusb); else synusb_report_touchpad(synusb); resubmit: error = usb_submit_urb(urb, GFP_ATOMIC); if (error && error != -EPERM) dev_err(&synusb->intf->dev, "%s - usb_submit_urb failed with result: %d", __func__, error); } static struct usb_endpoint_descriptor * synusb_get_in_endpoint(struct usb_host_interface *iface) { struct usb_endpoint_descriptor *endpoint; int i; for (i = 0; i < iface->desc.bNumEndpoints; ++i) { endpoint = &iface->endpoint[i].desc; if (usb_endpoint_is_int_in(endpoint)) { /* we found our interrupt in endpoint */ return endpoint; } } return NULL; } static int synusb_open(struct input_dev *dev) { struct synusb *synusb = input_get_drvdata(dev); int retval; retval = usb_autopm_get_interface(synusb->intf); if (retval) { dev_err(&synusb->intf->dev, "%s - usb_autopm_get_interface failed, error: %d\n", __func__, retval); return retval; } mutex_lock(&synusb->pm_mutex); retval = usb_submit_urb(synusb->urb, GFP_KERNEL); if (retval) { dev_err(&synusb->intf->dev, "%s - usb_submit_urb failed, error: %d\n", __func__, retval); retval = -EIO; goto out; } synusb->intf->needs_remote_wakeup = 1; synusb->is_open = true; out: mutex_unlock(&synusb->pm_mutex); usb_autopm_put_interface(synusb->intf); return retval; } static void synusb_close(struct input_dev *dev) { struct synusb *synusb = input_get_drvdata(dev); int autopm_error; autopm_error = usb_autopm_get_interface(synusb->intf); mutex_lock(&synusb->pm_mutex); usb_kill_urb(synusb->urb); synusb->intf->needs_remote_wakeup = 0; synusb->is_open = false; mutex_unlock(&synusb->pm_mutex); if (!autopm_error) usb_autopm_put_interface(synusb->intf); } static int synusb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(intf); struct usb_endpoint_descriptor *ep; struct synusb *synusb; struct input_dev *input_dev; unsigned int intf_num = intf->cur_altsetting->desc.bInterfaceNumber; unsigned int altsetting = min(intf->num_altsetting, 1U); int error; error = usb_set_interface(udev, intf_num, altsetting); if (error) { dev_err(&udev->dev, "Can not set alternate setting to %i, error: %i", altsetting, error); return error; } ep = synusb_get_in_endpoint(intf->cur_altsetting); if 
(!ep) return -ENODEV; synusb = kzalloc(sizeof(*synusb), GFP_KERNEL); input_dev = input_allocate_device(); if (!synusb || !input_dev) { error = -ENOMEM; goto err_free_mem; } synusb->udev = udev; synusb->intf = intf; synusb->input = input_dev; mutex_init(&synusb->pm_mutex); synusb->flags = id->driver_info; if (synusb->flags & SYNUSB_COMBO) { /* * This is a combo device, we need to set proper * capability, depending on the interface. */ synusb->flags |= intf_num == 1 ? SYNUSB_STICK : SYNUSB_TOUCHPAD; } synusb->urb = usb_alloc_urb(0, GFP_KERNEL); if (!synusb->urb) { error = -ENOMEM; goto err_free_mem; } synusb->data = usb_alloc_coherent(udev, SYNUSB_RECV_SIZE, GFP_KERNEL, &synusb->urb->transfer_dma); if (!synusb->data) { error = -ENOMEM; goto err_free_urb; } usb_fill_int_urb(synusb->urb, udev, usb_rcvintpipe(udev, ep->bEndpointAddress), synusb->data, SYNUSB_RECV_SIZE, synusb_irq, synusb, ep->bInterval); synusb->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; if (udev->manufacturer) strscpy(synusb->name, udev->manufacturer, sizeof(synusb->name)); if (udev->product) { if (udev->manufacturer) strlcat(synusb->name, " ", sizeof(synusb->name)); strlcat(synusb->name, udev->product, sizeof(synusb->name)); } if (!strlen(synusb->name)) snprintf(synusb->name, sizeof(synusb->name), "USB Synaptics Device %04x:%04x", le16_to_cpu(udev->descriptor.idVendor), le16_to_cpu(udev->descriptor.idProduct)); if (synusb->flags & SYNUSB_STICK) strlcat(synusb->name, " (Stick)", sizeof(synusb->name)); usb_make_path(udev, synusb->phys, sizeof(synusb->phys)); strlcat(synusb->phys, "/input0", sizeof(synusb->phys)); input_dev->name = synusb->name; input_dev->phys = synusb->phys; usb_to_input_id(udev, &input_dev->id); input_dev->dev.parent = &synusb->intf->dev; if (!(synusb->flags & SYNUSB_IO_ALWAYS)) { input_dev->open = synusb_open; input_dev->close = synusb_close; } input_set_drvdata(input_dev, synusb); __set_bit(EV_ABS, input_dev->evbit); __set_bit(EV_KEY, input_dev->evbit); if (synusb->flags & SYNUSB_STICK) { __set_bit(EV_REL, input_dev->evbit); __set_bit(REL_X, input_dev->relbit); __set_bit(REL_Y, input_dev->relbit); __set_bit(INPUT_PROP_POINTING_STICK, input_dev->propbit); input_set_abs_params(input_dev, ABS_PRESSURE, 0, 127, 0, 0); } else { input_set_abs_params(input_dev, ABS_X, XMIN_NOMINAL, XMAX_NOMINAL, 0, 0); input_set_abs_params(input_dev, ABS_Y, YMIN_NOMINAL, YMAX_NOMINAL, 0, 0); input_set_abs_params(input_dev, ABS_PRESSURE, 0, 255, 0, 0); input_set_abs_params(input_dev, ABS_TOOL_WIDTH, 0, 15, 0, 0); __set_bit(BTN_TOUCH, input_dev->keybit); __set_bit(BTN_TOOL_FINGER, input_dev->keybit); __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit); __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit); } if (synusb->flags & SYNUSB_TOUCHSCREEN) __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); else __set_bit(INPUT_PROP_POINTER, input_dev->propbit); __set_bit(BTN_LEFT, input_dev->keybit); __set_bit(BTN_RIGHT, input_dev->keybit); __set_bit(BTN_MIDDLE, input_dev->keybit); usb_set_intfdata(intf, synusb); if (synusb->flags & SYNUSB_IO_ALWAYS) { error = synusb_open(input_dev); if (error) goto err_free_dma; } error = input_register_device(input_dev); if (error) { dev_err(&udev->dev, "Failed to register input device, error %d\n", error); goto err_stop_io; } return 0; err_stop_io: if (synusb->flags & SYNUSB_IO_ALWAYS) synusb_close(synusb->input); err_free_dma: usb_free_coherent(udev, SYNUSB_RECV_SIZE, synusb->data, synusb->urb->transfer_dma); err_free_urb: usb_free_urb(synusb->urb); err_free_mem: input_free_device(input_dev); kfree(synusb); 
usb_set_intfdata(intf, NULL); return error; } static void synusb_disconnect(struct usb_interface *intf) { struct synusb *synusb = usb_get_intfdata(intf); struct usb_device *udev = interface_to_usbdev(intf); if (synusb->flags & SYNUSB_IO_ALWAYS) synusb_close(synusb->input); input_unregister_device(synusb->input); usb_free_coherent(udev, SYNUSB_RECV_SIZE, synusb->data, synusb->urb->transfer_dma); usb_free_urb(synusb->urb); kfree(synusb); usb_set_intfdata(intf, NULL); } static int synusb_suspend(struct usb_interface *intf, pm_message_t message) { struct synusb *synusb = usb_get_intfdata(intf); mutex_lock(&synusb->pm_mutex); usb_kill_urb(synusb->urb); mutex_unlock(&synusb->pm_mutex); return 0; } static int synusb_resume(struct usb_interface *intf) { struct synusb *synusb = usb_get_intfdata(intf); int retval = 0; mutex_lock(&synusb->pm_mutex); if ((synusb->is_open || (synusb->flags & SYNUSB_IO_ALWAYS)) && usb_submit_urb(synusb->urb, GFP_NOIO) < 0) { retval = -EIO; } mutex_unlock(&synusb->pm_mutex); return retval; } static int synusb_pre_reset(struct usb_interface *intf) { struct synusb *synusb = usb_get_intfdata(intf); mutex_lock(&synusb->pm_mutex); usb_kill_urb(synusb->urb); return 0; } static int synusb_post_reset(struct usb_interface *intf) { struct synusb *synusb = usb_get_intfdata(intf); int retval = 0; if ((synusb->is_open || (synusb->flags & SYNUSB_IO_ALWAYS)) && usb_submit_urb(synusb->urb, GFP_NOIO) < 0) { retval = -EIO; } mutex_unlock(&synusb->pm_mutex); return retval; } static int synusb_reset_resume(struct usb_interface *intf) { return synusb_resume(intf); } static const struct usb_device_id synusb_idtable[] = { { USB_DEVICE_SYNAPTICS(TP, SYNUSB_TOUCHPAD) }, { USB_DEVICE_SYNAPTICS(INT_TP, SYNUSB_TOUCHPAD) }, { USB_DEVICE_SYNAPTICS(CPAD, SYNUSB_TOUCHPAD | SYNUSB_AUXDISPLAY | SYNUSB_IO_ALWAYS) }, { USB_DEVICE_SYNAPTICS(TS, SYNUSB_TOUCHSCREEN) }, { USB_DEVICE_SYNAPTICS(STICK, SYNUSB_STICK) }, { USB_DEVICE_SYNAPTICS(WP, SYNUSB_TOUCHPAD) }, { USB_DEVICE_SYNAPTICS(COMP_TP, SYNUSB_COMBO) }, { USB_DEVICE_SYNAPTICS(WTP, SYNUSB_TOUCHPAD) }, { USB_DEVICE_SYNAPTICS(DPAD, SYNUSB_TOUCHPAD) }, { } }; MODULE_DEVICE_TABLE(usb, synusb_idtable); static struct usb_driver synusb_driver = { .name = "synaptics_usb", .probe = synusb_probe, .disconnect = synusb_disconnect, .id_table = synusb_idtable, .suspend = synusb_suspend, .resume = synusb_resume, .pre_reset = synusb_pre_reset, .post_reset = synusb_post_reset, .reset_resume = synusb_reset_resume, .supports_autosuspend = 1, }; module_usb_driver(synusb_driver); MODULE_AUTHOR("Rob Miller <rob@inpharmatica.co.uk>, " "Ron Lee <ron@debian.org>, " "Jan Steinhoff <cpad@jan-steinhoff.de>"); MODULE_DESCRIPTION("Synaptics USB device driver"); MODULE_LICENSE("GPL"); |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 */

#include <net/genetlink.h>
#include <net/sock.h>

#include "devl_internal.h"

static const struct genl_multicast_group devlink_nl_mcgrps[] = {
	[DEVLINK_MCGRP_CONFIG] = { .name = DEVLINK_GENL_MCGRP_CONFIG_NAME },
};

int devlink_nl_put_nested_handle(struct sk_buff *msg, struct net *net,
				 struct devlink *devlink, int attrtype)
{
	struct nlattr *nested_attr;
	struct net *devl_net;

	nested_attr = nla_nest_start(msg, attrtype);
	if (!nested_attr)
		return -EMSGSIZE;
	if (devlink_nl_put_handle(msg, devlink))
		goto nla_put_failure;

	rcu_read_lock();
	devl_net = read_pnet_rcu(&devlink->_net);
	if (!net_eq(net, devl_net)) {
		int id = peernet2id_alloc(net, devl_net, GFP_ATOMIC);

		rcu_read_unlock();
		if (nla_put_s32(msg, DEVLINK_ATTR_NETNS_ID, id))
			return -EMSGSIZE;
	} else {
		rcu_read_unlock();
	}

	nla_nest_end(msg, nested_attr);
	return 0;

nla_put_failure:
	nla_nest_cancel(msg, nested_attr);
	return -EMSGSIZE;
}

int devlink_nl_msg_reply_and_new(struct sk_buff **msg, struct genl_info *info)
{
	int err;

	if (*msg) {
		err = genlmsg_reply(*msg, info);
		if (err)
			return err;
	}
	*msg = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!*msg)
		return -ENOMEM;
	return 0;
}

struct devlink *
devlink_get_from_attrs_lock(struct net *net, struct nlattr **attrs)
{
	struct devlink *devlink;
	unsigned long index;
	char *busname;
	char *devname;

	if (!attrs[DEVLINK_ATTR_BUS_NAME] || !attrs[DEVLINK_ATTR_DEV_NAME])
		return ERR_PTR(-EINVAL);

	busname = nla_data(attrs[DEVLINK_ATTR_BUS_NAME]);
	devname = nla_data(attrs[DEVLINK_ATTR_DEV_NAME]);

	devlinks_xa_for_each_registered_get(net, index, devlink) {
		devl_lock(devlink);

		if (devl_is_registered(devlink) &&
		    strcmp(devlink->dev->bus->name, busname) == 0 &&
		    strcmp(dev_name(devlink->dev), devname) == 0)
			return devlink;

		devl_unlock(devlink);
		devlink_put(devlink);
	}

	return ERR_PTR(-ENODEV);
}

static int __devlink_nl_pre_doit(struct sk_buff *skb, struct genl_info *info,
				 u8 flags)
{
	struct devlink_port *devlink_port;
	struct devlink *devlink;
	int err;

	devlink = devlink_get_from_attrs_lock(genl_info_net(info), info->attrs);
	if (IS_ERR(devlink))
		return PTR_ERR(devlink);

	info->user_ptr[0] = devlink;
	if (flags & DEVLINK_NL_FLAG_NEED_PORT) {
		devlink_port = devlink_port_get_from_info(devlink, info);
		if (IS_ERR(devlink_port)) {
			err = PTR_ERR(devlink_port);
			goto unlock;
		}
		info->user_ptr[1] = devlink_port;
	} else if (flags & DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT) {
		devlink_port = devlink_port_get_from_info(devlink, info);
		if (!IS_ERR(devlink_port))
			info->user_ptr[1] = devlink_port;
	}
	return 0;

unlock:
	devl_unlock(devlink);
	devlink_put(devlink);
	return err;
}

int devlink_nl_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
			struct genl_info *info)
{
	return __devlink_nl_pre_doit(skb, info, 0);
}

int devlink_nl_pre_doit_port(const struct genl_split_ops *ops,
			     struct sk_buff *skb, struct genl_info *info)
{
	return __devlink_nl_pre_doit(skb, info, DEVLINK_NL_FLAG_NEED_PORT);
}

int devlink_nl_pre_doit_port_optional(const struct genl_split_ops *ops,
				      struct sk_buff *skb,
				      struct genl_info *info)
{
	return __devlink_nl_pre_doit(skb, info,
				     DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT);
}

void devlink_nl_post_doit(const struct genl_split_ops *ops,
			  struct sk_buff *skb, struct genl_info *info)
{
	struct devlink *devlink;

	devlink = info->user_ptr[0];
	devl_unlock(devlink);
	devlink_put(devlink);
}

static int devlink_nl_inst_single_dumpit(struct sk_buff *msg,
					 struct netlink_callback *cb, int flags,
					 devlink_nl_dump_one_func_t *dump_one,
					 struct nlattr **attrs)
{
	struct devlink *devlink;
	int err;

	devlink = devlink_get_from_attrs_lock(sock_net(msg->sk), attrs);
	if (IS_ERR(devlink))
		return PTR_ERR(devlink);
	err = dump_one(msg, devlink, cb, flags | NLM_F_DUMP_FILTERED);

	devl_unlock(devlink);
	devlink_put(devlink);

	if (err != -EMSGSIZE)
		return err;

	return msg->len;
}

static int devlink_nl_inst_iter_dumpit(struct sk_buff *msg,
				       struct netlink_callback *cb, int flags,
				       devlink_nl_dump_one_func_t *dump_one)
{
	struct devlink_nl_dump_state *state = devlink_dump_state(cb);
	struct devlink *devlink;
	int err = 0;

	while ((devlink = devlinks_xa_find_get(sock_net(msg->sk),
					       &state->instance))) {
		devl_lock(devlink);

		if (devl_is_registered(devlink))
			err = dump_one(msg, devlink, cb, flags);
		else
			err = 0;

		devl_unlock(devlink);
		devlink_put(devlink);

		if (err)
			break;

		state->instance++;

		/* restart sub-object walk for the next instance */
		state->idx = 0;
	}

	if (err != -EMSGSIZE)
		return err;

	return msg->len;
}

int devlink_nl_dumpit(struct sk_buff *msg, struct netlink_callback *cb,
		      devlink_nl_dump_one_func_t *dump_one)
{
	const struct genl_info *info = genl_info_dump(cb);
	struct nlattr **attrs = info->attrs;
	int flags = NLM_F_MULTI;

	if (attrs && (attrs[DEVLINK_ATTR_BUS_NAME] ||
		      attrs[DEVLINK_ATTR_DEV_NAME]))
		return devlink_nl_inst_single_dumpit(msg, cb, flags, dump_one,
						     attrs);
	else
		return devlink_nl_inst_iter_dumpit(msg, cb, flags, dump_one);
}

struct genl_family devlink_nl_family __ro_after_init = {
	.name		= DEVLINK_GENL_NAME,
	.version	= DEVLINK_GENL_VERSION,
	.netnsok	= true,
	.parallel_ops	= true,
	.module		= THIS_MODULE,
	.split_ops	= devlink_nl_ops,
	.n_split_ops	= ARRAY_SIZE(devlink_nl_ops),
	.resv_start_op	= DEVLINK_CMD_SELFTESTS_RUN + 1,
	.mcgrps		= devlink_nl_mcgrps,
	.n_mcgrps	= ARRAY_SIZE(devlink_nl_mcgrps),
};
// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/usb/core/usb.c
 *
 * (C) Copyright Linus Torvalds 1999
 * (C) Copyright Johannes Erdfelt 1999-2001
 * (C) Copyright Andreas Gal 1999
 * (C) Copyright Gregory P. Smith 1999
 * (C) Copyright Deti Fliegl 1999 (new USB architecture)
 * (C) Copyright Randy Dunlap 2000
 * (C) Copyright David Brownell 2000-2004
 * (C) Copyright Yggdrasil Computing, Inc. 2000
 *     (usb_device_id matching changes by Adam J. Richter)
 * (C) Copyright Greg Kroah-Hartman 2002-2003
 *
 * Released under the GPLv2 only.
 *
 * NOTE! This is not actually a driver at all, rather this is
 * just a collection of helper routines that implement the
 * generic USB things that the real drivers can use..
 *
 * Think of this as a "USB library" rather than anything else,
 * with no callbacks.  Callbacks are evil.
*/ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/of.h> #include <linux/string.h> #include <linux/bitops.h> #include <linux/slab.h> #include <linux/kmod.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/errno.h> #include <linux/usb.h> #include <linux/usb/hcd.h> #include <linux/mutex.h> #include <linux/workqueue.h> #include <linux/debugfs.h> #include <linux/usb/of.h> #include <asm/io.h> #include <linux/scatterlist.h> #include <linux/mm.h> #include <linux/dma-mapping.h> #include "hub.h" const char *usbcore_name = "usbcore"; static bool nousb; /* Disable USB when built into kernel image */ module_param(nousb, bool, 0444); /* * for external read access to <nousb> */ int usb_disabled(void) { return nousb; } EXPORT_SYMBOL_GPL(usb_disabled); #ifdef CONFIG_PM /* Default delay value, in seconds */ static int usb_autosuspend_delay = CONFIG_USB_AUTOSUSPEND_DELAY; module_param_named(autosuspend, usb_autosuspend_delay, int, 0644); MODULE_PARM_DESC(autosuspend, "default autosuspend delay"); #else #define usb_autosuspend_delay 0 #endif static bool match_endpoint(struct usb_endpoint_descriptor *epd, struct usb_endpoint_descriptor **bulk_in, struct usb_endpoint_descriptor **bulk_out, struct usb_endpoint_descriptor **int_in, struct usb_endpoint_descriptor **int_out) { switch (usb_endpoint_type(epd)) { case USB_ENDPOINT_XFER_BULK: if (usb_endpoint_dir_in(epd)) { if (bulk_in && !*bulk_in) { *bulk_in = epd; break; } } else { if (bulk_out && !*bulk_out) { *bulk_out = epd; break; } } return false; case USB_ENDPOINT_XFER_INT: if (usb_endpoint_dir_in(epd)) { if (int_in && !*int_in) { *int_in = epd; break; } } else { if (int_out && !*int_out) { *int_out = epd; break; } } return false; default: return false; } return (!bulk_in || *bulk_in) && (!bulk_out || *bulk_out) && (!int_in || *int_in) && (!int_out || *int_out); } /** * usb_find_common_endpoints() -- look up common endpoint descriptors * @alt: alternate setting to search * @bulk_in: pointer to descriptor pointer, or NULL * @bulk_out: pointer to descriptor pointer, or NULL * @int_in: pointer to descriptor pointer, or NULL * @int_out: pointer to descriptor pointer, or NULL * * Search the alternate setting's endpoint descriptors for the first bulk-in, * bulk-out, interrupt-in and interrupt-out endpoints and return them in the * provided pointers (unless they are NULL). * * If a requested endpoint is not found, the corresponding pointer is set to * NULL. * * Return: Zero if all requested descriptors were found, or -ENXIO otherwise. 
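 *
 * Illustrative use (not from this file): a driver that needs one bulk-in and
 * one bulk-out endpoint, where bulk_in and bulk_out are local
 * struct usb_endpoint_descriptor pointers, could call
 *
 *	err = usb_find_common_endpoints(intf->cur_altsetting,
 *					&bulk_in, &bulk_out, NULL, NULL);
 *
 * and fail its probe when a non-zero value is returned.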
*/ int usb_find_common_endpoints(struct usb_host_interface *alt, struct usb_endpoint_descriptor **bulk_in, struct usb_endpoint_descriptor **bulk_out, struct usb_endpoint_descriptor **int_in, struct usb_endpoint_descriptor **int_out) { struct usb_endpoint_descriptor *epd; int i; if (bulk_in) *bulk_in = NULL; if (bulk_out) *bulk_out = NULL; if (int_in) *int_in = NULL; if (int_out) *int_out = NULL; for (i = 0; i < alt->desc.bNumEndpoints; ++i) { epd = &alt->endpoint[i].desc; if (match_endpoint(epd, bulk_in, bulk_out, int_in, int_out)) return 0; } return -ENXIO; } EXPORT_SYMBOL_GPL(usb_find_common_endpoints); /** * usb_find_common_endpoints_reverse() -- look up common endpoint descriptors * @alt: alternate setting to search * @bulk_in: pointer to descriptor pointer, or NULL * @bulk_out: pointer to descriptor pointer, or NULL * @int_in: pointer to descriptor pointer, or NULL * @int_out: pointer to descriptor pointer, or NULL * * Search the alternate setting's endpoint descriptors for the last bulk-in, * bulk-out, interrupt-in and interrupt-out endpoints and return them in the * provided pointers (unless they are NULL). * * If a requested endpoint is not found, the corresponding pointer is set to * NULL. * * Return: Zero if all requested descriptors were found, or -ENXIO otherwise. */ int usb_find_common_endpoints_reverse(struct usb_host_interface *alt, struct usb_endpoint_descriptor **bulk_in, struct usb_endpoint_descriptor **bulk_out, struct usb_endpoint_descriptor **int_in, struct usb_endpoint_descriptor **int_out) { struct usb_endpoint_descriptor *epd; int i; if (bulk_in) *bulk_in = NULL; if (bulk_out) *bulk_out = NULL; if (int_in) *int_in = NULL; if (int_out) *int_out = NULL; for (i = alt->desc.bNumEndpoints - 1; i >= 0; --i) { epd = &alt->endpoint[i].desc; if (match_endpoint(epd, bulk_in, bulk_out, int_in, int_out)) return 0; } return -ENXIO; } EXPORT_SYMBOL_GPL(usb_find_common_endpoints_reverse); /** * usb_find_endpoint() - Given an endpoint address, search for the endpoint's * usb_host_endpoint structure in an interface's current altsetting. * @intf: the interface whose current altsetting should be searched * @ep_addr: the endpoint address (number and direction) to find * * Search the altsetting's list of endpoints for one with the specified address. * * Return: Pointer to the usb_host_endpoint if found, %NULL otherwise. */ static const struct usb_host_endpoint *usb_find_endpoint( const struct usb_interface *intf, unsigned int ep_addr) { int n; const struct usb_host_endpoint *ep; n = intf->cur_altsetting->desc.bNumEndpoints; ep = intf->cur_altsetting->endpoint; for (; n > 0; (--n, ++ep)) { if (ep->desc.bEndpointAddress == ep_addr) return ep; } return NULL; } /** * usb_check_bulk_endpoints - Check whether an interface's current altsetting * contains a set of bulk endpoints with the given addresses. * @intf: the interface whose current altsetting should be searched * @ep_addrs: 0-terminated array of the endpoint addresses (number and * direction) to look for * * Search for endpoints with the specified addresses and check their types. * * Return: %true if all the endpoints are found and are bulk, %false otherwise. 
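 *
 * Illustrative use (not from this file): with a zero-terminated address list
 * such as
 *
 *	static const u8 ep_addrs[] = { 0x81, 0x02, 0 };
 *
 * a caller can verify that endpoint 1 IN and endpoint 2 OUT exist and are
 * bulk before using them.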
*/ bool usb_check_bulk_endpoints( const struct usb_interface *intf, const u8 *ep_addrs) { const struct usb_host_endpoint *ep; for (; *ep_addrs; ++ep_addrs) { ep = usb_find_endpoint(intf, *ep_addrs); if (!ep || !usb_endpoint_xfer_bulk(&ep->desc)) return false; } return true; } EXPORT_SYMBOL_GPL(usb_check_bulk_endpoints); /** * usb_check_int_endpoints - Check whether an interface's current altsetting * contains a set of interrupt endpoints with the given addresses. * @intf: the interface whose current altsetting should be searched * @ep_addrs: 0-terminated array of the endpoint addresses (number and * direction) to look for * * Search for endpoints with the specified addresses and check their types. * * Return: %true if all the endpoints are found and are interrupt, * %false otherwise. */ bool usb_check_int_endpoints( const struct usb_interface *intf, const u8 *ep_addrs) { const struct usb_host_endpoint *ep; for (; *ep_addrs; ++ep_addrs) { ep = usb_find_endpoint(intf, *ep_addrs); if (!ep || !usb_endpoint_xfer_int(&ep->desc)) return false; } return true; } EXPORT_SYMBOL_GPL(usb_check_int_endpoints); /** * usb_find_alt_setting() - Given a configuration, find the alternate setting * for the given interface. * @config: the configuration to search (not necessarily the current config). * @iface_num: interface number to search in * @alt_num: alternate interface setting number to search for. * * Search the configuration's interface cache for the given alt setting. * * Return: The alternate setting, if found. %NULL otherwise. */ struct usb_host_interface *usb_find_alt_setting( struct usb_host_config *config, unsigned int iface_num, unsigned int alt_num) { struct usb_interface_cache *intf_cache = NULL; int i; if (!config) return NULL; for (i = 0; i < config->desc.bNumInterfaces; i++) { if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber == iface_num) { intf_cache = config->intf_cache[i]; break; } } if (!intf_cache) return NULL; for (i = 0; i < intf_cache->num_altsetting; i++) if (intf_cache->altsetting[i].desc.bAlternateSetting == alt_num) return &intf_cache->altsetting[i]; printk(KERN_DEBUG "Did not find alt setting %u for intf %u, " "config %u\n", alt_num, iface_num, config->desc.bConfigurationValue); return NULL; } EXPORT_SYMBOL_GPL(usb_find_alt_setting); /** * usb_ifnum_to_if - get the interface object with a given interface number * @dev: the device whose current configuration is considered * @ifnum: the desired interface * * This walks the device descriptor for the currently active configuration * to find the interface object with the particular interface number. * * Note that configuration descriptors are not required to assign interface * numbers sequentially, so that it would be incorrect to assume that * the first interface in that descriptor corresponds to interface zero. * This routine helps device drivers avoid such mistakes. * However, you should make sure that you do the right thing with any * alternate settings available for this interfaces. * * Don't call this function unless you are bound to one of the interfaces * on this device or you have locked the device! * * Return: A pointer to the interface that has @ifnum as interface number, * if found. %NULL otherwise. 
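 *
 * Illustrative use (not from this file): a driver bound to the control
 * interface of a composite device might look up its data interface with
 *
 *	struct usb_interface *data_intf = usb_ifnum_to_if(udev, 1);
 *
 * where interface number 1 is only an assumption about that particular
 * device's layout.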
*/ struct usb_interface *usb_ifnum_to_if(const struct usb_device *dev, unsigned ifnum) { struct usb_host_config *config = dev->actconfig; int i; if (!config) return NULL; for (i = 0; i < config->desc.bNumInterfaces; i++) if (config->interface[i]->altsetting[0] .desc.bInterfaceNumber == ifnum) return config->interface[i]; return NULL; } EXPORT_SYMBOL_GPL(usb_ifnum_to_if); /** * usb_altnum_to_altsetting - get the altsetting structure with a given alternate setting number. * @intf: the interface containing the altsetting in question * @altnum: the desired alternate setting number * * This searches the altsetting array of the specified interface for * an entry with the correct bAlternateSetting value. * * Note that altsettings need not be stored sequentially by number, so * it would be incorrect to assume that the first altsetting entry in * the array corresponds to altsetting zero. This routine helps device * drivers avoid such mistakes. * * Don't call this function unless you are bound to the intf interface * or you have locked the device! * * Return: A pointer to the entry of the altsetting array of @intf that * has @altnum as the alternate setting number. %NULL if not found. */ struct usb_host_interface *usb_altnum_to_altsetting( const struct usb_interface *intf, unsigned int altnum) { int i; for (i = 0; i < intf->num_altsetting; i++) { if (intf->altsetting[i].desc.bAlternateSetting == altnum) return &intf->altsetting[i]; } return NULL; } EXPORT_SYMBOL_GPL(usb_altnum_to_altsetting); struct find_interface_arg { int minor; struct device_driver *drv; }; static int __find_interface(struct device *dev, const void *data) { const struct find_interface_arg *arg = data; struct usb_interface *intf; if (!is_usb_interface(dev)) return 0; if (dev->driver != arg->drv) return 0; intf = to_usb_interface(dev); return intf->minor == arg->minor; } /** * usb_find_interface - find usb_interface pointer for driver and device * @drv: the driver whose current configuration is considered * @minor: the minor number of the desired device * * This walks the bus device list and returns a pointer to the interface * with the matching minor and driver. Note, this only works for devices * that share the USB major number. * * Return: A pointer to the interface with the matching major and @minor. */ struct usb_interface *usb_find_interface(struct usb_driver *drv, int minor) { struct find_interface_arg argb; struct device *dev; argb.minor = minor; argb.drv = &drv->drvwrap.driver; dev = bus_find_device(&usb_bus_type, NULL, &argb, __find_interface); /* Drop reference count from bus_find_device */ put_device(dev); return dev ? to_usb_interface(dev) : NULL; } EXPORT_SYMBOL_GPL(usb_find_interface); struct each_dev_arg { void *data; int (*fn)(struct usb_device *, void *); }; static int __each_dev(struct device *dev, void *data) { struct each_dev_arg *arg = (struct each_dev_arg *)data; /* There are struct usb_interface on the same bus, filter them out */ if (!is_usb_device(dev)) return 0; return arg->fn(to_usb_device(dev), arg->data); } /** * usb_for_each_dev - iterate over all USB devices in the system * @data: data pointer that will be handed to the callback function * @fn: callback function to be called for each USB device * * Iterate over all USB devices and call @fn for each, passing it @data. If it * returns anything other than 0, we break the iteration prematurely and return * that value. 
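 *
 * Illustrative sketch (not from this file): a callback that merely counts
 * devices returns 0 for every device so that the walk continues:
 *
 *	static int count_dev(struct usb_device *udev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int count = 0;
 *	usb_for_each_dev(&count, count_dev);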
*/ int usb_for_each_dev(void *data, int (*fn)(struct usb_device *, void *)) { struct each_dev_arg arg = {data, fn}; return bus_for_each_dev(&usb_bus_type, NULL, &arg, __each_dev); } EXPORT_SYMBOL_GPL(usb_for_each_dev); /** * usb_release_dev - free a usb device structure when all users of it are finished. * @dev: device that's been disconnected * * Will be called only by the device core when all users of this usb device are * done. */ static void usb_release_dev(struct device *dev) { struct usb_device *udev; struct usb_hcd *hcd; udev = to_usb_device(dev); hcd = bus_to_hcd(udev->bus); usb_destroy_configuration(udev); usb_release_bos_descriptor(udev); of_node_put(dev->of_node); usb_put_hcd(hcd); kfree(udev->product); kfree(udev->manufacturer); kfree(udev->serial); kfree(udev); } static int usb_dev_uevent(const struct device *dev, struct kobj_uevent_env *env) { const struct usb_device *usb_dev; usb_dev = to_usb_device(dev); if (add_uevent_var(env, "BUSNUM=%03d", usb_dev->bus->busnum)) return -ENOMEM; if (add_uevent_var(env, "DEVNUM=%03d", usb_dev->devnum)) return -ENOMEM; return 0; } #ifdef CONFIG_PM /* USB device Power-Management thunks. * There's no need to distinguish here between quiescing a USB device * and powering it down; the generic_suspend() routine takes care of * it by skipping the usb_port_suspend() call for a quiesce. And for * USB interfaces there's no difference at all. */ static int usb_dev_prepare(struct device *dev) { return 0; /* Implement eventually? */ } static void usb_dev_complete(struct device *dev) { /* Currently used only for rebinding interfaces */ usb_resume_complete(dev); } static int usb_dev_suspend(struct device *dev) { return usb_suspend(dev, PMSG_SUSPEND); } static int usb_dev_resume(struct device *dev) { return usb_resume(dev, PMSG_RESUME); } static int usb_dev_freeze(struct device *dev) { return usb_suspend(dev, PMSG_FREEZE); } static int usb_dev_thaw(struct device *dev) { return usb_resume(dev, PMSG_THAW); } static int usb_dev_poweroff(struct device *dev) { return usb_suspend(dev, PMSG_HIBERNATE); } static int usb_dev_restore(struct device *dev) { return usb_resume(dev, PMSG_RESTORE); } static const struct dev_pm_ops usb_device_pm_ops = { .prepare = usb_dev_prepare, .complete = usb_dev_complete, .suspend = usb_dev_suspend, .resume = usb_dev_resume, .freeze = usb_dev_freeze, .thaw = usb_dev_thaw, .poweroff = usb_dev_poweroff, .restore = usb_dev_restore, .runtime_suspend = usb_runtime_suspend, .runtime_resume = usb_runtime_resume, .runtime_idle = usb_runtime_idle, }; #endif /* CONFIG_PM */ static char *usb_devnode(const struct device *dev, umode_t *mode, kuid_t *uid, kgid_t *gid) { const struct usb_device *usb_dev; usb_dev = to_usb_device(dev); return kasprintf(GFP_KERNEL, "bus/usb/%03d/%03d", usb_dev->bus->busnum, usb_dev->devnum); } struct device_type usb_device_type = { .name = "usb_device", .release = usb_release_dev, .uevent = usb_dev_uevent, .devnode = usb_devnode, #ifdef CONFIG_PM .pm = &usb_device_pm_ops, #endif }; static bool usb_dev_authorized(struct usb_device *dev, struct usb_hcd *hcd) { struct usb_hub *hub; if (!dev->parent) return true; /* Root hub always ok [and always wired] */ switch (hcd->dev_policy) { case USB_DEVICE_AUTHORIZE_NONE: default: return false; case USB_DEVICE_AUTHORIZE_ALL: return true; case USB_DEVICE_AUTHORIZE_INTERNAL: hub = usb_hub_to_struct_hub(dev->parent); return hub->ports[dev->portnum - 1]->connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED; } } /** * usb_alloc_dev - usb device constructor (usbcore-internal) * @parent: 
hub to which device is connected; null to allocate a root hub * @bus: bus used to access the device * @port1: one-based index of port; ignored for root hubs * * Context: task context, might sleep. * * Only hub drivers (including virtual root hub drivers for host * controllers) should ever call this. * * This call may not be used in a non-sleeping context. * * Return: On success, a pointer to the allocated usb device. %NULL on * failure. */ struct usb_device *usb_alloc_dev(struct usb_device *parent, struct usb_bus *bus, unsigned port1) { struct usb_device *dev; struct usb_hcd *usb_hcd = bus_to_hcd(bus); unsigned raw_port = port1; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return NULL; if (!usb_get_hcd(usb_hcd)) { kfree(dev); return NULL; } /* Root hubs aren't true devices, so don't allocate HCD resources */ if (usb_hcd->driver->alloc_dev && parent && !usb_hcd->driver->alloc_dev(usb_hcd, dev)) { usb_put_hcd(bus_to_hcd(bus)); kfree(dev); return NULL; } device_initialize(&dev->dev); dev->dev.bus = &usb_bus_type; dev->dev.type = &usb_device_type; dev->dev.groups = usb_device_groups; set_dev_node(&dev->dev, dev_to_node(bus->sysdev)); dev->state = USB_STATE_ATTACHED; dev->lpm_disable_count = 1; atomic_set(&dev->urbnum, 0); INIT_LIST_HEAD(&dev->ep0.urb_list); dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE; dev->ep0.desc.bDescriptorType = USB_DT_ENDPOINT; /* ep0 maxpacket comes later, from device descriptor */ usb_enable_endpoint(dev, &dev->ep0, false); dev->can_submit = 1; /* Save readable and stable topology id, distinguishing devices * by location for diagnostics, tools, driver model, etc. The * string is a path along hub ports, from the root. Each device's * dev->devpath will be stable until USB is re-cabled, and hubs * are often labeled with these port numbers. The name isn't * as stable: bus->busnum changes easily from modprobe order, * cardbus or pci hotplugging, and so on. */ if (unlikely(!parent)) { dev->devpath[0] = '0'; dev->route = 0; dev->dev.parent = bus->controller; device_set_of_node_from_dev(&dev->dev, bus->sysdev); dev_set_name(&dev->dev, "usb%d", bus->busnum); } else { /* match any labeling on the hubs; it's one-based */ if (parent->devpath[0] == '0') { snprintf(dev->devpath, sizeof dev->devpath, "%d", port1); /* Root ports are not counted in route string */ dev->route = 0; } else { snprintf(dev->devpath, sizeof dev->devpath, "%s.%d", parent->devpath, port1); /* Route string assumes hubs have less than 16 ports */ if (port1 < 15) dev->route = parent->route + (port1 << ((parent->level - 1)*4)); else dev->route = parent->route + (15 << ((parent->level - 1)*4)); } dev->dev.parent = &parent->dev; dev_set_name(&dev->dev, "%d-%s", bus->busnum, dev->devpath); if (!parent->parent) { /* device under root hub's port */ raw_port = usb_hcd_find_raw_port_number(usb_hcd, port1); } dev->dev.of_node = usb_of_get_device_node(parent, raw_port); /* hub driver sets up TT records */ } dev->portnum = port1; dev->bus = bus; dev->parent = parent; INIT_LIST_HEAD(&dev->filelist); #ifdef CONFIG_PM pm_runtime_set_autosuspend_delay(&dev->dev, usb_autosuspend_delay * 1000); dev->connect_time = jiffies; dev->active_duration = -jiffies; #endif dev->authorized = usb_dev_authorized(dev, usb_hcd); return dev; } EXPORT_SYMBOL_GPL(usb_alloc_dev); /** * usb_get_dev - increments the reference count of the usb device structure * @dev: the device being referenced * * Each live reference to a device should be refcounted. 
* * Drivers for USB interfaces should normally record such references in * their probe() methods, when they bind to an interface, and release * them by calling usb_put_dev(), in their disconnect() methods. * However, if a driver does not access the usb_device structure after * its disconnect() method returns then refcounting is not necessary, * because the USB core guarantees that a usb_device will not be * deallocated until after all of its interface drivers have been unbound. * * Return: A pointer to the device with the incremented reference counter. */ struct usb_device *usb_get_dev(struct usb_device *dev) { if (dev) get_device(&dev->dev); return dev; } EXPORT_SYMBOL_GPL(usb_get_dev); /** * usb_put_dev - release a use of the usb device structure * @dev: device that's been disconnected * * Must be called when a user of a device is finished with it. When the last * user of the device calls this function, the memory of the device is freed. */ void usb_put_dev(struct usb_device *dev) { if (dev) put_device(&dev->dev); } EXPORT_SYMBOL_GPL(usb_put_dev); /** * usb_get_intf - increments the reference count of the usb interface structure * @intf: the interface being referenced * * Each live reference to a interface must be refcounted. * * Drivers for USB interfaces should normally record such references in * their probe() methods, when they bind to an interface, and release * them by calling usb_put_intf(), in their disconnect() methods. * However, if a driver does not access the usb_interface structure after * its disconnect() method returns then refcounting is not necessary, * because the USB core guarantees that a usb_interface will not be * deallocated until after its driver has been unbound. * * Return: A pointer to the interface with the incremented reference counter. */ struct usb_interface *usb_get_intf(struct usb_interface *intf) { if (intf) get_device(&intf->dev); return intf; } EXPORT_SYMBOL_GPL(usb_get_intf); /** * usb_put_intf - release a use of the usb interface structure * @intf: interface that's been decremented * * Must be called when a user of an interface is finished with it. When the * last user of the interface calls this function, the memory of the interface * is freed. */ void usb_put_intf(struct usb_interface *intf) { if (intf) put_device(&intf->dev); } EXPORT_SYMBOL_GPL(usb_put_intf); /** * usb_intf_get_dma_device - acquire a reference on the usb interface's DMA endpoint * @intf: the usb interface * * While a USB device cannot perform DMA operations by itself, many USB * controllers can. A call to usb_intf_get_dma_device() returns the DMA endpoint * for the given USB interface, if any. The returned device structure must be * released with put_device(). * * See also usb_get_dma_device(). * * Returns: A reference to the usb interface's DMA endpoint; or NULL if none * exists. */ struct device *usb_intf_get_dma_device(struct usb_interface *intf) { struct usb_device *udev = interface_to_usbdev(intf); struct device *dmadev; if (!udev->bus) return NULL; dmadev = get_device(udev->bus->sysdev); if (!dmadev || !dmadev->dma_mask) { put_device(dmadev); return NULL; } return dmadev; } EXPORT_SYMBOL_GPL(usb_intf_get_dma_device); /* USB device locking * * USB devices and interfaces are locked using the semaphore in their * embedded struct device. The hub driver guarantees that whenever a * device is connected or disconnected, drivers are called with the * USB device locked as well as their particular interface. 
* * Complications arise when several devices are to be locked at the same * time. Only hub-aware drivers that are part of usbcore ever have to * do this; nobody else needs to worry about it. The rule for locking * is simple: * * When locking both a device and its parent, always lock the * parent first. */ /** * usb_lock_device_for_reset - cautiously acquire the lock for a usb device structure * @udev: device that's being locked * @iface: interface bound to the driver making the request (optional) * * Attempts to acquire the device lock, but fails if the device is * NOTATTACHED or SUSPENDED, or if iface is specified and the interface * is neither BINDING nor BOUND. Rather than sleeping to wait for the * lock, the routine polls repeatedly. This is to prevent deadlock with * disconnect; in some drivers (such as usb-storage) the disconnect() * or suspend() method will block waiting for a device reset to complete. * * Return: A negative error code for failure, otherwise 0. */ int usb_lock_device_for_reset(struct usb_device *udev, const struct usb_interface *iface) { unsigned long jiffies_expire = jiffies + HZ; if (udev->state == USB_STATE_NOTATTACHED) return -ENODEV; if (udev->state == USB_STATE_SUSPENDED) return -EHOSTUNREACH; if (iface && (iface->condition == USB_INTERFACE_UNBINDING || iface->condition == USB_INTERFACE_UNBOUND)) return -EINTR; while (!usb_trylock_device(udev)) { /* If we can't acquire the lock after waiting one second, * we're probably deadlocked */ if (time_after(jiffies, jiffies_expire)) return -EBUSY; msleep(15); if (udev->state == USB_STATE_NOTATTACHED) return -ENODEV; if (udev->state == USB_STATE_SUSPENDED) return -EHOSTUNREACH; if (iface && (iface->condition == USB_INTERFACE_UNBINDING || iface->condition == USB_INTERFACE_UNBOUND)) return -EINTR; } return 0; } EXPORT_SYMBOL_GPL(usb_lock_device_for_reset); /** * usb_get_current_frame_number - return current bus frame number * @dev: the device whose bus is being queried * * Return: The current frame number for the USB host controller used * with the given USB device. This can be used when scheduling * isochronous requests. * * Note: Different kinds of host controller have different "scheduling * horizons". While one type might support scheduling only 32 frames * into the future, others could support scheduling up to 1024 frames * into the future. * */ int usb_get_current_frame_number(struct usb_device *dev) { return usb_hcd_get_frame_number(dev); } EXPORT_SYMBOL_GPL(usb_get_current_frame_number); /*-------------------------------------------------------------------*/ /* * __usb_get_extra_descriptor() finds a descriptor of specific type in the * extra field of the interface and endpoint descriptor structs. 
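 *
 * Returns 0 and sets *ptr to the matching descriptor header when a
 * descriptor of the requested type with bLength >= minsize is found;
 * returns -1 if the buffer is exhausted or a descriptor with a bogus
 * bLength is encountered.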
*/ int __usb_get_extra_descriptor(char *buffer, unsigned size, unsigned char type, void **ptr, size_t minsize) { struct usb_descriptor_header *header; while (size >= sizeof(struct usb_descriptor_header)) { header = (struct usb_descriptor_header *)buffer; if (header->bLength < 2 || header->bLength > size) { printk(KERN_ERR "%s: bogus descriptor, type %d length %d\n", usbcore_name, header->bDescriptorType, header->bLength); return -1; } if (header->bDescriptorType == type && header->bLength >= minsize) { *ptr = header; return 0; } buffer += header->bLength; size -= header->bLength; } return -1; } EXPORT_SYMBOL_GPL(__usb_get_extra_descriptor); /** * usb_alloc_coherent - allocate dma-consistent buffer for URB_NO_xxx_DMA_MAP * @dev: device the buffer will be used with * @size: requested buffer size * @mem_flags: affect whether allocation may block * @dma: used to return DMA address of buffer * * Return: Either null (indicating no buffer could be allocated), or the * cpu-space pointer to a buffer that may be used to perform DMA to the * specified device. Such cpu-space buffers are returned along with the DMA * address (through the pointer provided). * * Note: * These buffers are used with URB_NO_xxx_DMA_MAP set in urb->transfer_flags * to avoid behaviors like using "DMA bounce buffers", or thrashing IOMMU * hardware during URB completion/resubmit. The implementation varies between * platforms, depending on details of how DMA will work to this device. * Using these buffers also eliminates cacheline sharing problems on * architectures where CPU caches are not DMA-coherent. On systems without * bus-snooping caches, these buffers are uncached. * * When the buffer is no longer used, free it with usb_free_coherent(). */ void *usb_alloc_coherent(struct usb_device *dev, size_t size, gfp_t mem_flags, dma_addr_t *dma) { if (!dev || !dev->bus) return NULL; return hcd_buffer_alloc(dev->bus, size, mem_flags, dma); } EXPORT_SYMBOL_GPL(usb_alloc_coherent); /** * usb_free_coherent - free memory allocated with usb_alloc_coherent() * @dev: device the buffer was used with * @size: requested buffer size * @addr: CPU address of buffer * @dma: DMA address of buffer * * This reclaims an I/O buffer, letting it be reused. The memory must have * been allocated using usb_alloc_coherent(), and the parameters must match * those provided in that allocation request. 
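 *
 * Illustrative pairing (not from this file), with len and dma matching the
 * earlier allocation:
 *
 *	buf = usb_alloc_coherent(udev, len, GFP_KERNEL, &dma);
 *	...
 *	usb_free_coherent(udev, len, buf, dma);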
*/ void usb_free_coherent(struct usb_device *dev, size_t size, void *addr, dma_addr_t dma) { if (!dev || !dev->bus) return; if (!addr) return; hcd_buffer_free(dev->bus, size, addr, dma); } EXPORT_SYMBOL_GPL(usb_free_coherent); /* * Notifications of device and interface registration */ static int usb_bus_notify(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; switch (action) { case BUS_NOTIFY_ADD_DEVICE: if (dev->type == &usb_device_type) (void) usb_create_sysfs_dev_files(to_usb_device(dev)); else if (dev->type == &usb_if_device_type) usb_create_sysfs_intf_files(to_usb_interface(dev)); break; case BUS_NOTIFY_DEL_DEVICE: if (dev->type == &usb_device_type) usb_remove_sysfs_dev_files(to_usb_device(dev)); else if (dev->type == &usb_if_device_type) usb_remove_sysfs_intf_files(to_usb_interface(dev)); break; } return 0; } static struct notifier_block usb_bus_nb = { .notifier_call = usb_bus_notify, }; static void usb_debugfs_init(void) { debugfs_create_file("devices", 0444, usb_debug_root, NULL, &usbfs_devices_fops); } static void usb_debugfs_cleanup(void) { debugfs_lookup_and_remove("devices", usb_debug_root); } /* * Init */ static int __init usb_init(void) { int retval; if (usb_disabled()) { pr_info("%s: USB support disabled\n", usbcore_name); return 0; } usb_init_pool_max(); usb_debugfs_init(); usb_acpi_register(); retval = bus_register(&usb_bus_type); if (retval) goto bus_register_failed; retval = bus_register_notifier(&usb_bus_type, &usb_bus_nb); if (retval) goto bus_notifier_failed; retval = usb_major_init(); if (retval) goto major_init_failed; retval = class_register(&usbmisc_class); if (retval) goto class_register_failed; retval = usb_register(&usbfs_driver); if (retval) goto driver_register_failed; retval = usb_devio_init(); if (retval) goto usb_devio_init_failed; retval = usb_hub_init(); if (retval) goto hub_init_failed; retval = usb_register_device_driver(&usb_generic_driver, THIS_MODULE); if (!retval) goto out; usb_hub_cleanup(); hub_init_failed: usb_devio_cleanup(); usb_devio_init_failed: usb_deregister(&usbfs_driver); driver_register_failed: class_unregister(&usbmisc_class); class_register_failed: usb_major_cleanup(); major_init_failed: bus_unregister_notifier(&usb_bus_type, &usb_bus_nb); bus_notifier_failed: bus_unregister(&usb_bus_type); bus_register_failed: usb_acpi_unregister(); usb_debugfs_cleanup(); out: return retval; } /* * Cleanup */ static void __exit usb_exit(void) { /* This will matter if shutdown/reboot does exitcalls. */ if (usb_disabled()) return; usb_release_quirk_list(); usb_deregister_device_driver(&usb_generic_driver); usb_major_cleanup(); usb_deregister(&usbfs_driver); usb_devio_cleanup(); usb_hub_cleanup(); class_unregister(&usbmisc_class); bus_unregister_notifier(&usb_bus_type, &usb_bus_nb); bus_unregister(&usb_bus_type); usb_acpi_unregister(); usb_debugfs_cleanup(); idr_destroy(&usb_bus_idr); } subsys_initcall(usb_init); module_exit(usb_exit); MODULE_LICENSE("GPL"); |
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 Intel Corp.
 *
 * This file is part of the SCTP kernel implementation
 *
 * The base lksctp header.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Xingang Guo <xingang.guo@intel.com>
 *    Jon Grimm <jgrimm@us.ibm.com>
 *    Daisy Chang <daisyc@us.ibm.com>
 *    Sridhar Samudrala <sri@us.ibm.com>
 *    Ardelle Fan <ardelle.fan@intel.com>
 *    Ryan Layer <rmlayer@us.ibm.com>
 *    Kevin Gao <kevin.gao@intel.com>
 */

#ifndef __net_sctp_h__
#define __net_sctp_h__

/* Header Strategy.
* Start getting some control over the header file depencies: * includes * constants * structs * prototypes * macros, externs, and inlines * * Move test_frame specific items out of the kernel headers * and into the test frame headers. This is not perfect in any sense * and will continue to evolve. */ #include <linux/types.h> #include <linux/slab.h> #include <linux/in.h> #include <linux/tty.h> #include <linux/proc_fs.h> #include <linux/spinlock.h> #include <linux/jiffies.h> #include <linux/idr.h> #if IS_ENABLED(CONFIG_IPV6) #include <net/ipv6.h> #include <net/ip6_route.h> #endif #include <linux/uaccess.h> #include <asm/page.h> #include <net/sock.h> #include <net/snmp.h> #include <net/sctp/structs.h> #include <net/sctp/constants.h> #ifdef CONFIG_IP_SCTP_MODULE #define SCTP_PROTOSW_FLAG 0 #else /* static! */ #define SCTP_PROTOSW_FLAG INET_PROTOSW_PERMANENT #endif /* * Function declarations. */ /* * sctp/protocol.c */ int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *addr, enum sctp_scope, gfp_t gfp, int flags); struct sctp_pf *sctp_get_pf_specific(sa_family_t family); int sctp_register_pf(struct sctp_pf *, sa_family_t); void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int); int sctp_udp_sock_start(struct net *net); void sctp_udp_sock_stop(struct net *net); /* * sctp/socket.c */ int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags); int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb); int sctp_inet_listen(struct socket *sock, int backlog); void sctp_write_space(struct sock *sk); void sctp_data_ready(struct sock *sk); __poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait); void sctp_sock_rfree(struct sk_buff *skb); void sctp_copy_sock(struct sock *newsk, struct sock *sk, struct sctp_association *asoc); extern struct percpu_counter sctp_sockets_allocated; int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *); struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int *); typedef int (*sctp_callback_t)(struct sctp_endpoint *, struct sctp_transport *, void *); void sctp_transport_walk_start(struct rhashtable_iter *iter); void sctp_transport_walk_stop(struct rhashtable_iter *iter); struct sctp_transport *sctp_transport_get_next(struct net *net, struct rhashtable_iter *iter); struct sctp_transport *sctp_transport_get_idx(struct net *net, struct rhashtable_iter *iter, int pos); int sctp_transport_lookup_process(sctp_callback_t cb, struct net *net, const union sctp_addr *laddr, const union sctp_addr *paddr, void *p, int dif); int sctp_transport_traverse_process(sctp_callback_t cb, sctp_callback_t cb_done, struct net *net, int *pos, void *p); int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), void *p); int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc, struct sctp_info *info); /* * sctp/primitive.c */ int sctp_primitive_ASSOCIATE(struct net *, struct sctp_association *, void *arg); int sctp_primitive_SHUTDOWN(struct net *, struct sctp_association *, void *arg); int sctp_primitive_ABORT(struct net *, struct sctp_association *, void *arg); int sctp_primitive_SEND(struct net *, struct sctp_association *, void *arg); int sctp_primitive_REQUESTHEARTBEAT(struct net *, struct sctp_association *, void *arg); int sctp_primitive_ASCONF(struct net *, struct sctp_association *, void *arg); int sctp_primitive_RECONF(struct net *net, struct sctp_association *asoc, void *arg); /* * sctp/input.c */ int sctp_rcv(struct sk_buff *skb); int sctp_v4_err(struct 
sk_buff *skb, u32 info); int sctp_hash_endpoint(struct sctp_endpoint *ep); void sctp_unhash_endpoint(struct sctp_endpoint *); struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *, struct sctphdr *, struct sctp_association **, struct sctp_transport **); void sctp_err_finish(struct sock *, struct sctp_transport *); int sctp_udp_v4_err(struct sock *sk, struct sk_buff *skb); int sctp_udp_v6_err(struct sock *sk, struct sk_buff *skb); void sctp_icmp_frag_needed(struct sock *, struct sctp_association *, struct sctp_transport *t, __u32 pmtu); void sctp_icmp_redirect(struct sock *, struct sctp_transport *, struct sk_buff *); void sctp_icmp_proto_unreachable(struct sock *sk, struct sctp_association *asoc, struct sctp_transport *t); int sctp_transport_hashtable_init(void); void sctp_transport_hashtable_destroy(void); int sctp_hash_transport(struct sctp_transport *t); void sctp_unhash_transport(struct sctp_transport *t); struct sctp_transport *sctp_addrs_lookup_transport( struct net *net, const union sctp_addr *laddr, const union sctp_addr *paddr, int dif, int sdif); struct sctp_transport *sctp_epaddr_lookup_transport( const struct sctp_endpoint *ep, const union sctp_addr *paddr); bool sctp_sk_bound_dev_eq(struct net *net, int bound_dev_if, int dif, int sdif); /* * sctp/proc.c */ int __net_init sctp_proc_init(struct net *net); /* * sctp/offload.c */ int sctp_offload_init(void); /* * sctp/stream_sched.c */ void sctp_sched_ops_init(void); /* * sctp/stream.c */ int sctp_send_reset_streams(struct sctp_association *asoc, struct sctp_reset_streams *params); int sctp_send_reset_assoc(struct sctp_association *asoc); int sctp_send_add_streams(struct sctp_association *asoc, struct sctp_add_streams *params); /* * Module global variables */ /* * sctp/protocol.c */ extern struct kmem_cache *sctp_chunk_cachep __read_mostly; extern struct kmem_cache *sctp_bucket_cachep __read_mostly; extern long sysctl_sctp_mem[3]; extern int sysctl_sctp_rmem[3]; extern int sysctl_sctp_wmem[3]; /* * Section: Macros, externs, and inlines */ /* SCTP SNMP MIB stats handlers */ #define SCTP_INC_STATS(net, field) SNMP_INC_STATS((net)->sctp.sctp_statistics, field) #define __SCTP_INC_STATS(net, field) __SNMP_INC_STATS((net)->sctp.sctp_statistics, field) #define SCTP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->sctp.sctp_statistics, field) /* sctp mib definitions */ enum { SCTP_MIB_NUM = 0, SCTP_MIB_CURRESTAB, /* CurrEstab */ SCTP_MIB_ACTIVEESTABS, /* ActiveEstabs */ SCTP_MIB_PASSIVEESTABS, /* PassiveEstabs */ SCTP_MIB_ABORTEDS, /* Aborteds */ SCTP_MIB_SHUTDOWNS, /* Shutdowns */ SCTP_MIB_OUTOFBLUES, /* OutOfBlues */ SCTP_MIB_CHECKSUMERRORS, /* ChecksumErrors */ SCTP_MIB_OUTCTRLCHUNKS, /* OutCtrlChunks */ SCTP_MIB_OUTORDERCHUNKS, /* OutOrderChunks */ SCTP_MIB_OUTUNORDERCHUNKS, /* OutUnorderChunks */ SCTP_MIB_INCTRLCHUNKS, /* InCtrlChunks */ SCTP_MIB_INORDERCHUNKS, /* InOrderChunks */ SCTP_MIB_INUNORDERCHUNKS, /* InUnorderChunks */ SCTP_MIB_FRAGUSRMSGS, /* FragUsrMsgs */ SCTP_MIB_REASMUSRMSGS, /* ReasmUsrMsgs */ SCTP_MIB_OUTSCTPPACKS, /* OutSCTPPacks */ SCTP_MIB_INSCTPPACKS, /* InSCTPPacks */ SCTP_MIB_T1_INIT_EXPIREDS, SCTP_MIB_T1_COOKIE_EXPIREDS, SCTP_MIB_T2_SHUTDOWN_EXPIREDS, SCTP_MIB_T3_RTX_EXPIREDS, SCTP_MIB_T4_RTO_EXPIREDS, SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS, SCTP_MIB_DELAY_SACK_EXPIREDS, SCTP_MIB_AUTOCLOSE_EXPIREDS, SCTP_MIB_T1_RETRANSMITS, SCTP_MIB_T3_RETRANSMITS, SCTP_MIB_PMTUD_RETRANSMITS, SCTP_MIB_FAST_RETRANSMITS, SCTP_MIB_IN_PKT_SOFTIRQ, SCTP_MIB_IN_PKT_BACKLOG, SCTP_MIB_IN_PKT_DISCARDS, 
SCTP_MIB_IN_DATA_CHUNK_DISCARDS, __SCTP_MIB_MAX }; #define SCTP_MIB_MAX __SCTP_MIB_MAX struct sctp_mib { unsigned long mibs[SCTP_MIB_MAX]; }; /* helper function to track stats about max rto and related transport */ static inline void sctp_max_rto(struct sctp_association *asoc, struct sctp_transport *trans) { if (asoc->stats.max_obs_rto < (__u64)trans->rto) { asoc->stats.max_obs_rto = trans->rto; memset(&asoc->stats.obs_rto_ipaddr, 0, sizeof(struct sockaddr_storage)); memcpy(&asoc->stats.obs_rto_ipaddr, &trans->ipaddr, trans->af_specific->sockaddr_len); } } /* * Macros for keeping a global reference of object allocations. */ #ifdef CONFIG_SCTP_DBG_OBJCNT extern atomic_t sctp_dbg_objcnt_sock; extern atomic_t sctp_dbg_objcnt_ep; extern atomic_t sctp_dbg_objcnt_assoc; extern atomic_t sctp_dbg_objcnt_transport; extern atomic_t sctp_dbg_objcnt_chunk; extern atomic_t sctp_dbg_objcnt_bind_addr; extern atomic_t sctp_dbg_objcnt_bind_bucket; extern atomic_t sctp_dbg_objcnt_addr; extern atomic_t sctp_dbg_objcnt_datamsg; extern atomic_t sctp_dbg_objcnt_keys; /* Macros to atomically increment/decrement objcnt counters. */ #define SCTP_DBG_OBJCNT_INC(name) \ atomic_inc(&sctp_dbg_objcnt_## name) #define SCTP_DBG_OBJCNT_DEC(name) \ atomic_dec(&sctp_dbg_objcnt_## name) #define SCTP_DBG_OBJCNT(name) \ atomic_t sctp_dbg_objcnt_## name = ATOMIC_INIT(0) /* Macro to help create new entries in the global array of * objcnt counters. */ #define SCTP_DBG_OBJCNT_ENTRY(name) \ {.label= #name, .counter= &sctp_dbg_objcnt_## name} void sctp_dbg_objcnt_init(struct net *); #else #define SCTP_DBG_OBJCNT_INC(name) #define SCTP_DBG_OBJCNT_DEC(name) static inline void sctp_dbg_objcnt_init(struct net *net) { return; } #endif /* CONFIG_SCTP_DBG_OBJCOUNT */ #if defined CONFIG_SYSCTL void sctp_sysctl_register(void); void sctp_sysctl_unregister(void); int sctp_sysctl_net_register(struct net *net); void sctp_sysctl_net_unregister(struct net *net); #else static inline void sctp_sysctl_register(void) { return; } static inline void sctp_sysctl_unregister(void) { return; } static inline int sctp_sysctl_net_register(struct net *net) { return 0; } static inline void sctp_sysctl_net_unregister(struct net *net) { return; } #endif /* Size of Supported Address Parameter for 'x' address types. */ #define SCTP_SAT_LEN(x) (sizeof(struct sctp_paramhdr) + (x) * sizeof(__u16)) #if IS_ENABLED(CONFIG_IPV6) void sctp_v6_pf_init(void); void sctp_v6_pf_exit(void); int sctp_v6_protosw_init(void); void sctp_v6_protosw_exit(void); int sctp_v6_add_protocol(void); void sctp_v6_del_protocol(void); #else /* #ifdef defined(CONFIG_IPV6) */ static inline void sctp_v6_pf_init(void) { return; } static inline void sctp_v6_pf_exit(void) { return; } static inline int sctp_v6_protosw_init(void) { return 0; } static inline void sctp_v6_protosw_exit(void) { return; } static inline int sctp_v6_add_protocol(void) { return 0; } static inline void sctp_v6_del_protocol(void) { return; } #endif /* #if defined(CONFIG_IPV6) */ /* Map an association to an assoc_id. */ static inline sctp_assoc_t sctp_assoc2id(const struct sctp_association *asoc) { return asoc ? asoc->assoc_id : 0; } static inline enum sctp_sstat_state sctp_assoc_to_state(const struct sctp_association *asoc) { /* SCTP's uapi always had SCTP_EMPTY(=0) as a dummy state, but we * got rid of it in kernel space. Therefore SCTP_CLOSED et al * start at =1 in user space, but actually as =0 in kernel space. 
* Now that we can not break user space and SCTP_EMPTY is exposed * there, we need to fix it up with an ugly offset not to break * applications. :( */ return asoc->state + 1; } /* Look up the association by its id. */ struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id); int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp); /* A macro to walk a list of skbs. */ #define sctp_skb_for_each(pos, head, tmp) \ skb_queue_walk_safe(head, pos, tmp) /** * sctp_list_dequeue - remove from the head of the queue * @list: list to dequeue from * * Remove the head of the list. The head item is * returned or %NULL if the list is empty. */ static inline struct list_head *sctp_list_dequeue(struct list_head *list) { struct list_head *result = NULL; if (!list_empty(list)) { result = list->next; list_del_init(result); } return result; } /* SCTP version of skb_set_owner_r. We need this one because * of the way we have to do receive buffer accounting on bundled * chunks. */ static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk) { struct sctp_ulpevent *event = sctp_skb2event(skb); skb_orphan(skb); skb->sk = sk; skb->destructor = sctp_sock_rfree; atomic_add(event->rmem_len, &sk->sk_rmem_alloc); /* * This mimics the behavior of skb_set_owner_r */ sk_mem_charge(sk, event->rmem_len); } /* Tests if the list has one and only one entry. */ static inline int sctp_list_single_entry(struct list_head *head) { return list_is_singular(head); } static inline bool sctp_chunk_pending(const struct sctp_chunk *chunk) { return !list_empty(&chunk->list); } /* Walk through a list of TLV parameters. Don't trust the * individual parameter lengths and instead depend on * the chunk length to indicate when to stop. Make sure * there is room for a param header too. */ #define sctp_walk_params(pos, chunk)\ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length)) #define _sctp_walk_params(pos, chunk, end)\ for (pos.v = (u8 *)(chunk + 1);\ (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <=\ (void *)chunk + end) &&\ pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\ ntohs(pos.p->length) >= sizeof(struct sctp_paramhdr);\ pos.v += SCTP_PAD4(ntohs(pos.p->length))) #define sctp_walk_errors(err, chunk_hdr)\ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length)) #define _sctp_walk_errors(err, chunk_hdr, end)\ for (err = (struct sctp_errhdr *)((void *)chunk_hdr + \ sizeof(struct sctp_chunkhdr));\ ((void *)err + offsetof(struct sctp_errhdr, length) + sizeof(err->length) <=\ (void *)chunk_hdr + end) &&\ (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\ ntohs(err->length) >= sizeof(struct sctp_errhdr); \ err = (struct sctp_errhdr *)((void *)err + SCTP_PAD4(ntohs(err->length)))) #define sctp_walk_fwdtsn(pos, chunk)\ _sctp_walk_fwdtsn((pos), (chunk), ntohs((chunk)->chunk_hdr->length) - sizeof(struct sctp_fwdtsn_chunk)) #define _sctp_walk_fwdtsn(pos, chunk, end)\ for (pos = (void *)(chunk->subh.fwdtsn_hdr + 1);\ (void *)pos <= (void *)(chunk->subh.fwdtsn_hdr + 1) + end - sizeof(struct sctp_fwdtsn_skip);\ pos++) /* External references. */ extern struct proto sctp_prot; extern struct proto sctpv6_prot; void sctp_put_port(struct sock *sk); extern struct idr sctp_assocs_id; extern spinlock_t sctp_assocs_id_lock; /* Static inline functions. */ /* Convert from an IP version number to an Address Family symbol. 
*/ static inline int ipver2af(__u8 ipver) { switch (ipver) { case 4: return AF_INET; case 6: return AF_INET6; default: return 0; } } /* Convert from an address parameter type to an address family. */ static inline int param_type2af(__be16 type) { switch (type) { case SCTP_PARAM_IPV4_ADDRESS: return AF_INET; case SCTP_PARAM_IPV6_ADDRESS: return AF_INET6; default: return 0; } } /* Warning: The following hash functions assume a power of two 'size'. */ /* This is the hash function for the SCTP port hash table. */ static inline int sctp_phashfn(struct net *net, __u16 lport) { return (net_hash_mix(net) + lport) & (sctp_port_hashsize - 1); } /* This is the hash function for the endpoint hash table. */ static inline int sctp_ep_hashfn(struct net *net, __u16 lport) { return (net_hash_mix(net) + lport) & (sctp_ep_hashsize - 1); } #define sctp_for_each_hentry(ep, head) \ hlist_for_each_entry(ep, head, node) /* Is a socket of this style? */ #define sctp_style(sk, style) __sctp_style((sk), (SCTP_SOCKET_##style)) static inline int __sctp_style(const struct sock *sk, enum sctp_socket_type style) { return sctp_sk(sk)->type == style; } /* Is the association in this state? */ #define sctp_state(asoc, state) __sctp_state((asoc), (SCTP_STATE_##state)) static inline int __sctp_state(const struct sctp_association *asoc, enum sctp_state state) { return asoc->state == state; } /* Is the socket in this state? */ #define sctp_sstate(sk, state) __sctp_sstate((sk), (SCTP_SS_##state)) static inline int __sctp_sstate(const struct sock *sk, enum sctp_sock_state state) { return sk->sk_state == state; } /* Map v4-mapped v6 address back to v4 address */ static inline void sctp_v6_map_v4(union sctp_addr *addr) { addr->v4.sin_family = AF_INET; addr->v4.sin_port = addr->v6.sin6_port; addr->v4.sin_addr.s_addr = addr->v6.sin6_addr.s6_addr32[3]; } /* Map v4 address to v4-mapped v6 address */ static inline void sctp_v4_map_v6(union sctp_addr *addr) { __be16 port; port = addr->v4.sin_port; addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr; addr->v6.sin6_port = port; addr->v6.sin6_family = AF_INET6; addr->v6.sin6_flowinfo = 0; addr->v6.sin6_scope_id = 0; addr->v6.sin6_addr.s6_addr32[0] = 0; addr->v6.sin6_addr.s6_addr32[1] = 0; addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff); } /* The cookie is always 0 since this is how it's used in the * pmtu code. */ static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t) { if (t->dst && !dst_check(t->dst, t->dst_cookie)) sctp_transport_dst_release(t); return t->dst; } /* Calculate max payload size given a MTU, or the total overhead if * given MTU is zero */ static inline __u32 __sctp_mtu_payload(const struct sctp_sock *sp, const struct sctp_transport *t, __u32 mtu, __u32 extra) { __u32 overhead = sizeof(struct sctphdr) + extra; if (sp) { overhead += sp->pf->af->net_header_len; if (sp->udp_port && (!t || t->encap_port)) overhead += sizeof(struct udphdr); } else { overhead += sizeof(struct ipv6hdr); } if (WARN_ON_ONCE(mtu && mtu <= overhead)) mtu = overhead; return mtu ? 
mtu - overhead : overhead; } static inline __u32 sctp_mtu_payload(const struct sctp_sock *sp, __u32 mtu, __u32 extra) { return __sctp_mtu_payload(sp, NULL, mtu, extra); } static inline __u32 sctp_dst_mtu(const struct dst_entry *dst) { return SCTP_TRUNC4(max_t(__u32, dst_mtu(dst), SCTP_DEFAULT_MINSEGMENT)); } static inline bool sctp_transport_pmtu_check(struct sctp_transport *t) { __u32 pmtu = sctp_dst_mtu(t->dst); if (t->pathmtu == pmtu) return true; t->pathmtu = pmtu; return false; } static inline __u32 sctp_min_frag_point(struct sctp_sock *sp, __u16 datasize) { return sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, datasize); } static inline int sctp_transport_pl_hlen(struct sctp_transport *t) { return __sctp_mtu_payload(sctp_sk(t->asoc->base.sk), t, 0, 0) - sizeof(struct sctphdr); } static inline void sctp_transport_pl_reset(struct sctp_transport *t) { if (t->probe_interval && (t->param_flags & SPP_PMTUD_ENABLE) && (t->state == SCTP_ACTIVE || t->state == SCTP_UNKNOWN)) { if (t->pl.state == SCTP_PL_DISABLED) { t->pl.state = SCTP_PL_BASE; t->pl.pmtu = SCTP_BASE_PLPMTU; t->pl.probe_size = SCTP_BASE_PLPMTU; sctp_transport_reset_probe_timer(t); } } else { if (t->pl.state != SCTP_PL_DISABLED) { if (del_timer(&t->probe_timer)) sctp_transport_put(t); t->pl.state = SCTP_PL_DISABLED; } } } static inline void sctp_transport_pl_update(struct sctp_transport *t) { if (t->pl.state == SCTP_PL_DISABLED) return; t->pl.state = SCTP_PL_BASE; t->pl.pmtu = SCTP_BASE_PLPMTU; t->pl.probe_size = SCTP_BASE_PLPMTU; sctp_transport_reset_probe_timer(t); } static inline bool sctp_transport_pl_enabled(struct sctp_transport *t) { return t->pl.state != SCTP_PL_DISABLED; } static inline bool sctp_newsk_ready(const struct sock *sk) { return sock_flag(sk, SOCK_DEAD) || sk->sk_socket; } static inline void sctp_sock_set_nodelay(struct sock *sk) { lock_sock(sk); sctp_sk(sk)->nodelay = true; release_sock(sk); } #endif /* __net_sctp_h__ */ |
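/*
 * Worked example (illustrative, not part of the header): with a plain IPv4
 * path, a 1500-byte MTU and no UDP encapsulation, __sctp_mtu_payload() sees
 * 20 bytes of IPv4 header plus 12 bytes of SCTP common header, so the usable
 * payload is 1500 - 32 = 1468 bytes; enabling UDP encapsulation costs another
 * 8 bytes, leaving 1460. The standalone helper below re-derives those numbers
 * with hard-coded header sizes; its name and parameters are assumptions for
 * the sketch, and it deliberately ignores the af-specific lookups the real
 * helper performs.
 */
static inline unsigned int example_sctp_payload(unsigned int mtu,
						unsigned int ip_hdr_len,
						bool udp_encap)
{
	unsigned int overhead = ip_hdr_len + 12 + (udp_encap ? 8 : 0);

	if (!mtu)
		return overhead;	/* caller asked for the total overhead */

	return mtu > overhead ? mtu - overhead : 0;
}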
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  ebt_ip6
 *
 *	Authors:
 *	Manohar Castelino <manohar.r.castelino@intel.com>
 *	Kuo-Lang Tseng <kuo-lang.tseng@intel.com>
 *	Jan Engelhardt <jengelh@medozas.de>
 *
 * Summary:
 * This is just a modification of the IPv4 code written by
 * Bart De Schuymer <bdschuym@pandora.be>
 * with the changes required to support IPv6
 *
 *  Jan, 2008
 */
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/in.h>
#include <linux/module.h>
#include <net/dsfield.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_ip6.h>

union pkthdr {
	struct {
		__be16 src;
		__be16 dst;
	} tcpudphdr;
	struct {
		u8 type;
		u8 code;
	} icmphdr;
};

static bool
ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct ebt_ip6_info *info = par->matchinfo;
	const struct ipv6hdr *ih6;
	struct ipv6hdr _ip6h;
	const union pkthdr *pptr;
	union pkthdr _pkthdr;

	ih6 = skb_header_pointer(skb, 0, sizeof(_ip6h), &_ip6h);
	if (ih6 == NULL)
		return false;
	if ((info->bitmask & EBT_IP6_TCLASS) &&
	    NF_INVF(info, EBT_IP6_TCLASS,
		    info->tclass != ipv6_get_dsfield(ih6)))
		return false;
	if (((info->bitmask & EBT_IP6_SOURCE) &&
	     NF_INVF(info, EBT_IP6_SOURCE,
		     ipv6_masked_addr_cmp(&ih6->saddr, &info->smsk,
					  &info->saddr))) ||
	    ((info->bitmask & EBT_IP6_DEST) &&
	     NF_INVF(info, EBT_IP6_DEST,
		     ipv6_masked_addr_cmp(&ih6->daddr, &info->dmsk,
					  &info->daddr))))
		return false;
	if (info->bitmask & EBT_IP6_PROTO) {
		uint8_t nexthdr = ih6->nexthdr;
		__be16 frag_off;
		int offset_ph;

		offset_ph = ipv6_skip_exthdr(skb, sizeof(_ip6h), &nexthdr,
					     &frag_off);
		if (offset_ph == -1)
			return false;
		if (NF_INVF(info, EBT_IP6_PROTO,
			    info->protocol != nexthdr))
			return false;
		if (!(info->bitmask & (EBT_IP6_DPORT |
				       EBT_IP6_SPORT | EBT_IP6_ICMP6)))
			return true;

		/* min icmpv6 headersize is 4, so sizeof(_pkthdr) is ok.
*/ pptr = skb_header_pointer(skb, offset_ph, sizeof(_pkthdr), &_pkthdr); if (pptr == NULL) return false; if (info->bitmask & EBT_IP6_DPORT) { u16 dst = ntohs(pptr->tcpudphdr.dst); if (NF_INVF(info, EBT_IP6_DPORT, dst < info->dport[0] || dst > info->dport[1])) return false; } if (info->bitmask & EBT_IP6_SPORT) { u16 src = ntohs(pptr->tcpudphdr.src); if (NF_INVF(info, EBT_IP6_SPORT, src < info->sport[0] || src > info->sport[1])) return false; } if ((info->bitmask & EBT_IP6_ICMP6) && NF_INVF(info, EBT_IP6_ICMP6, pptr->icmphdr.type < info->icmpv6_type[0] || pptr->icmphdr.type > info->icmpv6_type[1] || pptr->icmphdr.code < info->icmpv6_code[0] || pptr->icmphdr.code > info->icmpv6_code[1])) return false; } return true; } static int ebt_ip6_mt_check(const struct xt_mtchk_param *par) { const struct ebt_entry *e = par->entryinfo; struct ebt_ip6_info *info = par->matchinfo; if (e->ethproto != htons(ETH_P_IPV6) || e->invflags & EBT_IPROTO) return -EINVAL; if (info->bitmask & ~EBT_IP6_MASK || info->invflags & ~EBT_IP6_MASK) return -EINVAL; if (info->bitmask & (EBT_IP6_DPORT | EBT_IP6_SPORT)) { if (info->invflags & EBT_IP6_PROTO) return -EINVAL; if (info->protocol != IPPROTO_TCP && info->protocol != IPPROTO_UDP && info->protocol != IPPROTO_UDPLITE && info->protocol != IPPROTO_SCTP && info->protocol != IPPROTO_DCCP) return -EINVAL; } if (info->bitmask & EBT_IP6_DPORT && info->dport[0] > info->dport[1]) return -EINVAL; if (info->bitmask & EBT_IP6_SPORT && info->sport[0] > info->sport[1]) return -EINVAL; if (info->bitmask & EBT_IP6_ICMP6) { if ((info->invflags & EBT_IP6_PROTO) || info->protocol != IPPROTO_ICMPV6) return -EINVAL; if (info->icmpv6_type[0] > info->icmpv6_type[1] || info->icmpv6_code[0] > info->icmpv6_code[1]) return -EINVAL; } return 0; } static struct xt_match ebt_ip6_mt_reg __read_mostly = { .name = "ip6", .revision = 0, .family = NFPROTO_BRIDGE, .match = ebt_ip6_mt, .checkentry = ebt_ip6_mt_check, .matchsize = sizeof(struct ebt_ip6_info), .me = THIS_MODULE, }; static int __init ebt_ip6_init(void) { return xt_register_match(&ebt_ip6_mt_reg); } static void __exit ebt_ip6_fini(void) { xt_unregister_match(&ebt_ip6_mt_reg); } module_init(ebt_ip6_init); module_exit(ebt_ip6_fini); MODULE_DESCRIPTION("Ebtables: IPv6 protocol packet match"); MODULE_AUTHOR("Kuo-Lang Tseng <kuo-lang.tseng@intel.com>"); MODULE_LICENSE("GPL"); |
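/*
 * Illustrative sketch (not part of this module): the port tests in
 * ebt_ip6_mt() boil down to "is the port inside [lo, hi]?", XORed with the
 * rule's invert flag via NF_INVF(). The standalone helper below restates
 * that logic with plain types; its name and parameters are invented for the
 * example and it is only meant to document the matching semantics.
 */
static inline bool example_port_range_match(unsigned short port,
					    unsigned short lo,
					    unsigned short hi,
					    bool inverted)
{
	bool in_range = port >= lo && port <= hi;

	/* Match when in range, or when out of range if the rule is inverted. */
	return in_range != inverted;
}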
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   Copyright (C) 2020, Microsoft Corporation.
 *
 *   Author(s): Steve French <stfrench@microsoft.com>
 *              David Howells <dhowells@redhat.com>
 */

/*
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/magic.h>
#include <linux/security.h>
#include <net/net_namespace.h>
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dfs_cache.h"
#endif
*/

#include <linux/ctype.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/parser.h>
#include <linux/utsname.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "ntlmssp.h"
#include "nterr.h"
#include "rfc1002pdu.h"
#include "fs_context.h"

static DEFINE_MUTEX(cifs_mount_mutex);

static const match_table_t cifs_smb_version_tokens = {
	{ Smb_1, SMB1_VERSION_STRING },
	{ Smb_20, SMB20_VERSION_STRING},
	{ Smb_21, SMB21_VERSION_STRING },
	{ Smb_30, SMB30_VERSION_STRING },
	{ Smb_302, SMB302_VERSION_STRING },
	{ Smb_302, ALT_SMB302_VERSION_STRING },
	{ Smb_311, SMB311_VERSION_STRING },
	{ Smb_311, ALT_SMB311_VERSION_STRING },
	{ Smb_3any, SMB3ANY_VERSION_STRING },
	{ Smb_default, SMBDEFAULT_VERSION_STRING },
	{ Smb_version_err, NULL }
};

static const match_table_t cifs_secflavor_tokens = {
	{ Opt_sec_krb5, "krb5" },
	{ Opt_sec_krb5i, "krb5i" },
	{ Opt_sec_krb5p, "krb5p" },
	{ Opt_sec_ntlmsspi, "ntlmsspi" },
	{ Opt_sec_ntlmssp, "ntlmssp" },
	{ Opt_sec_ntlmv2, "nontlm" },
	{ Opt_sec_ntlmv2, "ntlmv2" },
	{ Opt_sec_ntlmv2i, "ntlmv2i" },
	{ Opt_sec_none, "none" },
	{ Opt_sec_err, NULL }
};

const struct fs_parameter_spec smb3_fs_parameters[] = {
	/* Mount options that take no arguments */
	fsparam_flag_no("user_xattr", Opt_user_xattr),
	fsparam_flag_no("forceuid", Opt_forceuid),
	fsparam_flag_no("multichannel", Opt_multichannel),
	fsparam_flag_no("forcegid", Opt_forcegid),
	fsparam_flag("noblocksend", Opt_noblocksend),
	fsparam_flag("noautotune", Opt_noautotune),
	fsparam_flag("nolease", Opt_nolease),
	fsparam_flag_no("hard", Opt_hard),
	fsparam_flag_no("soft", Opt_soft),
	fsparam_flag_no("perm", Opt_perm),
	fsparam_flag("nodelete", Opt_nodelete),
	fsparam_flag_no("mapposix", Opt_mapposix),
	fsparam_flag("mapchars", Opt_mapchars),
	fsparam_flag("nomapchars", Opt_nomapchars),
	fsparam_flag_no("sfu", Opt_sfu),
	fsparam_flag("nodfs", Opt_nodfs),
	fsparam_flag_no("posixpaths",
Opt_posixpaths), fsparam_flag_no("unix", Opt_unix), fsparam_flag_no("linux", Opt_unix), fsparam_flag_no("posix", Opt_unix), fsparam_flag("nocase", Opt_nocase), fsparam_flag("ignorecase", Opt_nocase), fsparam_flag_no("brl", Opt_brl), fsparam_flag_no("handlecache", Opt_handlecache), fsparam_flag("forcemandatorylock", Opt_forcemandatorylock), fsparam_flag("forcemand", Opt_forcemandatorylock), fsparam_flag("setuidfromacl", Opt_setuidfromacl), fsparam_flag("idsfromsid", Opt_setuidfromacl), fsparam_flag_no("setuids", Opt_setuids), fsparam_flag_no("dynperm", Opt_dynperm), fsparam_flag_no("intr", Opt_intr), fsparam_flag_no("strictsync", Opt_strictsync), fsparam_flag_no("serverino", Opt_serverino), fsparam_flag("rwpidforward", Opt_rwpidforward), fsparam_flag("cifsacl", Opt_cifsacl), fsparam_flag_no("acl", Opt_acl), fsparam_flag("locallease", Opt_locallease), fsparam_flag("sign", Opt_sign), fsparam_flag("ignore_signature", Opt_ignore_signature), fsparam_flag("signloosely", Opt_ignore_signature), fsparam_flag("seal", Opt_seal), fsparam_flag("noac", Opt_noac), fsparam_flag("fsc", Opt_fsc), fsparam_flag("mfsymlinks", Opt_mfsymlinks), fsparam_flag("multiuser", Opt_multiuser), fsparam_flag("sloppy", Opt_sloppy), fsparam_flag("nosharesock", Opt_nosharesock), fsparam_flag_no("persistenthandles", Opt_persistent), fsparam_flag_no("resilienthandles", Opt_resilient), fsparam_flag_no("tcpnodelay", Opt_tcp_nodelay), fsparam_flag("nosparse", Opt_nosparse), fsparam_flag("domainauto", Opt_domainauto), fsparam_flag("rdma", Opt_rdma), fsparam_flag("modesid", Opt_modesid), fsparam_flag("modefromsid", Opt_modesid), fsparam_flag("rootfs", Opt_rootfs), fsparam_flag("compress", Opt_compress), fsparam_flag("witness", Opt_witness), /* Mount options which take numeric value */ fsparam_u32("backupuid", Opt_backupuid), fsparam_u32("backupgid", Opt_backupgid), fsparam_u32("uid", Opt_uid), fsparam_u32("cruid", Opt_cruid), fsparam_u32("gid", Opt_gid), fsparam_u32("file_mode", Opt_file_mode), fsparam_u32("dirmode", Opt_dirmode), fsparam_u32("dir_mode", Opt_dirmode), fsparam_u32("port", Opt_port), fsparam_u32("min_enc_offload", Opt_min_enc_offload), fsparam_u32("esize", Opt_min_enc_offload), fsparam_u32("bsize", Opt_blocksize), fsparam_u32("rasize", Opt_rasize), fsparam_u32("rsize", Opt_rsize), fsparam_u32("wsize", Opt_wsize), fsparam_u32("actimeo", Opt_actimeo), fsparam_u32("acdirmax", Opt_acdirmax), fsparam_u32("acregmax", Opt_acregmax), fsparam_u32("closetimeo", Opt_closetimeo), fsparam_u32("echo_interval", Opt_echo_interval), fsparam_u32("max_credits", Opt_max_credits), fsparam_u32("max_cached_dirs", Opt_max_cached_dirs), fsparam_u32("handletimeout", Opt_handletimeout), fsparam_u64("snapshot", Opt_snapshot), fsparam_u32("max_channels", Opt_max_channels), /* Mount options which take string value */ fsparam_string("source", Opt_source), fsparam_string("user", Opt_user), fsparam_string("username", Opt_user), fsparam_string("pass", Opt_pass), fsparam_string("password", Opt_pass), fsparam_string("ip", Opt_ip), fsparam_string("addr", Opt_ip), fsparam_string("domain", Opt_domain), fsparam_string("dom", Opt_domain), fsparam_string("srcaddr", Opt_srcaddr), fsparam_string("iocharset", Opt_iocharset), fsparam_string("netbiosname", Opt_netbiosname), fsparam_string("servern", Opt_servern), fsparam_string("ver", Opt_ver), fsparam_string("vers", Opt_vers), fsparam_string("sec", Opt_sec), fsparam_string("cache", Opt_cache), /* Arguments that should be ignored */ fsparam_flag("guest", Opt_ignore), fsparam_flag("noatime", Opt_ignore), 
fsparam_flag("relatime", Opt_ignore), fsparam_flag("_netdev", Opt_ignore), fsparam_flag_no("suid", Opt_ignore), fsparam_flag_no("exec", Opt_ignore), fsparam_flag_no("dev", Opt_ignore), fsparam_flag_no("mand", Opt_ignore), fsparam_flag_no("auto", Opt_ignore), fsparam_string("cred", Opt_ignore), fsparam_string("credentials", Opt_ignore), /* * UNC and prefixpath is now extracted from Opt_source * in the new mount API so we can just ignore them going forward. */ fsparam_string("unc", Opt_ignore), fsparam_string("prefixpath", Opt_ignore), {} }; static int cifs_parse_security_flavors(struct fs_context *fc, char *value, struct smb3_fs_context *ctx) { substring_t args[MAX_OPT_ARGS]; /* * With mount options, the last one should win. Reset any existing * settings back to default. */ ctx->sectype = Unspecified; ctx->sign = false; switch (match_token(value, cifs_secflavor_tokens, args)) { case Opt_sec_krb5p: cifs_errorf(fc, "sec=krb5p is not supported!\n"); return 1; case Opt_sec_krb5i: ctx->sign = true; fallthrough; case Opt_sec_krb5: ctx->sectype = Kerberos; break; case Opt_sec_ntlmsspi: ctx->sign = true; fallthrough; case Opt_sec_ntlmssp: ctx->sectype = RawNTLMSSP; break; case Opt_sec_ntlmv2i: ctx->sign = true; fallthrough; case Opt_sec_ntlmv2: ctx->sectype = NTLMv2; break; case Opt_sec_none: ctx->nullauth = 1; kfree(ctx->username); ctx->username = NULL; break; default: cifs_errorf(fc, "bad security option: %s\n", value); return 1; } return 0; } static const match_table_t cifs_cacheflavor_tokens = { { Opt_cache_loose, "loose" }, { Opt_cache_strict, "strict" }, { Opt_cache_none, "none" }, { Opt_cache_ro, "ro" }, { Opt_cache_rw, "singleclient" }, { Opt_cache_err, NULL } }; static int cifs_parse_cache_flavor(struct fs_context *fc, char *value, struct smb3_fs_context *ctx) { substring_t args[MAX_OPT_ARGS]; switch (match_token(value, cifs_cacheflavor_tokens, args)) { case Opt_cache_loose: ctx->direct_io = false; ctx->strict_io = false; ctx->cache_ro = false; ctx->cache_rw = false; break; case Opt_cache_strict: ctx->direct_io = false; ctx->strict_io = true; ctx->cache_ro = false; ctx->cache_rw = false; break; case Opt_cache_none: ctx->direct_io = true; ctx->strict_io = false; ctx->cache_ro = false; ctx->cache_rw = false; break; case Opt_cache_ro: ctx->direct_io = false; ctx->strict_io = false; ctx->cache_ro = true; ctx->cache_rw = false; break; case Opt_cache_rw: ctx->direct_io = false; ctx->strict_io = false; ctx->cache_ro = false; ctx->cache_rw = true; break; default: cifs_errorf(fc, "bad cache= option: %s\n", value); return 1; } return 0; } #define DUP_CTX_STR(field) \ do { \ if (ctx->field) { \ new_ctx->field = kstrdup(ctx->field, GFP_ATOMIC); \ if (new_ctx->field == NULL) { \ smb3_cleanup_fs_context_contents(new_ctx); \ return -ENOMEM; \ } \ } \ } while (0) int smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx) { memcpy(new_ctx, ctx, sizeof(*ctx)); new_ctx->prepath = NULL; new_ctx->nodename = NULL; new_ctx->username = NULL; new_ctx->password = NULL; new_ctx->server_hostname = NULL; new_ctx->domainname = NULL; new_ctx->UNC = NULL; new_ctx->source = NULL; new_ctx->iocharset = NULL; new_ctx->leaf_fullpath = NULL; /* * Make sure to stay in sync with smb3_cleanup_fs_context_contents() */ DUP_CTX_STR(prepath); DUP_CTX_STR(username); DUP_CTX_STR(password); DUP_CTX_STR(server_hostname); DUP_CTX_STR(UNC); DUP_CTX_STR(source); DUP_CTX_STR(domainname); DUP_CTX_STR(nodename); DUP_CTX_STR(iocharset); DUP_CTX_STR(leaf_fullpath); return 0; } static int cifs_parse_smb_version(struct 
fs_context *fc, char *value, struct smb3_fs_context *ctx, bool is_smb3) { substring_t args[MAX_OPT_ARGS]; switch (match_token(value, cifs_smb_version_tokens, args)) { #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY case Smb_1: if (disable_legacy_dialects) { cifs_errorf(fc, "mount with legacy dialect disabled\n"); return 1; } if (is_smb3) { cifs_errorf(fc, "vers=1.0 (cifs) not permitted when mounting with smb3\n"); return 1; } cifs_errorf(fc, "Use of the less secure dialect vers=1.0 is not recommended unless required for access to very old servers\n"); ctx->ops = &smb1_operations; ctx->vals = &smb1_values; break; case Smb_20: if (disable_legacy_dialects) { cifs_errorf(fc, "mount with legacy dialect disabled\n"); return 1; } if (is_smb3) { cifs_errorf(fc, "vers=2.0 not permitted when mounting with smb3\n"); return 1; } ctx->ops = &smb20_operations; ctx->vals = &smb20_values; break; #else case Smb_1: cifs_errorf(fc, "vers=1.0 (cifs) mount not permitted when legacy dialects disabled\n"); return 1; case Smb_20: cifs_errorf(fc, "vers=2.0 mount not permitted when legacy dialects disabled\n"); return 1; #endif /* CIFS_ALLOW_INSECURE_LEGACY */ case Smb_21: ctx->ops = &smb21_operations; ctx->vals = &smb21_values; break; case Smb_30: ctx->ops = &smb30_operations; ctx->vals = &smb30_values; break; case Smb_302: ctx->ops = &smb30_operations; /* currently identical with 3.0 */ ctx->vals = &smb302_values; break; case Smb_311: ctx->ops = &smb311_operations; ctx->vals = &smb311_values; break; case Smb_3any: ctx->ops = &smb30_operations; /* currently identical with 3.0 */ ctx->vals = &smb3any_values; break; case Smb_default: ctx->ops = &smb30_operations; ctx->vals = &smbdefault_values; break; default: cifs_errorf(fc, "Unknown vers= option specified: %s\n", value); return 1; } return 0; } int smb3_parse_opt(const char *options, const char *key, char **val) { int rc = -ENOENT; char *opts, *orig, *p; orig = opts = kstrdup(options, GFP_KERNEL); if (!opts) return -ENOMEM; while ((p = strsep(&opts, ","))) { char *nval; if (!*p) continue; if (strncasecmp(p, key, strlen(key))) continue; nval = strchr(p, '='); if (nval) { if (nval == p) continue; *nval++ = 0; *val = kstrdup(nval, GFP_KERNEL); rc = !*val ? -ENOMEM : 0; goto out; } } out: kfree(orig); return rc; } /* * Remove duplicate path delimiters. Windows is supposed to do that * but there are some bugs that prevent rename from working if there are * multiple delimiters. * * Return a sanitized duplicate of @path or NULL for empty prefix paths. * Otherwise, return ERR_PTR. * * @gfp indicates the GFP_* flags for kstrdup. * The caller is responsible for freeing the original. */ #define IS_DELIM(c) ((c) == '/' || (c) == '\\') char *cifs_sanitize_prepath(char *prepath, gfp_t gfp) { char *cursor1 = prepath, *cursor2 = prepath; char *s; /* skip all prepended delimiters */ while (IS_DELIM(*cursor1)) cursor1++; /* copy the first letter */ *cursor2 = *cursor1; /* copy the remainder... */ while (*(cursor1++)) { /* ... skipping all duplicated delimiters */ if (IS_DELIM(*cursor1) && IS_DELIM(*cursor2)) continue; *(++cursor2) = *cursor1; } /* if the last character is a delimiter, skip it */ if (IS_DELIM(*(cursor2 - 1))) cursor2--; *cursor2 = '\0'; if (!*prepath) return NULL; s = kstrdup(prepath, gfp); if (!s) return ERR_PTR(-ENOMEM); return s; } /* * Return full path based on the values of @ctx->{UNC,prepath}. * * It is assumed that both values were already parsed by smb3_parse_devname(). 
*/ char *smb3_fs_context_fullpath(const struct smb3_fs_context *ctx, char dirsep) { size_t ulen, plen; char *s; ulen = strlen(ctx->UNC); plen = ctx->prepath ? strlen(ctx->prepath) + 1 : 0; s = kmalloc(ulen + plen + 1, GFP_KERNEL); if (!s) return ERR_PTR(-ENOMEM); memcpy(s, ctx->UNC, ulen); if (plen) { s[ulen] = dirsep; memcpy(s + ulen + 1, ctx->prepath, plen); } s[ulen + plen] = '\0'; convert_delimiter(s, dirsep); return s; } /* * Parse a devname into substrings and populate the ctx->UNC and ctx->prepath * fields with the result. Returns 0 on success and an error otherwise * (e.g. ENOMEM or EINVAL) */ int smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx) { char *pos; const char *delims = "/\\"; size_t len; int rc; if (unlikely(!devname || !*devname)) { cifs_dbg(VFS, "Device name not specified\n"); return -EINVAL; } /* make sure we have a valid UNC double delimiter prefix */ len = strspn(devname, delims); if (len != 2) return -EINVAL; /* find delimiter between host and sharename */ pos = strpbrk(devname + 2, delims); if (!pos) return -EINVAL; /* record the server hostname */ kfree(ctx->server_hostname); ctx->server_hostname = kstrndup(devname + 2, pos - devname - 2, GFP_KERNEL); if (!ctx->server_hostname) return -ENOMEM; /* skip past delimiter */ ++pos; /* now go until next delimiter or end of string */ len = strcspn(pos, delims); if (!len) return -EINVAL; /* move "pos" up to delimiter or NULL */ pos += len; kfree(ctx->UNC); ctx->UNC = kstrndup(devname, pos - devname, GFP_KERNEL); if (!ctx->UNC) return -ENOMEM; convert_delimiter(ctx->UNC, '\\'); /* skip any delimiter */ if (*pos == '/' || *pos == '\\') pos++; kfree(ctx->prepath); ctx->prepath = NULL; /* If pos is NULL then no prepath */ if (!*pos) return 0; ctx->prepath = cifs_sanitize_prepath(pos, GFP_KERNEL); if (IS_ERR(ctx->prepath)) { rc = PTR_ERR(ctx->prepath); ctx->prepath = NULL; return rc; } return 0; } static void smb3_fs_context_free(struct fs_context *fc); static int smb3_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param); static int smb3_fs_context_parse_monolithic(struct fs_context *fc, void *data); static int smb3_get_tree(struct fs_context *fc); static int smb3_reconfigure(struct fs_context *fc); static const struct fs_context_operations smb3_fs_context_ops = { .free = smb3_fs_context_free, .parse_param = smb3_fs_context_parse_param, .parse_monolithic = smb3_fs_context_parse_monolithic, .get_tree = smb3_get_tree, .reconfigure = smb3_reconfigure, }; /* * Parse a monolithic block of data from sys_mount(). * smb3_fs_context_parse_monolithic - Parse key[=val][,key[=val]]* mount data * @ctx: The superblock configuration to fill in. * @data: The data to parse * * Parse a blob of data that's in key[=val][,key[=val]]* form. This can be * called from the ->monolithic_mount_data() fs_context operation. * * Returns 0 on success or the error returned by the ->parse_option() fs_context * operation on failure. 
*/ static int smb3_fs_context_parse_monolithic(struct fs_context *fc, void *data) { char *options = data, *key; int ret = 0; if (!options) return 0; ret = security_sb_eat_lsm_opts(options, &fc->security); if (ret) return ret; /* BB Need to add support for sep= here TBD */ while ((key = strsep(&options, ",")) != NULL) { size_t len; char *value; if (*key == 0) break; /* Check if following character is the deliminator If yes, * we have encountered a double deliminator reset the NULL * character to the deliminator */ while (options && options[0] == ',') { len = strlen(key); strcpy(key + len, options); options = strchr(options, ','); if (options) *options++ = 0; } len = 0; value = strchr(key, '='); if (value) { if (value == key) continue; *value++ = 0; len = strlen(value); } ret = vfs_parse_fs_string(fc, key, value, len); if (ret < 0) break; } return ret; } /* * Validate the preparsed information in the config. */ static int smb3_fs_context_validate(struct fs_context *fc) { struct smb3_fs_context *ctx = smb3_fc2context(fc); if (ctx->rdma && ctx->vals->protocol_id < SMB30_PROT_ID) { cifs_errorf(fc, "SMB Direct requires Version >=3.0\n"); return -EOPNOTSUPP; } #ifndef CONFIG_KEYS /* Muliuser mounts require CONFIG_KEYS support */ if (ctx->multiuser) { cifs_errorf(fc, "Multiuser mounts require kernels with CONFIG_KEYS enabled\n"); return -1; } #endif if (ctx->got_version == false) pr_warn_once("No dialect specified on mount. Default has changed to a more secure dialect, SMB2.1 or later (e.g. SMB3.1.1), from CIFS (SMB1). To use the less secure SMB1 dialect to access old servers which do not support SMB3.1.1 (or even SMB3 or SMB2.1) specify vers=1.0 on mount.\n"); if (!ctx->UNC) { cifs_errorf(fc, "CIFS mount error: No usable UNC path provided in device string!\n"); return -1; } /* make sure UNC has a share name */ if (strlen(ctx->UNC) < 3 || !strchr(ctx->UNC + 3, '\\')) { cifs_errorf(fc, "Malformed UNC. Unable to find share name.\n"); return -ENOENT; } if (!ctx->got_ip) { int len; const char *slash; /* No ip= option specified? Try to get it from UNC */ /* Use the address part of the UNC. */ slash = strchr(&ctx->UNC[2], '\\'); len = slash - &ctx->UNC[2]; if (!cifs_convert_address((struct sockaddr *)&ctx->dstaddr, &ctx->UNC[2], len)) { pr_err("Unable to determine destination address\n"); return -EHOSTUNREACH; } } /* set the port that we got earlier */ cifs_set_port((struct sockaddr *)&ctx->dstaddr, ctx->port); if (ctx->override_uid && !ctx->uid_specified) { ctx->override_uid = 0; pr_notice("ignoring forceuid mount option specified with no uid= option\n"); } if (ctx->override_gid && !ctx->gid_specified) { ctx->override_gid = 0; pr_notice("ignoring forcegid mount option specified with no gid= option\n"); } return 0; } static int smb3_get_tree_common(struct fs_context *fc) { struct smb3_fs_context *ctx = smb3_fc2context(fc); struct dentry *root; int rc = 0; root = cifs_smb3_do_mount(fc->fs_type, 0, ctx); if (IS_ERR(root)) return PTR_ERR(root); fc->root = root; return rc; } /* * Create an SMB3 superblock from the parameters passed. */ static int smb3_get_tree(struct fs_context *fc) { int err = smb3_fs_context_validate(fc); int ret; if (err) return err; mutex_lock(&cifs_mount_mutex); ret = smb3_get_tree_common(fc); mutex_unlock(&cifs_mount_mutex); return ret; } static void smb3_fs_context_free(struct fs_context *fc) { struct smb3_fs_context *ctx = smb3_fc2context(fc); smb3_cleanup_fs_context(ctx); } /* * Compare the old and new proposed context during reconfigure * and check if the changes are compatible. 
*/ static int smb3_verify_reconfigure_ctx(struct fs_context *fc, struct smb3_fs_context *new_ctx, struct smb3_fs_context *old_ctx) { if (new_ctx->posix_paths != old_ctx->posix_paths) { cifs_errorf(fc, "can not change posixpaths during remount\n"); return -EINVAL; } if (new_ctx->sectype != old_ctx->sectype) { cifs_errorf(fc, "can not change sec during remount\n"); return -EINVAL; } if (new_ctx->multiuser != old_ctx->multiuser) { cifs_errorf(fc, "can not change multiuser during remount\n"); return -EINVAL; } if (new_ctx->UNC && (!old_ctx->UNC || strcmp(new_ctx->UNC, old_ctx->UNC))) { cifs_errorf(fc, "can not change UNC during remount\n"); return -EINVAL; } if (new_ctx->username && (!old_ctx->username || strcmp(new_ctx->username, old_ctx->username))) { cifs_errorf(fc, "can not change username during remount\n"); return -EINVAL; } if (new_ctx->password && (!old_ctx->password || strcmp(new_ctx->password, old_ctx->password))) { cifs_errorf(fc, "can not change password during remount\n"); return -EINVAL; } if (new_ctx->domainname && (!old_ctx->domainname || strcmp(new_ctx->domainname, old_ctx->domainname))) { cifs_errorf(fc, "can not change domainname during remount\n"); return -EINVAL; } if (strcmp(new_ctx->workstation_name, old_ctx->workstation_name)) { cifs_errorf(fc, "can not change workstation_name during remount\n"); return -EINVAL; } if (new_ctx->nodename && (!old_ctx->nodename || strcmp(new_ctx->nodename, old_ctx->nodename))) { cifs_errorf(fc, "can not change nodename during remount\n"); return -EINVAL; } if (new_ctx->iocharset && (!old_ctx->iocharset || strcmp(new_ctx->iocharset, old_ctx->iocharset))) { cifs_errorf(fc, "can not change iocharset during remount\n"); return -EINVAL; } return 0; } #define STEAL_STRING(cifs_sb, ctx, field) \ do { \ kfree(ctx->field); \ ctx->field = cifs_sb->ctx->field; \ cifs_sb->ctx->field = NULL; \ } while (0) #define STEAL_STRING_SENSITIVE(cifs_sb, ctx, field) \ do { \ kfree_sensitive(ctx->field); \ ctx->field = cifs_sb->ctx->field; \ cifs_sb->ctx->field = NULL; \ } while (0) static int smb3_reconfigure(struct fs_context *fc) { struct smb3_fs_context *ctx = smb3_fc2context(fc); struct dentry *root = fc->root; struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb); int rc; rc = smb3_verify_reconfigure_ctx(fc, ctx, cifs_sb->ctx); if (rc) return rc; /* * We can not change UNC/username/password/domainname/ * workstation_name/nodename/iocharset * during reconnect so ignore what we have in the new context and * just use what we already have in cifs_sb->ctx. 
*/ STEAL_STRING(cifs_sb, ctx, UNC); STEAL_STRING(cifs_sb, ctx, source); STEAL_STRING(cifs_sb, ctx, username); STEAL_STRING_SENSITIVE(cifs_sb, ctx, password); STEAL_STRING(cifs_sb, ctx, domainname); STEAL_STRING(cifs_sb, ctx, nodename); STEAL_STRING(cifs_sb, ctx, iocharset); /* if rsize or wsize not passed in on remount, use previous values */ if (ctx->rsize == 0) ctx->rsize = cifs_sb->ctx->rsize; if (ctx->wsize == 0) ctx->wsize = cifs_sb->ctx->wsize; smb3_cleanup_fs_context_contents(cifs_sb->ctx); rc = smb3_fs_context_dup(cifs_sb->ctx, ctx); smb3_update_mnt_flags(cifs_sb); #ifdef CONFIG_CIFS_DFS_UPCALL if (!rc) rc = dfs_cache_remount_fs(cifs_sb); #endif return rc; } static int smb3_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param) { struct fs_parse_result result; struct smb3_fs_context *ctx = smb3_fc2context(fc); int i, opt; bool is_smb3 = !strcmp(fc->fs_type->name, "smb3"); bool skip_parsing = false; kuid_t uid; kgid_t gid; cifs_dbg(FYI, "CIFS: parsing cifs mount option '%s'\n", param->key); /* * fs_parse can not handle string options with an empty value so * we will need special handling of them. */ if (param->type == fs_value_is_string && param->string[0] == 0) { if (!strcmp("pass", param->key) || !strcmp("password", param->key)) { skip_parsing = true; opt = Opt_pass; } else if (!strcmp("user", param->key) || !strcmp("username", param->key)) { skip_parsing = true; opt = Opt_user; } } if (!skip_parsing) { opt = fs_parse(fc, smb3_fs_parameters, param, &result); if (opt < 0) return ctx->sloppy ? 1 : opt; } switch (opt) { case Opt_compress: ctx->compression = UNKNOWN_TYPE; cifs_dbg(VFS, "SMB3 compression support is experimental\n"); break; case Opt_nodfs: ctx->nodfs = 1; break; case Opt_hard: if (result.negated) { if (ctx->retry == 1) cifs_dbg(VFS, "conflicting hard vs. 
soft mount options\n"); ctx->retry = 0; } else ctx->retry = 1; break; case Opt_soft: if (result.negated) ctx->retry = 1; else { if (ctx->retry == 1) cifs_dbg(VFS, "conflicting hard vs soft mount options\n"); ctx->retry = 0; } break; case Opt_mapposix: if (result.negated) ctx->remap = false; else { ctx->remap = true; ctx->sfu_remap = false; /* disable SFU mapping */ } break; case Opt_mapchars: if (result.negated) ctx->sfu_remap = false; else { ctx->sfu_remap = true; ctx->remap = false; /* disable SFM (mapposix) mapping */ } break; case Opt_user_xattr: if (result.negated) ctx->no_xattr = 1; else ctx->no_xattr = 0; break; case Opt_forceuid: if (result.negated) ctx->override_uid = 0; else ctx->override_uid = 1; break; case Opt_forcegid: if (result.negated) ctx->override_gid = 0; else ctx->override_gid = 1; break; case Opt_perm: if (result.negated) ctx->noperm = 1; else ctx->noperm = 0; break; case Opt_dynperm: if (result.negated) ctx->dynperm = 0; else ctx->dynperm = 1; break; case Opt_sfu: if (result.negated) ctx->sfu_emul = 0; else ctx->sfu_emul = 1; break; case Opt_noblocksend: ctx->noblocksnd = 1; break; case Opt_noautotune: ctx->noautotune = 1; break; case Opt_nolease: ctx->no_lease = 1; break; case Opt_nosparse: ctx->no_sparse = 1; break; case Opt_nodelete: ctx->nodelete = 1; break; case Opt_multichannel: if (result.negated) { ctx->multichannel = false; ctx->max_channels = 1; } else { ctx->multichannel = true; /* if number of channels not specified, default to 2 */ if (ctx->max_channels < 2) ctx->max_channels = 2; } break; case Opt_uid: uid = make_kuid(current_user_ns(), result.uint_32); if (!uid_valid(uid)) goto cifs_parse_mount_err; ctx->linux_uid = uid; ctx->uid_specified = true; break; case Opt_cruid: uid = make_kuid(current_user_ns(), result.uint_32); if (!uid_valid(uid)) goto cifs_parse_mount_err; ctx->cred_uid = uid; ctx->cruid_specified = true; break; case Opt_backupuid: uid = make_kuid(current_user_ns(), result.uint_32); if (!uid_valid(uid)) goto cifs_parse_mount_err; ctx->backupuid = uid; ctx->backupuid_specified = true; break; case Opt_backupgid: gid = make_kgid(current_user_ns(), result.uint_32); if (!gid_valid(gid)) goto cifs_parse_mount_err; ctx->backupgid = gid; ctx->backupgid_specified = true; break; case Opt_gid: gid = make_kgid(current_user_ns(), result.uint_32); if (!gid_valid(gid)) goto cifs_parse_mount_err; ctx->linux_gid = gid; ctx->gid_specified = true; break; case Opt_port: ctx->port = result.uint_32; break; case Opt_file_mode: ctx->file_mode = result.uint_32; break; case Opt_dirmode: ctx->dir_mode = result.uint_32; break; case Opt_min_enc_offload: ctx->min_offload = result.uint_32; break; case Opt_blocksize: /* * inode blocksize realistically should never need to be * less than 16K or greater than 16M and default is 1MB. * Note that small inode block sizes (e.g. 64K) can lead * to very poor performance of common tools like cp and scp */ if ((result.uint_32 < CIFS_MAX_MSGSIZE) || (result.uint_32 > (4 * SMB3_DEFAULT_IOSIZE))) { cifs_errorf(fc, "%s: Invalid blocksize\n", __func__); goto cifs_parse_mount_err; } ctx->bsize = result.uint_32; ctx->got_bsize = true; break; case Opt_rasize: /* * readahead size realistically should never need to be * less than 1M (CIFS_DEFAULT_IOSIZE) or greater than 32M * (perhaps an exception should be considered in the * for the case of a large number of channels * when multichannel is negotiated) since that would lead * to plenty of parallel I/O in flight to the server. 
* Note that smaller read ahead sizes would * hurt performance of common tools like cp and scp * which often trigger sequential i/o with read ahead */ if ((result.uint_32 > (8 * SMB3_DEFAULT_IOSIZE)) || (result.uint_32 < CIFS_DEFAULT_IOSIZE)) { cifs_errorf(fc, "%s: Invalid rasize %d vs. %d\n", __func__, result.uint_32, SMB3_DEFAULT_IOSIZE); goto cifs_parse_mount_err; } ctx->rasize = result.uint_32; break; case Opt_rsize: ctx->rsize = result.uint_32; ctx->got_rsize = true; break; case Opt_wsize: ctx->wsize = result.uint_32; ctx->got_wsize = true; break; case Opt_acregmax: ctx->acregmax = HZ * result.uint_32; if (ctx->acregmax > CIFS_MAX_ACTIMEO) { cifs_errorf(fc, "acregmax too large\n"); goto cifs_parse_mount_err; } break; case Opt_acdirmax: ctx->acdirmax = HZ * result.uint_32; if (ctx->acdirmax > CIFS_MAX_ACTIMEO) { cifs_errorf(fc, "acdirmax too large\n"); goto cifs_parse_mount_err; } break; case Opt_actimeo: if (HZ * result.uint_32 > CIFS_MAX_ACTIMEO) { cifs_errorf(fc, "timeout too large\n"); goto cifs_parse_mount_err; } if ((ctx->acdirmax != CIFS_DEF_ACTIMEO) || (ctx->acregmax != CIFS_DEF_ACTIMEO)) { cifs_errorf(fc, "actimeo ignored since acregmax or acdirmax specified\n"); break; } ctx->acdirmax = ctx->acregmax = HZ * result.uint_32; break; case Opt_closetimeo: ctx->closetimeo = HZ * result.uint_32; if (ctx->closetimeo > SMB3_MAX_DCLOSETIMEO) { cifs_errorf(fc, "closetimeo too large\n"); goto cifs_parse_mount_err; } break; case Opt_echo_interval: ctx->echo_interval = result.uint_32; break; case Opt_snapshot: ctx->snapshot_time = result.uint_64; break; case Opt_max_credits: if (result.uint_32 < 20 || result.uint_32 > 60000) { cifs_errorf(fc, "%s: Invalid max_credits value\n", __func__); goto cifs_parse_mount_err; } ctx->max_credits = result.uint_32; break; case Opt_max_channels: if (result.uint_32 < 1 || result.uint_32 > CIFS_MAX_CHANNELS) { cifs_errorf(fc, "%s: Invalid max_channels value, needs to be 1-%d\n", __func__, CIFS_MAX_CHANNELS); goto cifs_parse_mount_err; } ctx->max_channels = result.uint_32; /* If more than one channel requested ... they want multichan */ if (result.uint_32 > 1) ctx->multichannel = true; break; case Opt_max_cached_dirs: if (result.uint_32 < 1) { cifs_errorf(fc, "%s: Invalid max_cached_dirs, needs to be 1 or more\n", __func__); goto cifs_parse_mount_err; } ctx->max_cached_dirs = result.uint_32; break; case Opt_handletimeout: ctx->handle_timeout = result.uint_32; if (ctx->handle_timeout > SMB3_MAX_HANDLE_TIMEOUT) { cifs_errorf(fc, "Invalid handle cache timeout, longer than 16 minutes\n"); goto cifs_parse_mount_err; } break; case Opt_source: kfree(ctx->UNC); ctx->UNC = NULL; switch (smb3_parse_devname(param->string, ctx)) { case 0: break; case -ENOMEM: cifs_errorf(fc, "Unable to allocate memory for devname\n"); goto cifs_parse_mount_err; case -EINVAL: cifs_errorf(fc, "Malformed UNC in devname\n"); goto cifs_parse_mount_err; default: cifs_errorf(fc, "Unknown error parsing devname\n"); goto cifs_parse_mount_err; } ctx->source = smb3_fs_context_fullpath(ctx, '/'); if (IS_ERR(ctx->source)) { ctx->source = NULL; cifs_errorf(fc, "OOM when copying UNC string\n"); goto cifs_parse_mount_err; } fc->source = kstrdup(ctx->source, GFP_KERNEL); if (fc->source == NULL) { cifs_errorf(fc, "OOM when copying UNC string\n"); goto cifs_parse_mount_err; } break; case Opt_user: kfree(ctx->username); ctx->username = NULL; if (ctx->nullauth) break; if (strlen(param->string) == 0) { /* null user, ie. 
anonymous authentication */ ctx->nullauth = 1; break; } if (strnlen(param->string, CIFS_MAX_USERNAME_LEN) > CIFS_MAX_USERNAME_LEN) { pr_warn("username too long\n"); goto cifs_parse_mount_err; } ctx->username = kstrdup(param->string, GFP_KERNEL); if (ctx->username == NULL) { cifs_errorf(fc, "OOM when copying username string\n"); goto cifs_parse_mount_err; } break; case Opt_pass: kfree_sensitive(ctx->password); ctx->password = NULL; if (strlen(param->string) == 0) break; ctx->password = kstrdup(param->string, GFP_KERNEL); if (ctx->password == NULL) { cifs_errorf(fc, "OOM when copying password string\n"); goto cifs_parse_mount_err; } break; case Opt_ip: if (strlen(param->string) == 0) { ctx->got_ip = false; break; } if (!cifs_convert_address((struct sockaddr *)&ctx->dstaddr, param->string, strlen(param->string))) { pr_err("bad ip= option (%s)\n", param->string); goto cifs_parse_mount_err; } ctx->got_ip = true; break; case Opt_domain: if (strnlen(param->string, CIFS_MAX_DOMAINNAME_LEN) == CIFS_MAX_DOMAINNAME_LEN) { pr_warn("domain name too long\n"); goto cifs_parse_mount_err; } kfree(ctx->domainname); ctx->domainname = kstrdup(param->string, GFP_KERNEL); if (ctx->domainname == NULL) { cifs_errorf(fc, "OOM when copying domainname string\n"); goto cifs_parse_mount_err; } cifs_dbg(FYI, "Domain name set\n"); break; case Opt_srcaddr: if (!cifs_convert_address( (struct sockaddr *)&ctx->srcaddr, param->string, strlen(param->string))) { pr_warn("Could not parse srcaddr: %s\n", param->string); goto cifs_parse_mount_err; } break; case Opt_iocharset: if (strnlen(param->string, 1024) >= 65) { pr_warn("iocharset name too long\n"); goto cifs_parse_mount_err; } if (strncasecmp(param->string, "default", 7) != 0) { kfree(ctx->iocharset); ctx->iocharset = kstrdup(param->string, GFP_KERNEL); if (ctx->iocharset == NULL) { cifs_errorf(fc, "OOM when copying iocharset string\n"); goto cifs_parse_mount_err; } } /* if iocharset not set then load_nls_default * is used by caller */ cifs_dbg(FYI, "iocharset set to %s\n", ctx->iocharset); break; case Opt_netbiosname: memset(ctx->source_rfc1001_name, 0x20, RFC1001_NAME_LEN); /* * FIXME: are there cases in which a comma can * be valid in workstation netbios name (and * need special handling)? */ for (i = 0; i < RFC1001_NAME_LEN; i++) { /* don't ucase netbiosname for user */ if (param->string[i] == 0) break; ctx->source_rfc1001_name[i] = param->string[i]; } /* The string has 16th byte zero still from * set at top of the function */ if (i == RFC1001_NAME_LEN && param->string[i] != 0) pr_warn("netbiosname longer than 15 truncated\n"); break; case Opt_servern: /* last byte, type, is 0x20 for servr type */ memset(ctx->target_rfc1001_name, 0x20, RFC1001_NAME_LEN_WITH_NULL); /* * BB are there cases in which a comma can be valid in this * workstation netbios name (and need special handling)? */ /* user or mount helper must uppercase the netbios name */ for (i = 0; i < 15; i++) { if (param->string[i] == 0) break; ctx->target_rfc1001_name[i] = param->string[i]; } /* The string has 16th byte zero still from set at top of function */ if (i == RFC1001_NAME_LEN && param->string[i] != 0) pr_warn("server netbiosname longer than 15 truncated\n"); break; case Opt_ver: /* version of mount userspace tools, not dialect */ /* If interface changes in mount.cifs bump to new ver */ if (strncasecmp(param->string, "1", 1) == 0) { if (strlen(param->string) > 1) { pr_warn("Bad mount helper ver=%s. 
Did you want SMB1 (CIFS) dialect and mean to type vers=1.0 instead?\n", param->string); goto cifs_parse_mount_err; } /* This is the default */ break; } /* For all other value, error */ pr_warn("Invalid mount helper version specified\n"); goto cifs_parse_mount_err; case Opt_vers: /* protocol version (dialect) */ if (cifs_parse_smb_version(fc, param->string, ctx, is_smb3) != 0) goto cifs_parse_mount_err; ctx->got_version = true; break; case Opt_sec: if (cifs_parse_security_flavors(fc, param->string, ctx) != 0) goto cifs_parse_mount_err; break; case Opt_cache: if (cifs_parse_cache_flavor(fc, param->string, ctx) != 0) goto cifs_parse_mount_err; break; case Opt_witness: #ifndef CONFIG_CIFS_SWN_UPCALL cifs_errorf(fc, "Witness support needs CONFIG_CIFS_SWN_UPCALL config option\n"); goto cifs_parse_mount_err; #endif ctx->witness = true; pr_warn_once("Witness protocol support is experimental\n"); break; case Opt_rootfs: #ifndef CONFIG_CIFS_ROOT cifs_dbg(VFS, "rootfs support requires CONFIG_CIFS_ROOT config option\n"); goto cifs_parse_mount_err; #endif ctx->rootfs = true; break; case Opt_posixpaths: if (result.negated) ctx->posix_paths = 0; else ctx->posix_paths = 1; break; case Opt_unix: if (result.negated) { if (ctx->linux_ext == 1) pr_warn_once("conflicting posix mount options specified\n"); ctx->linux_ext = 0; ctx->no_linux_ext = 1; } else { if (ctx->no_linux_ext == 1) pr_warn_once("conflicting posix mount options specified\n"); ctx->linux_ext = 1; ctx->no_linux_ext = 0; } break; case Opt_nocase: ctx->nocase = 1; break; case Opt_brl: if (result.negated) { /* * turn off mandatory locking in mode * if remote locking is turned off since the * local vfs will do advisory */ if (ctx->file_mode == (S_IALLUGO & ~(S_ISUID | S_IXGRP))) ctx->file_mode = S_IALLUGO; ctx->nobrl = 1; } else ctx->nobrl = 0; break; case Opt_handlecache: if (result.negated) ctx->nohandlecache = 1; else ctx->nohandlecache = 0; break; case Opt_forcemandatorylock: ctx->mand_lock = 1; break; case Opt_setuids: ctx->setuids = result.negated; break; case Opt_intr: ctx->intr = !result.negated; break; case Opt_setuidfromacl: ctx->setuidfromacl = 1; break; case Opt_strictsync: ctx->nostrictsync = result.negated; break; case Opt_serverino: ctx->server_ino = !result.negated; break; case Opt_rwpidforward: ctx->rwpidforward = 1; break; case Opt_modesid: ctx->mode_ace = 1; break; case Opt_cifsacl: ctx->cifs_acl = !result.negated; break; case Opt_acl: ctx->no_psx_acl = result.negated; break; case Opt_locallease: ctx->local_lease = 1; break; case Opt_sign: ctx->sign = true; break; case Opt_ignore_signature: ctx->sign = true; ctx->ignore_signature = true; break; case Opt_seal: /* we do not do the following in secFlags because seal * is a per tree connection (mount) not a per socket * or per-smb connection option in the protocol * vol->secFlg |= CIFSSEC_MUST_SEAL; */ ctx->seal = 1; break; case Opt_noac: pr_warn("Mount option noac not supported. 
Instead set /proc/fs/cifs/LookupCacheEnabled to 0\n"); break; case Opt_fsc: #ifndef CONFIG_CIFS_FSCACHE cifs_errorf(fc, "FS-Cache support needs CONFIG_CIFS_FSCACHE kernel config option set\n"); goto cifs_parse_mount_err; #endif ctx->fsc = true; break; case Opt_mfsymlinks: ctx->mfsymlinks = true; break; case Opt_multiuser: ctx->multiuser = true; break; case Opt_sloppy: ctx->sloppy = true; break; case Opt_nosharesock: ctx->nosharesock = true; break; case Opt_persistent: if (result.negated) { ctx->nopersistent = true; if (ctx->persistent) { cifs_errorf(fc, "persistenthandles mount options conflict\n"); goto cifs_parse_mount_err; } } else { ctx->persistent = true; if ((ctx->nopersistent) || (ctx->resilient)) { cifs_errorf(fc, "persistenthandles mount options conflict\n"); goto cifs_parse_mount_err; } } break; case Opt_resilient: if (result.negated) { ctx->resilient = false; /* already the default */ } else { ctx->resilient = true; if (ctx->persistent) { cifs_errorf(fc, "persistenthandles mount options conflict\n"); goto cifs_parse_mount_err; } } break; case Opt_tcp_nodelay: /* tcp nodelay should not usually be needed since we CORK/UNCORK the socket */ if (result.negated) ctx->sockopt_tcp_nodelay = false; else ctx->sockopt_tcp_nodelay = true; break; case Opt_domainauto: ctx->domainauto = true; break; case Opt_rdma: ctx->rdma = true; break; } /* case Opt_ignore: - is ignored as expected ... */ return 0; cifs_parse_mount_err: kfree_sensitive(ctx->password); ctx->password = NULL; return -EINVAL; } int smb3_init_fs_context(struct fs_context *fc) { struct smb3_fs_context *ctx; char *nodename = utsname()->nodename; int i; ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL); if (unlikely(!ctx)) return -ENOMEM; strscpy(ctx->workstation_name, nodename, sizeof(ctx->workstation_name)); /* * does not have to be perfect mapping since field is * informational, only used for servers that do not support * port 445 and it can be overridden at mount time */ memset(ctx->source_rfc1001_name, 0x20, RFC1001_NAME_LEN); for (i = 0; i < strnlen(nodename, RFC1001_NAME_LEN); i++) ctx->source_rfc1001_name[i] = toupper(nodename[i]); ctx->source_rfc1001_name[RFC1001_NAME_LEN] = 0; /* * null target name indicates to use *SMBSERVR default called name * if we end up sending RFC1001 session initialize */ ctx->target_rfc1001_name[0] = 0; ctx->cred_uid = current_uid(); ctx->linux_uid = current_uid(); ctx->linux_gid = current_gid(); /* By default 4MB read ahead size, 1MB block size */ ctx->bsize = CIFS_DEFAULT_IOSIZE; /* can improve cp performance significantly */ ctx->rasize = 0; /* 0 = use default (ie negotiated rsize) for read ahead pages */ /* * default to SFM style remapping of seven reserved characters * unless user overrides it or we negotiate CIFS POSIX where * it is unnecessary. Can not simultaneously use more than one mapping * since then readdir could list files that open could not open */ ctx->remap = true; /* default to only allowing write access to owner of the mount */ ctx->dir_mode = ctx->file_mode = S_IRUGO | S_IXUGO | S_IWUSR; /* ctx->retry default is 0 (i.e. "soft" limited retry not hard retry) */ /* default is always to request posix paths. 
*/ ctx->posix_paths = 1; /* default to using server inode numbers where available */ ctx->server_ino = 1; /* default is to use strict cifs caching semantics */ ctx->strict_io = true; ctx->acregmax = CIFS_DEF_ACTIMEO; ctx->acdirmax = CIFS_DEF_ACTIMEO; ctx->closetimeo = SMB3_DEF_DCLOSETIMEO; ctx->max_cached_dirs = MAX_CACHED_FIDS; /* Most clients set timeout to 0, allows server to use its default */ ctx->handle_timeout = 0; /* See MS-SMB2 spec section 2.2.14.2.12 */ /* offer SMB2.1 and later (SMB3 etc). Secure and widely accepted */ ctx->ops = &smb30_operations; ctx->vals = &smbdefault_values; ctx->echo_interval = SMB_ECHO_INTERVAL_DEFAULT; /* default to no multichannel (single server connection) */ ctx->multichannel = false; ctx->max_channels = 1; ctx->backupuid_specified = false; /* no backup intent for a user */ ctx->backupgid_specified = false; /* no backup intent for a group */ /* * short int override_uid = -1; * short int override_gid = -1; * char *nodename = strdup(utsname()->nodename); * struct sockaddr *dstaddr = (struct sockaddr *)&vol->dstaddr; */ fc->fs_private = ctx; fc->ops = &smb3_fs_context_ops; return 0; } void smb3_cleanup_fs_context_contents(struct smb3_fs_context *ctx) { if (ctx == NULL) return; /* * Make sure this stays in sync with smb3_fs_context_dup() */ kfree(ctx->username); ctx->username = NULL; kfree_sensitive(ctx->password); ctx->password = NULL; kfree(ctx->server_hostname); ctx->server_hostname = NULL; kfree(ctx->UNC); ctx->UNC = NULL; kfree(ctx->source); ctx->source = NULL; kfree(ctx->domainname); ctx->domainname = NULL; kfree(ctx->nodename); ctx->nodename = NULL; kfree(ctx->iocharset); ctx->iocharset = NULL; kfree(ctx->prepath); ctx->prepath = NULL; kfree(ctx->leaf_fullpath); ctx->leaf_fullpath = NULL; } void smb3_cleanup_fs_context(struct smb3_fs_context *ctx) { if (!ctx) return; smb3_cleanup_fs_context_contents(ctx); kfree(ctx); } void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb) { struct smb3_fs_context *ctx = cifs_sb->ctx; if (ctx->nodfs) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_DFS; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_DFS; if (ctx->noperm) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_PERM; if (ctx->setuids) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SET_UID; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SET_UID; if (ctx->setuidfromacl) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_UID_FROM_ACL; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_UID_FROM_ACL; if (ctx->server_ino) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_SERVER_INUM; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM; if (ctx->remap) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MAP_SFM_CHR; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MAP_SFM_CHR; if (ctx->sfu_remap) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MAP_SPECIAL_CHR; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MAP_SPECIAL_CHR; if (ctx->no_xattr) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_XATTR; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_XATTR; if (ctx->sfu_emul) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_UNX_EMUL; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_UNX_EMUL; if (ctx->nobrl) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_BRL; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_BRL; if (ctx->nohandlecache) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_HANDLE_CACHE; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NO_HANDLE_CACHE; if (ctx->nostrictsync) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOSSYNC; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NOSSYNC; if (ctx->mand_lock) cifs_sb->mnt_cifs_flags |= 
CIFS_MOUNT_NOPOSIXBRL; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_NOPOSIXBRL; if (ctx->rwpidforward) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RWPIDFORWARD; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_RWPIDFORWARD; if (ctx->mode_ace) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MODE_FROM_SID; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MODE_FROM_SID; if (ctx->cifs_acl) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_CIFS_ACL; if (ctx->backupuid_specified) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPUID; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_CIFS_BACKUPUID; if (ctx->backupgid_specified) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPGID; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_CIFS_BACKUPGID; if (ctx->override_uid) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_UID; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_OVERR_UID; if (ctx->override_gid) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_GID; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_OVERR_GID; if (ctx->dynperm) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DYNPERM; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_DYNPERM; if (ctx->fsc) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_FSCACHE; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_FSCACHE; if (ctx->multiuser) cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_MULTIUSER | CIFS_MOUNT_NO_PERM); else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MULTIUSER; if (ctx->strict_io) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_STRICT_IO; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_STRICT_IO; if (ctx->direct_io) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_DIRECT_IO; if (ctx->mfsymlinks) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_MF_SYMLINKS; else cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_MF_SYMLINKS; if (ctx->mfsymlinks) { if (ctx->sfu_emul) { /* * Our SFU ("Services for Unix" emulation does not allow * creating symlinks but does allow reading existing SFU * symlinks (it does allow both creating and reading SFU * style mknod and FIFOs though). When "mfsymlinks" and * "sfu" are both enabled at the same time, it allows * reading both types of symlinks, but will only create * them with mfsymlinks format. This allows better * Apple compatibility (probably better for Samba too) * while still recognizing old Windows style symlinks. */ cifs_dbg(VFS, "mount options mfsymlinks and sfu both enabled\n"); } } cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SHUTDOWN; return; } |
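/*
 * Illustrative sketch (not part of the kernel sources above): a minimal
 * userspace program showing how the monolithic option string handled by
 * smb3_fs_context_parse_monolithic() earlier in this file is tokenized --
 * split on ',', a doubled ",," is folded back into a literal comma inside
 * the current value, and each token is then split on the first '='.
 * The function name parse_demo() and the sample string are hypothetical;
 * the real code hands each key/value pair to vfs_parse_fs_string().
 */
#define _DEFAULT_SOURCE		/* for glibc's strsep() */
#include <stdio.h>
#include <string.h>

static void parse_demo(char *options)
{
	char *key;

	while ((key = strsep(&options, ",")) != NULL) {
		char *value;

		if (*key == '\0')
			break;
		/* ",," escaped a comma: glue the following piece back on */
		while (options && options[0] == ',') {
			size_t len = strlen(key);

			/* overlapping copy, hence memmove in this sketch */
			memmove(key + len, options, strlen(options) + 1);
			options = strchr(options, ',');
			if (options)
				*options++ = '\0';
		}
		value = strchr(key, '=');
		if (value) {
			if (value == key)
				continue;	/* "=foo": no key, skip it */
			*value++ = '\0';
		}
		printf("key='%s' value='%s'\n", key, value ? value : "");
	}
}

int main(void)
{
	char opts[] = "user=bob,pass=a,,b,vers=3.1.1";

	parse_demo(opts);	/* the pass value comes out as "a,b" */
	return 0;
}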
// SPDX-License-Identifier: GPL-2.0 /* * fs/f2fs/xattr.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * Portions of this code from linux/fs/ext2/xattr.c * * Copyright (C) 2001-2003 Andreas Gruenbacher <agruen@suse.de> * * Fix by Harrison Xing <harrison@mountainviewdata.com>.
* Extended attributes for symlinks and special files added per * suggestion of Luka Renko <luka.renko@hermes.si>. * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>, * Red Hat Inc. */ #include <linux/rwsem.h> #include <linux/f2fs_fs.h> #include <linux/security.h> #include <linux/posix_acl_xattr.h> #include "f2fs.h" #include "xattr.h" #include "segment.h" static void *xattr_alloc(struct f2fs_sb_info *sbi, int size, bool *is_inline) { if (likely(size == sbi->inline_xattr_slab_size)) { *is_inline = true; return f2fs_kmem_cache_alloc(sbi->inline_xattr_slab, GFP_F2FS_ZERO, false, sbi); } *is_inline = false; return f2fs_kzalloc(sbi, size, GFP_NOFS); } static void xattr_free(struct f2fs_sb_info *sbi, void *xattr_addr, bool is_inline) { if (is_inline) kmem_cache_free(sbi->inline_xattr_slab, xattr_addr); else kfree(xattr_addr); } static int f2fs_xattr_generic_get(const struct xattr_handler *handler, struct dentry *unused, struct inode *inode, const char *name, void *buffer, size_t size) { struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); switch (handler->flags) { case F2FS_XATTR_INDEX_USER: if (!test_opt(sbi, XATTR_USER)) return -EOPNOTSUPP; break; case F2FS_XATTR_INDEX_TRUSTED: case F2FS_XATTR_INDEX_SECURITY: break; default: return -EINVAL; } return f2fs_getxattr(inode, handler->flags, name, buffer, size, NULL); } static int f2fs_xattr_generic_set(const struct xattr_handler *handler, struct mnt_idmap *idmap, struct dentry *unused, struct inode *inode, const char *name, const void *value, size_t size, int flags) { struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); switch (handler->flags) { case F2FS_XATTR_INDEX_USER: if (!test_opt(sbi, XATTR_USER)) return -EOPNOTSUPP; break; case F2FS_XATTR_INDEX_TRUSTED: case F2FS_XATTR_INDEX_SECURITY: break; default: return -EINVAL; } return f2fs_setxattr(inode, handler->flags, name, value, size, NULL, flags); } static bool f2fs_xattr_user_list(struct dentry *dentry) { struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb); return test_opt(sbi, XATTR_USER); } static bool f2fs_xattr_trusted_list(struct dentry *dentry) { return capable(CAP_SYS_ADMIN); } static int f2fs_xattr_advise_get(const struct xattr_handler *handler, struct dentry *unused, struct inode *inode, const char *name, void *buffer, size_t size) { if (buffer) *((char *)buffer) = F2FS_I(inode)->i_advise; return sizeof(char); } static int f2fs_xattr_advise_set(const struct xattr_handler *handler, struct mnt_idmap *idmap, struct dentry *unused, struct inode *inode, const char *name, const void *value, size_t size, int flags) { unsigned char old_advise = F2FS_I(inode)->i_advise; unsigned char new_advise; if (!inode_owner_or_capable(&nop_mnt_idmap, inode)) return -EPERM; if (value == NULL) return -EINVAL; new_advise = *(char *)value; if (new_advise & ~FADVISE_MODIFIABLE_BITS) return -EINVAL; new_advise = new_advise & FADVISE_MODIFIABLE_BITS; new_advise |= old_advise & ~FADVISE_MODIFIABLE_BITS; F2FS_I(inode)->i_advise = new_advise; f2fs_mark_inode_dirty_sync(inode, true); return 0; } #ifdef CONFIG_F2FS_FS_SECURITY static int f2fs_initxattrs(struct inode *inode, const struct xattr *xattr_array, void *page) { const struct xattr *xattr; int err = 0; for (xattr = xattr_array; xattr->name != NULL; xattr++) { err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_SECURITY, xattr->name, xattr->value, xattr->value_len, (struct page *)page, 0); if (err < 0) break; } return err; } int f2fs_init_security(struct inode *inode, struct inode *dir, const struct qstr *qstr, struct page *ipage) { return 
security_inode_init_security(inode, dir, qstr, &f2fs_initxattrs, ipage); } #endif const struct xattr_handler f2fs_xattr_user_handler = { .prefix = XATTR_USER_PREFIX, .flags = F2FS_XATTR_INDEX_USER, .list = f2fs_xattr_user_list, .get = f2fs_xattr_generic_get, .set = f2fs_xattr_generic_set, }; const struct xattr_handler f2fs_xattr_trusted_handler = { .prefix = XATTR_TRUSTED_PREFIX, .flags = F2FS_XATTR_INDEX_TRUSTED, .list = f2fs_xattr_trusted_list, .get = f2fs_xattr_generic_get, .set = f2fs_xattr_generic_set, }; const struct xattr_handler f2fs_xattr_advise_handler = { .name = F2FS_SYSTEM_ADVISE_NAME, .flags = F2FS_XATTR_INDEX_ADVISE, .get = f2fs_xattr_advise_get, .set = f2fs_xattr_advise_set, }; const struct xattr_handler f2fs_xattr_security_handler = { .prefix = XATTR_SECURITY_PREFIX, .flags = F2FS_XATTR_INDEX_SECURITY, .get = f2fs_xattr_generic_get, .set = f2fs_xattr_generic_set, }; static const struct xattr_handler * const f2fs_xattr_handler_map[] = { [F2FS_XATTR_INDEX_USER] = &f2fs_xattr_user_handler, #ifdef CONFIG_F2FS_FS_POSIX_ACL [F2FS_XATTR_INDEX_POSIX_ACL_ACCESS] = &nop_posix_acl_access, [F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT] = &nop_posix_acl_default, #endif [F2FS_XATTR_INDEX_TRUSTED] = &f2fs_xattr_trusted_handler, #ifdef CONFIG_F2FS_FS_SECURITY [F2FS_XATTR_INDEX_SECURITY] = &f2fs_xattr_security_handler, #endif [F2FS_XATTR_INDEX_ADVISE] = &f2fs_xattr_advise_handler, }; const struct xattr_handler * const f2fs_xattr_handlers[] = { &f2fs_xattr_user_handler, &f2fs_xattr_trusted_handler, #ifdef CONFIG_F2FS_FS_SECURITY &f2fs_xattr_security_handler, #endif &f2fs_xattr_advise_handler, NULL, }; static inline const char *f2fs_xattr_prefix(int index, struct dentry *dentry) { const struct xattr_handler *handler = NULL; if (index > 0 && index < ARRAY_SIZE(f2fs_xattr_handler_map)) handler = f2fs_xattr_handler_map[index]; if (!xattr_handler_can_list(handler, dentry)) return NULL; return xattr_prefix(handler); } static struct f2fs_xattr_entry *__find_xattr(void *base_addr, void *last_base_addr, void **last_addr, int index, size_t len, const char *name) { struct f2fs_xattr_entry *entry; list_for_each_xattr(entry, base_addr) { if ((void *)(entry) + sizeof(__u32) > last_base_addr || (void *)XATTR_NEXT_ENTRY(entry) > last_base_addr) { if (last_addr) *last_addr = entry; return NULL; } if (entry->e_name_index != index) continue; if (entry->e_name_len != len) continue; if (!memcmp(entry->e_name, name, len)) break; } return entry; } static struct f2fs_xattr_entry *__find_inline_xattr(struct inode *inode, void *base_addr, void **last_addr, int index, size_t len, const char *name) { struct f2fs_xattr_entry *entry; unsigned int inline_size = inline_xattr_size(inode); void *max_addr = base_addr + inline_size; entry = __find_xattr(base_addr, max_addr, last_addr, index, len, name); if (!entry) return NULL; /* inline xattr header or entry across max inline xattr size */ if (IS_XATTR_LAST_ENTRY(entry) && (void *)entry + sizeof(__u32) > max_addr) { *last_addr = entry; return NULL; } return entry; } static int read_inline_xattr(struct inode *inode, struct page *ipage, void *txattr_addr) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); unsigned int inline_size = inline_xattr_size(inode); struct page *page = NULL; void *inline_addr; if (ipage) { inline_addr = inline_xattr_addr(inode, ipage); } else { page = f2fs_get_node_page(sbi, inode->i_ino); if (IS_ERR(page)) return PTR_ERR(page); inline_addr = inline_xattr_addr(inode, page); } memcpy(txattr_addr, inline_addr, inline_size); f2fs_put_page(page, 1); return 0; } static 
int read_xattr_block(struct inode *inode, void *txattr_addr) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); nid_t xnid = F2FS_I(inode)->i_xattr_nid; unsigned int inline_size = inline_xattr_size(inode); struct page *xpage; void *xattr_addr; /* The inode already has an extended attribute block. */ xpage = f2fs_get_node_page(sbi, xnid); if (IS_ERR(xpage)) return PTR_ERR(xpage); xattr_addr = page_address(xpage); memcpy(txattr_addr + inline_size, xattr_addr, VALID_XATTR_BLOCK_SIZE); f2fs_put_page(xpage, 1); return 0; } static int lookup_all_xattrs(struct inode *inode, struct page *ipage, unsigned int index, unsigned int len, const char *name, struct f2fs_xattr_entry **xe, void **base_addr, int *base_size, bool *is_inline) { void *cur_addr, *txattr_addr, *last_txattr_addr; void *last_addr = NULL; nid_t xnid = F2FS_I(inode)->i_xattr_nid; unsigned int inline_size = inline_xattr_size(inode); int err; if (!xnid && !inline_size) return -ENODATA; *base_size = XATTR_SIZE(inode) + XATTR_PADDING_SIZE; txattr_addr = xattr_alloc(F2FS_I_SB(inode), *base_size, is_inline); if (!txattr_addr) return -ENOMEM; last_txattr_addr = (void *)txattr_addr + XATTR_SIZE(inode); /* read from inline xattr */ if (inline_size) { err = read_inline_xattr(inode, ipage, txattr_addr); if (err) goto out; *xe = __find_inline_xattr(inode, txattr_addr, &last_addr, index, len, name); if (*xe) { *base_size = inline_size; goto check; } } /* read from xattr node block */ if (xnid) { err = read_xattr_block(inode, txattr_addr); if (err) goto out; } if (last_addr) cur_addr = XATTR_HDR(last_addr) - 1; else cur_addr = txattr_addr; *xe = __find_xattr(cur_addr, last_txattr_addr, NULL, index, len, name); if (!*xe) { f2fs_err(F2FS_I_SB(inode), "lookup inode (%lu) has corrupted xattr", inode->i_ino); set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK); err = -ENODATA; f2fs_handle_error(F2FS_I_SB(inode), ERROR_CORRUPTED_XATTR); goto out; } check: if (IS_XATTR_LAST_ENTRY(*xe)) { err = -ENODATA; goto out; } *base_addr = txattr_addr; return 0; out: xattr_free(F2FS_I_SB(inode), txattr_addr, *is_inline); return err; } static int read_all_xattrs(struct inode *inode, struct page *ipage, void **base_addr) { struct f2fs_xattr_header *header; nid_t xnid = F2FS_I(inode)->i_xattr_nid; unsigned int size = VALID_XATTR_BLOCK_SIZE; unsigned int inline_size = inline_xattr_size(inode); void *txattr_addr; int err; txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode), inline_size + size + XATTR_PADDING_SIZE, GFP_NOFS); if (!txattr_addr) return -ENOMEM; /* read from inline xattr */ if (inline_size) { err = read_inline_xattr(inode, ipage, txattr_addr); if (err) goto fail; } /* read from xattr node block */ if (xnid) { err = read_xattr_block(inode, txattr_addr); if (err) goto fail; } header = XATTR_HDR(txattr_addr); /* never been allocated xattrs */ if (le32_to_cpu(header->h_magic) != F2FS_XATTR_MAGIC) { header->h_magic = cpu_to_le32(F2FS_XATTR_MAGIC); header->h_refcount = cpu_to_le32(1); } *base_addr = txattr_addr; return 0; fail: kfree(txattr_addr); return err; } static inline int write_all_xattrs(struct inode *inode, __u32 hsize, void *txattr_addr, struct page *ipage) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); size_t inline_size = inline_xattr_size(inode); struct page *in_page = NULL; void *xattr_addr; void *inline_addr = NULL; struct page *xpage; nid_t new_nid = 0; int err = 0; if (hsize > inline_size && !F2FS_I(inode)->i_xattr_nid) if (!f2fs_alloc_nid(sbi, &new_nid)) return -ENOSPC; /* write to inline xattr */ if (inline_size) { if (ipage) { inline_addr = 
inline_xattr_addr(inode, ipage); } else { in_page = f2fs_get_node_page(sbi, inode->i_ino); if (IS_ERR(in_page)) { f2fs_alloc_nid_failed(sbi, new_nid); return PTR_ERR(in_page); } inline_addr = inline_xattr_addr(inode, in_page); } f2fs_wait_on_page_writeback(ipage ? ipage : in_page, NODE, true, true); /* no need to use xattr node block */ if (hsize <= inline_size) { err = f2fs_truncate_xattr_node(inode); f2fs_alloc_nid_failed(sbi, new_nid); if (err) { f2fs_put_page(in_page, 1); return err; } memcpy(inline_addr, txattr_addr, inline_size); set_page_dirty(ipage ? ipage : in_page); goto in_page_out; } } /* write to xattr node block */ if (F2FS_I(inode)->i_xattr_nid) { xpage = f2fs_get_node_page(sbi, F2FS_I(inode)->i_xattr_nid); if (IS_ERR(xpage)) { err = PTR_ERR(xpage); f2fs_alloc_nid_failed(sbi, new_nid); goto in_page_out; } f2fs_bug_on(sbi, new_nid); f2fs_wait_on_page_writeback(xpage, NODE, true, true); } else { struct dnode_of_data dn; set_new_dnode(&dn, inode, NULL, NULL, new_nid); xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET); if (IS_ERR(xpage)) { err = PTR_ERR(xpage); f2fs_alloc_nid_failed(sbi, new_nid); goto in_page_out; } f2fs_alloc_nid_done(sbi, new_nid); } xattr_addr = page_address(xpage); if (inline_size) memcpy(inline_addr, txattr_addr, inline_size); memcpy(xattr_addr, txattr_addr + inline_size, VALID_XATTR_BLOCK_SIZE); if (inline_size) set_page_dirty(ipage ? ipage : in_page); set_page_dirty(xpage); f2fs_put_page(xpage, 1); in_page_out: f2fs_put_page(in_page, 1); return err; } int f2fs_getxattr(struct inode *inode, int index, const char *name, void *buffer, size_t buffer_size, struct page *ipage) { struct f2fs_xattr_entry *entry = NULL; int error; unsigned int size, len; void *base_addr = NULL; int base_size; bool is_inline; if (name == NULL) return -EINVAL; len = strlen(name); if (len > F2FS_NAME_LEN) return -ERANGE; if (!ipage) f2fs_down_read(&F2FS_I(inode)->i_xattr_sem); error = lookup_all_xattrs(inode, ipage, index, len, name, &entry, &base_addr, &base_size, &is_inline); if (!ipage) f2fs_up_read(&F2FS_I(inode)->i_xattr_sem); if (error) return error; size = le16_to_cpu(entry->e_value_size); if (buffer && size > buffer_size) { error = -ERANGE; goto out; } if (buffer) { char *pval = entry->e_name + entry->e_name_len; if (base_size - (pval - (char *)base_addr) < size) { error = -ERANGE; goto out; } memcpy(buffer, pval, size); } error = size; out: xattr_free(F2FS_I_SB(inode), base_addr, is_inline); return error; } ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size) { struct inode *inode = d_inode(dentry); struct f2fs_xattr_entry *entry; void *base_addr, *last_base_addr; int error; size_t rest = buffer_size; f2fs_down_read(&F2FS_I(inode)->i_xattr_sem); error = read_all_xattrs(inode, NULL, &base_addr); f2fs_up_read(&F2FS_I(inode)->i_xattr_sem); if (error) return error; last_base_addr = (void *)base_addr + XATTR_SIZE(inode); list_for_each_xattr(entry, base_addr) { const char *prefix; size_t prefix_len; size_t size; prefix = f2fs_xattr_prefix(entry->e_name_index, dentry); if ((void *)(entry) + sizeof(__u32) > last_base_addr || (void *)XATTR_NEXT_ENTRY(entry) > last_base_addr) { f2fs_err(F2FS_I_SB(inode), "list inode (%lu) has corrupted xattr", inode->i_ino); set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK); f2fs_handle_error(F2FS_I_SB(inode), ERROR_CORRUPTED_XATTR); break; } if (!prefix) continue; prefix_len = strlen(prefix); size = prefix_len + entry->e_name_len + 1; if (buffer) { if (size > rest) { error = -ERANGE; goto cleanup; } memcpy(buffer, prefix, 
prefix_len); buffer += prefix_len; memcpy(buffer, entry->e_name, entry->e_name_len); buffer += entry->e_name_len; *buffer++ = 0; } rest -= size; } error = buffer_size - rest; cleanup: kfree(base_addr); return error; } static bool f2fs_xattr_value_same(struct f2fs_xattr_entry *entry, const void *value, size_t size) { void *pval = entry->e_name + entry->e_name_len; return (le16_to_cpu(entry->e_value_size) == size) && !memcmp(pval, value, size); } static int __f2fs_setxattr(struct inode *inode, int index, const char *name, const void *value, size_t size, struct page *ipage, int flags) { struct f2fs_xattr_entry *here, *last; void *base_addr, *last_base_addr; int found, newsize; size_t len; __u32 new_hsize; int error; if (name == NULL) return -EINVAL; if (value == NULL) size = 0; len = strlen(name); if (len > F2FS_NAME_LEN) return -ERANGE; if (size > MAX_VALUE_LEN(inode)) return -E2BIG; retry: error = read_all_xattrs(inode, ipage, &base_addr); if (error) return error; last_base_addr = (void *)base_addr + XATTR_SIZE(inode); /* find entry with wanted name. */ here = __find_xattr(base_addr, last_base_addr, NULL, index, len, name); if (!here) { if (!F2FS_I(inode)->i_xattr_nid) { f2fs_notice(F2FS_I_SB(inode), "recover xattr in inode (%lu)", inode->i_ino); f2fs_recover_xattr_data(inode, NULL); kfree(base_addr); goto retry; } f2fs_err(F2FS_I_SB(inode), "set inode (%lu) has corrupted xattr", inode->i_ino); set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK); error = -EFSCORRUPTED; f2fs_handle_error(F2FS_I_SB(inode), ERROR_CORRUPTED_XATTR); goto exit; } found = IS_XATTR_LAST_ENTRY(here) ? 0 : 1; if (found) { if ((flags & XATTR_CREATE)) { error = -EEXIST; goto exit; } if (value && f2fs_xattr_value_same(here, value, size)) goto same; } else if ((flags & XATTR_REPLACE)) { error = -ENODATA; goto exit; } last = here; while (!IS_XATTR_LAST_ENTRY(last)) { if ((void *)(last) + sizeof(__u32) > last_base_addr || (void *)XATTR_NEXT_ENTRY(last) > last_base_addr) { f2fs_err(F2FS_I_SB(inode), "inode (%lu) has invalid last xattr entry, entry_size: %zu", inode->i_ino, ENTRY_SIZE(last)); set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK); error = -EFSCORRUPTED; f2fs_handle_error(F2FS_I_SB(inode), ERROR_CORRUPTED_XATTR); goto exit; } last = XATTR_NEXT_ENTRY(last); } newsize = XATTR_ALIGN(sizeof(struct f2fs_xattr_entry) + len + size); /* 1. Check space */ if (value) { int free; /* * If value is NULL, it is remove operation. * In case of update operation, we calculate free. */ free = MIN_OFFSET(inode) - ((char *)last - (char *)base_addr); if (found) free = free + ENTRY_SIZE(here); if (unlikely(free < newsize)) { error = -E2BIG; goto exit; } } /* 2. Remove old entry */ if (found) { /* * If entry is found, remove old entry. * If not found, remove operation is not needed. */ struct f2fs_xattr_entry *next = XATTR_NEXT_ENTRY(here); int oldsize = ENTRY_SIZE(here); memmove(here, next, (char *)last - (char *)next); last = (struct f2fs_xattr_entry *)((char *)last - oldsize); memset(last, 0, oldsize); } new_hsize = (char *)last - (char *)base_addr; /* 3. Write new entry */ if (value) { char *pval; /* * Before we come here, old entry is removed. * We just write new entry. 
*/ last->e_name_index = index; last->e_name_len = len; memcpy(last->e_name, name, len); pval = last->e_name + len; memcpy(pval, value, size); last->e_value_size = cpu_to_le16(size); new_hsize += newsize; } error = write_all_xattrs(inode, new_hsize, base_addr, ipage); if (error) goto exit; if (index == F2FS_XATTR_INDEX_ENCRYPTION && !strcmp(name, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT)) f2fs_set_encrypted_inode(inode); if (S_ISDIR(inode->i_mode)) set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_CP); same: if (is_inode_flag_set(inode, FI_ACL_MODE)) { inode->i_mode = F2FS_I(inode)->i_acl_mode; clear_inode_flag(inode, FI_ACL_MODE); } inode_set_ctime_current(inode); f2fs_mark_inode_dirty_sync(inode, true); exit: kfree(base_addr); return error; } int f2fs_setxattr(struct inode *inode, int index, const char *name, const void *value, size_t size, struct page *ipage, int flags) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); int err; if (unlikely(f2fs_cp_error(sbi))) return -EIO; if (!f2fs_is_checkpoint_ready(sbi)) return -ENOSPC; err = f2fs_dquot_initialize(inode); if (err) return err; /* this case is only from f2fs_init_inode_metadata */ if (ipage) return __f2fs_setxattr(inode, index, name, value, size, ipage, flags); f2fs_balance_fs(sbi, true); f2fs_lock_op(sbi); f2fs_down_write(&F2FS_I(inode)->i_xattr_sem); err = __f2fs_setxattr(inode, index, name, value, size, ipage, flags); f2fs_up_write(&F2FS_I(inode)->i_xattr_sem); f2fs_unlock_op(sbi); f2fs_update_time(sbi, REQ_TIME); return err; } int f2fs_init_xattr_caches(struct f2fs_sb_info *sbi) { dev_t dev = sbi->sb->s_bdev->bd_dev; char slab_name[32]; sprintf(slab_name, "f2fs_xattr_entry-%u:%u", MAJOR(dev), MINOR(dev)); sbi->inline_xattr_slab_size = F2FS_OPTION(sbi).inline_xattr_size * sizeof(__le32) + XATTR_PADDING_SIZE; sbi->inline_xattr_slab = f2fs_kmem_cache_create(slab_name, sbi->inline_xattr_slab_size); if (!sbi->inline_xattr_slab) return -ENOMEM; return 0; } void f2fs_destroy_xattr_caches(struct f2fs_sb_info *sbi) { kmem_cache_destroy(sbi->inline_xattr_slab); } |
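/*
 * Illustrative sketch (not part of fs/f2fs/xattr.c above): a small
 * userspace program that exercises the handlers above through the generic
 * xattr syscalls.  XATTR_CREATE corresponds to the -EEXIST check in
 * __f2fs_setxattr(), XATTR_REPLACE to its -ENODATA check, and listxattr()
 * returns the NUL-separated name list built by f2fs_listxattr().
 * The path "/mnt/f2fs/file" is an assumed example, and user.* attributes
 * are only accepted when the filesystem is mounted with user_xattr support.
 */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/xattr.h>

int main(void)
{
	const char *path = "/mnt/f2fs/file";
	char buf[256];
	ssize_t n, off;

	/* create only: fails with EEXIST if user.comment already exists */
	if (setxattr(path, "user.comment", "hello", 5, XATTR_CREATE) < 0)
		perror("setxattr(XATTR_CREATE)");

	/* replace only: fails with ENODATA if the attribute is missing */
	if (setxattr(path, "user.comment", "world", 5, XATTR_REPLACE) < 0)
		perror("setxattr(XATTR_REPLACE)");

	n = getxattr(path, "user.comment", buf, sizeof(buf));
	if (n >= 0)
		printf("user.comment = %.*s\n", (int)n, buf);

	/* each returned name is terminated by a NUL byte */
	n = listxattr(path, buf, sizeof(buf));
	for (off = 0; off < n; off += strlen(buf + off) + 1)
		printf("name: %s\n", buf + off);

	return 0;
}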
3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594 3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 3772 3773 3774 3775 
3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786 3787 3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799 3800 3801 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839 3840 3841 3842 3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884 3885 3886 3887 3888 3889 3890 3891 3892 3893 3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970 3971 3972 3973 3974 3975 3976 3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065 4066 4067 4068 4069 4070 4071 4072 4073 4074 4075 4076 4077 4078 4079 4080 4081 4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100 4101 4102 4103 4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125 4126 4127 4128 4129 4130 4131 4132 4133 4134 4135 4136 4137 4138 4139 4140 4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158 4159 4160 4161 4162 4163 4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 4264 4265 4266 4267 4268 4269 4270 4271 4272 4273 4274 4275 4276 4277 4278 4279 4280 4281 4282 4283 4284 4285 4286 4287 4288 4289 4290 4291 4292 4293 4294 4295 4296 4297 4298 4299 4300 4301 4302 4303 4304 4305 4306 4307 4308 4309 4310 4311 4312 4313 4314 4315 4316 4317 4318 4319 4320 4321 4322 4323 4324 4325 4326 4327 4328 4329 4330 4331 4332 4333 4334 4335 4336 4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353 4354 4355 4356 4357 4358 4359 4360 4361 4362 4363 4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4378 4379 4380 4381 4382 4383 4384 4385 4386 4387 4388 4389 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420 4421 4422 4423 4424 4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449 4450 4451 4452 4453 4454 4455 4456 4457 4458 4459 4460 4461 4462 4463 4464 4465 4466 4467 4468 4469 4470 4471 4472 4473 4474 4475 4476 4477 4478 4479 4480 4481 4482 4483 4484 4485 4486 
4487 4488 4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503 4504 4505 4506 4507 4508 4509 4510 4511 4512 4513 4514 4515 4516 4517 4518 4519 4520 4521 4522 4523 4524 4525 4526 4527 4528 4529 4530 4531 4532 4533 4534 4535 4536 4537 4538 4539 4540 4541 4542 4543 4544 4545 4546 4547 4548 4549 4550 4551 4552 4553 4554 4555 4556 4557 4558 4559 4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571 4572 4573 4574 4575 4576 4577 4578 4579 4580 4581 4582 4583 4584 4585 4586 4587 4588 4589 4590 4591 4592 4593 4594 4595 4596 4597 4598 4599 4600 4601 4602 4603 4604 4605 4606 4607 4608 4609 4610 4611 4612 4613 4614 4615 4616 4617 4618 4619 4620 4621 4622 4623 4624 4625 4626 4627 4628 4629 4630 4631 4632 4633 4634 4635 4636 4637 4638 4639 4640 4641 4642 4643 4644 4645 4646 4647 4648 4649 4650 4651 4652 4653 4654 4655 4656 4657 4658 4659 4660 4661 4662 4663 4664 4665 4666 4667 4668 4669 4670 4671 4672 4673 4674 4675 4676 4677 4678 4679 4680 4681 4682 4683 4684 4685 4686 4687 4688 4689 4690 4691 4692 4693 4694 4695 4696 4697 4698 4699 4700 4701 4702 4703 4704 4705 4706 4707 4708 4709 4710 4711 4712 4713 4714 4715 4716 4717 4718 4719 4720 4721 4722 4723 4724 4725 4726 4727 4728 4729 4730 4731 4732 4733 4734 4735 4736 4737 4738 4739 4740 4741 4742 4743 4744 4745 4746 4747 4748 4749 4750 4751 4752 4753 4754 4755 4756 4757 4758 4759 4760 4761 4762 4763 4764 4765 4766 4767 4768 4769 4770 4771 4772 4773 4774 4775 4776 4777 4778 4779 4780 4781 4782 4783 4784 4785 4786 4787 4788 4789 4790 4791 4792 4793 4794 4795 4796 4797 4798 4799 4800 4801 4802 4803 4804 4805 4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 4816 4817 4818 4819 4820 4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833 4834 4835 4836 4837 4838 4839 4840 4841 4842 4843 4844 4845 4846 4847 4848 4849 4850 4851 4852 4853 4854 4855 4856 4857 4858 4859 4860 4861 4862 4863 4864 4865 4866 4867 4868 4869 4870 4871 4872 4873 4874 4875 4876 4877 4878 4879 4880 4881 4882 4883 4884 4885 4886 4887 4888 4889 4890 4891 4892 4893 4894 4895 4896 4897 4898 4899 4900 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 4913 4914 4915 4916 4917 4918 4919 4920 4921 4922 4923 4924 4925 4926 4927 4928 4929 4930 4931 4932 4933 4934 4935 4936 4937 4938 4939 4940 4941 4942 4943 4944 4945 4946 4947 4948 4949 4950 4951 4952 4953 4954 4955 4956 4957 4958 4959 4960 4961 4962 4963 4964 4965 4966 4967 4968 4969 4970 4971 4972 4973 4974 4975 4976 4977 4978 4979 4980 4981 4982 4983 4984 4985 4986 4987 4988 4989 4990 4991 4992 4993 4994 4995 4996 4997 4998 4999 5000 5001 5002 5003 5004 5005 5006 5007 5008 5009 5010 5011 5012 5013 5014 5015 5016 5017 5018 5019 5020 5021 5022 5023 5024 5025 5026 5027 5028 5029 5030 5031 5032 5033 5034 5035 5036 5037 5038 5039 5040 5041 5042 5043 5044 5045 5046 5047 5048 5049 5050 5051 5052 5053 5054 5055 5056 5057 5058 5059 5060 5061 5062 5063 5064 5065 5066 5067 5068 5069 5070 5071 5072 5073 5074 5075 5076 5077 5078 5079 5080 5081 5082 5083 5084 5085 5086 5087 5088 5089 5090 5091 5092 5093 5094 5095 5096 5097 5098 5099 5100 5101 5102 5103 5104 5105 5106 5107 5108 5109 5110 5111 5112 5113 5114 5115 5116 5117 5118 5119 5120 5121 5122 5123 5124 5125 5126 5127 5128 5129 5130 5131 5132 5133 5134 5135 5136 5137 5138 5139 5140 5141 5142 5143 5144 5145 5146 5147 5148 5149 5150 5151 5152 5153 5154 5155 5156 5157 5158 5159 5160 5161 5162 5163 5164 5165 5166 5167 5168 5169 5170 5171 5172 5173 5174 5175 5176 5177 5178 5179 5180 5181 5182 5183 5184 5185 5186 5187 5188 5189 5190 5191 5192 5193 5194 5195 5196 5197 
5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 5213 5214 5215 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227 5228 5229 5230 5231 5232 5233 5234 5235 5236 5237 5238 5239 5240 5241 5242 5243 5244 5245 5246 5247 5248 5249 5250 5251 5252 5253 5254 5255 5256 5257 5258 5259 5260 5261 5262 5263 5264 5265 5266 5267 5268 5269 5270 5271 5272 5273 5274 5275 5276 5277 5278 5279 5280 5281 5282 5283 5284 5285 5286 5287 5288 5289 5290 5291 5292 5293 5294 5295 5296 5297 5298 5299 5300 5301 5302 5303 5304 5305 5306 5307 5308 5309 5310 5311 5312 5313 5314 5315 5316 5317 5318 5319 5320 5321 5322 5323 5324 5325 5326 5327 5328 5329 5330 5331 5332 5333 5334 5335 5336 5337 5338 5339 5340 5341 5342 5343 5344 5345 5346 5347 5348 5349 5350 5351 5352 5353 5354 5355 5356 5357 5358 5359 5360 5361 5362 5363 5364 5365 5366 5367 5368 5369 5370 5371 5372 5373 5374 5375 5376 5377 5378 5379 5380 5381 5382 5383 5384 5385 5386 5387 5388 5389 5390 5391 5392 5393 5394 5395 5396 5397 5398 5399 5400 5401 5402 5403 5404 5405 5406 5407 5408 5409 5410 5411 5412 5413 5414 5415 5416 5417 5418 5419 5420 5421 5422 5423 5424 5425 5426 5427 5428 5429 5430 5431 5432 5433 5434 5435 5436 5437 5438 5439 5440 5441 5442 5443 5444 5445 5446 5447 5448 5449 5450 5451 5452 5453 5454 5455 5456 5457 5458 5459 5460 5461 5462 5463 5464 5465 5466 5467 5468 5469 5470 5471 5472 5473 5474 5475 5476 5477 5478 5479 5480 5481 5482 5483 5484 5485 5486 5487 5488 5489 5490 5491 5492 5493 5494 5495 5496 5497 5498 5499 5500 5501 5502 5503 5504 5505 5506 5507 5508 5509 5510 5511 5512 5513 5514 5515 5516 5517 5518 5519 5520 5521 5522 5523 5524 5525 5526 5527 5528 5529 5530 5531 5532 5533 5534 5535 5536 5537 5538 5539 5540 5541 5542 5543 5544 5545 5546 5547 5548 5549 5550 5551 5552 5553 5554 5555 5556 5557 5558 5559 5560 5561 5562 5563 5564 5565 5566 5567 5568 5569 5570 5571 5572 5573 5574 5575 5576 5577 5578 5579 5580 5581 5582 5583 5584 5585 5586 5587 5588 5589 5590 5591 5592 5593 5594 5595 5596 5597 5598 5599 5600 5601 5602 5603 5604 5605 5606 5607 5608 5609 5610 5611 5612 5613 5614 5615 5616 5617 5618 5619 5620 5621 5622 5623 5624 5625 5626 5627 5628 5629 5630 5631 5632 5633 5634 5635 5636 5637 5638 5639 5640 5641 5642 5643 5644 5645 5646 5647 5648 5649 5650 5651 5652 5653 5654 5655 5656 5657 5658 5659 5660 5661 5662 5663 5664 5665 5666 5667 5668 5669 5670 5671 5672 5673 5674 5675 5676 5677 5678 5679 5680 5681 5682 5683 5684 5685 5686 5687 5688 5689 5690 5691 5692 5693 5694 5695 5696 5697 5698 5699 5700 5701 5702 5703 5704 5705 5706 5707 5708 5709 5710 5711 5712 5713 5714 5715 5716 5717 5718 5719 5720 5721 5722 5723 5724 5725 5726 5727 5728 5729 5730 5731 5732 5733 5734 5735 5736 5737 5738 5739 5740 5741 5742 5743 5744 5745 5746 5747 5748 5749 5750 5751 5752 5753 5754 5755 5756 5757 5758 5759 5760 5761 5762 5763 5764 5765 5766 5767 5768 5769 5770 5771 5772 5773 5774 5775 5776 5777 5778 5779 5780 5781 5782 5783 5784 5785 5786 5787 5788 5789 5790 5791 5792 5793 5794 5795 5796 5797 5798 5799 5800 5801 5802 5803 5804 5805 5806 5807 5808 5809 5810 5811 5812 5813 5814 5815 5816 5817 5818 5819 5820 5821 5822 5823 5824 5825 5826 5827 5828 5829 5830 5831 5832 5833 5834 5835 5836 5837 5838 5839 5840 5841 5842 5843 5844 5845 5846 5847 5848 5849 5850 5851 5852 5853 5854 5855 5856 5857 5858 5859 5860 5861 5862 5863 5864 5865 5866 5867 5868 5869 5870 5871 5872 5873 5874 5875 5876 5877 5878 5879 5880 5881 5882 5883 5884 5885 5886 5887 5888 5889 5890 5891 5892 5893 5894 5895 5896 5897 5898 5899 5900 5901 5902 5903 5904 5905 5906 5907 5908 
5909 5910 5911 5912 5913 5914 5915 5916 5917 5918 5919 5920 5921 5922 5923 5924 5925 5926 5927 5928 5929 5930 5931 5932 5933 5934 5935 5936 5937 5938 5939 5940 5941 5942 5943 5944 5945 5946 5947 5948 5949 5950 5951 5952 5953 5954 5955 5956 5957 5958 5959 5960 5961 5962 5963 5964 5965 5966 5967 5968 5969 5970 5971 5972 5973 5974 5975 5976 5977 5978 5979 5980 5981 5982 5983 5984 5985 5986 5987 5988 5989 5990 5991 5992 5993 5994 5995 5996 5997 5998 5999 6000 6001 6002 6003 6004 6005 6006 6007 6008 6009 6010 6011 6012 6013 6014 6015 6016 6017 6018 6019 6020 6021 6022 6023 6024 6025 6026 6027 6028 6029 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 6043 6044 6045 6046 6047 6048 6049 6050 6051 6052 6053 6054 6055 6056 6057 6058 6059 6060 6061 6062 6063 6064 6065 6066 6067 6068 6069 6070 6071 6072 6073 6074 6075 6076 6077 6078 6079 6080 6081 6082 6083 6084 6085 6086 6087 6088 6089 6090 6091 6092 6093 6094 6095 6096 6097 6098 6099 6100 6101 6102 6103 6104 6105 6106 6107 6108 6109 6110 6111 6112 6113 6114 6115 6116 6117 6118 6119 6120 6121 6122 6123 6124 6125 6126 6127 6128 6129 6130 6131 6132 6133 6134 6135 6136 6137 6138 6139 6140 6141 6142 6143 6144 6145 6146 6147 6148 6149 6150 6151 6152 6153 6154 6155 6156 6157 6158 6159 6160 6161 6162 6163 6164 6165 6166 6167 6168 6169 6170 6171 6172 6173 6174 6175 6176 6177 6178 6179 6180 6181 6182 6183 6184 6185 6186 6187 6188 6189 6190 6191 6192 6193 6194 6195 6196 6197 6198 6199 6200 6201 6202 6203 6204 6205 6206 6207 6208 6209 6210 6211 6212 6213 6214 6215 6216 6217 6218 6219 6220 6221 6222 6223 6224 6225 6226 6227 6228 6229 6230 6231 6232 6233 6234 6235 6236 6237 6238 6239 6240 6241 6242 6243 6244 6245 6246 6247 6248 6249 6250 6251 6252 6253 6254 6255 6256 6257 6258 6259 6260 6261 6262 6263 6264 6265 6266 6267 6268 6269 6270 6271 6272 6273 6274 6275 6276 6277 6278 6279 6280 6281 6282 6283 6284 6285 6286 6287 6288 6289 6290 6291 6292 6293 6294 6295 6296 6297 6298 6299 6300 6301 6302 6303 6304 6305 6306 6307 6308 6309 6310 6311 6312 6313 6314 6315 6316 6317 6318 6319 6320 6321 6322 6323 6324 6325 6326 6327 6328 6329 6330 6331 6332 6333 6334 6335 6336 6337 6338 6339 6340 6341 6342 6343 6344 6345 6346 6347 6348 6349 6350 6351 6352 6353 6354 6355 6356 6357 6358 6359 6360 6361 6362 6363 6364 6365 6366 6367 6368 6369 6370 6371 6372 6373 6374 6375 6376 6377 6378 6379 6380 6381 6382 6383 6384 6385 6386 6387 6388 6389 6390 6391 6392 6393 6394 6395 6396 6397 6398 6399 6400 6401 6402 6403 6404 6405 6406 6407 6408 6409 6410 6411 6412 6413 6414 6415 6416 6417 6418 6419 6420 6421 6422 6423 6424 6425 6426 6427 6428 6429 6430 6431 6432 6433 6434 6435 6436 6437 6438 6439 6440 6441 6442 6443 6444 6445 6446 6447 6448 6449 6450 6451 6452 6453 6454 6455 6456 6457 6458 6459 6460 6461 6462 6463 6464 6465 6466 6467 6468 6469 6470 6471 6472 6473 6474 6475 6476 6477 6478 6479 6480 6481 6482 6483 6484 6485 6486 6487 6488 6489 6490 6491 6492 6493 6494 6495 6496 6497 6498 6499 6500 6501 6502 6503 6504 6505 6506 6507 6508 6509 6510 6511 6512 6513 6514 6515 6516 6517 6518 6519 6520 6521 6522 6523 6524 6525 6526 6527 6528 6529 6530 6531 6532 6533 6534 6535 6536 6537 6538 6539 6540 6541 6542 6543 6544 6545 6546 6547 6548 6549 6550 6551 6552 6553 6554 6555 6556 6557 6558 6559 6560 6561 6562 6563 6564 6565 6566 6567 6568 6569 6570 6571 6572 6573 6574 6575 6576 6577 6578 6579 6580 6581 6582 6583 6584 6585 6586 6587 6588 6589 6590 6591 6592 6593 6594 6595 6596 6597 6598 6599 6600 6601 6602 6603 6604 6605 6606 6607 6608 6609 6610 6611 6612 6613 6614 6615 6616 6617 6618 6619 
6620 6621 6622 6623 6624 6625 6626 6627 6628 6629 6630 6631 6632 6633 6634 6635 6636 6637 6638 6639 6640 6641 6642 6643 6644 6645 6646 6647 6648 6649 6650 6651 6652 6653 6654 6655 6656 6657 6658 6659 6660 6661 6662 6663 6664 6665 6666 6667 6668 6669 6670 6671 6672 6673 6674 6675 6676 6677 6678 6679 6680 6681 6682 6683 6684 6685 6686 6687 6688 6689 6690 6691 6692 6693 6694 6695 6696 6697 6698 6699 6700 6701 6702 6703 6704 6705 6706 6707 6708 6709 6710 6711 6712 6713 6714 6715 6716 6717 6718 6719 6720 6721 6722 6723 6724 6725 6726 6727 6728 6729 6730 6731 6732 6733 6734 6735 6736 6737 6738 6739 6740 6741 6742 6743 6744 6745 6746 6747 6748 6749 6750 6751 6752 6753 6754 6755 6756 6757 6758 6759 6760 6761 6762 6763 6764 6765 6766 6767 6768 6769 6770 6771 6772 6773 6774 6775 6776 6777 6778 6779 6780 6781 6782 6783 6784 6785 6786 6787 6788 6789 6790 6791 6792 6793 6794 6795 6796 6797 6798 6799 6800 6801 6802 6803 6804 6805 6806 6807 6808 6809 6810 6811 6812 6813 6814 6815 6816 6817 6818 6819 6820 6821 6822 6823 6824 6825 6826 6827 6828 6829 6830 6831 6832 6833 6834 6835 6836 6837 6838 6839 6840 6841 6842 6843 6844 6845 6846 6847 6848 6849 6850 6851 6852 6853 6854 6855 6856 6857 6858 6859 6860 6861 6862 6863 6864 6865 6866 6867 6868 6869 6870 6871 6872 6873 6874 6875 6876 6877 6878 6879 6880 6881 6882 6883 6884 6885 6886 6887 6888 6889 6890 6891 6892 6893 6894 6895 6896 6897 6898 6899 6900 6901 6902 6903 6904 6905 6906 6907 6908 6909 6910 6911 6912 6913 6914 6915 6916 6917 6918 6919 6920 6921 6922 6923 6924 6925 6926 6927 6928 6929 6930 6931 6932 6933 6934 6935 6936 6937 6938 6939 6940 6941 6942 6943 6944 6945 6946 6947 6948 6949 6950 6951 6952 6953 6954 6955 6956 6957 6958 6959 6960 6961 6962 6963 6964 6965 6966 6967 6968 6969 6970 6971 6972 6973 6974 6975 6976 6977 6978 6979 6980 6981 6982 6983 6984 6985 6986 6987 6988 6989 6990 6991 6992 6993 6994 6995 6996 6997 6998 6999 7000 7001 7002 7003 7004 7005 7006 7007 7008 7009 7010 7011 7012 7013 7014 7015 7016 7017 7018 7019 7020 7021 7022 7023 7024 7025 7026 7027 7028 7029 7030 7031 7032 7033 7034 7035 7036 7037 7038 7039 7040 7041 7042 7043 7044 7045 7046 7047 7048 7049 7050 7051 7052 7053 7054 7055 7056 7057 7058 7059 7060 7061 7062 7063 7064 7065 7066 7067 7068 7069 7070 7071 7072 7073 7074 7075 7076 7077 7078 7079 7080 7081 7082 7083 7084 7085 7086 7087 7088 7089 7090 7091 7092 7093 7094 7095 7096 7097 7098 7099 7100 7101 7102 7103 7104 7105 7106 7107 7108 7109 7110 7111 7112 7113 7114 7115 7116 7117 7118 7119 7120 7121 7122 7123 7124 7125 7126 7127 7128 7129 7130 7131 7132 7133 7134 7135 7136 7137 7138 7139 7140 7141 7142 7143 7144 7145 7146 7147 7148 7149 7150 7151 7152 7153 7154 7155 7156 7157 7158 7159 7160 7161 7162 7163 7164 7165 7166 7167 7168 7169 7170 7171 7172 7173 7174 7175 7176 7177 7178 7179 7180 7181 7182 7183 7184 7185 7186 7187 7188 7189 7190 7191 7192 7193 7194 7195 7196 7197 7198 7199 7200 7201 7202 7203 7204 7205 7206 7207 7208 7209 7210 7211 7212 7213 7214 7215 7216 7217 7218 7219 7220 7221 7222 7223 7224 7225 7226 7227 7228 7229 7230 7231 7232 7233 7234 7235 7236 7237 7238 7239 7240 7241 7242 7243 7244 7245 7246 7247 7248 7249 7250 7251 7252 7253 7254 7255 7256 7257 7258 7259 7260 7261 7262 7263 7264 7265 7266 7267 7268 7269 7270 7271 7272 7273 7274 7275 7276 7277 7278 7279 7280 7281 7282 7283 7284 7285 7286 7287 7288 7289 7290 7291 7292 7293 7294 7295 7296 7297 7298 7299 7300 7301 7302 7303 7304 7305 7306 7307 7308 7309 7310 7311 7312 7313 7314 7315 7316 7317 7318 7319 7320 7321 7322 7323 7324 7325 7326 7327 7328 7329 7330 
7331 7332 7333 7334 7335 7336 7337 7338 7339 7340 7341 7342 7343 7344 7345 7346 7347 7348 7349 7350 7351 7352 7353 7354 7355 7356 7357 7358 7359 7360 7361 7362 7363 7364 7365 7366 7367 7368 7369 7370 7371 7372 7373 7374 7375 7376 7377 7378 7379 7380 7381 7382 7383 7384 7385 7386 7387 7388 7389 7390 7391 7392 7393 7394 7395 7396 7397 7398 7399 7400 7401 7402 7403 7404 7405 7406 7407 7408 7409 7410 7411 7412 7413 7414 7415 7416 7417 7418 7419 7420 7421 7422 7423 7424 7425 7426 7427 7428 7429 7430 7431 7432 7433 7434 7435 7436 7437 7438 7439 7440 7441 7442 7443 7444 7445 7446 7447 7448 7449 7450 7451 7452 7453 7454 7455 7456 7457 7458 7459 7460 7461 7462 7463 7464 7465 7466 7467 7468 7469 7470 7471 7472 7473 7474 7475 7476 7477 7478 7479 7480 7481 7482 7483 7484 7485 7486 7487 7488 7489 7490 7491 7492 7493 7494 7495 7496 7497 7498 7499 7500 7501 7502 7503 7504 7505 7506 7507 7508 7509 7510 7511 7512 7513 7514 7515 7516 7517 7518 7519 7520 7521 7522 7523 7524 7525 7526 7527 7528 7529 7530 7531 7532 7533 7534 7535 7536 7537 7538 7539 7540 7541 7542 7543 7544 7545 7546 7547 7548 7549 7550 7551 7552 7553 7554 7555 7556 7557 7558 7559 7560 7561 7562 7563 7564 7565 7566 7567 7568 7569 7570 7571 7572 7573 7574 7575 7576 7577 7578 7579 7580 7581 7582 7583 7584 7585 7586 7587 7588 7589 7590 7591 7592 7593 7594 7595 7596 7597 7598 7599 7600 7601 7602 7603 7604 7605 7606 7607 7608 7609 7610 7611 7612 7613 7614 7615 7616 7617 7618 7619 7620 7621 7622 7623 7624 7625 7626 7627 7628 7629 7630 7631 7632 7633 7634 7635 7636 7637 7638 7639 7640 7641 7642 7643 7644 7645 7646 7647 7648 7649 7650 7651 7652 7653 7654 7655 7656 7657 7658 7659 7660 7661 7662 7663 7664 7665 7666 7667 7668 7669 7670 7671 7672 7673 7674 7675 7676 7677 7678 7679 7680 7681 7682 7683 7684 7685 7686 7687 7688 7689 7690 7691 7692 7693 7694 7695 7696 7697 7698 7699 7700 7701 7702 7703 7704 7705 7706 7707 7708 7709 7710 7711 7712 7713 7714 7715 7716 7717 7718 7719 7720 7721 7722 7723 7724 7725 7726 7727 7728 7729 7730 7731 7732 7733 7734 7735 7736 7737 7738 7739 7740 7741 7742 7743 7744 7745 7746 7747 7748 7749 7750 7751 7752 7753 7754 7755 7756 7757 7758 7759 7760 7761 7762 7763 7764 7765 7766 7767 7768 7769 7770 7771 7772 7773 7774 7775 7776 7777 7778 7779 7780 7781 7782 7783 7784 7785 7786 7787 7788 7789 7790 7791 7792 7793 7794 7795 7796 7797 7798 7799 7800 7801 7802 7803 7804 7805 7806 7807 7808 7809 7810 7811 7812 7813 7814 7815 7816 7817 7818 7819 7820 7821 7822 7823 7824 7825 7826 7827 7828 7829 7830 7831 7832 7833 7834 7835 7836 7837 7838 7839 7840 7841 7842 7843 7844 7845 7846 7847 7848 7849 7850 7851 7852 7853 7854 7855 7856 7857 7858 7859 7860 7861 7862 7863 7864 7865 7866 7867 7868 7869 7870 7871 7872 7873 7874 7875 7876 7877 7878 7879 7880 7881 7882 7883 7884 7885 7886 7887 7888 7889 7890 7891 7892 7893 7894 7895 7896 7897 7898 7899 7900 7901 7902 7903 7904 7905 7906 7907 7908 7909 7910 7911 7912 7913 7914 7915 7916 7917 7918 7919 7920 7921 7922 7923 7924 7925 7926 7927 7928 7929 7930 7931 7932 7933 7934 7935 7936 7937 7938 7939 7940 7941 7942 7943 7944 7945 7946 7947 7948 7949 7950 7951 7952 7953 7954 7955 7956 7957 7958 7959 7960 7961 7962 7963 7964 7965 7966 7967 7968 7969 7970 7971 7972 7973 7974 7975 7976 7977 7978 7979 7980 7981 7982 7983 7984 7985 7986 7987 7988 7989 7990 7991 7992 7993 7994 7995 7996 7997 7998 7999 8000 8001 8002 8003 8004 8005 8006 8007 8008 8009 8010 8011 8012 8013 8014 8015 8016 8017 8018 8019 8020 8021 8022 8023 8024 8025 8026 8027 8028 8029 8030 8031 8032 8033 8034 8035 8036 8037 8038 8039 8040 8041 
/* * Copyright (c) 2001 The Regents of the University of Michigan. * All rights reserved. * * Kendrick Smith <kmsmith@umich.edu> * Andy Adamson <kandros@umich.edu> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED.
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include <linux/file.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/namei.h> #include <linux/swap.h> #include <linux/pagemap.h> #include <linux/ratelimit.h> #include <linux/sunrpc/svcauth_gss.h> #include <linux/sunrpc/addr.h> #include <linux/jhash.h> #include <linux/string_helpers.h> #include <linux/fsnotify.h> #include <linux/rhashtable.h> #include <linux/nfs_ssc.h> #include "xdr4.h" #include "xdr4cb.h" #include "vfs.h" #include "current_stateid.h" #include "netns.h" #include "pnfs.h" #include "filecache.h" #include "trace.h" #define NFSDDBG_FACILITY NFSDDBG_PROC #define all_ones {{ ~0, ~0}, ~0} static const stateid_t one_stateid = { .si_generation = ~0, .si_opaque = all_ones, }; static const stateid_t zero_stateid = { /* all fields zero */ }; static const stateid_t currentstateid = { .si_generation = 1, }; static const stateid_t close_stateid = { .si_generation = 0xffffffffU, }; static u64 current_sessionid = 1; #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t))) #define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t))) #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t))) #define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t))) /* forward declarations */ static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner); static void nfs4_free_ol_stateid(struct nfs4_stid *stid); void nfsd4_end_grace(struct nfsd_net *nn); static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps); static void nfsd4_file_hash_remove(struct nfs4_file *fi); /* Locking: */ /* * Currently used for the del_recall_lru and file hash table. In an * effort to decrease the scope of the client_mutex, this spinlock may * eventually cover more: */ static DEFINE_SPINLOCK(state_lock); enum nfsd4_st_mutex_lock_subclass { OPEN_STATEID_MUTEX = 0, LOCK_STATEID_MUTEX = 1, }; /* * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for * the refcount on the open stateid to drop. */ static DECLARE_WAIT_QUEUE_HEAD(close_wq); /* * A waitqueue where a writer to clients/#/ctl destroying a client can * wait for cl_rpc_users to drop to 0 and then for the client to be * unhashed.
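 *
 * The matching wakeup is in put_client_renew() and put_client_renew_locked()
 * below: once cl_rpc_users drops to zero for a client that has already
 * expired, they call wake_up_all(&expiry_wq).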
*/ static DECLARE_WAIT_QUEUE_HEAD(expiry_wq); static struct kmem_cache *client_slab; static struct kmem_cache *openowner_slab; static struct kmem_cache *lockowner_slab; static struct kmem_cache *file_slab; static struct kmem_cache *stateid_slab; static struct kmem_cache *deleg_slab; static struct kmem_cache *odstate_slab; static void free_session(struct nfsd4_session *); static const struct nfsd4_callback_ops nfsd4_cb_recall_ops; static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops; static struct workqueue_struct *laundry_wq; int nfsd4_create_laundry_wq(void) { int rc = 0; laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4"); if (laundry_wq == NULL) rc = -ENOMEM; return rc; } void nfsd4_destroy_laundry_wq(void) { destroy_workqueue(laundry_wq); } static bool is_session_dead(struct nfsd4_session *ses) { return ses->se_flags & NFS4_SESSION_DEAD; } static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me) { if (atomic_read(&ses->se_ref) > ref_held_by_me) return nfserr_jukebox; ses->se_flags |= NFS4_SESSION_DEAD; return nfs_ok; } static bool is_client_expired(struct nfs4_client *clp) { return clp->cl_time == 0; } static void nfsd4_dec_courtesy_client_count(struct nfsd_net *nn, struct nfs4_client *clp) { if (clp->cl_state != NFSD4_ACTIVE) atomic_add_unless(&nn->nfsd_courtesy_clients, -1, 0); } static __be32 get_client_locked(struct nfs4_client *clp) { struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); lockdep_assert_held(&nn->client_lock); if (is_client_expired(clp)) return nfserr_expired; atomic_inc(&clp->cl_rpc_users); nfsd4_dec_courtesy_client_count(nn, clp); clp->cl_state = NFSD4_ACTIVE; return nfs_ok; } /* must be called under the client_lock */ static inline void renew_client_locked(struct nfs4_client *clp) { struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); if (is_client_expired(clp)) { WARN_ON(1); printk("%s: client (clientid %08x/%08x) already expired\n", __func__, clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id); return; } list_move_tail(&clp->cl_lru, &nn->client_lru); clp->cl_time = ktime_get_boottime_seconds(); nfsd4_dec_courtesy_client_count(nn, clp); clp->cl_state = NFSD4_ACTIVE; } static void put_client_renew_locked(struct nfs4_client *clp) { struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); lockdep_assert_held(&nn->client_lock); if (!atomic_dec_and_test(&clp->cl_rpc_users)) return; if (!is_client_expired(clp)) renew_client_locked(clp); else wake_up_all(&expiry_wq); } static void put_client_renew(struct nfs4_client *clp) { struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock)) return; if (!is_client_expired(clp)) renew_client_locked(clp); else wake_up_all(&expiry_wq); spin_unlock(&nn->client_lock); } static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses) { __be32 status; if (is_session_dead(ses)) return nfserr_badsession; status = get_client_locked(ses->se_client); if (status) return status; atomic_inc(&ses->se_ref); return nfs_ok; } static void nfsd4_put_session_locked(struct nfsd4_session *ses) { struct nfs4_client *clp = ses->se_client; struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); lockdep_assert_held(&nn->client_lock); if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses)) free_session(ses); put_client_renew_locked(clp); } static void nfsd4_put_session(struct nfsd4_session *ses) { struct nfs4_client *clp = ses->se_client; struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); spin_lock(&nn->client_lock); 
nfsd4_put_session_locked(ses); spin_unlock(&nn->client_lock); } static struct nfsd4_blocked_lock * find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh, struct nfsd_net *nn) { struct nfsd4_blocked_lock *cur, *found = NULL; spin_lock(&nn->blocked_locks_lock); list_for_each_entry(cur, &lo->lo_blocked, nbl_list) { if (fh_match(fh, &cur->nbl_fh)) { list_del_init(&cur->nbl_list); WARN_ON(list_empty(&cur->nbl_lru)); list_del_init(&cur->nbl_lru); found = cur; break; } } spin_unlock(&nn->blocked_locks_lock); if (found) locks_delete_block(&found->nbl_lock); return found; } static struct nfsd4_blocked_lock * find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh, struct nfsd_net *nn) { struct nfsd4_blocked_lock *nbl; nbl = find_blocked_lock(lo, fh, nn); if (!nbl) { nbl = kmalloc(sizeof(*nbl), GFP_KERNEL); if (nbl) { INIT_LIST_HEAD(&nbl->nbl_list); INIT_LIST_HEAD(&nbl->nbl_lru); fh_copy_shallow(&nbl->nbl_fh, fh); locks_init_lock(&nbl->nbl_lock); kref_init(&nbl->nbl_kref); nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client, &nfsd4_cb_notify_lock_ops, NFSPROC4_CLNT_CB_NOTIFY_LOCK); } } return nbl; } static void free_nbl(struct kref *kref) { struct nfsd4_blocked_lock *nbl; nbl = container_of(kref, struct nfsd4_blocked_lock, nbl_kref); kfree(nbl); } static void free_blocked_lock(struct nfsd4_blocked_lock *nbl) { locks_delete_block(&nbl->nbl_lock); locks_release_private(&nbl->nbl_lock); kref_put(&nbl->nbl_kref, free_nbl); } static void remove_blocked_locks(struct nfs4_lockowner *lo) { struct nfs4_client *clp = lo->lo_owner.so_client; struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); struct nfsd4_blocked_lock *nbl; LIST_HEAD(reaplist); /* Dequeue all blocked locks */ spin_lock(&nn->blocked_locks_lock); while (!list_empty(&lo->lo_blocked)) { nbl = list_first_entry(&lo->lo_blocked, struct nfsd4_blocked_lock, nbl_list); list_del_init(&nbl->nbl_list); WARN_ON(list_empty(&nbl->nbl_lru)); list_move(&nbl->nbl_lru, &reaplist); } spin_unlock(&nn->blocked_locks_lock); /* Now free them */ while (!list_empty(&reaplist)) { nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock, nbl_lru); list_del_init(&nbl->nbl_lru); free_blocked_lock(nbl); } } static void nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb) { struct nfsd4_blocked_lock *nbl = container_of(cb, struct nfsd4_blocked_lock, nbl_cb); locks_delete_block(&nbl->nbl_lock); } static int nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task) { trace_nfsd_cb_notify_lock_done(&zero_stateid, task); /* * Since this is just an optimization, we don't try very hard if it * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and * just quit trying on anything else. */ switch (task->tk_status) { case -NFS4ERR_DELAY: rpc_delay(task, 1 * HZ); return 0; default: return 1; } } static void nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb) { struct nfsd4_blocked_lock *nbl = container_of(cb, struct nfsd4_blocked_lock, nbl_cb); free_blocked_lock(nbl); } static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = { .prepare = nfsd4_cb_notify_lock_prepare, .done = nfsd4_cb_notify_lock_done, .release = nfsd4_cb_notify_lock_release, }; /* * We store the NONE, READ, WRITE, and BOTH bits separately in the * st_{access,deny}_bmap field of the stateid, in order to track not * only what share bits are currently in force, but also what * combinations of share bits previous opens have used. 
This allows us * to enforce the recommendation in * https://datatracker.ietf.org/doc/html/rfc7530#section-16.19.4 that * the server return an error if the client attempt to downgrade to a * combination of share bits not explicable by closing some of its * previous opens. * * This enforcement is arguably incomplete, since we don't keep * track of access/deny bit combinations; so, e.g., we allow: * * OPEN allow read, deny write * OPEN allow both, deny none * DOWNGRADE allow read, deny none * * which we should reject. * * But you could also argue that our current code is already overkill, * since it only exists to return NFS4ERR_INVAL on incorrect client * behavior. */ static unsigned int bmap_to_share_mode(unsigned long bmap) { int i; unsigned int access = 0; for (i = 1; i < 4; i++) { if (test_bit(i, &bmap)) access |= i; } return access; } /* set share access for a given stateid */ static inline void set_access(u32 access, struct nfs4_ol_stateid *stp) { unsigned char mask = 1 << access; WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH); stp->st_access_bmap |= mask; } /* clear share access for a given stateid */ static inline void clear_access(u32 access, struct nfs4_ol_stateid *stp) { unsigned char mask = 1 << access; WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH); stp->st_access_bmap &= ~mask; } /* test whether a given stateid has access */ static inline bool test_access(u32 access, struct nfs4_ol_stateid *stp) { unsigned char mask = 1 << access; return (bool)(stp->st_access_bmap & mask); } /* set share deny for a given stateid */ static inline void set_deny(u32 deny, struct nfs4_ol_stateid *stp) { unsigned char mask = 1 << deny; WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH); stp->st_deny_bmap |= mask; } /* clear share deny for a given stateid */ static inline void clear_deny(u32 deny, struct nfs4_ol_stateid *stp) { unsigned char mask = 1 << deny; WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH); stp->st_deny_bmap &= ~mask; } /* test whether a given stateid is denying specific access */ static inline bool test_deny(u32 deny, struct nfs4_ol_stateid *stp) { unsigned char mask = 1 << deny; return (bool)(stp->st_deny_bmap & mask); } static int nfs4_access_to_omode(u32 access) { switch (access & NFS4_SHARE_ACCESS_BOTH) { case NFS4_SHARE_ACCESS_READ: return O_RDONLY; case NFS4_SHARE_ACCESS_WRITE: return O_WRONLY; case NFS4_SHARE_ACCESS_BOTH: return O_RDWR; } WARN_ON_ONCE(1); return O_RDONLY; } static inline int access_permit_read(struct nfs4_ol_stateid *stp) { return test_access(NFS4_SHARE_ACCESS_READ, stp) || test_access(NFS4_SHARE_ACCESS_BOTH, stp) || test_access(NFS4_SHARE_ACCESS_WRITE, stp); } static inline int access_permit_write(struct nfs4_ol_stateid *stp) { return test_access(NFS4_SHARE_ACCESS_WRITE, stp) || test_access(NFS4_SHARE_ACCESS_BOTH, stp); } static inline struct nfs4_stateowner * nfs4_get_stateowner(struct nfs4_stateowner *sop) { atomic_inc(&sop->so_count); return sop; } static int same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner) { return (sop->so_owner.len == owner->len) && 0 == memcmp(sop->so_owner.data, owner->data, owner->len); } static struct nfs4_openowner * find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open, struct nfs4_client *clp) { struct nfs4_stateowner *so; lockdep_assert_held(&clp->cl_lock); list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval], so_strhash) { if (!so->so_is_open_owner) continue; if (same_owner_str(so, &open->op_owner)) return openowner(nfs4_get_stateowner(so)); } return NULL; } static struct nfs4_openowner * 
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open, struct nfs4_client *clp) { struct nfs4_openowner *oo; spin_lock(&clp->cl_lock); oo = find_openstateowner_str_locked(hashval, open, clp); spin_unlock(&clp->cl_lock); return oo; } static inline u32 opaque_hashval(const void *ptr, int nbytes) { unsigned char *cptr = (unsigned char *) ptr; u32 x = 0; while (nbytes--) { x *= 37; x += *cptr++; } return x; } static void nfsd4_free_file_rcu(struct rcu_head *rcu) { struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu); kmem_cache_free(file_slab, fp); } void put_nfs4_file(struct nfs4_file *fi) { if (refcount_dec_and_test(&fi->fi_ref)) { nfsd4_file_hash_remove(fi); WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate)); WARN_ON_ONCE(!list_empty(&fi->fi_delegations)); call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu); } } static struct nfsd_file * find_writeable_file_locked(struct nfs4_file *f) { struct nfsd_file *ret; lockdep_assert_held(&f->fi_lock); ret = nfsd_file_get(f->fi_fds[O_WRONLY]); if (!ret) ret = nfsd_file_get(f->fi_fds[O_RDWR]); return ret; } static struct nfsd_file * find_writeable_file(struct nfs4_file *f) { struct nfsd_file *ret; spin_lock(&f->fi_lock); ret = find_writeable_file_locked(f); spin_unlock(&f->fi_lock); return ret; } static struct nfsd_file * find_readable_file_locked(struct nfs4_file *f) { struct nfsd_file *ret; lockdep_assert_held(&f->fi_lock); ret = nfsd_file_get(f->fi_fds[O_RDONLY]); if (!ret) ret = nfsd_file_get(f->fi_fds[O_RDWR]); return ret; } static struct nfsd_file * find_readable_file(struct nfs4_file *f) { struct nfsd_file *ret; spin_lock(&f->fi_lock); ret = find_readable_file_locked(f); spin_unlock(&f->fi_lock); return ret; } static struct nfsd_file * find_rw_file(struct nfs4_file *f) { struct nfsd_file *ret; spin_lock(&f->fi_lock); ret = nfsd_file_get(f->fi_fds[O_RDWR]); spin_unlock(&f->fi_lock); return ret; } struct nfsd_file * find_any_file(struct nfs4_file *f) { struct nfsd_file *ret; if (!f) return NULL; spin_lock(&f->fi_lock); ret = nfsd_file_get(f->fi_fds[O_RDWR]); if (!ret) { ret = nfsd_file_get(f->fi_fds[O_WRONLY]); if (!ret) ret = nfsd_file_get(f->fi_fds[O_RDONLY]); } spin_unlock(&f->fi_lock); return ret; } static struct nfsd_file *find_any_file_locked(struct nfs4_file *f) { lockdep_assert_held(&f->fi_lock); if (f->fi_fds[O_RDWR]) return f->fi_fds[O_RDWR]; if (f->fi_fds[O_WRONLY]) return f->fi_fds[O_WRONLY]; if (f->fi_fds[O_RDONLY]) return f->fi_fds[O_RDONLY]; return NULL; } static atomic_long_t num_delegations; unsigned long max_delegations; /* * Open owner state (share locks) */ /* hash tables for lock and open owners */ #define OWNER_HASH_BITS 8 #define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS) #define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1) static unsigned int ownerstr_hashval(struct xdr_netobj *ownername) { unsigned int ret; ret = opaque_hashval(ownername->data, ownername->len); return ret & OWNER_HASH_MASK; } static struct rhltable nfs4_file_rhltable ____cacheline_aligned_in_smp; static const struct rhashtable_params nfs4_file_rhash_params = { .key_len = sizeof_field(struct nfs4_file, fi_inode), .key_offset = offsetof(struct nfs4_file, fi_inode), .head_offset = offsetof(struct nfs4_file, fi_rlist), /* * Start with a single page hash table to reduce resizing churn * on light workloads. */ .min_size = 256, .automatic_shrinking = true, }; /* * Check if courtesy clients have conflicting access and resolve it if possible * * access: is op_share_access if share_access is true. 
* Check if access mode, op_share_access, would conflict with * the current deny mode of the file 'fp'. * access: is op_share_deny if share_access is false. * Check if the deny mode, op_share_deny, would conflict with * current access of the file 'fp'. * stp: skip checking this entry. * new_stp: normal open, not open upgrade. * * Function returns: * false - access/deny mode conflict with normal client. * true - no conflict or conflict with courtesy client(s) is resolved. */ static bool nfs4_resolve_deny_conflicts_locked(struct nfs4_file *fp, bool new_stp, struct nfs4_ol_stateid *stp, u32 access, bool share_access) { struct nfs4_ol_stateid *st; bool resolvable = true; unsigned char bmap; struct nfsd_net *nn; struct nfs4_client *clp; lockdep_assert_held(&fp->fi_lock); list_for_each_entry(st, &fp->fi_stateids, st_perfile) { /* ignore lock stateid */ if (st->st_openstp) continue; if (st == stp && new_stp) continue; /* check file access against deny mode or vice versa */ bmap = share_access ? st->st_deny_bmap : st->st_access_bmap; if (!(access & bmap_to_share_mode(bmap))) continue; clp = st->st_stid.sc_client; if (try_to_expire_client(clp)) continue; resolvable = false; break; } if (resolvable) { clp = stp->st_stid.sc_client; nn = net_generic(clp->net, nfsd_net_id); mod_delayed_work(laundry_wq, &nn->laundromat_work, 0); } return resolvable; } static void __nfs4_file_get_access(struct nfs4_file *fp, u32 access) { lockdep_assert_held(&fp->fi_lock); if (access & NFS4_SHARE_ACCESS_WRITE) atomic_inc(&fp->fi_access[O_WRONLY]); if (access & NFS4_SHARE_ACCESS_READ) atomic_inc(&fp->fi_access[O_RDONLY]); } static __be32 nfs4_file_get_access(struct nfs4_file *fp, u32 access) { lockdep_assert_held(&fp->fi_lock); /* Does this access mode make sense? */ if (access & ~NFS4_SHARE_ACCESS_BOTH) return nfserr_inval; /* Does it conflict with a deny mode already set? */ if ((access & fp->fi_share_deny) != 0) return nfserr_share_denied; __nfs4_file_get_access(fp, access); return nfs_ok; } static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny) { /* Common case is that there is no deny mode. */ if (deny) { /* Does this deny mode make sense? */ if (deny & ~NFS4_SHARE_DENY_BOTH) return nfserr_inval; if ((deny & NFS4_SHARE_DENY_READ) && atomic_read(&fp->fi_access[O_RDONLY])) return nfserr_share_denied; if ((deny & NFS4_SHARE_DENY_WRITE) && atomic_read(&fp->fi_access[O_WRONLY])) return nfserr_share_denied; } return nfs_ok; } static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag) { might_lock(&fp->fi_lock); if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) { struct nfsd_file *f1 = NULL; struct nfsd_file *f2 = NULL; swap(f1, fp->fi_fds[oflag]); if (atomic_read(&fp->fi_access[1 - oflag]) == 0) swap(f2, fp->fi_fds[O_RDWR]); spin_unlock(&fp->fi_lock); if (f1) nfsd_file_put(f1); if (f2) nfsd_file_put(f2); } } static void nfs4_file_put_access(struct nfs4_file *fp, u32 access) { WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH); if (access & NFS4_SHARE_ACCESS_WRITE) __nfs4_file_put_access(fp, O_WRONLY); if (access & NFS4_SHARE_ACCESS_READ) __nfs4_file_put_access(fp, O_RDONLY); } /* * Allocate a new open/delegation state counter. This is needed for * pNFS for proper return on close semantics. * * Note that we only allocate it for pNFS-enabled exports, otherwise * all pointers to struct nfs4_clnt_odstate are always NULL. 
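 *
 * get_clnt_odstate() and put_clnt_odstate() below both accept a NULL
 * pointer and simply return, so callers on non-pNFS exports need no
 * special casing.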
*/ static struct nfs4_clnt_odstate * alloc_clnt_odstate(struct nfs4_client *clp) { struct nfs4_clnt_odstate *co; co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL); if (co) { co->co_client = clp; refcount_set(&co->co_odcount, 1); } return co; } static void hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co) { struct nfs4_file *fp = co->co_file; lockdep_assert_held(&fp->fi_lock); list_add(&co->co_perfile, &fp->fi_clnt_odstate); } static inline void get_clnt_odstate(struct nfs4_clnt_odstate *co) { if (co) refcount_inc(&co->co_odcount); } static void put_clnt_odstate(struct nfs4_clnt_odstate *co) { struct nfs4_file *fp; if (!co) return; fp = co->co_file; if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) { list_del(&co->co_perfile); spin_unlock(&fp->fi_lock); nfsd4_return_all_file_layouts(co->co_client, fp); kmem_cache_free(odstate_slab, co); } } static struct nfs4_clnt_odstate * find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new) { struct nfs4_clnt_odstate *co; struct nfs4_client *cl; if (!new) return NULL; cl = new->co_client; spin_lock(&fp->fi_lock); list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) { if (co->co_client == cl) { get_clnt_odstate(co); goto out; } } co = new; co->co_file = fp; hash_clnt_odstate_locked(new); out: spin_unlock(&fp->fi_lock); return co; } struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab, void (*sc_free)(struct nfs4_stid *)) { struct nfs4_stid *stid; int new_id; stid = kmem_cache_zalloc(slab, GFP_KERNEL); if (!stid) return NULL; idr_preload(GFP_KERNEL); spin_lock(&cl->cl_lock); /* Reserving 0 for start of file in nfsdfs "states" file: */ new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT); spin_unlock(&cl->cl_lock); idr_preload_end(); if (new_id < 0) goto out_free; stid->sc_free = sc_free; stid->sc_client = cl; stid->sc_stateid.si_opaque.so_id = new_id; stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid; /* Will be incremented before return to client: */ refcount_set(&stid->sc_count, 1); spin_lock_init(&stid->sc_lock); INIT_LIST_HEAD(&stid->sc_cp_list); /* * It shouldn't be a problem to reuse an opaque stateid value. * I don't think it is for 4.1. But with 4.0 I worry that, for * example, a stray write retransmission could be accepted by * the server when it should have been rejected. Therefore, * adopt a trick from the sctp code to attempt to maximize the * amount of time until an id is reused, by ensuring they always * "increase" (mod INT_MAX): */ return stid; out_free: kmem_cache_free(slab, stid); return NULL; } /* * Create a unique stateid_t to represent each COPY. 
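 *
 * nfs4_init_cp_state() below fills the clientid part of the stateid with
 * the server boot time and the server-to-server copy client ID
 * (nn->s2s_cp_cl_id), and draws so_id from the nn->s2s_cp_stateids IDR
 * using idr_alloc_cyclic().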
*/ static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid, unsigned char cs_type) { int new_id; stid->cs_stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time; stid->cs_stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id; idr_preload(GFP_KERNEL); spin_lock(&nn->s2s_cp_lock); new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT); stid->cs_stid.si_opaque.so_id = new_id; stid->cs_stid.si_generation = 1; spin_unlock(&nn->s2s_cp_lock); idr_preload_end(); if (new_id < 0) return 0; stid->cs_type = cs_type; return 1; } int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy) { return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID); } struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn, struct nfs4_stid *p_stid) { struct nfs4_cpntf_state *cps; cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL); if (!cps) return NULL; cps->cpntf_time = ktime_get_boottime_seconds(); refcount_set(&cps->cp_stateid.cs_count, 1); if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID)) goto out_free; spin_lock(&nn->s2s_cp_lock); list_add(&cps->cp_list, &p_stid->sc_cp_list); spin_unlock(&nn->s2s_cp_lock); return cps; out_free: kfree(cps); return NULL; } void nfs4_free_copy_state(struct nfsd4_copy *copy) { struct nfsd_net *nn; if (copy->cp_stateid.cs_type != NFS4_COPY_STID) return; nn = net_generic(copy->cp_clp->net, nfsd_net_id); spin_lock(&nn->s2s_cp_lock); idr_remove(&nn->s2s_cp_stateids, copy->cp_stateid.cs_stid.si_opaque.so_id); spin_unlock(&nn->s2s_cp_lock); } static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid) { struct nfs4_cpntf_state *cps; struct nfsd_net *nn; nn = net_generic(net, nfsd_net_id); spin_lock(&nn->s2s_cp_lock); while (!list_empty(&stid->sc_cp_list)) { cps = list_first_entry(&stid->sc_cp_list, struct nfs4_cpntf_state, cp_list); _free_cpntf_state_locked(nn, cps); } spin_unlock(&nn->s2s_cp_lock); } static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp) { struct nfs4_stid *stid; stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid); if (!stid) return NULL; return openlockstateid(stid); } static void nfs4_free_deleg(struct nfs4_stid *stid) { struct nfs4_delegation *dp = delegstateid(stid); WARN_ON_ONCE(!list_empty(&stid->sc_cp_list)); WARN_ON_ONCE(!list_empty(&dp->dl_perfile)); WARN_ON_ONCE(!list_empty(&dp->dl_perclnt)); WARN_ON_ONCE(!list_empty(&dp->dl_recall_lru)); kmem_cache_free(deleg_slab, stid); atomic_long_dec(&num_delegations); } /* * When we recall a delegation, we should be careful not to hand it * out again straight away. * To ensure this we keep a pair of bloom filters ('new' and 'old') * in which the filehandles of recalled delegations are "stored". * If a filehandle appears in either filter, a delegation is blocked. * When a delegation is recalled, the filehandle is stored in the "new" * filter. * Every 30 seconds we swap the filters and clear the "new" one, * unless both are empty of course. * * Each filter is 256 bits. We hash the filehandle to 32 bits and use the * low 3 bytes as hash-table indices. * * 'blocked_delegations_lock', which is always taken in block_delegations(), * is used to manage concurrent access. Testing does not need the lock * except when swapping the two filters.
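 *
 * Concretely, delegation_blocked() below derives all three bit indices
 * from one jhash() of the filehandle, roughly:
 *
 *	hash = jhash(&fh->fh_raw, fh->fh_size, 0);
 *	blocked = test_bit(hash & 255, set) &&
 *		  test_bit((hash >> 8) & 255, set) &&
 *		  test_bit((hash >> 16) & 255, set);
 *
 * and a delegation is blocked only if all three bits are set in the same
 * filter (the test is applied to both the "new" and the "old" filter).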
*/ static DEFINE_SPINLOCK(blocked_delegations_lock); static struct bloom_pair { int entries, old_entries; time64_t swap_time; int new; /* index into 'set' */ DECLARE_BITMAP(set[2], 256); } blocked_delegations; static int delegation_blocked(struct knfsd_fh *fh) { u32 hash; struct bloom_pair *bd = &blocked_delegations; if (bd->entries == 0) return 0; if (ktime_get_seconds() - bd->swap_time > 30) { spin_lock(&blocked_delegations_lock); if (ktime_get_seconds() - bd->swap_time > 30) { bd->entries -= bd->old_entries; bd->old_entries = bd->entries; bd->new = 1-bd->new; memset(bd->set[bd->new], 0, sizeof(bd->set[0])); bd->swap_time = ktime_get_seconds(); } spin_unlock(&blocked_delegations_lock); } hash = jhash(&fh->fh_raw, fh->fh_size, 0); if (test_bit(hash&255, bd->set[0]) && test_bit((hash>>8)&255, bd->set[0]) && test_bit((hash>>16)&255, bd->set[0])) return 1; if (test_bit(hash&255, bd->set[1]) && test_bit((hash>>8)&255, bd->set[1]) && test_bit((hash>>16)&255, bd->set[1])) return 1; return 0; } static void block_delegations(struct knfsd_fh *fh) { u32 hash; struct bloom_pair *bd = &blocked_delegations; hash = jhash(&fh->fh_raw, fh->fh_size, 0); spin_lock(&blocked_delegations_lock); __set_bit(hash&255, bd->set[bd->new]); __set_bit((hash>>8)&255, bd->set[bd->new]); __set_bit((hash>>16)&255, bd->set[bd->new]); if (bd->entries == 0) bd->swap_time = ktime_get_seconds(); bd->entries += 1; spin_unlock(&blocked_delegations_lock); } static struct nfs4_delegation * alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate, u32 dl_type) { struct nfs4_delegation *dp; struct nfs4_stid *stid; long n; dprintk("NFSD alloc_init_deleg\n"); n = atomic_long_inc_return(&num_delegations); if (n < 0 || n > max_delegations) goto out_dec; if (delegation_blocked(&fp->fi_fhandle)) goto out_dec; stid = nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg); if (stid == NULL) goto out_dec; dp = delegstateid(stid); /* * delegation seqid's are never incremented.
The 4.1 special * meaning of seqid 0 isn't meaningful, really, but let's avoid * 0 anyway just for consistency and use 1: */ dp->dl_stid.sc_stateid.si_generation = 1; INIT_LIST_HEAD(&dp->dl_perfile); INIT_LIST_HEAD(&dp->dl_perclnt); INIT_LIST_HEAD(&dp->dl_recall_lru); dp->dl_clnt_odstate = odstate; get_clnt_odstate(odstate); dp->dl_type = dl_type; dp->dl_retries = 1; dp->dl_recalled = false; nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client, &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL); get_nfs4_file(fp); dp->dl_stid.sc_file = fp; return dp; out_dec: atomic_long_dec(&num_delegations); return NULL; } void nfs4_put_stid(struct nfs4_stid *s) { struct nfs4_file *fp = s->sc_file; struct nfs4_client *clp = s->sc_client; might_lock(&clp->cl_lock); if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) { wake_up_all(&close_wq); return; } idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id); nfs4_free_cpntf_statelist(clp->net, s); spin_unlock(&clp->cl_lock); s->sc_free(s); if (fp) put_nfs4_file(fp); } void nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid) { stateid_t *src = &stid->sc_stateid; spin_lock(&stid->sc_lock); if (unlikely(++src->si_generation == 0)) src->si_generation = 1; memcpy(dst, src, sizeof(*dst)); spin_unlock(&stid->sc_lock); } static void put_deleg_file(struct nfs4_file *fp) { struct nfsd_file *nf = NULL; spin_lock(&fp->fi_lock); if (--fp->fi_delegees == 0) swap(nf, fp->fi_deleg_file); spin_unlock(&fp->fi_lock); if (nf) nfsd_file_put(nf); } static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp) { struct nfs4_file *fp = dp->dl_stid.sc_file; struct nfsd_file *nf = fp->fi_deleg_file; WARN_ON_ONCE(!fp->fi_delegees); vfs_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp); put_deleg_file(fp); } static void destroy_unhashed_deleg(struct nfs4_delegation *dp) { put_clnt_odstate(dp->dl_clnt_odstate); nfs4_unlock_deleg_lease(dp); nfs4_put_stid(&dp->dl_stid); } void nfs4_unhash_stid(struct nfs4_stid *s) { s->sc_type = 0; } /** * nfs4_delegation_exists - Discover if this delegation already exists * @clp: a pointer to the nfs4_client we're granting a delegation to * @fp: a pointer to the nfs4_file we're granting a delegation on * * Return: * On success: true iff an existing delegation is found */ static bool nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp) { struct nfs4_delegation *searchdp = NULL; struct nfs4_client *searchclp = NULL; lockdep_assert_held(&state_lock); lockdep_assert_held(&fp->fi_lock); list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) { searchclp = searchdp->dl_stid.sc_client; if (clp == searchclp) { return true; } } return false; } /** * hash_delegation_locked - Add a delegation to the appropriate lists * @dp: a pointer to the nfs4_delegation we are adding. * @fp: a pointer to the nfs4_file we're granting a delegation on * * Return: * On success: NULL if the delegation was successfully hashed. * * On error: -EAGAIN if one was previously granted to this * nfs4_client for this nfs4_file. Delegation is not hashed. 
* */ static int hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp) { struct nfs4_client *clp = dp->dl_stid.sc_client; lockdep_assert_held(&state_lock); lockdep_assert_held(&fp->fi_lock); if (nfs4_delegation_exists(clp, fp)) return -EAGAIN; refcount_inc(&dp->dl_stid.sc_count); dp->dl_stid.sc_type = NFS4_DELEG_STID; list_add(&dp->dl_perfile, &fp->fi_delegations); list_add(&dp->dl_perclnt, &clp->cl_delegations); return 0; } static bool delegation_hashed(struct nfs4_delegation *dp) { return !(list_empty(&dp->dl_perfile)); } static bool unhash_delegation_locked(struct nfs4_delegation *dp) { struct nfs4_file *fp = dp->dl_stid.sc_file; lockdep_assert_held(&state_lock); if (!delegation_hashed(dp)) return false; dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID; /* Ensure that deleg break won't try to requeue it */ ++dp->dl_time; spin_lock(&fp->fi_lock); list_del_init(&dp->dl_perclnt); list_del_init(&dp->dl_recall_lru); list_del_init(&dp->dl_perfile); spin_unlock(&fp->fi_lock); return true; } static void destroy_delegation(struct nfs4_delegation *dp) { bool unhashed; spin_lock(&state_lock); unhashed = unhash_delegation_locked(dp); spin_unlock(&state_lock); if (unhashed) destroy_unhashed_deleg(dp); } static void revoke_delegation(struct nfs4_delegation *dp) { struct nfs4_client *clp = dp->dl_stid.sc_client; WARN_ON(!list_empty(&dp->dl_recall_lru)); trace_nfsd_stid_revoke(&dp->dl_stid); if (clp->cl_minorversion) { spin_lock(&clp->cl_lock); dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID; refcount_inc(&dp->dl_stid.sc_count); list_add(&dp->dl_recall_lru, &clp->cl_revoked); spin_unlock(&clp->cl_lock); } destroy_unhashed_deleg(dp); } /* * SETCLIENTID state */ static unsigned int clientid_hashval(u32 id) { return id & CLIENT_HASH_MASK; } static unsigned int clientstr_hashval(struct xdr_netobj name) { return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK; } /* * A stateid that had a deny mode associated with it is being released * or downgraded. Recalculate the deny mode on the file. 
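 *
 * (Illustration: if one remaining stateid still denies READ and another
 * still denies WRITE, the recomputed fi_share_deny is the union of the
 * two, i.e. "deny both"; once no stateid denies anything it drops back
 * to zero.)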
*/ static void recalculate_deny_mode(struct nfs4_file *fp) { struct nfs4_ol_stateid *stp; spin_lock(&fp->fi_lock); fp->fi_share_deny = 0; list_for_each_entry(stp, &fp->fi_stateids, st_perfile) fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap); spin_unlock(&fp->fi_lock); } static void reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp) { int i; bool change = false; for (i = 1; i < 4; i++) { if ((i & deny) != i) { change = true; clear_deny(i, stp); } } /* Recalculate per-file deny mode if there was a change */ if (change) recalculate_deny_mode(stp->st_stid.sc_file); } /* release all access and file references for a given stateid */ static void release_all_access(struct nfs4_ol_stateid *stp) { int i; struct nfs4_file *fp = stp->st_stid.sc_file; if (fp && stp->st_deny_bmap != 0) recalculate_deny_mode(fp); for (i = 1; i < 4; i++) { if (test_access(i, stp)) nfs4_file_put_access(stp->st_stid.sc_file, i); clear_access(i, stp); } } static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop) { kfree(sop->so_owner.data); sop->so_ops->so_free(sop); } static void nfs4_put_stateowner(struct nfs4_stateowner *sop) { struct nfs4_client *clp = sop->so_client; might_lock(&clp->cl_lock); if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock)) return; sop->so_ops->so_unhash(sop); spin_unlock(&clp->cl_lock); nfs4_free_stateowner(sop); } static bool nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp) { return list_empty(&stp->st_perfile); } static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp) { struct nfs4_file *fp = stp->st_stid.sc_file; lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock); if (list_empty(&stp->st_perfile)) return false; spin_lock(&fp->fi_lock); list_del_init(&stp->st_perfile); spin_unlock(&fp->fi_lock); list_del(&stp->st_perstateowner); return true; } static void nfs4_free_ol_stateid(struct nfs4_stid *stid) { struct nfs4_ol_stateid *stp = openlockstateid(stid); put_clnt_odstate(stp->st_clnt_odstate); release_all_access(stp); if (stp->st_stateowner) nfs4_put_stateowner(stp->st_stateowner); WARN_ON(!list_empty(&stid->sc_cp_list)); kmem_cache_free(stateid_slab, stid); } static void nfs4_free_lock_stateid(struct nfs4_stid *stid) { struct nfs4_ol_stateid *stp = openlockstateid(stid); struct nfs4_lockowner *lo = lockowner(stp->st_stateowner); struct nfsd_file *nf; nf = find_any_file(stp->st_stid.sc_file); if (nf) { get_file(nf->nf_file); filp_close(nf->nf_file, (fl_owner_t)lo); nfsd_file_put(nf); } nfs4_free_ol_stateid(stid); } /* * Put the persistent reference to an already unhashed generic stateid, while * holding the cl_lock. If it's the last reference, then put it onto the * reaplist for later destruction. 
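 *
 * Typical calling pattern (compare release_open_stateid() below):
 *
 *	spin_lock(&clp->cl_lock);
 *	if (unhash_open_stateid(stp, &reaplist))
 *		put_ol_stateid_locked(stp, &reaplist);
 *	spin_unlock(&clp->cl_lock);
 *	free_ol_stateid_reaplist(&reaplist);
 *
 * i.e. the sc_free() call, which may sleep, only happens once cl_lock
 * has been dropped.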
*/ static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp, struct list_head *reaplist) { struct nfs4_stid *s = &stp->st_stid; struct nfs4_client *clp = s->sc_client; lockdep_assert_held(&clp->cl_lock); WARN_ON_ONCE(!list_empty(&stp->st_locks)); if (!refcount_dec_and_test(&s->sc_count)) { wake_up_all(&close_wq); return; } idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id); list_add(&stp->st_locks, reaplist); } static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp) { lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); if (!unhash_ol_stateid(stp)) return false; list_del_init(&stp->st_locks); nfs4_unhash_stid(&stp->st_stid); return true; } static void release_lock_stateid(struct nfs4_ol_stateid *stp) { struct nfs4_client *clp = stp->st_stid.sc_client; bool unhashed; spin_lock(&clp->cl_lock); unhashed = unhash_lock_stateid(stp); spin_unlock(&clp->cl_lock); if (unhashed) nfs4_put_stid(&stp->st_stid); } static void unhash_lockowner_locked(struct nfs4_lockowner *lo) { struct nfs4_client *clp = lo->lo_owner.so_client; lockdep_assert_held(&clp->cl_lock); list_del_init(&lo->lo_owner.so_strhash); } /* * Free a list of generic stateids that were collected earlier after being * fully unhashed. */ static void free_ol_stateid_reaplist(struct list_head *reaplist) { struct nfs4_ol_stateid *stp; struct nfs4_file *fp; might_sleep(); while (!list_empty(reaplist)) { stp = list_first_entry(reaplist, struct nfs4_ol_stateid, st_locks); list_del(&stp->st_locks); fp = stp->st_stid.sc_file; stp->st_stid.sc_free(&stp->st_stid); if (fp) put_nfs4_file(fp); } } static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp, struct list_head *reaplist) { struct nfs4_ol_stateid *stp; lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock); while (!list_empty(&open_stp->st_locks)) { stp = list_entry(open_stp->st_locks.next, struct nfs4_ol_stateid, st_locks); WARN_ON(!unhash_lock_stateid(stp)); put_ol_stateid_locked(stp, reaplist); } } static bool unhash_open_stateid(struct nfs4_ol_stateid *stp, struct list_head *reaplist) { lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); if (!unhash_ol_stateid(stp)) return false; release_open_stateid_locks(stp, reaplist); return true; } static void release_open_stateid(struct nfs4_ol_stateid *stp) { LIST_HEAD(reaplist); spin_lock(&stp->st_stid.sc_client->cl_lock); if (unhash_open_stateid(stp, &reaplist)) put_ol_stateid_locked(stp, &reaplist); spin_unlock(&stp->st_stid.sc_client->cl_lock); free_ol_stateid_reaplist(&reaplist); } static void unhash_openowner_locked(struct nfs4_openowner *oo) { struct nfs4_client *clp = oo->oo_owner.so_client; lockdep_assert_held(&clp->cl_lock); list_del_init(&oo->oo_owner.so_strhash); list_del_init(&oo->oo_perclient); } static void release_last_closed_stateid(struct nfs4_openowner *oo) { struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net, nfsd_net_id); struct nfs4_ol_stateid *s; spin_lock(&nn->client_lock); s = oo->oo_last_closed_stid; if (s) { list_del_init(&oo->oo_close_lru); oo->oo_last_closed_stid = NULL; } spin_unlock(&nn->client_lock); if (s) nfs4_put_stid(&s->st_stid); } static void release_openowner(struct nfs4_openowner *oo) { struct nfs4_ol_stateid *stp; struct nfs4_client *clp = oo->oo_owner.so_client; struct list_head reaplist; INIT_LIST_HEAD(&reaplist); spin_lock(&clp->cl_lock); unhash_openowner_locked(oo); while (!list_empty(&oo->oo_owner.so_stateids)) { stp = list_first_entry(&oo->oo_owner.so_stateids, struct nfs4_ol_stateid, st_perstateowner); if (unhash_open_stateid(stp, &reaplist)) 
put_ol_stateid_locked(stp, &reaplist); } spin_unlock(&clp->cl_lock); free_ol_stateid_reaplist(&reaplist); release_last_closed_stateid(oo); nfs4_put_stateowner(&oo->oo_owner); } static inline int hash_sessionid(struct nfs4_sessionid *sessionid) { struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid; return sid->sequence % SESSION_HASH_SIZE; } #ifdef CONFIG_SUNRPC_DEBUG static inline void dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid) { u32 *ptr = (u32 *)(&sessionid->data[0]); dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]); } #else static inline void dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid) { } #endif /* * Bump the seqid on cstate->replay_owner, and clear replay_owner if it * won't be used for replay. */ void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr) { struct nfs4_stateowner *so = cstate->replay_owner; if (nfserr == nfserr_replay_me) return; if (!seqid_mutating_err(ntohl(nfserr))) { nfsd4_cstate_clear_replay(cstate); return; } if (!so) return; if (so->so_is_open_owner) release_last_closed_stateid(openowner(so)); so->so_seqid++; return; } static void gen_sessionid(struct nfsd4_session *ses) { struct nfs4_client *clp = ses->se_client; struct nfsd4_sessionid *sid; sid = (struct nfsd4_sessionid *)ses->se_sessionid.data; sid->clientid = clp->cl_clientid; sid->sequence = current_sessionid++; sid->reserved = 0; } /* * The protocol defines ca_maxresponssize_cached to include the size of * the rpc header, but all we need to cache is the data starting after * the end of the initial SEQUENCE operation--the rest we regenerate * each time. Therefore we can advertise a ca_maxresponssize_cached * value that is the number of bytes in our cache plus a few additional * bytes. In order to stay on the safe side, and not promise more than * we can cache, those additional bytes must be the minimum possible: 24 * bytes of rpc header (xid through accept state, with AUTH_NULL * verifier), 12 for the compound header (with zero-length tag), and 44 * for the SEQUENCE op response: */ #define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44) static void free_session_slots(struct nfsd4_session *ses) { int i; for (i = 0; i < ses->se_fchannel.maxreqs; i++) { free_svc_cred(&ses->se_slots[i]->sl_cred); kfree(ses->se_slots[i]); } } /* * We don't actually need to cache the rpc and session headers, so we * can allocate a little less for each slot: */ static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca) { u32 size; if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ) size = 0; else size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ; return size + sizeof(struct nfsd4_slot); } /* * XXX: If we run out of reserved DRC memory we could (up to a point) * re-negotiate active sessions and reduce their slot usage to make * room for new connections. For now we just fail the create session. */ static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn) { u32 slotsize = slot_bytes(ca); u32 num = ca->maxreqs; unsigned long avail, total_avail; unsigned int scale_factor; spin_lock(&nfsd_drc_lock); if (nfsd_drc_max_mem > nfsd_drc_mem_used) total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used; else /* We have handed out more space than we chose in * set_max_drc() to allow. That isn't really a * problem as long as that doesn't make us think we * have lots more due to integer overflow. 
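 * (Hence total_avail is simply forced to 0 below rather than letting
 * the unsigned subtraction wrap around to a huge value.)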
*/ total_avail = 0; avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail); /* * Never use more than a fraction of the remaining memory, * unless it's the only way to give this client a slot. * The chosen fraction is either 1/8 or 1/number of threads, * whichever is smaller. This ensures there are adequate * slots to support multiple clients per thread. * Give the client one slot even if that would require * over-allocation--it is better than failure. */ scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads); avail = clamp_t(unsigned long, avail, slotsize, total_avail/scale_factor); num = min_t(int, num, avail / slotsize); num = max_t(int, num, 1); nfsd_drc_mem_used += num * slotsize; spin_unlock(&nfsd_drc_lock); return num; } static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca) { int slotsize = slot_bytes(ca); spin_lock(&nfsd_drc_lock); nfsd_drc_mem_used -= slotsize * ca->maxreqs; spin_unlock(&nfsd_drc_lock); } static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs, struct nfsd4_channel_attrs *battrs) { int numslots = fattrs->maxreqs; int slotsize = slot_bytes(fattrs); struct nfsd4_session *new; int i; BUILD_BUG_ON(struct_size(new, se_slots, NFSD_MAX_SLOTS_PER_SESSION) > PAGE_SIZE); new = kzalloc(struct_size(new, se_slots, numslots), GFP_KERNEL); if (!new) return NULL; /* allocate each struct nfsd4_slot and data cache in one piece */ for (i = 0; i < numslots; i++) { new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL); if (!new->se_slots[i]) goto out_free; } memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs)); memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs)); return new; out_free: while (i--) kfree(new->se_slots[i]); kfree(new); return NULL; } static void free_conn(struct nfsd4_conn *c) { svc_xprt_put(c->cn_xprt); kfree(c); } static void nfsd4_conn_lost(struct svc_xpt_user *u) { struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user); struct nfs4_client *clp = c->cn_session->se_client; trace_nfsd_cb_lost(clp); spin_lock(&clp->cl_lock); if (!list_empty(&c->cn_persession)) { list_del(&c->cn_persession); free_conn(c); } nfsd4_probe_callback(clp); spin_unlock(&clp->cl_lock); } static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags) { struct nfsd4_conn *conn; conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL); if (!conn) return NULL; svc_xprt_get(rqstp->rq_xprt); conn->cn_xprt = rqstp->rq_xprt; conn->cn_flags = flags; INIT_LIST_HEAD(&conn->cn_xpt_user.list); return conn; } static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses) { conn->cn_session = ses; list_add(&conn->cn_persession, &ses->se_conns); } static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses) { struct nfs4_client *clp = ses->se_client; spin_lock(&clp->cl_lock); __nfsd4_hash_conn(conn, ses); spin_unlock(&clp->cl_lock); } static int nfsd4_register_conn(struct nfsd4_conn *conn) { conn->cn_xpt_user.callback = nfsd4_conn_lost; return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user); } static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses) { int ret; nfsd4_hash_conn(conn, ses); ret = nfsd4_register_conn(conn); if (ret) /* oops; xprt is already down: */ nfsd4_conn_lost(&conn->cn_xpt_user); /* We may have gained or lost a callback channel: */ nfsd4_probe_callback_sync(ses->se_client); } static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses) { u32 dir = 
NFS4_CDFC4_FORE; if (cses->flags & SESSION4_BACK_CHAN) dir |= NFS4_CDFC4_BACK; return alloc_conn(rqstp, dir); } /* must be called under client_lock */ static void nfsd4_del_conns(struct nfsd4_session *s) { struct nfs4_client *clp = s->se_client; struct nfsd4_conn *c; spin_lock(&clp->cl_lock); while (!list_empty(&s->se_conns)) { c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession); list_del_init(&c->cn_persession); spin_unlock(&clp->cl_lock); unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user); free_conn(c); spin_lock(&clp->cl_lock); } spin_unlock(&clp->cl_lock); } static void __free_session(struct nfsd4_session *ses) { free_session_slots(ses); kfree(ses); } static void free_session(struct nfsd4_session *ses) { nfsd4_del_conns(ses); nfsd4_put_drc_mem(&ses->se_fchannel); __free_session(ses); } static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses) { int idx; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); new->se_client = clp; gen_sessionid(new); INIT_LIST_HEAD(&new->se_conns); new->se_cb_seq_nr = 1; new->se_flags = cses->flags; new->se_cb_prog = cses->callback_prog; new->se_cb_sec = cses->cb_sec; atomic_set(&new->se_ref, 0); idx = hash_sessionid(&new->se_sessionid); list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]); spin_lock(&clp->cl_lock); list_add(&new->se_perclnt, &clp->cl_sessions); spin_unlock(&clp->cl_lock); { struct sockaddr *sa = svc_addr(rqstp); /* * This is a little silly; with sessions there's no real * use for the callback address. Use the peer address * as a reasonable default for now, but consider fixing * the rpc client not to require an address in the * future: */ rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa); clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa); } } /* caller must hold client_lock */ static struct nfsd4_session * __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net) { struct nfsd4_session *elem; int idx; struct nfsd_net *nn = net_generic(net, nfsd_net_id); lockdep_assert_held(&nn->client_lock); dump_sessionid(__func__, sessionid); idx = hash_sessionid(sessionid); /* Search in the appropriate list */ list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) { if (!memcmp(elem->se_sessionid.data, sessionid->data, NFS4_MAX_SESSIONID_LEN)) { return elem; } } dprintk("%s: session not found\n", __func__); return NULL; } static struct nfsd4_session * find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net, __be32 *ret) { struct nfsd4_session *session; __be32 status = nfserr_badsession; session = __find_in_sessionid_hashtbl(sessionid, net); if (!session) goto out; status = nfsd4_get_session_locked(session); if (status) session = NULL; out: *ret = status; return session; } /* caller must hold client_lock */ static void unhash_session(struct nfsd4_session *ses) { struct nfs4_client *clp = ses->se_client; struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); lockdep_assert_held(&nn->client_lock); list_del(&ses->se_hash); spin_lock(&ses->se_client->cl_lock); list_del(&ses->se_perclnt); spin_unlock(&ses->se_client->cl_lock); } /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */ static int STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn) { /* * We're assuming the clid was not given out from a boot * precisely 2^32 (about 136 years) before this one. 
That seems * a safe assumption: */ if (clid->cl_boot == (u32)nn->boot_time) return 0; trace_nfsd_clid_stale(clid); return 1; } /* * XXX Should we use a slab cache ? * This type of memory management is somewhat inefficient, but we use it * anyway since SETCLIENTID is not a common operation. */ static struct nfs4_client *alloc_client(struct xdr_netobj name, struct nfsd_net *nn) { struct nfs4_client *clp; int i; if (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) { mod_delayed_work(laundry_wq, &nn->laundromat_work, 0); return NULL; } clp = kmem_cache_zalloc(client_slab, GFP_KERNEL); if (clp == NULL) return NULL; xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL); if (clp->cl_name.data == NULL) goto err_no_name; clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE, sizeof(struct list_head), GFP_KERNEL); if (!clp->cl_ownerstr_hashtbl) goto err_no_hashtbl; for (i = 0; i < OWNER_HASH_SIZE; i++) INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]); INIT_LIST_HEAD(&clp->cl_sessions); idr_init(&clp->cl_stateids); atomic_set(&clp->cl_rpc_users, 0); clp->cl_cb_state = NFSD4_CB_UNKNOWN; clp->cl_state = NFSD4_ACTIVE; atomic_inc(&nn->nfs4_client_count); atomic_set(&clp->cl_delegs_in_recall, 0); INIT_LIST_HEAD(&clp->cl_idhash); INIT_LIST_HEAD(&clp->cl_openowners); INIT_LIST_HEAD(&clp->cl_delegations); INIT_LIST_HEAD(&clp->cl_lru); INIT_LIST_HEAD(&clp->cl_revoked); #ifdef CONFIG_NFSD_PNFS INIT_LIST_HEAD(&clp->cl_lo_states); #endif INIT_LIST_HEAD(&clp->async_copies); spin_lock_init(&clp->async_lock); spin_lock_init(&clp->cl_lock); rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); return clp; err_no_hashtbl: kfree(clp->cl_name.data); err_no_name: kmem_cache_free(client_slab, clp); return NULL; } static void __free_client(struct kref *k) { struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref); struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs); free_svc_cred(&clp->cl_cred); kfree(clp->cl_ownerstr_hashtbl); kfree(clp->cl_name.data); kfree(clp->cl_nii_domain.data); kfree(clp->cl_nii_name.data); idr_destroy(&clp->cl_stateids); kfree(clp->cl_ra); kmem_cache_free(client_slab, clp); } static void drop_client(struct nfs4_client *clp) { kref_put(&clp->cl_nfsdfs.cl_ref, __free_client); } static void free_client(struct nfs4_client *clp) { while (!list_empty(&clp->cl_sessions)) { struct nfsd4_session *ses; ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, se_perclnt); list_del(&ses->se_perclnt); WARN_ON_ONCE(atomic_read(&ses->se_ref)); free_session(ses); } rpc_destroy_wait_queue(&clp->cl_cb_waitq); if (clp->cl_nfsd_dentry) { nfsd_client_rmdir(clp->cl_nfsd_dentry); clp->cl_nfsd_dentry = NULL; wake_up_all(&expiry_wq); } drop_client(clp); } /* must be called under the client_lock */ static void unhash_client_locked(struct nfs4_client *clp) { struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); struct nfsd4_session *ses; lockdep_assert_held(&nn->client_lock); /* Mark the client as expired! 
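 * (cl_time == 0 is the marker the rest of this file uses to recognise
 * an already-expired client.)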
*/ clp->cl_time = 0; /* Make it invisible */ if (!list_empty(&clp->cl_idhash)) { list_del_init(&clp->cl_idhash); if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags)) rb_erase(&clp->cl_namenode, &nn->conf_name_tree); else rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); } list_del_init(&clp->cl_lru); spin_lock(&clp->cl_lock); list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) list_del_init(&ses->se_hash); spin_unlock(&clp->cl_lock); } static void unhash_client(struct nfs4_client *clp) { struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); spin_lock(&nn->client_lock); unhash_client_locked(clp); spin_unlock(&nn->client_lock); } static __be32 mark_client_expired_locked(struct nfs4_client *clp) { if (atomic_read(&clp->cl_rpc_users)) return nfserr_jukebox; unhash_client_locked(clp); return nfs_ok; } static void __destroy_client(struct nfs4_client *clp) { struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); int i; struct nfs4_openowner *oo; struct nfs4_delegation *dp; struct list_head reaplist; INIT_LIST_HEAD(&reaplist); spin_lock(&state_lock); while (!list_empty(&clp->cl_delegations)) { dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); WARN_ON(!unhash_delegation_locked(dp)); list_add(&dp->dl_recall_lru, &reaplist); } spin_unlock(&state_lock); while (!list_empty(&reaplist)) { dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru); list_del_init(&dp->dl_recall_lru); destroy_unhashed_deleg(dp); } while (!list_empty(&clp->cl_revoked)) { dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru); list_del_init(&dp->dl_recall_lru); nfs4_put_stid(&dp->dl_stid); } while (!list_empty(&clp->cl_openowners)) { oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient); nfs4_get_stateowner(&oo->oo_owner); release_openowner(oo); } for (i = 0; i < OWNER_HASH_SIZE; i++) { struct nfs4_stateowner *so, *tmp; list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i], so_strhash) { /* Should be no openowners at this point */ WARN_ON_ONCE(so->so_is_open_owner); remove_blocked_locks(lockowner(so)); } } nfsd4_return_all_client_layouts(clp); nfsd4_shutdown_copy(clp); nfsd4_shutdown_callback(clp); if (clp->cl_cb_conn.cb_xprt) svc_xprt_put(clp->cl_cb_conn.cb_xprt); atomic_add_unless(&nn->nfs4_client_count, -1, 0); nfsd4_dec_courtesy_client_count(nn, clp); free_client(clp); wake_up_all(&expiry_wq); } static void destroy_client(struct nfs4_client *clp) { unhash_client(clp); __destroy_client(clp); } static void inc_reclaim_complete(struct nfs4_client *clp) { struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); if (!nn->track_reclaim_completes) return; if (!nfsd4_find_reclaim_client(clp->cl_name, nn)) return; if (atomic_inc_return(&nn->nr_reclaim_complete) == nn->reclaim_str_hashtbl_size) { printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n", clp->net->ns.inum); nfsd4_end_grace(nn); } } static void expire_client(struct nfs4_client *clp) { unhash_client(clp); nfsd4_client_record_remove(clp); __destroy_client(clp); } static void copy_verf(struct nfs4_client *target, nfs4_verifier *source) { memcpy(target->cl_verifier.data, source->data, sizeof(target->cl_verifier.data)); } static void copy_clid(struct nfs4_client *target, struct nfs4_client *source) { target->cl_clientid.cl_boot = source->cl_clientid.cl_boot; target->cl_clientid.cl_id = source->cl_clientid.cl_id; } static int copy_cred(struct svc_cred *target, struct svc_cred *source) { target->cr_principal = 
kstrdup(source->cr_principal, GFP_KERNEL); target->cr_raw_principal = kstrdup(source->cr_raw_principal, GFP_KERNEL); target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL); if ((source->cr_principal && !target->cr_principal) || (source->cr_raw_principal && !target->cr_raw_principal) || (source->cr_targ_princ && !target->cr_targ_princ)) return -ENOMEM; target->cr_flavor = source->cr_flavor; target->cr_uid = source->cr_uid; target->cr_gid = source->cr_gid; target->cr_group_info = source->cr_group_info; get_group_info(target->cr_group_info); target->cr_gss_mech = source->cr_gss_mech; if (source->cr_gss_mech) gss_mech_get(source->cr_gss_mech); return 0; } static int compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2) { if (o1->len < o2->len) return -1; if (o1->len > o2->len) return 1; return memcmp(o1->data, o2->data, o1->len); } static int same_verf(nfs4_verifier *v1, nfs4_verifier *v2) { return 0 == memcmp(v1->data, v2->data, sizeof(v1->data)); } static int same_clid(clientid_t *cl1, clientid_t *cl2) { return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id); } static bool groups_equal(struct group_info *g1, struct group_info *g2) { int i; if (g1->ngroups != g2->ngroups) return false; for (i=0; i<g1->ngroups; i++) if (!gid_eq(g1->gid[i], g2->gid[i])) return false; return true; } /* * RFC 3530 language requires clid_inuse be returned when the * "principal" associated with a requests differs from that previously * used. We use uid, gid's, and gss principal string as our best * approximation. We also don't want to allow non-gss use of a client * established using gss: in theory cr_principal should catch that * change, but in practice cr_principal can be null even in the gss case * since gssd doesn't always pass down a principal string. */ static bool is_gss_cred(struct svc_cred *cr) { /* Is cr_flavor one of the gss "pseudoflavors"?: */ return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR); } static bool same_creds(struct svc_cred *cr1, struct svc_cred *cr2) { if ((is_gss_cred(cr1) != is_gss_cred(cr2)) || (!uid_eq(cr1->cr_uid, cr2->cr_uid)) || (!gid_eq(cr1->cr_gid, cr2->cr_gid)) || !groups_equal(cr1->cr_group_info, cr2->cr_group_info)) return false; /* XXX: check that cr_targ_princ fields match ? */ if (cr1->cr_principal == cr2->cr_principal) return true; if (!cr1->cr_principal || !cr2->cr_principal) return false; return 0 == strcmp(cr1->cr_principal, cr2->cr_principal); } static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp) { struct svc_cred *cr = &rqstp->rq_cred; u32 service; if (!cr->cr_gss_mech) return false; service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor); return service == RPC_GSS_SVC_INTEGRITY || service == RPC_GSS_SVC_PRIVACY; } bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp) { struct svc_cred *cr = &rqstp->rq_cred; if (!cl->cl_mach_cred) return true; if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech) return false; if (!svc_rqst_integrity_protected(rqstp)) return false; if (cl->cl_cred.cr_raw_principal) return 0 == strcmp(cl->cl_cred.cr_raw_principal, cr->cr_raw_principal); if (!cr->cr_principal) return false; return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal); } static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn) { __be32 verf[2]; /* * This is opaque to client, so no need to byte-swap. 
Use * __force to keep sparse happy */ verf[0] = (__force __be32)(u32)ktime_get_real_seconds(); verf[1] = (__force __be32)nn->clverifier_counter++; memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data)); } static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn) { clp->cl_clientid.cl_boot = (u32)nn->boot_time; clp->cl_clientid.cl_id = nn->clientid_counter++; gen_confirm(clp, nn); } static struct nfs4_stid * find_stateid_locked(struct nfs4_client *cl, stateid_t *t) { struct nfs4_stid *ret; ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id); if (!ret || !ret->sc_type) return NULL; return ret; } static struct nfs4_stid * find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask) { struct nfs4_stid *s; spin_lock(&cl->cl_lock); s = find_stateid_locked(cl, t); if (s != NULL) { if (typemask & s->sc_type) refcount_inc(&s->sc_count); else s = NULL; } spin_unlock(&cl->cl_lock); return s; } static struct nfs4_client *get_nfsdfs_clp(struct inode *inode) { struct nfsdfs_client *nc; nc = get_nfsdfs_client(inode); if (!nc) return NULL; return container_of(nc, struct nfs4_client, cl_nfsdfs); } static void seq_quote_mem(struct seq_file *m, char *data, int len) { seq_printf(m, "\""); seq_escape_mem(m, data, len, ESCAPE_HEX | ESCAPE_NAP | ESCAPE_APPEND, "\"\\"); seq_printf(m, "\""); } static const char *cb_state2str(int state) { switch (state) { case NFSD4_CB_UP: return "UP"; case NFSD4_CB_UNKNOWN: return "UNKNOWN"; case NFSD4_CB_DOWN: return "DOWN"; case NFSD4_CB_FAULT: return "FAULT"; } return "UNDEFINED"; } static int client_info_show(struct seq_file *m, void *v) { struct inode *inode = file_inode(m->file); struct nfs4_client *clp; u64 clid; clp = get_nfsdfs_clp(inode); if (!clp) return -ENXIO; memcpy(&clid, &clp->cl_clientid, sizeof(clid)); seq_printf(m, "clientid: 0x%llx\n", clid); seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr); if (clp->cl_state == NFSD4_COURTESY) seq_puts(m, "status: courtesy\n"); else if (clp->cl_state == NFSD4_EXPIRABLE) seq_puts(m, "status: expirable\n"); else if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags)) seq_puts(m, "status: confirmed\n"); else seq_puts(m, "status: unconfirmed\n"); seq_printf(m, "seconds from last renew: %lld\n", ktime_get_boottime_seconds() - clp->cl_time); seq_printf(m, "name: "); seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len); seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion); if (clp->cl_nii_domain.data) { seq_printf(m, "Implementation domain: "); seq_quote_mem(m, clp->cl_nii_domain.data, clp->cl_nii_domain.len); seq_printf(m, "\nImplementation name: "); seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len); seq_printf(m, "\nImplementation time: [%lld, %ld]\n", clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec); } seq_printf(m, "callback state: %s\n", cb_state2str(clp->cl_cb_state)); seq_printf(m, "callback address: %pISpc\n", &clp->cl_cb_conn.cb_addr); drop_client(clp); return 0; } DEFINE_SHOW_ATTRIBUTE(client_info); static void *states_start(struct seq_file *s, loff_t *pos) __acquires(&clp->cl_lock) { struct nfs4_client *clp = s->private; unsigned long id = *pos; void *ret; spin_lock(&clp->cl_lock); ret = idr_get_next_ul(&clp->cl_stateids, &id); *pos = id; return ret; } static void *states_next(struct seq_file *s, void *v, loff_t *pos) { struct nfs4_client *clp = s->private; unsigned long id = *pos; void *ret; id = *pos; id++; ret = idr_get_next_ul(&clp->cl_stateids, &id); *pos = id; return ret; } static void states_stop(struct seq_file *s, void *v) 
__releases(&clp->cl_lock) { struct nfs4_client *clp = s->private; spin_unlock(&clp->cl_lock); } static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f) { seq_printf(s, "filename: \"%pD2\"", f->nf_file); } static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f) { struct inode *inode = file_inode(f->nf_file); seq_printf(s, "superblock: \"%02x:%02x:%ld\"", MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev), inode->i_ino); } static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo) { seq_printf(s, "owner: "); seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len); } static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid) { seq_printf(s, "0x%.8x", stid->si_generation); seq_printf(s, "%12phN", &stid->si_opaque); } static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st) { struct nfs4_ol_stateid *ols; struct nfs4_file *nf; struct nfsd_file *file; struct nfs4_stateowner *oo; unsigned int access, deny; if (st->sc_type != NFS4_OPEN_STID && st->sc_type != NFS4_LOCK_STID) return 0; /* XXX: or SEQ_SKIP? */ ols = openlockstateid(st); oo = ols->st_stateowner; nf = st->sc_file; spin_lock(&nf->fi_lock); file = find_any_file_locked(nf); if (!file) goto out; seq_printf(s, "- "); nfs4_show_stateid(s, &st->sc_stateid); seq_printf(s, ": { type: open, "); access = bmap_to_share_mode(ols->st_access_bmap); deny = bmap_to_share_mode(ols->st_deny_bmap); seq_printf(s, "access: %s%s, ", access & NFS4_SHARE_ACCESS_READ ? "r" : "-", access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-"); seq_printf(s, "deny: %s%s, ", deny & NFS4_SHARE_ACCESS_READ ? "r" : "-", deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-"); nfs4_show_superblock(s, file); seq_printf(s, ", "); nfs4_show_fname(s, file); seq_printf(s, ", "); nfs4_show_owner(s, oo); seq_printf(s, " }\n"); out: spin_unlock(&nf->fi_lock); return 0; } static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st) { struct nfs4_ol_stateid *ols; struct nfs4_file *nf; struct nfsd_file *file; struct nfs4_stateowner *oo; ols = openlockstateid(st); oo = ols->st_stateowner; nf = st->sc_file; spin_lock(&nf->fi_lock); file = find_any_file_locked(nf); if (!file) goto out; seq_printf(s, "- "); nfs4_show_stateid(s, &st->sc_stateid); seq_printf(s, ": { type: lock, "); /* * Note: a lock stateid isn't really the same thing as a lock, * it's the locking state held by one owner on a file, and there * may be multiple (or no) lock ranges associated with it. * (Same for the matter is true of open stateids.) */ nfs4_show_superblock(s, file); /* XXX: open stateid? */ seq_printf(s, ", "); nfs4_show_fname(s, file); seq_printf(s, ", "); nfs4_show_owner(s, oo); seq_printf(s, " }\n"); out: spin_unlock(&nf->fi_lock); return 0; } static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st) { struct nfs4_delegation *ds; struct nfs4_file *nf; struct nfsd_file *file; ds = delegstateid(st); nf = st->sc_file; spin_lock(&nf->fi_lock); file = nf->fi_deleg_file; if (!file) goto out; seq_printf(s, "- "); nfs4_show_stateid(s, &st->sc_stateid); seq_printf(s, ": { type: deleg, "); /* Kinda dead code as long as we only support read delegs: */ seq_printf(s, "access: %s, ", ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w"); /* XXX: lease time, whether it's being recalled. 
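 * (presumably ds->dl_time and ds->dl_recalled would be the fields to
 * show here)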
*/ nfs4_show_superblock(s, file); seq_printf(s, ", "); nfs4_show_fname(s, file); seq_printf(s, " }\n"); out: spin_unlock(&nf->fi_lock); return 0; } static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st) { struct nfs4_layout_stateid *ls; struct nfsd_file *file; ls = container_of(st, struct nfs4_layout_stateid, ls_stid); file = ls->ls_file; seq_printf(s, "- "); nfs4_show_stateid(s, &st->sc_stateid); seq_printf(s, ": { type: layout, "); /* XXX: What else would be useful? */ nfs4_show_superblock(s, file); seq_printf(s, ", "); nfs4_show_fname(s, file); seq_printf(s, " }\n"); return 0; } static int states_show(struct seq_file *s, void *v) { struct nfs4_stid *st = v; switch (st->sc_type) { case NFS4_OPEN_STID: return nfs4_show_open(s, st); case NFS4_LOCK_STID: return nfs4_show_lock(s, st); case NFS4_DELEG_STID: return nfs4_show_deleg(s, st); case NFS4_LAYOUT_STID: return nfs4_show_layout(s, st); default: return 0; /* XXX: or SEQ_SKIP? */ } /* XXX: copy stateids? */ } static struct seq_operations states_seq_ops = { .start = states_start, .next = states_next, .stop = states_stop, .show = states_show }; static int client_states_open(struct inode *inode, struct file *file) { struct seq_file *s; struct nfs4_client *clp; int ret; clp = get_nfsdfs_clp(inode); if (!clp) return -ENXIO; ret = seq_open(file, &states_seq_ops); if (ret) return ret; s = file->private_data; s->private = clp; return 0; } static int client_opens_release(struct inode *inode, struct file *file) { struct seq_file *m = file->private_data; struct nfs4_client *clp = m->private; /* XXX: alternatively, we could get/drop in seq start/stop */ drop_client(clp); return seq_release(inode, file); } static const struct file_operations client_states_fops = { .open = client_states_open, .read = seq_read, .llseek = seq_lseek, .release = client_opens_release, }; /* * Normally we refuse to destroy clients that are in use, but here the * administrator is telling us to just do it. 
We also want to wait * so the caller has a guarantee that the client's locks are gone by * the time the write returns: */ static void force_expire_client(struct nfs4_client *clp) { struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); bool already_expired; trace_nfsd_clid_admin_expired(&clp->cl_clientid); spin_lock(&nn->client_lock); clp->cl_time = 0; spin_unlock(&nn->client_lock); wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0); spin_lock(&nn->client_lock); already_expired = list_empty(&clp->cl_lru); if (!already_expired) unhash_client_locked(clp); spin_unlock(&nn->client_lock); if (!already_expired) expire_client(clp); else wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL); } static ssize_t client_ctl_write(struct file *file, const char __user *buf, size_t size, loff_t *pos) { char *data; struct nfs4_client *clp; data = simple_transaction_get(file, buf, size); if (IS_ERR(data)) return PTR_ERR(data); if (size != 7 || 0 != memcmp(data, "expire\n", 7)) return -EINVAL; clp = get_nfsdfs_clp(file_inode(file)); if (!clp) return -ENXIO; force_expire_client(clp); drop_client(clp); return 7; } static const struct file_operations client_ctl_fops = { .write = client_ctl_write, .release = simple_transaction_release, }; static const struct tree_descr client_files[] = { [0] = {"info", &client_info_fops, S_IRUSR}, [1] = {"states", &client_states_fops, S_IRUSR}, [2] = {"ctl", &client_ctl_fops, S_IWUSR}, [3] = {""}, }; static int nfsd4_cb_recall_any_done(struct nfsd4_callback *cb, struct rpc_task *task) { trace_nfsd_cb_recall_any_done(cb, task); switch (task->tk_status) { case -NFS4ERR_DELAY: rpc_delay(task, 2 * HZ); return 0; default: return 1; } } static void nfsd4_cb_recall_any_release(struct nfsd4_callback *cb) { struct nfs4_client *clp = cb->cb_clp; struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); spin_lock(&nn->client_lock); clear_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags); put_client_renew_locked(clp); spin_unlock(&nn->client_lock); } static const struct nfsd4_callback_ops nfsd4_cb_recall_any_ops = { .done = nfsd4_cb_recall_any_done, .release = nfsd4_cb_recall_any_release, }; static struct nfs4_client *create_client(struct xdr_netobj name, struct svc_rqst *rqstp, nfs4_verifier *verf) { struct nfs4_client *clp; struct sockaddr *sa = svc_addr(rqstp); int ret; struct net *net = SVC_NET(rqstp); struct nfsd_net *nn = net_generic(net, nfsd_net_id); struct dentry *dentries[ARRAY_SIZE(client_files)]; clp = alloc_client(name, nn); if (clp == NULL) return NULL; ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred); if (ret) { free_client(clp); return NULL; } gen_clid(clp, nn); kref_init(&clp->cl_nfsdfs.cl_ref); nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL); clp->cl_time = ktime_get_boottime_seconds(); clear_bit(0, &clp->cl_cb_slot_busy); copy_verf(clp, verf); memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage)); clp->cl_cb_session = NULL; clp->net = net; clp->cl_nfsd_dentry = nfsd_client_mkdir( nn, &clp->cl_nfsdfs, clp->cl_clientid.cl_id - nn->clientid_base, client_files, dentries); clp->cl_nfsd_info_dentry = dentries[0]; if (!clp->cl_nfsd_dentry) { free_client(clp); return NULL; } clp->cl_ra = kzalloc(sizeof(*clp->cl_ra), GFP_KERNEL); if (!clp->cl_ra) { free_client(clp); return NULL; } clp->cl_ra_time = 0; nfsd4_init_cb(&clp->cl_ra->ra_cb, clp, &nfsd4_cb_recall_any_ops, NFSPROC4_CLNT_CB_RECALL_ANY); return clp; } static void add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root) { struct rb_node **new = &(root->rb_node), *parent = NULL; 
struct nfs4_client *clp; while (*new) { clp = rb_entry(*new, struct nfs4_client, cl_namenode); parent = *new; if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0) new = &((*new)->rb_left); else new = &((*new)->rb_right); } rb_link_node(&new_clp->cl_namenode, parent, new); rb_insert_color(&new_clp->cl_namenode, root); } static struct nfs4_client * find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root) { int cmp; struct rb_node *node = root->rb_node; struct nfs4_client *clp; while (node) { clp = rb_entry(node, struct nfs4_client, cl_namenode); cmp = compare_blob(&clp->cl_name, name); if (cmp > 0) node = node->rb_left; else if (cmp < 0) node = node->rb_right; else return clp; } return NULL; } static void add_to_unconfirmed(struct nfs4_client *clp) { unsigned int idhashval; struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); lockdep_assert_held(&nn->client_lock); clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); add_clp_to_name_tree(clp, &nn->unconf_name_tree); idhashval = clientid_hashval(clp->cl_clientid.cl_id); list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]); renew_client_locked(clp); } static void move_to_confirmed(struct nfs4_client *clp) { unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id); struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); lockdep_assert_held(&nn->client_lock); list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]); rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); add_clp_to_name_tree(clp, &nn->conf_name_tree); set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); trace_nfsd_clid_confirmed(&clp->cl_clientid); renew_client_locked(clp); } static struct nfs4_client * find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions) { struct nfs4_client *clp; unsigned int idhashval = clientid_hashval(clid->cl_id); list_for_each_entry(clp, &tbl[idhashval], cl_idhash) { if (same_clid(&clp->cl_clientid, clid)) { if ((bool)clp->cl_minorversion != sessions) return NULL; renew_client_locked(clp); return clp; } } return NULL; } static struct nfs4_client * find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn) { struct list_head *tbl = nn->conf_id_hashtbl; lockdep_assert_held(&nn->client_lock); return find_client_in_id_table(tbl, clid, sessions); } static struct nfs4_client * find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn) { struct list_head *tbl = nn->unconf_id_hashtbl; lockdep_assert_held(&nn->client_lock); return find_client_in_id_table(tbl, clid, sessions); } static bool clp_used_exchangeid(struct nfs4_client *clp) { return clp->cl_exchange_flags != 0; } static struct nfs4_client * find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn) { lockdep_assert_held(&nn->client_lock); return find_clp_in_name_tree(name, &nn->conf_name_tree); } static struct nfs4_client * find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn) { lockdep_assert_held(&nn->client_lock); return find_clp_in_name_tree(name, &nn->unconf_name_tree); } static void gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp) { struct nfs4_cb_conn *conn = &clp->cl_cb_conn; struct sockaddr *sa = svc_addr(rqstp); u32 scopeid = rpc_get_scope_id(sa); unsigned short expected_family; /* Currently, we only support tcp and tcp6 for the callback channel */ if (se->se_callback_netid_len == 3 && !memcmp(se->se_callback_netid_val, "tcp", 3)) expected_family = AF_INET; else if (se->se_callback_netid_len == 4 && 
!memcmp(se->se_callback_netid_val, "tcp6", 4)) expected_family = AF_INET6; else goto out_err; conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val, se->se_callback_addr_len, (struct sockaddr *)&conn->cb_addr, sizeof(conn->cb_addr)); if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family) goto out_err; if (conn->cb_addr.ss_family == AF_INET6) ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid; conn->cb_prog = se->se_callback_prog; conn->cb_ident = se->se_callback_ident; memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen); trace_nfsd_cb_args(clp, conn); return; out_err: conn->cb_addr.ss_family = AF_UNSPEC; conn->cb_addrlen = 0; trace_nfsd_cb_nodelegs(clp); return; } /* * Cache a reply. nfsd4_check_resp_size() has bounded the cache size. */ static void nfsd4_store_cache_entry(struct nfsd4_compoundres *resp) { struct xdr_buf *buf = resp->xdr->buf; struct nfsd4_slot *slot = resp->cstate.slot; unsigned int base; dprintk("--> %s slot %p\n", __func__, slot); slot->sl_flags |= NFSD4_SLOT_INITIALIZED; slot->sl_opcnt = resp->opcnt; slot->sl_status = resp->cstate.status; free_svc_cred(&slot->sl_cred); copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred); if (!nfsd4_cache_this(resp)) { slot->sl_flags &= ~NFSD4_SLOT_CACHED; return; } slot->sl_flags |= NFSD4_SLOT_CACHED; base = resp->cstate.data_offset; slot->sl_datalen = buf->len - base; if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen)) WARN(1, "%s: sessions DRC could not cache compound\n", __func__); return; } /* * Encode the replay sequence operation from the slot values. * If cachethis is FALSE encode the uncached rep error on the next * operation which sets resp->p and increments resp->opcnt for * nfs4svc_encode_compoundres. * */ static __be32 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args, struct nfsd4_compoundres *resp) { struct nfsd4_op *op; struct nfsd4_slot *slot = resp->cstate.slot; /* Encode the replayed sequence operation */ op = &args->ops[resp->opcnt - 1]; nfsd4_encode_operation(resp, op); if (slot->sl_flags & NFSD4_SLOT_CACHED) return op->status; if (args->opcnt == 1) { /* * The original operation wasn't a solo sequence--we * always cache those--so this retry must not match the * original: */ op->status = nfserr_seq_false_retry; } else { op = &args->ops[resp->opcnt++]; op->status = nfserr_retry_uncached_rep; nfsd4_encode_operation(resp, op); } return op->status; } /* * The sequence operation is not cached because we can use the slot and * session values. */ static __be32 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp, struct nfsd4_sequence *seq) { struct nfsd4_slot *slot = resp->cstate.slot; struct xdr_stream *xdr = resp->xdr; __be32 *p; __be32 status; dprintk("--> %s slot %p\n", __func__, slot); status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp); if (status) return status; p = xdr_reserve_space(xdr, slot->sl_datalen); if (!p) { WARN_ON_ONCE(1); return nfserr_serverfault; } xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen); xdr_commit_encode(xdr); resp->opcnt = slot->sl_opcnt; return slot->sl_status; } /* * Set the exchange_id flags returned by the server. */ static void nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid) { #ifdef CONFIG_NFSD_PNFS new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS; #else new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS; #endif /* Referrals are supported, Migration is not. 
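 * (Only EXCHGID4_FLAG_SUPP_MOVED_REFER is set just below;
 * EXCHGID4_FLAG_SUPP_MOVED_MIGR is left clear.)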
*/ new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER; /* set the wire flags to return to client. */ clid->flags = new->cl_exchange_flags; } static bool client_has_openowners(struct nfs4_client *clp) { struct nfs4_openowner *oo; list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) { if (!list_empty(&oo->oo_owner.so_stateids)) return true; } return false; } static bool client_has_state(struct nfs4_client *clp) { return client_has_openowners(clp) #ifdef CONFIG_NFSD_PNFS || !list_empty(&clp->cl_lo_states) #endif || !list_empty(&clp->cl_delegations) || !list_empty(&clp->cl_sessions) || !list_empty(&clp->async_copies); } static __be32 copy_impl_id(struct nfs4_client *clp, struct nfsd4_exchange_id *exid) { if (!exid->nii_domain.data) return 0; xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL); if (!clp->cl_nii_domain.data) return nfserr_jukebox; xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL); if (!clp->cl_nii_name.data) return nfserr_jukebox; clp->cl_nii_time = exid->nii_time; return 0; } __be32 nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { struct nfsd4_exchange_id *exid = &u->exchange_id; struct nfs4_client *conf, *new; struct nfs4_client *unconf = NULL; __be32 status; char addr_str[INET6_ADDRSTRLEN]; nfs4_verifier verf = exid->verifier; struct sockaddr *sa = svc_addr(rqstp); bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); rpc_ntop(sa, addr_str, sizeof(addr_str)); dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p " "ip_addr=%s flags %x, spa_how %u\n", __func__, rqstp, exid, exid->clname.len, exid->clname.data, addr_str, exid->flags, exid->spa_how); if (exid->flags & ~EXCHGID4_FLAG_MASK_A) return nfserr_inval; new = create_client(exid->clname, rqstp, &verf); if (new == NULL) return nfserr_jukebox; status = copy_impl_id(new, exid); if (status) goto out_nolock; switch (exid->spa_how) { case SP4_MACH_CRED: exid->spo_must_enforce[0] = 0; exid->spo_must_enforce[1] = ( 1 << (OP_BIND_CONN_TO_SESSION - 32) | 1 << (OP_EXCHANGE_ID - 32) | 1 << (OP_CREATE_SESSION - 32) | 1 << (OP_DESTROY_SESSION - 32) | 1 << (OP_DESTROY_CLIENTID - 32)); exid->spo_must_allow[0] &= (1 << (OP_CLOSE) | 1 << (OP_OPEN_DOWNGRADE) | 1 << (OP_LOCKU) | 1 << (OP_DELEGRETURN)); exid->spo_must_allow[1] &= ( 1 << (OP_TEST_STATEID - 32) | 1 << (OP_FREE_STATEID - 32)); if (!svc_rqst_integrity_protected(rqstp)) { status = nfserr_inval; goto out_nolock; } /* * Sometimes userspace doesn't give us a principal. * Which is a bug, really. 
Anyway, we can't enforce * MACH_CRED in that case, better to give up now: */ if (!new->cl_cred.cr_principal && !new->cl_cred.cr_raw_principal) { status = nfserr_serverfault; goto out_nolock; } new->cl_mach_cred = true; break; case SP4_NONE: break; default: /* checked by xdr code */ WARN_ON_ONCE(1); fallthrough; case SP4_SSV: status = nfserr_encr_alg_unsupp; goto out_nolock; } /* Cases below refer to rfc 5661 section 18.35.4: */ spin_lock(&nn->client_lock); conf = find_confirmed_client_by_name(&exid->clname, nn); if (conf) { bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred); bool verfs_match = same_verf(&verf, &conf->cl_verifier); if (update) { if (!clp_used_exchangeid(conf)) { /* buggy client */ status = nfserr_inval; goto out; } if (!nfsd4_mach_creds_match(conf, rqstp)) { status = nfserr_wrong_cred; goto out; } if (!creds_match) { /* case 9 */ status = nfserr_perm; goto out; } if (!verfs_match) { /* case 8 */ status = nfserr_not_same; goto out; } /* case 6 */ exid->flags |= EXCHGID4_FLAG_CONFIRMED_R; trace_nfsd_clid_confirmed_r(conf); goto out_copy; } if (!creds_match) { /* case 3 */ if (client_has_state(conf)) { status = nfserr_clid_inuse; trace_nfsd_clid_cred_mismatch(conf, rqstp); goto out; } goto out_new; } if (verfs_match) { /* case 2 */ conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; trace_nfsd_clid_confirmed_r(conf); goto out_copy; } /* case 5, client reboot */ trace_nfsd_clid_verf_mismatch(conf, rqstp, &verf); conf = NULL; goto out_new; } if (update) { /* case 7 */ status = nfserr_noent; goto out; } unconf = find_unconfirmed_client_by_name(&exid->clname, nn); if (unconf) /* case 4, possible retry or client restart */ unhash_client_locked(unconf); /* case 1, new owner ID */ trace_nfsd_clid_fresh(new); out_new: if (conf) { status = mark_client_expired_locked(conf); if (status) goto out; trace_nfsd_clid_replaced(&conf->cl_clientid); } new->cl_minorversion = cstate->minorversion; new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0]; new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1]; add_to_unconfirmed(new); swap(new, conf); out_copy: exid->clientid.cl_boot = conf->cl_clientid.cl_boot; exid->clientid.cl_id = conf->cl_clientid.cl_id; exid->seqid = conf->cl_cs_slot.sl_seqid + 1; nfsd4_set_ex_flags(conf, exid); dprintk("nfsd4_exchange_id seqid %d flags %x\n", conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags); status = nfs_ok; out: spin_unlock(&nn->client_lock); out_nolock: if (new) expire_client(new); if (unconf) { trace_nfsd_clid_expire_unconf(&unconf->cl_clientid); expire_client(unconf); } return status; } static __be32 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse) { dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid, slot_seqid); /* The slot is in use, and no response has been sent. */ if (slot_inuse) { if (seqid == slot_seqid) return nfserr_jukebox; else return nfserr_seq_misordered; } /* Note unsigned 32-bit arithmetic handles wraparound: */ if (likely(seqid == slot_seqid + 1)) return nfs_ok; if (seqid == slot_seqid) return nfserr_replay_cache; return nfserr_seq_misordered; } /* * Cache the create session result into the create session single DRC * slot cache by saving the xdr structure. sl_seqid has been set. * Do this for solo or embedded create session operations. 
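 *
 * (Illustration: when a confirmed client retransmits CREATE_SESSION
 * with an unchanged seqid, check_slot_seqid() returns
 * nfserr_replay_cache and nfsd4_create_session() answers from this
 * slot via nfsd4_replay_create_session() instead of building a new
 * session.)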
*/ static void nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses, struct nfsd4_clid_slot *slot, __be32 nfserr) { slot->sl_status = nfserr; memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses)); } static __be32 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses, struct nfsd4_clid_slot *slot) { memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses)); return slot->sl_status; } #define NFSD_MIN_REQ_HDR_SEQ_SZ ((\ 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \ 1 + /* MIN tag is length with zero, only length */ \ 3 + /* version, opcount, opcode */ \ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \ /* seqid, slotID, slotID, cache */ \ 4 ) * sizeof(__be32)) #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\ 2 + /* verifier: AUTH_NULL, length 0 */\ 1 + /* status */ \ 1 + /* MIN tag is length with zero, only length */ \ 3 + /* opcount, opcode, opstatus*/ \ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \ /* seqid, slotID, slotID, slotID, status */ \ 5 ) * sizeof(__be32)) static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn) { u32 maxrpc = nn->nfsd_serv->sv_max_mesg; if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ) return nfserr_toosmall; if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ) return nfserr_toosmall; ca->headerpadsz = 0; ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc); ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc); ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND); ca->maxresp_cached = min_t(u32, ca->maxresp_cached, NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ); ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION); /* * Note decreasing slot size below client's request may make it * difficult for client to function correctly, whereas * decreasing the number of slots will (just?) affect * performance. When short on memory we therefore prefer to * decrease number of slots instead of their size. Clients that * request larger slots than they need will get poor results: * Note that we always allow at least one slot, because our * accounting is soft and provides no guarantees either way. */ ca->maxreqs = nfsd4_get_drc_mem(ca, nn); return nfs_ok; } /* * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now. * These are based on similar macros in linux/sunrpc/msg_prot.h . */ #define RPC_MAX_HEADER_WITH_AUTH_SYS \ (RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK)) #define RPC_MAX_REPHEADER_WITH_AUTH_SYS \ (RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK)) #define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \ RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32)) #define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \ RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \ sizeof(__be32)) static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca) { ca->headerpadsz = 0; if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ) return nfserr_toosmall; if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ) return nfserr_toosmall; ca->maxresp_cached = 0; if (ca->maxops < 2) return nfserr_toosmall; return nfs_ok; } static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs) { switch (cbs->flavor) { case RPC_AUTH_NULL: case RPC_AUTH_UNIX: return nfs_ok; default: /* * GSS case: the spec doesn't allow us to return this * error. But it also doesn't allow us not to support * GSS. 
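 * (Only the AUTH_NULL and AUTH_UNIX callback flavors are accepted above.)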
* I'd rather this fail hard than return some error the * client might think it can already handle: */ return nfserr_encr_alg_unsupp; } } __be32 nfsd4_create_session(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { struct nfsd4_create_session *cr_ses = &u->create_session; struct sockaddr *sa = svc_addr(rqstp); struct nfs4_client *conf, *unconf; struct nfs4_client *old = NULL; struct nfsd4_session *new; struct nfsd4_conn *conn; struct nfsd4_clid_slot *cs_slot = NULL; __be32 status = 0; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); if (cr_ses->flags & ~SESSION4_FLAG_MASK_A) return nfserr_inval; status = nfsd4_check_cb_sec(&cr_ses->cb_sec); if (status) return status; status = check_forechannel_attrs(&cr_ses->fore_channel, nn); if (status) return status; status = check_backchannel_attrs(&cr_ses->back_channel); if (status) goto out_release_drc_mem; status = nfserr_jukebox; new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel); if (!new) goto out_release_drc_mem; conn = alloc_conn_from_crses(rqstp, cr_ses); if (!conn) goto out_free_session; spin_lock(&nn->client_lock); unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn); conf = find_confirmed_client(&cr_ses->clientid, true, nn); WARN_ON_ONCE(conf && unconf); if (conf) { status = nfserr_wrong_cred; if (!nfsd4_mach_creds_match(conf, rqstp)) goto out_free_conn; cs_slot = &conf->cl_cs_slot; status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0); if (status) { if (status == nfserr_replay_cache) status = nfsd4_replay_create_session(cr_ses, cs_slot); goto out_free_conn; } } else if (unconf) { status = nfserr_clid_inuse; if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) || !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) { trace_nfsd_clid_cred_mismatch(unconf, rqstp); goto out_free_conn; } status = nfserr_wrong_cred; if (!nfsd4_mach_creds_match(unconf, rqstp)) goto out_free_conn; cs_slot = &unconf->cl_cs_slot; status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0); if (status) { /* an unconfirmed replay returns misordered */ status = nfserr_seq_misordered; goto out_free_conn; } old = find_confirmed_client_by_name(&unconf->cl_name, nn); if (old) { status = mark_client_expired_locked(old); if (status) { old = NULL; goto out_free_conn; } trace_nfsd_clid_replaced(&old->cl_clientid); } move_to_confirmed(unconf); conf = unconf; } else { status = nfserr_stale_clientid; goto out_free_conn; } status = nfs_ok; /* Persistent sessions are not supported */ cr_ses->flags &= ~SESSION4_PERSIST; /* Upshifting from TCP to RDMA is not supported */ cr_ses->flags &= ~SESSION4_RDMA; init_session(rqstp, new, conf, cr_ses); nfsd4_get_session_locked(new); memcpy(cr_ses->sessionid.data, new->se_sessionid.data, NFS4_MAX_SESSIONID_LEN); cs_slot->sl_seqid++; cr_ses->seqid = cs_slot->sl_seqid; /* cache solo and embedded create sessions under the client_lock */ nfsd4_cache_create_session(cr_ses, cs_slot, status); spin_unlock(&nn->client_lock); if (conf == unconf) fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY); /* init connection and backchannel */ nfsd4_init_conn(rqstp, conn, new); nfsd4_put_session(new); if (old) expire_client(old); return status; out_free_conn: spin_unlock(&nn->client_lock); free_conn(conn); if (old) expire_client(old); out_free_session: __free_session(new); out_release_drc_mem: nfsd4_put_drc_mem(&cr_ses->fore_channel); return status; } static __be32 nfsd4_map_bcts_dir(u32 *dir) { switch (*dir) { case NFS4_CDFC4_FORE: case NFS4_CDFC4_BACK: return nfs_ok; case 
NFS4_CDFC4_FORE_OR_BOTH: case NFS4_CDFC4_BACK_OR_BOTH: *dir = NFS4_CDFC4_BOTH; return nfs_ok; } return nfserr_inval; } __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl; struct nfsd4_session *session = cstate->session; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); __be32 status; status = nfsd4_check_cb_sec(&bc->bc_cb_sec); if (status) return status; spin_lock(&nn->client_lock); session->se_cb_prog = bc->bc_cb_program; session->se_cb_sec = bc->bc_cb_sec; spin_unlock(&nn->client_lock); nfsd4_probe_callback(session->se_client); return nfs_ok; } static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s) { struct nfsd4_conn *c; list_for_each_entry(c, &s->se_conns, cn_persession) { if (c->cn_xprt == xpt) { return c; } } return NULL; } static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst, struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn) { struct nfs4_client *clp = session->se_client; struct svc_xprt *xpt = rqst->rq_xprt; struct nfsd4_conn *c; __be32 status; /* Following the last paragraph of RFC 5661 Section 18.34.3: */ spin_lock(&clp->cl_lock); c = __nfsd4_find_conn(xpt, session); if (!c) status = nfserr_noent; else if (req == c->cn_flags) status = nfs_ok; else if (req == NFS4_CDFC4_FORE_OR_BOTH && c->cn_flags != NFS4_CDFC4_BACK) status = nfs_ok; else if (req == NFS4_CDFC4_BACK_OR_BOTH && c->cn_flags != NFS4_CDFC4_FORE) status = nfs_ok; else status = nfserr_inval; spin_unlock(&clp->cl_lock); if (status == nfs_ok && conn) *conn = c; return status; } __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session; __be32 status; struct nfsd4_conn *conn; struct nfsd4_session *session; struct net *net = SVC_NET(rqstp); struct nfsd_net *nn = net_generic(net, nfsd_net_id); if (!nfsd4_last_compound_op(rqstp)) return nfserr_not_only_op; spin_lock(&nn->client_lock); session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status); spin_unlock(&nn->client_lock); if (!session) goto out_no_session; status = nfserr_wrong_cred; if (!nfsd4_mach_creds_match(session->se_client, rqstp)) goto out; status = nfsd4_match_existing_connection(rqstp, session, bcts->dir, &conn); if (status == nfs_ok) { if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH || bcts->dir == NFS4_CDFC4_BACK) conn->cn_flags |= NFS4_CDFC4_BACK; nfsd4_probe_callback(session->se_client); goto out; } if (status == nfserr_inval) goto out; status = nfsd4_map_bcts_dir(&bcts->dir); if (status) goto out; conn = alloc_conn(rqstp, bcts->dir); status = nfserr_jukebox; if (!conn) goto out; nfsd4_init_conn(rqstp, conn, session); status = nfs_ok; out: nfsd4_put_session(session); out_no_session: return status; } static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid) { if (!cstate->session) return false; return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid)); } __be32 nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid; struct nfsd4_session *ses; __be32 status; int ref_held_by_me = 0; struct net *net = SVC_NET(r); struct nfsd_net *nn = net_generic(net, nfsd_net_id); status = nfserr_not_only_op; if (nfsd4_compound_in_session(cstate, sessionid)) { if (!nfsd4_last_compound_op(r)) goto 
out; ref_held_by_me++; } dump_sessionid(__func__, sessionid); spin_lock(&nn->client_lock); ses = find_in_sessionid_hashtbl(sessionid, net, &status); if (!ses) goto out_client_lock; status = nfserr_wrong_cred; if (!nfsd4_mach_creds_match(ses->se_client, r)) goto out_put_session; status = mark_session_dead_locked(ses, 1 + ref_held_by_me); if (status) goto out_put_session; unhash_session(ses); spin_unlock(&nn->client_lock); nfsd4_probe_callback_sync(ses->se_client); spin_lock(&nn->client_lock); status = nfs_ok; out_put_session: nfsd4_put_session_locked(ses); out_client_lock: spin_unlock(&nn->client_lock); out: return status; } static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses) { struct nfs4_client *clp = ses->se_client; struct nfsd4_conn *c; __be32 status = nfs_ok; int ret; spin_lock(&clp->cl_lock); c = __nfsd4_find_conn(new->cn_xprt, ses); if (c) goto out_free; status = nfserr_conn_not_bound_to_session; if (clp->cl_mach_cred) goto out_free; __nfsd4_hash_conn(new, ses); spin_unlock(&clp->cl_lock); ret = nfsd4_register_conn(new); if (ret) /* oops; xprt is already down: */ nfsd4_conn_lost(&new->cn_xpt_user); return nfs_ok; out_free: spin_unlock(&clp->cl_lock); free_conn(new); return status; } static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session) { struct nfsd4_compoundargs *args = rqstp->rq_argp; return args->opcnt > session->se_fchannel.maxops; } static bool nfsd4_request_too_big(struct svc_rqst *rqstp, struct nfsd4_session *session) { struct xdr_buf *xb = &rqstp->rq_arg; return xb->len > session->se_fchannel.maxreq_sz; } static bool replay_matches_cache(struct svc_rqst *rqstp, struct nfsd4_sequence *seq, struct nfsd4_slot *slot) { struct nfsd4_compoundargs *argp = rqstp->rq_argp; if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) != (bool)seq->cachethis) return false; /* * If there's an error then the reply can have fewer ops than * the call. */ if (slot->sl_opcnt < argp->opcnt && !slot->sl_status) return false; /* * But if we cached a reply with *more* ops than the call you're * sending us now, then this new call is clearly not really a * replay of the old one: */ if (slot->sl_opcnt > argp->opcnt) return false; /* This is the only check explicitly called by spec: */ if (!same_creds(&rqstp->rq_cred, &slot->sl_cred)) return false; /* * There may be more comparisons we could actually do, but the * spec doesn't require us to catch every case where the calls * don't match (that would require caching the call as well as * the reply), so we don't bother. */ return true; } __be32 nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { struct nfsd4_sequence *seq = &u->sequence; struct nfsd4_compoundres *resp = rqstp->rq_resp; struct xdr_stream *xdr = resp->xdr; struct nfsd4_session *session; struct nfs4_client *clp; struct nfsd4_slot *slot; struct nfsd4_conn *conn; __be32 status; int buflen; struct net *net = SVC_NET(rqstp); struct nfsd_net *nn = net_generic(net, nfsd_net_id); if (resp->opcnt != 1) return nfserr_sequence_pos; /* * Will be either used or freed by nfsd4_sequence_check_conn * below. 
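 * The connection is allocated up front, before nn->client_lock is taken.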
*/ conn = alloc_conn(rqstp, NFS4_CDFC4_FORE); if (!conn) return nfserr_jukebox; spin_lock(&nn->client_lock); session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status); if (!session) goto out_no_session; clp = session->se_client; status = nfserr_too_many_ops; if (nfsd4_session_too_many_ops(rqstp, session)) goto out_put_session; status = nfserr_req_too_big; if (nfsd4_request_too_big(rqstp, session)) goto out_put_session; status = nfserr_badslot; if (seq->slotid >= session->se_fchannel.maxreqs) goto out_put_session; slot = session->se_slots[seq->slotid]; dprintk("%s: slotid %d\n", __func__, seq->slotid); /* We do not negotiate the number of slots yet, so set the * maxslots to the session maxreqs which is used to encode * sr_highest_slotid and the sr_target_slot id to maxslots */ seq->maxslots = session->se_fchannel.maxreqs; status = check_slot_seqid(seq->seqid, slot->sl_seqid, slot->sl_flags & NFSD4_SLOT_INUSE); if (status == nfserr_replay_cache) { status = nfserr_seq_misordered; if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED)) goto out_put_session; status = nfserr_seq_false_retry; if (!replay_matches_cache(rqstp, seq, slot)) goto out_put_session; cstate->slot = slot; cstate->session = session; cstate->clp = clp; /* Return the cached reply status and set cstate->status * for nfsd4_proc_compound processing */ status = nfsd4_replay_cache_entry(resp, seq); cstate->status = nfserr_replay_cache; goto out; } if (status) goto out_put_session; status = nfsd4_sequence_check_conn(conn, session); conn = NULL; if (status) goto out_put_session; buflen = (seq->cachethis) ? session->se_fchannel.maxresp_cached : session->se_fchannel.maxresp_sz; status = (seq->cachethis) ? nfserr_rep_too_big_to_cache : nfserr_rep_too_big; if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack)) goto out_put_session; svc_reserve(rqstp, buflen); status = nfs_ok; /* Success! 
bump slot seqid */ slot->sl_seqid = seq->seqid; slot->sl_flags |= NFSD4_SLOT_INUSE; if (seq->cachethis) slot->sl_flags |= NFSD4_SLOT_CACHETHIS; else slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS; cstate->slot = slot; cstate->session = session; cstate->clp = clp; out: switch (clp->cl_cb_state) { case NFSD4_CB_DOWN: seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN; break; case NFSD4_CB_FAULT: seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT; break; default: seq->status_flags = 0; } if (!list_empty(&clp->cl_revoked)) seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED; out_no_session: if (conn) free_conn(conn); spin_unlock(&nn->client_lock); return status; out_put_session: nfsd4_put_session_locked(session); goto out_no_session; } void nfsd4_sequence_done(struct nfsd4_compoundres *resp) { struct nfsd4_compound_state *cs = &resp->cstate; if (nfsd4_has_session(cs)) { if (cs->status != nfserr_replay_cache) { nfsd4_store_cache_entry(resp); cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE; } /* Drop session reference that was taken in nfsd4_sequence() */ nfsd4_put_session(cs->session); } else if (cs->clp) put_client_renew(cs->clp); } __be32 nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { struct nfsd4_destroy_clientid *dc = &u->destroy_clientid; struct nfs4_client *conf, *unconf; struct nfs4_client *clp = NULL; __be32 status = 0; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); spin_lock(&nn->client_lock); unconf = find_unconfirmed_client(&dc->clientid, true, nn); conf = find_confirmed_client(&dc->clientid, true, nn); WARN_ON_ONCE(conf && unconf); if (conf) { if (client_has_state(conf)) { status = nfserr_clientid_busy; goto out; } status = mark_client_expired_locked(conf); if (status) goto out; clp = conf; } else if (unconf) clp = unconf; else { status = nfserr_stale_clientid; goto out; } if (!nfsd4_mach_creds_match(clp, rqstp)) { clp = NULL; status = nfserr_wrong_cred; goto out; } trace_nfsd_clid_destroyed(&clp->cl_clientid); unhash_client_locked(clp); out: spin_unlock(&nn->client_lock); if (clp) expire_client(clp); return status; } __be32 nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { struct nfsd4_reclaim_complete *rc = &u->reclaim_complete; struct nfs4_client *clp = cstate->clp; __be32 status = 0; if (rc->rca_one_fs) { if (!cstate->current_fh.fh_dentry) return nfserr_nofilehandle; /* * We don't take advantage of the rca_one_fs case. * That's OK, it's optional, we can safely ignore it. */ return nfs_ok; } status = nfserr_complete_already; if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags)) goto out; status = nfserr_stale_clientid; if (is_client_expired(clp)) /* * The following error isn't really legal. * But we only get here if the client just explicitly * destroyed the client. Surely it no longer cares what * error it gets back on an operation for the dead * client. 
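 * (status is nfserr_stale_clientid at this point.)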
*/ goto out; status = nfs_ok; trace_nfsd_clid_reclaim_complete(&clp->cl_clientid); nfsd4_client_record_create(clp); inc_reclaim_complete(clp); out: return status; } __be32 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { struct nfsd4_setclientid *setclid = &u->setclientid; struct xdr_netobj clname = setclid->se_name; nfs4_verifier clverifier = setclid->se_verf; struct nfs4_client *conf, *new; struct nfs4_client *unconf = NULL; __be32 status; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); new = create_client(clname, rqstp, &clverifier); if (new == NULL) return nfserr_jukebox; spin_lock(&nn->client_lock); conf = find_confirmed_client_by_name(&clname, nn); if (conf && client_has_state(conf)) { status = nfserr_clid_inuse; if (clp_used_exchangeid(conf)) goto out; if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) { trace_nfsd_clid_cred_mismatch(conf, rqstp); goto out; } } unconf = find_unconfirmed_client_by_name(&clname, nn); if (unconf) unhash_client_locked(unconf); if (conf) { if (same_verf(&conf->cl_verifier, &clverifier)) { copy_clid(new, conf); gen_confirm(new, nn); } else trace_nfsd_clid_verf_mismatch(conf, rqstp, &clverifier); } else trace_nfsd_clid_fresh(new); new->cl_minorversion = 0; gen_callback(new, setclid, rqstp); add_to_unconfirmed(new); setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot; setclid->se_clientid.cl_id = new->cl_clientid.cl_id; memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data)); new = NULL; status = nfs_ok; out: spin_unlock(&nn->client_lock); if (new) free_client(new); if (unconf) { trace_nfsd_clid_expire_unconf(&unconf->cl_clientid); expire_client(unconf); } return status; } __be32 nfsd4_setclientid_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { struct nfsd4_setclientid_confirm *setclientid_confirm = &u->setclientid_confirm; struct nfs4_client *conf, *unconf; struct nfs4_client *old = NULL; nfs4_verifier confirm = setclientid_confirm->sc_confirm; clientid_t * clid = &setclientid_confirm->sc_clientid; __be32 status; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); if (STALE_CLIENTID(clid, nn)) return nfserr_stale_clientid; spin_lock(&nn->client_lock); conf = find_confirmed_client(clid, false, nn); unconf = find_unconfirmed_client(clid, false, nn); /* * We try hard to give out unique clientid's, so if we get an * attempt to confirm the same clientid with a different cred, * the client may be buggy; this should never happen. 
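 * (Clientids are generated from the per-net clientid_counter, so two
 * distinct clients are not expected to end up with the same one.)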
* * Nevertheless, RFC 7530 recommends INUSE for this case: */ status = nfserr_clid_inuse; if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred)) { trace_nfsd_clid_cred_mismatch(unconf, rqstp); goto out; } if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred)) { trace_nfsd_clid_cred_mismatch(conf, rqstp); goto out; } if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) { if (conf && same_verf(&confirm, &conf->cl_confirm)) { status = nfs_ok; } else status = nfserr_stale_clientid; goto out; } status = nfs_ok; if (conf) { old = unconf; unhash_client_locked(old); nfsd4_change_callback(conf, &unconf->cl_cb_conn); } else { old = find_confirmed_client_by_name(&unconf->cl_name, nn); if (old) { status = nfserr_clid_inuse; if (client_has_state(old) && !same_creds(&unconf->cl_cred, &old->cl_cred)) { old = NULL; goto out; } status = mark_client_expired_locked(old); if (status) { old = NULL; goto out; } trace_nfsd_clid_replaced(&old->cl_clientid); } move_to_confirmed(unconf); conf = unconf; } get_client_locked(conf); spin_unlock(&nn->client_lock); if (conf == unconf) fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY); nfsd4_probe_callback(conf); spin_lock(&nn->client_lock); put_client_renew_locked(conf); out: spin_unlock(&nn->client_lock); if (old) expire_client(old); return status; } static struct nfs4_file *nfsd4_alloc_file(void) { return kmem_cache_alloc(file_slab, GFP_KERNEL); } /* OPEN Share state helper functions */ static void nfsd4_file_init(const struct svc_fh *fh, struct nfs4_file *fp) { refcount_set(&fp->fi_ref, 1); spin_lock_init(&fp->fi_lock); INIT_LIST_HEAD(&fp->fi_stateids); INIT_LIST_HEAD(&fp->fi_delegations); INIT_LIST_HEAD(&fp->fi_clnt_odstate); fh_copy_shallow(&fp->fi_fhandle, &fh->fh_handle); fp->fi_deleg_file = NULL; fp->fi_had_conflict = false; fp->fi_share_deny = 0; memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); memset(fp->fi_access, 0, sizeof(fp->fi_access)); fp->fi_aliased = false; fp->fi_inode = d_inode(fh->fh_dentry); #ifdef CONFIG_NFSD_PNFS INIT_LIST_HEAD(&fp->fi_lo_states); atomic_set(&fp->fi_lo_recalls, 0); #endif } void nfsd4_free_slabs(void) { kmem_cache_destroy(client_slab); kmem_cache_destroy(openowner_slab); kmem_cache_destroy(lockowner_slab); kmem_cache_destroy(file_slab); kmem_cache_destroy(stateid_slab); kmem_cache_destroy(deleg_slab); kmem_cache_destroy(odstate_slab); } int nfsd4_init_slabs(void) { client_slab = kmem_cache_create("nfsd4_clients", sizeof(struct nfs4_client), 0, 0, NULL); if (client_slab == NULL) goto out; openowner_slab = kmem_cache_create("nfsd4_openowners", sizeof(struct nfs4_openowner), 0, 0, NULL); if (openowner_slab == NULL) goto out_free_client_slab; lockowner_slab = kmem_cache_create("nfsd4_lockowners", sizeof(struct nfs4_lockowner), 0, 0, NULL); if (lockowner_slab == NULL) goto out_free_openowner_slab; file_slab = kmem_cache_create("nfsd4_files", sizeof(struct nfs4_file), 0, 0, NULL); if (file_slab == NULL) goto out_free_lockowner_slab; stateid_slab = kmem_cache_create("nfsd4_stateids", sizeof(struct nfs4_ol_stateid), 0, 0, NULL); if (stateid_slab == NULL) goto out_free_file_slab; deleg_slab = kmem_cache_create("nfsd4_delegations", sizeof(struct nfs4_delegation), 0, 0, NULL); if (deleg_slab == NULL) goto out_free_stateid_slab; odstate_slab = kmem_cache_create("nfsd4_odstate", sizeof(struct nfs4_clnt_odstate), 0, 0, NULL); if (odstate_slab == NULL) goto out_free_deleg_slab; return 0; out_free_deleg_slab: kmem_cache_destroy(deleg_slab); out_free_stateid_slab: kmem_cache_destroy(stateid_slab); out_free_file_slab: 
kmem_cache_destroy(file_slab); out_free_lockowner_slab: kmem_cache_destroy(lockowner_slab); out_free_openowner_slab: kmem_cache_destroy(openowner_slab); out_free_client_slab: kmem_cache_destroy(client_slab); out: return -ENOMEM; } static unsigned long nfsd4_state_shrinker_count(struct shrinker *shrink, struct shrink_control *sc) { int count; struct nfsd_net *nn = shrink->private_data; count = atomic_read(&nn->nfsd_courtesy_clients); if (!count) count = atomic_long_read(&num_delegations); if (count) queue_work(laundry_wq, &nn->nfsd_shrinker_work); return (unsigned long)count; } static unsigned long nfsd4_state_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc) { return SHRINK_STOP; } void nfsd4_init_leases_net(struct nfsd_net *nn) { struct sysinfo si; u64 max_clients; nn->nfsd4_lease = 90; /* default lease time */ nn->nfsd4_grace = 90; nn->somebody_reclaimed = false; nn->track_reclaim_completes = false; nn->clverifier_counter = get_random_u32(); nn->clientid_base = get_random_u32(); nn->clientid_counter = nn->clientid_base + 1; nn->s2s_cp_cl_id = nn->clientid_counter++; atomic_set(&nn->nfs4_client_count, 0); si_meminfo(&si); max_clients = (u64)si.totalram * si.mem_unit / (1024 * 1024 * 1024); max_clients *= NFS4_CLIENTS_PER_GB; nn->nfs4_max_clients = max_t(int, max_clients, NFS4_CLIENTS_PER_GB); atomic_set(&nn->nfsd_courtesy_clients, 0); } static void init_nfs4_replay(struct nfs4_replay *rp) { rp->rp_status = nfserr_serverfault; rp->rp_buflen = 0; rp->rp_buf = rp->rp_ibuf; mutex_init(&rp->rp_mutex); } static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so) { if (!nfsd4_has_session(cstate)) { mutex_lock(&so->so_replay.rp_mutex); cstate->replay_owner = nfs4_get_stateowner(so); } } void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate) { struct nfs4_stateowner *so = cstate->replay_owner; if (so != NULL) { cstate->replay_owner = NULL; mutex_unlock(&so->so_replay.rp_mutex); nfs4_put_stateowner(so); } } static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp) { struct nfs4_stateowner *sop; sop = kmem_cache_alloc(slab, GFP_KERNEL); if (!sop) return NULL; xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL); if (!sop->so_owner.data) { kmem_cache_free(slab, sop); return NULL; } INIT_LIST_HEAD(&sop->so_stateids); sop->so_client = clp; init_nfs4_replay(&sop->so_replay); atomic_set(&sop->so_count, 1); return sop; } static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval) { lockdep_assert_held(&clp->cl_lock); list_add(&oo->oo_owner.so_strhash, &clp->cl_ownerstr_hashtbl[strhashval]); list_add(&oo->oo_perclient, &clp->cl_openowners); } static void nfs4_unhash_openowner(struct nfs4_stateowner *so) { unhash_openowner_locked(openowner(so)); } static void nfs4_free_openowner(struct nfs4_stateowner *so) { struct nfs4_openowner *oo = openowner(so); kmem_cache_free(openowner_slab, oo); } static const struct nfs4_stateowner_operations openowner_ops = { .so_unhash = nfs4_unhash_openowner, .so_free = nfs4_free_openowner, }; static struct nfs4_ol_stateid * nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open) { struct nfs4_ol_stateid *local, *ret = NULL; struct nfs4_openowner *oo = open->op_openowner; lockdep_assert_held(&fp->fi_lock); list_for_each_entry(local, &fp->fi_stateids, st_perfile) { /* ignore lock owners */ if (local->st_stateowner->so_is_open_owner == 0) continue; if (local->st_stateowner != &oo->oo_owner) 
continue; if (local->st_stid.sc_type == NFS4_OPEN_STID) { ret = local; refcount_inc(&ret->st_stid.sc_count); break; } } return ret; } static __be32 nfsd4_verify_open_stid(struct nfs4_stid *s) { __be32 ret = nfs_ok; switch (s->sc_type) { default: break; case 0: case NFS4_CLOSED_STID: case NFS4_CLOSED_DELEG_STID: ret = nfserr_bad_stateid; break; case NFS4_REVOKED_DELEG_STID: ret = nfserr_deleg_revoked; } return ret; } /* Lock the stateid st_mutex, and deal with races with CLOSE */ static __be32 nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp) { __be32 ret; mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX); ret = nfsd4_verify_open_stid(&stp->st_stid); if (ret != nfs_ok) mutex_unlock(&stp->st_mutex); return ret; } static struct nfs4_ol_stateid * nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open) { struct nfs4_ol_stateid *stp; for (;;) { spin_lock(&fp->fi_lock); stp = nfsd4_find_existing_open(fp, open); spin_unlock(&fp->fi_lock); if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok) break; nfs4_put_stid(&stp->st_stid); } return stp; } static struct nfs4_openowner * alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open, struct nfsd4_compound_state *cstate) { struct nfs4_client *clp = cstate->clp; struct nfs4_openowner *oo, *ret; oo = alloc_stateowner(openowner_slab, &open->op_owner, clp); if (!oo) return NULL; oo->oo_owner.so_ops = &openowner_ops; oo->oo_owner.so_is_open_owner = 1; oo->oo_owner.so_seqid = open->op_seqid; oo->oo_flags = 0; if (nfsd4_has_session(cstate)) oo->oo_flags |= NFS4_OO_CONFIRMED; oo->oo_time = 0; oo->oo_last_closed_stid = NULL; INIT_LIST_HEAD(&oo->oo_close_lru); spin_lock(&clp->cl_lock); ret = find_openstateowner_str_locked(strhashval, open, clp); if (ret == NULL) { hash_openowner(oo, clp, strhashval); ret = oo; } else nfs4_free_stateowner(&oo->oo_owner); spin_unlock(&clp->cl_lock); return ret; } static struct nfs4_ol_stateid * init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open) { struct nfs4_openowner *oo = open->op_openowner; struct nfs4_ol_stateid *retstp = NULL; struct nfs4_ol_stateid *stp; stp = open->op_stp; /* We are moving these outside of the spinlocks to avoid the warnings */ mutex_init(&stp->st_mutex); mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX); retry: spin_lock(&oo->oo_owner.so_client->cl_lock); spin_lock(&fp->fi_lock); retstp = nfsd4_find_existing_open(fp, open); if (retstp) goto out_unlock; open->op_stp = NULL; refcount_inc(&stp->st_stid.sc_count); stp->st_stid.sc_type = NFS4_OPEN_STID; INIT_LIST_HEAD(&stp->st_locks); stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner); get_nfs4_file(fp); stp->st_stid.sc_file = fp; stp->st_access_bmap = 0; stp->st_deny_bmap = 0; stp->st_openstp = NULL; list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids); list_add(&stp->st_perfile, &fp->fi_stateids); out_unlock: spin_unlock(&fp->fi_lock); spin_unlock(&oo->oo_owner.so_client->cl_lock); if (retstp) { /* Handle races with CLOSE */ if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) { nfs4_put_stid(&retstp->st_stid); goto retry; } /* To keep mutex tracking happy */ mutex_unlock(&stp->st_mutex); stp = retstp; } return stp; } /* * In the 4.0 case we need to keep the owners around a little while to handle * CLOSE replay. We still do need to release any file access that is held by * them before returning however. 
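 * move_to_close_lru() below drops that file access, stashes the stateid
 * as oo_last_closed_stid, and timestamps the owner on the per-net
 * close_lru list so it can be reaped later.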
*/ static void move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net) { struct nfs4_ol_stateid *last; struct nfs4_openowner *oo = openowner(s->st_stateowner); struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net, nfsd_net_id); dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo); /* * We know that we hold one reference via nfsd4_close, and another * "persistent" reference for the client. If the refcount is higher * than 2, then there are still calls in progress that are using this * stateid. We can't put the sc_file reference until they are finished. * Wait for the refcount to drop to 2. Since it has been unhashed, * there should be no danger of the refcount going back up again at * this point. */ wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2); release_all_access(s); if (s->st_stid.sc_file) { put_nfs4_file(s->st_stid.sc_file); s->st_stid.sc_file = NULL; } spin_lock(&nn->client_lock); last = oo->oo_last_closed_stid; oo->oo_last_closed_stid = s; list_move_tail(&oo->oo_close_lru, &nn->close_lru); oo->oo_time = ktime_get_boottime_seconds(); spin_unlock(&nn->client_lock); if (last) nfs4_put_stid(&last->st_stid); } static noinline_for_stack struct nfs4_file * nfsd4_file_hash_lookup(const struct svc_fh *fhp) { struct inode *inode = d_inode(fhp->fh_dentry); struct rhlist_head *tmp, *list; struct nfs4_file *fi; rcu_read_lock(); list = rhltable_lookup(&nfs4_file_rhltable, &inode, nfs4_file_rhash_params); rhl_for_each_entry_rcu(fi, tmp, list, fi_rlist) { if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) { if (refcount_inc_not_zero(&fi->fi_ref)) { rcu_read_unlock(); return fi; } } } rcu_read_unlock(); return NULL; } /* * On hash insertion, identify entries with the same inode but * distinct filehandles. They will all be on the list returned * by rhltable_lookup(). * * inode->i_lock prevents racing insertions from adding an entry * for the same inode/fhp pair twice. 
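 * Entries that share the inode but not the filehandle are marked
 * fi_aliased, and a newly inserted entry inherits that flag.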
*/ static noinline_for_stack struct nfs4_file * nfsd4_file_hash_insert(struct nfs4_file *new, const struct svc_fh *fhp) { struct inode *inode = d_inode(fhp->fh_dentry); struct rhlist_head *tmp, *list; struct nfs4_file *ret = NULL; bool alias_found = false; struct nfs4_file *fi; int err; rcu_read_lock(); spin_lock(&inode->i_lock); list = rhltable_lookup(&nfs4_file_rhltable, &inode, nfs4_file_rhash_params); rhl_for_each_entry_rcu(fi, tmp, list, fi_rlist) { if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) { if (refcount_inc_not_zero(&fi->fi_ref)) ret = fi; } else fi->fi_aliased = alias_found = true; } if (ret) goto out_unlock; nfsd4_file_init(fhp, new); err = rhltable_insert(&nfs4_file_rhltable, &new->fi_rlist, nfs4_file_rhash_params); if (err) goto out_unlock; new->fi_aliased = alias_found; ret = new; out_unlock: spin_unlock(&inode->i_lock); rcu_read_unlock(); return ret; } static noinline_for_stack void nfsd4_file_hash_remove(struct nfs4_file *fi) { rhltable_remove(&nfs4_file_rhltable, &fi->fi_rlist, nfs4_file_rhash_params); } /* * Called to check deny when READ with all zero stateid or * WRITE with all zero or all one stateid */ static __be32 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type) { struct nfs4_file *fp; __be32 ret = nfs_ok; fp = nfsd4_file_hash_lookup(current_fh); if (!fp) return ret; /* Check for conflicting share reservations */ spin_lock(&fp->fi_lock); if (fp->fi_share_deny & deny_type) ret = nfserr_locked; spin_unlock(&fp->fi_lock); put_nfs4_file(fp); return ret; } static bool nfsd4_deleg_present(const struct inode *inode) { struct file_lock_context *ctx = locks_inode_context(inode); return ctx && !list_empty_careful(&ctx->flc_lease); } /** * nfsd_wait_for_delegreturn - wait for delegations to be returned * @rqstp: the RPC transaction being executed * @inode: in-core inode of the file being waited for * * The timeout prevents deadlock if all nfsd threads happen to be * tied up waiting for returning delegations. * * Return values: * %true: delegation was returned * %false: timed out waiting for delegreturn */ bool nfsd_wait_for_delegreturn(struct svc_rqst *rqstp, struct inode *inode) { long __maybe_unused timeo; timeo = wait_var_event_timeout(inode, !nfsd4_deleg_present(inode), NFSD_DELEGRETURN_TIMEOUT); trace_nfsd_delegret_wakeup(rqstp, inode, timeo); return timeo > 0; } static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb) { struct nfs4_delegation *dp = cb_to_delegation(cb); struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net, nfsd_net_id); block_delegations(&dp->dl_stid.sc_file->fi_fhandle); /* * We can't do this in nfsd_break_deleg_cb because it is * already holding inode->i_lock. * * If the dl_time != 0, then we know that it has already been * queued for a lease break. Don't queue it again. */ spin_lock(&state_lock); if (delegation_hashed(dp) && dp->dl_time == 0) { dp->dl_time = ktime_get_boottime_seconds(); list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru); } spin_unlock(&state_lock); } static int nfsd4_cb_recall_done(struct nfsd4_callback *cb, struct rpc_task *task) { struct nfs4_delegation *dp = cb_to_delegation(cb); trace_nfsd_cb_recall_done(&dp->dl_stid.sc_stateid, task); if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID || dp->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) return 1; switch (task->tk_status) { case 0: return 1; case -NFS4ERR_DELAY: rpc_delay(task, 2 * HZ); return 0; case -EBADHANDLE: case -NFS4ERR_BAD_STATEID: /* * Race: client probably got cb_recall before open reply * granting delegation. 
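 * Retry the callback a few times (dl_retries) before giving up.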
*/ if (dp->dl_retries--) { rpc_delay(task, 2 * HZ); return 0; } fallthrough; default: return 1; } } static void nfsd4_cb_recall_release(struct nfsd4_callback *cb) { struct nfs4_delegation *dp = cb_to_delegation(cb); nfs4_put_stid(&dp->dl_stid); } static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = { .prepare = nfsd4_cb_recall_prepare, .done = nfsd4_cb_recall_done, .release = nfsd4_cb_recall_release, }; static void nfsd_break_one_deleg(struct nfs4_delegation *dp) { /* * We're assuming the state code never drops its reference * without first removing the lease. Since we're in this lease * callback (and since the lease code is serialized by the * flc_lock) we know the server hasn't removed the lease yet, and * we know it's safe to take a reference. */ refcount_inc(&dp->dl_stid.sc_count); WARN_ON_ONCE(!nfsd4_run_cb(&dp->dl_recall)); } /* Called from break_lease() with flc_lock held. */ static bool nfsd_break_deleg_cb(struct file_lock *fl) { struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner; struct nfs4_file *fp = dp->dl_stid.sc_file; struct nfs4_client *clp = dp->dl_stid.sc_client; struct nfsd_net *nn; trace_nfsd_cb_recall(&dp->dl_stid); dp->dl_recalled = true; atomic_inc(&clp->cl_delegs_in_recall); if (try_to_expire_client(clp)) { nn = net_generic(clp->net, nfsd_net_id); mod_delayed_work(laundry_wq, &nn->laundromat_work, 0); } /* * We don't want the locks code to timeout the lease for us; * we'll remove it ourself if a delegation isn't returned * in time: */ fl->fl_break_time = 0; spin_lock(&fp->fi_lock); fp->fi_had_conflict = true; nfsd_break_one_deleg(dp); spin_unlock(&fp->fi_lock); return false; } /** * nfsd_breaker_owns_lease - Check if lease conflict was resolved * @fl: Lock state to check * * Return values: * %true: Lease conflict was resolved * %false: Lease conflict was not resolved. 
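 *
 * The conflict is treated as resolved when the lease breaker is an nfsd
 * thread handling a request from the very client that holds the
 * delegation.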
*/ static bool nfsd_breaker_owns_lease(struct file_lock *fl) { struct nfs4_delegation *dl = fl->fl_owner; struct svc_rqst *rqst; struct nfs4_client *clp; if (!i_am_nfsd()) return false; rqst = kthread_data(current); /* Note rq_prog == NFS_ACL_PROGRAM is also possible: */ if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4) return false; clp = *(rqst->rq_lease_breaker); return dl->dl_stid.sc_client == clp; } static int nfsd_change_deleg_cb(struct file_lock *onlist, int arg, struct list_head *dispose) { struct nfs4_delegation *dp = (struct nfs4_delegation *)onlist->fl_owner; struct nfs4_client *clp = dp->dl_stid.sc_client; if (arg & F_UNLCK) { if (dp->dl_recalled) atomic_dec(&clp->cl_delegs_in_recall); return lease_modify(onlist, arg, dispose); } else return -EAGAIN; } static const struct lock_manager_operations nfsd_lease_mng_ops = { .lm_breaker_owns_lease = nfsd_breaker_owns_lease, .lm_break = nfsd_break_deleg_cb, .lm_change = nfsd_change_deleg_cb, }; static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid) { if (nfsd4_has_session(cstate)) return nfs_ok; if (seqid == so->so_seqid - 1) return nfserr_replay_me; if (seqid == so->so_seqid) return nfs_ok; return nfserr_bad_seqid; } static struct nfs4_client *lookup_clientid(clientid_t *clid, bool sessions, struct nfsd_net *nn) { struct nfs4_client *found; spin_lock(&nn->client_lock); found = find_confirmed_client(clid, sessions, nn); if (found) atomic_inc(&found->cl_rpc_users); spin_unlock(&nn->client_lock); return found; } static __be32 set_client(clientid_t *clid, struct nfsd4_compound_state *cstate, struct nfsd_net *nn) { if (cstate->clp) { if (!same_clid(&cstate->clp->cl_clientid, clid)) return nfserr_stale_clientid; return nfs_ok; } if (STALE_CLIENTID(clid, nn)) return nfserr_stale_clientid; /* * We're in the 4.0 case (otherwise the SEQUENCE op would have * set cstate->clp), so session = false: */ cstate->clp = lookup_clientid(clid, false, nn); if (!cstate->clp) return nfserr_expired; return nfs_ok; } __be32 nfsd4_process_open1(struct nfsd4_compound_state *cstate, struct nfsd4_open *open, struct nfsd_net *nn) { clientid_t *clientid = &open->op_clientid; struct nfs4_client *clp = NULL; unsigned int strhashval; struct nfs4_openowner *oo = NULL; __be32 status; /* * In case we need it later, after we've already created the * file and don't want to risk a further failure: */ open->op_file = nfsd4_alloc_file(); if (open->op_file == NULL) return nfserr_jukebox; status = set_client(clientid, cstate, nn); if (status) return status; clp = cstate->clp; strhashval = ownerstr_hashval(&open->op_owner); oo = find_openstateowner_str(strhashval, open, clp); open->op_openowner = oo; if (!oo) { goto new_owner; } if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { /* Replace unconfirmed owners without checking for replay. 
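 * (The unconfirmed owner is simply released here and a fresh one is set
 * up via the new_owner label below.)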
*/ release_openowner(oo); open->op_openowner = NULL; goto new_owner; } status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid); if (status) return status; goto alloc_stateid; new_owner: oo = alloc_init_open_stateowner(strhashval, open, cstate); if (oo == NULL) return nfserr_jukebox; open->op_openowner = oo; alloc_stateid: open->op_stp = nfs4_alloc_open_stateid(clp); if (!open->op_stp) return nfserr_jukebox; if (nfsd4_has_session(cstate) && (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) { open->op_odstate = alloc_clnt_odstate(clp); if (!open->op_odstate) return nfserr_jukebox; } return nfs_ok; } static inline __be32 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags) { if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ)) return nfserr_openmode; else return nfs_ok; } static int share_access_to_flags(u32 share_access) { return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE; } static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s) { struct nfs4_stid *ret; ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID); if (!ret) return NULL; return delegstateid(ret); } static bool nfsd4_is_deleg_cur(struct nfsd4_open *open) { return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR || open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH; } static __be32 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open, struct nfs4_delegation **dp) { int flags; __be32 status = nfserr_bad_stateid; struct nfs4_delegation *deleg; deleg = find_deleg_stateid(cl, &open->op_delegate_stateid); if (deleg == NULL) goto out; if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) { nfs4_put_stid(&deleg->dl_stid); if (cl->cl_minorversion) status = nfserr_deleg_revoked; goto out; } flags = share_access_to_flags(open->op_share_access); status = nfs4_check_delegmode(deleg, flags); if (status) { nfs4_put_stid(&deleg->dl_stid); goto out; } *dp = deleg; out: if (!nfsd4_is_deleg_cur(open)) return nfs_ok; if (status) return status; open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; return nfs_ok; } static inline int nfs4_access_to_access(u32 nfs4_access) { int flags = 0; if (nfs4_access & NFS4_SHARE_ACCESS_READ) flags |= NFSD_MAY_READ; if (nfs4_access & NFS4_SHARE_ACCESS_WRITE) flags |= NFSD_MAY_WRITE; return flags; } static inline __be32 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh, struct nfsd4_open *open) { struct iattr iattr = { .ia_valid = ATTR_SIZE, .ia_size = 0, }; struct nfsd_attrs attrs = { .na_iattr = &iattr, }; if (!open->op_truncate) return 0; if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE)) return nfserr_inval; return nfsd_setattr(rqstp, fh, &attrs, 0, (time64_t)0); } static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open, bool new_stp) { struct nfsd_file *nf = NULL; __be32 status; int oflag = nfs4_access_to_omode(open->op_share_access); int access = nfs4_access_to_access(open->op_share_access); unsigned char old_access_bmap, old_deny_bmap; spin_lock(&fp->fi_lock); /* * Are we trying to set a deny mode that would conflict with * current access? 
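 * nfs4_file_check_deny() and nfs4_file_get_access() below enforce the
 * share reservation in both directions while fi_lock is held.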
*/ status = nfs4_file_check_deny(fp, open->op_share_deny); if (status != nfs_ok) { if (status != nfserr_share_denied) { spin_unlock(&fp->fi_lock); goto out; } if (nfs4_resolve_deny_conflicts_locked(fp, new_stp, stp, open->op_share_deny, false)) status = nfserr_jukebox; spin_unlock(&fp->fi_lock); goto out; } /* set access to the file */ status = nfs4_file_get_access(fp, open->op_share_access); if (status != nfs_ok) { if (status != nfserr_share_denied) { spin_unlock(&fp->fi_lock); goto out; } if (nfs4_resolve_deny_conflicts_locked(fp, new_stp, stp, open->op_share_access, true)) status = nfserr_jukebox; spin_unlock(&fp->fi_lock); goto out; } /* Set access bits in stateid */ old_access_bmap = stp->st_access_bmap; set_access(open->op_share_access, stp); /* Set new deny mask */ old_deny_bmap = stp->st_deny_bmap; set_deny(open->op_share_deny, stp); fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH); if (!fp->fi_fds[oflag]) { spin_unlock(&fp->fi_lock); status = nfsd_file_acquire_opened(rqstp, cur_fh, access, open->op_filp, &nf); if (status != nfs_ok) goto out_put_access; spin_lock(&fp->fi_lock); if (!fp->fi_fds[oflag]) { fp->fi_fds[oflag] = nf; nf = NULL; } } spin_unlock(&fp->fi_lock); if (nf) nfsd_file_put(nf); status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode, access)); if (status) goto out_put_access; status = nfsd4_truncate(rqstp, cur_fh, open); if (status) goto out_put_access; out: return status; out_put_access: stp->st_access_bmap = old_access_bmap; nfs4_file_put_access(fp, open->op_share_access); reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp); goto out; } static __be32 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open) { __be32 status; unsigned char old_deny_bmap = stp->st_deny_bmap; if (!test_access(open->op_share_access, stp)) return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open, false); /* test and set deny mode */ spin_lock(&fp->fi_lock); status = nfs4_file_check_deny(fp, open->op_share_deny); switch (status) { case nfs_ok: set_deny(open->op_share_deny, stp); fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH); break; case nfserr_share_denied: if (nfs4_resolve_deny_conflicts_locked(fp, false, stp, open->op_share_deny, false)) status = nfserr_jukebox; break; } spin_unlock(&fp->fi_lock); if (status != nfs_ok) return status; status = nfsd4_truncate(rqstp, cur_fh, open); if (status != nfs_ok) reset_union_bmap_deny(old_deny_bmap, stp); return status; } /* Should we give out recallable state?: */ static bool nfsd4_cb_channel_good(struct nfs4_client *clp) { if (clp->cl_cb_state == NFSD4_CB_UP) return true; /* * In the sessions case, since we don't have to establish a * separate connection for callbacks, we assume it's OK * until we hear otherwise: */ return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; } static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag) { struct file_lock *fl; fl = locks_alloc_lock(); if (!fl) return NULL; fl->fl_lmops = &nfsd_lease_mng_ops; fl->fl_flags = FL_DELEG; fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? 
F_RDLCK: F_WRLCK; fl->fl_end = OFFSET_MAX; fl->fl_owner = (fl_owner_t)dp; fl->fl_pid = current->tgid; fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file; return fl; } static int nfsd4_check_conflicting_opens(struct nfs4_client *clp, struct nfs4_file *fp) { struct nfs4_ol_stateid *st; struct file *f = fp->fi_deleg_file->nf_file; struct inode *ino = file_inode(f); int writes; writes = atomic_read(&ino->i_writecount); if (!writes) return 0; /* * There could be multiple filehandles (hence multiple * nfs4_files) referencing this file, but that's not too * common; let's just give up in that case rather than * trying to go look up all the clients using that other * nfs4_file as well: */ if (fp->fi_aliased) return -EAGAIN; /* * If there's a close in progress, make sure that we see it * clear any fi_fds[] entries before we see it decrement * i_writecount: */ smp_mb__after_atomic(); if (fp->fi_fds[O_WRONLY]) writes--; if (fp->fi_fds[O_RDWR]) writes--; if (writes > 0) return -EAGAIN; /* There may be non-NFSv4 writers */ /* * It's possible there are non-NFSv4 write opens in progress, * but if they haven't incremented i_writecount yet then they * also haven't called break lease yet; so, they'll break this * lease soon enough. So, all that's left to check for is NFSv4 * opens: */ spin_lock(&fp->fi_lock); list_for_each_entry(st, &fp->fi_stateids, st_perfile) { if (st->st_openstp == NULL /* it's an open */ && access_permit_write(st) && st->st_stid.sc_client != clp) { spin_unlock(&fp->fi_lock); return -EAGAIN; } } spin_unlock(&fp->fi_lock); /* * There's a small chance that we could be racing with another * NFSv4 open. However, any open that hasn't added itself to * the fi_stateids list also hasn't called break_lease yet; so, * they'll break this lease soon enough. */ return 0; } /* * It's possible that between opening the dentry and setting the delegation, * that it has been renamed or unlinked. Redo the lookup to verify that this * hasn't happened. */ static int nfsd4_verify_deleg_dentry(struct nfsd4_open *open, struct nfs4_file *fp, struct svc_fh *parent) { struct svc_export *exp; struct dentry *child; __be32 err; err = nfsd_lookup_dentry(open->op_rqstp, parent, open->op_fname, open->op_fnamelen, &exp, &child); if (err) return -EAGAIN; exp_put(exp); dput(child); if (child != file_dentry(fp->fi_deleg_file->nf_file)) return -EAGAIN; return 0; } /* * We avoid breaking delegations held by a client due to its own activity, but * clearing setuid/setgid bits on a write is an implicit activity and the client * may not notice and continue using the old mode. Avoid giving out a delegation * on setuid/setgid files when the client is requesting an open for write. 
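 * nfsd4_verify_setuid_write() below is therefore called twice by
 * nfs4_set_delegation(): once before the lease is set up and again
 * afterwards, to catch a mode change that races with the first check.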
 */
static int nfsd4_verify_setuid_write(struct nfsd4_open *open,
				     struct nfsd_file *nf)
{
	struct inode *inode = file_inode(nf->nf_file);

	if ((open->op_share_access & NFS4_SHARE_ACCESS_WRITE) &&
	    (inode->i_mode & (S_ISUID|S_ISGID)))
		return -EAGAIN;
	return 0;
}

static struct nfs4_delegation *
nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
		    struct svc_fh *parent)
{
	int status = 0;
	struct nfs4_client *clp = stp->st_stid.sc_client;
	struct nfs4_file *fp = stp->st_stid.sc_file;
	struct nfs4_clnt_odstate *odstate = stp->st_clnt_odstate;
	struct nfs4_delegation *dp;
	struct nfsd_file *nf = NULL;
	struct file_lock *fl;
	u32 dl_type;

	/*
	 * The fi_had_conflict and nfs_get_existing_delegation checks
	 * here are just optimizations; we'll need to recheck them at
	 * the end:
	 */
	if (fp->fi_had_conflict)
		return ERR_PTR(-EAGAIN);

	/*
	 * Try for a write delegation first. RFC8881 section 10.4 says:
	 *
	 * "An OPEN_DELEGATE_WRITE delegation allows the client to handle,
	 * on its own, all opens."
	 *
	 * Furthermore the client can use a write delegation for most READ
	 * operations as well, so we require an O_RDWR file here.
	 *
	 * Offer a write delegation in the case of a BOTH open, and ensure
	 * we get the O_RDWR descriptor.
	 */
	if ((open->op_share_access & NFS4_SHARE_ACCESS_BOTH) == NFS4_SHARE_ACCESS_BOTH) {
		nf = find_rw_file(fp);
		dl_type = NFS4_OPEN_DELEGATE_WRITE;
	}

	/*
	 * If the file is being opened O_RDONLY or we couldn't get an O_RDWR
	 * file for some reason, then try for a read delegation instead.
	 */
	if (!nf && (open->op_share_access & NFS4_SHARE_ACCESS_READ)) {
		nf = find_readable_file(fp);
		dl_type = NFS4_OPEN_DELEGATE_READ;
	}

	if (!nf)
		return ERR_PTR(-EAGAIN);

	spin_lock(&state_lock);
	spin_lock(&fp->fi_lock);
	if (nfs4_delegation_exists(clp, fp))
		status = -EAGAIN;
	else if (nfsd4_verify_setuid_write(open, nf))
		status = -EAGAIN;
	else if (!fp->fi_deleg_file) {
		fp->fi_deleg_file = nf;
		/* increment early to prevent fi_deleg_file from being
		 * cleared */
		fp->fi_delegees = 1;
		nf = NULL;
	} else
		fp->fi_delegees++;
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);
	if (nf)
		nfsd_file_put(nf);
	if (status)
		return ERR_PTR(status);

	status = -ENOMEM;
	dp = alloc_init_deleg(clp, fp, odstate, dl_type);
	if (!dp)
		goto out_delegees;
	fl = nfs4_alloc_init_lease(dp, dl_type);
	if (!fl)
		goto out_clnt_odstate;

	status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL);
	if (fl)
		locks_free_lock(fl);
	if (status)
		goto out_clnt_odstate;

	if (parent) {
		status = nfsd4_verify_deleg_dentry(open, fp, parent);
		if (status)
			goto out_unlock;
	}

	status = nfsd4_check_conflicting_opens(clp, fp);
	if (status)
		goto out_unlock;

	/*
	 * Now that the deleg is set, check again to ensure that nothing
	 * raced in and changed the mode while we weren't looking.
*/ status = nfsd4_verify_setuid_write(open, fp->fi_deleg_file); if (status) goto out_unlock; spin_lock(&state_lock); spin_lock(&fp->fi_lock); if (fp->fi_had_conflict) status = -EAGAIN; else status = hash_delegation_locked(dp, fp); spin_unlock(&fp->fi_lock); spin_unlock(&state_lock); if (status) goto out_unlock; return dp; out_unlock: vfs_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp); out_clnt_odstate: put_clnt_odstate(dp->dl_clnt_odstate); nfs4_put_stid(&dp->dl_stid); out_delegees: put_deleg_file(fp); return ERR_PTR(status); } static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status) { open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; if (status == -EAGAIN) open->op_why_no_deleg = WND4_CONTENTION; else { open->op_why_no_deleg = WND4_RESOURCE; switch (open->op_deleg_want) { case NFS4_SHARE_WANT_READ_DELEG: case NFS4_SHARE_WANT_WRITE_DELEG: case NFS4_SHARE_WANT_ANY_DELEG: break; case NFS4_SHARE_WANT_CANCEL: open->op_why_no_deleg = WND4_CANCELLED; break; case NFS4_SHARE_WANT_NO_DELEG: WARN_ON_ONCE(1); } } } /* * The Linux NFS server does not offer write delegations to NFSv4.0 * clients in order to avoid conflicts between write delegations and * GETATTRs requesting CHANGE or SIZE attributes. * * With NFSv4.1 and later minorversions, the SEQUENCE operation that * begins each COMPOUND contains a client ID. Delegation recall can * be avoided when the server recognizes the client sending a * GETATTR also holds write delegation it conflicts with. * * However, the NFSv4.0 protocol does not enable a server to * determine that a GETATTR originated from the client holding the * conflicting delegation versus coming from some other client. Per * RFC 7530 Section 16.7.5, the server must recall or send a * CB_GETATTR even when the GETATTR originates from the client that * holds the conflicting delegation. * * An NFSv4.0 client can trigger a pathological situation if it * always sends a DELEGRETURN preceded by a conflicting GETATTR in * the same COMPOUND. COMPOUND execution will always stop at the * GETATTR and the DELEGRETURN will never get executed. The server * eventually revokes the delegation, which can result in loss of * open or lock state. 
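 * This is why nfs4_open_delegation() below refuses to hand out a
 * delegation when the open requests WRITE access and cl_minorversion
 * is zero.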
*/ static void nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp, struct svc_fh *currentfh) { struct nfs4_delegation *dp; struct nfs4_openowner *oo = openowner(stp->st_stateowner); struct nfs4_client *clp = stp->st_stid.sc_client; struct svc_fh *parent = NULL; int cb_up; int status = 0; cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client); open->op_recall = false; switch (open->op_claim_type) { case NFS4_OPEN_CLAIM_PREVIOUS: if (!cb_up) open->op_recall = true; break; case NFS4_OPEN_CLAIM_NULL: parent = currentfh; fallthrough; case NFS4_OPEN_CLAIM_FH: /* * Let's not give out any delegations till everyone's * had the chance to reclaim theirs, *and* until * NLM locks have all been reclaimed: */ if (locks_in_grace(clp->net)) goto out_no_deleg; if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED)) goto out_no_deleg; if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE && !clp->cl_minorversion) goto out_no_deleg; break; default: goto out_no_deleg; } dp = nfs4_set_delegation(open, stp, parent); if (IS_ERR(dp)) goto out_no_deleg; memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid)); if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) { open->op_delegate_type = NFS4_OPEN_DELEGATE_WRITE; trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid); } else { open->op_delegate_type = NFS4_OPEN_DELEGATE_READ; trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid); } nfs4_put_stid(&dp->dl_stid); return; out_no_deleg: open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE; if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS && open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) { dprintk("NFSD: WARNING: refusing delegation reclaim\n"); open->op_recall = true; } /* 4.1 client asking for a delegation? */ if (open->op_deleg_want) nfsd4_open_deleg_none_ext(open, status); return; } static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open, struct nfs4_delegation *dp) { if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG && dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) { open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE; } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG && dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) { open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE; } /* Otherwise the client must be confused wanting a delegation * it already has, therefore we don't return * NFS4_OPEN_DELEGATE_NONE_EXT and reason. */ } /** * nfsd4_process_open2 - finish open processing * @rqstp: the RPC transaction being executed * @current_fh: NFSv4 COMPOUND's current filehandle * @open: OPEN arguments * * If successful, (1) truncate the file if open->op_truncate was * set, (2) set open->op_stateid, (3) set open->op_delegation. * * Returns %nfs_ok on success; otherwise an nfs4stat value in * network byte order is returned. */ __be32 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open) { struct nfsd4_compoundres *resp = rqstp->rq_resp; struct nfs4_client *cl = open->op_openowner->oo_owner.so_client; struct nfs4_file *fp = NULL; struct nfs4_ol_stateid *stp = NULL; struct nfs4_delegation *dp = NULL; __be32 status; bool new_stp = false; /* * Lookup file; if found, lookup stateid and check open request, * and check for delegations in the process of being recalled. 
* If not found, create the nfs4_file struct */ fp = nfsd4_file_hash_insert(open->op_file, current_fh); if (unlikely(!fp)) return nfserr_jukebox; if (fp != open->op_file) { status = nfs4_check_deleg(cl, open, &dp); if (status) goto out; stp = nfsd4_find_and_lock_existing_open(fp, open); } else { open->op_file = NULL; status = nfserr_bad_stateid; if (nfsd4_is_deleg_cur(open)) goto out; } if (!stp) { stp = init_open_stateid(fp, open); if (!open->op_stp) new_stp = true; } /* * OPEN the file, or upgrade an existing OPEN. * If truncate fails, the OPEN fails. * * stp is already locked. */ if (!new_stp) { /* Stateid was found, this is an OPEN upgrade */ status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open); if (status) { mutex_unlock(&stp->st_mutex); goto out; } } else { status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open, true); if (status) { stp->st_stid.sc_type = NFS4_CLOSED_STID; release_open_stateid(stp); mutex_unlock(&stp->st_mutex); goto out; } stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp, open->op_odstate); if (stp->st_clnt_odstate == open->op_odstate) open->op_odstate = NULL; } nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid); mutex_unlock(&stp->st_mutex); if (nfsd4_has_session(&resp->cstate)) { if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) { open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; open->op_why_no_deleg = WND4_NOT_WANTED; goto nodeleg; } } /* * Attempt to hand out a delegation. No error return, because the * OPEN succeeds even if we fail. */ nfs4_open_delegation(open, stp, &resp->cstate.current_fh); nodeleg: status = nfs_ok; trace_nfsd_open(&stp->st_stid.sc_stateid); out: /* 4.1 client trying to upgrade/downgrade delegation? */ if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp && open->op_deleg_want) nfsd4_deleg_xgrade_none_ext(open, dp); if (fp) put_nfs4_file(fp); if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS) open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; /* * To finish the open response, we just need to set the rflags. 
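 * (NFS4_OPEN_RESULT_LOCKTYPE_POSIX is always reported; sessions get
 * MAY_NOTIFY_LOCK, while a not-yet-confirmed v4.0 openowner gets
 * NFS4_OPEN_RESULT_CONFIRM so the client knows to send OPEN_CONFIRM.)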
*/ open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX; if (nfsd4_has_session(&resp->cstate)) open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK; else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED)) open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM; if (dp) nfs4_put_stid(&dp->dl_stid); if (stp) nfs4_put_stid(&stp->st_stid); return status; } void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate, struct nfsd4_open *open) { if (open->op_openowner) { struct nfs4_stateowner *so = &open->op_openowner->oo_owner; nfsd4_cstate_assign_replay(cstate, so); nfs4_put_stateowner(so); } if (open->op_file) kmem_cache_free(file_slab, open->op_file); if (open->op_stp) nfs4_put_stid(&open->op_stp->st_stid); if (open->op_odstate) kmem_cache_free(odstate_slab, open->op_odstate); } __be32 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { clientid_t *clid = &u->renew; struct nfs4_client *clp; __be32 status; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); trace_nfsd_clid_renew(clid); status = set_client(clid, cstate, nn); if (status) return status; clp = cstate->clp; if (!list_empty(&clp->cl_delegations) && clp->cl_cb_state != NFSD4_CB_UP) return nfserr_cb_path_down; return nfs_ok; } void nfsd4_end_grace(struct nfsd_net *nn) { /* do nothing if grace period already ended */ if (nn->grace_ended) return; trace_nfsd_grace_complete(nn); nn->grace_ended = true; /* * If the server goes down again right now, an NFSv4 * client will still be allowed to reclaim after it comes back up, * even if it hasn't yet had a chance to reclaim state this time. * */ nfsd4_record_grace_done(nn); /* * At this point, NFSv4 clients can still reclaim. But if the * server crashes, any that have not yet reclaimed will be out * of luck on the next boot. * * (NFSv4.1+ clients are considered to have reclaimed once they * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to * have reclaimed after their first OPEN.) */ locks_end_grace(&nn->nfsd4_manager); /* * At this point, and once lockd and/or any other containers * exit their grace period, further reclaims will fail and * regular locking can resume. */ } /* * If we've waited a lease period but there are still clients trying to * reclaim, wait a little longer to give them a chance to finish. */ static bool clients_still_reclaiming(struct nfsd_net *nn) { time64_t double_grace_period_end = nn->boot_time + 2 * nn->nfsd4_lease; if (nn->track_reclaim_completes && atomic_read(&nn->nr_reclaim_complete) == nn->reclaim_str_hashtbl_size) return false; if (!nn->somebody_reclaimed) return false; nn->somebody_reclaimed = false; /* * If we've given them *two* lease times to reclaim, and they're * still not done, give up: */ if (ktime_get_boottime_seconds() > double_grace_period_end) return false; return true; } struct laundry_time { time64_t cutoff; time64_t new_timeo; }; static bool state_expired(struct laundry_time *lt, time64_t last_refresh) { time64_t time_remaining; if (last_refresh < lt->cutoff) return true; time_remaining = last_refresh - lt->cutoff; lt->new_timeo = min(lt->new_timeo, time_remaining); return false; } #ifdef CONFIG_NFSD_V4_2_INTER_SSC void nfsd4_ssc_init_umount_work(struct nfsd_net *nn) { spin_lock_init(&nn->nfsd_ssc_lock); INIT_LIST_HEAD(&nn->nfsd_ssc_mount_list); init_waitqueue_head(&nn->nfsd_ssc_waitq); } EXPORT_SYMBOL_GPL(nfsd4_ssc_init_umount_work); /* * This is called when nfsd is being shutdown, after all inter_ssc * cleanup were done, to destroy the ssc delayed unmount list. 
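 *
 * Note that nfsd_ssc_lock is dropped around each mntput() below:
 * dropping the last reference to a vfsmount can block, so it must not
 * be done under a spinlock.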
*/ static void nfsd4_ssc_shutdown_umount(struct nfsd_net *nn) { struct nfsd4_ssc_umount_item *ni = NULL; struct nfsd4_ssc_umount_item *tmp; spin_lock(&nn->nfsd_ssc_lock); list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) { list_del(&ni->nsui_list); spin_unlock(&nn->nfsd_ssc_lock); mntput(ni->nsui_vfsmount); kfree(ni); spin_lock(&nn->nfsd_ssc_lock); } spin_unlock(&nn->nfsd_ssc_lock); } static void nfsd4_ssc_expire_umount(struct nfsd_net *nn) { bool do_wakeup = false; struct nfsd4_ssc_umount_item *ni = NULL; struct nfsd4_ssc_umount_item *tmp; spin_lock(&nn->nfsd_ssc_lock); list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) { if (time_after(jiffies, ni->nsui_expire)) { if (refcount_read(&ni->nsui_refcnt) > 1) continue; /* mark being unmount */ ni->nsui_busy = true; spin_unlock(&nn->nfsd_ssc_lock); mntput(ni->nsui_vfsmount); spin_lock(&nn->nfsd_ssc_lock); /* waiters need to start from begin of list */ list_del(&ni->nsui_list); kfree(ni); /* wakeup ssc_connect waiters */ do_wakeup = true; continue; } break; } if (do_wakeup) wake_up_all(&nn->nfsd_ssc_waitq); spin_unlock(&nn->nfsd_ssc_lock); } #endif /* Check if any lock belonging to this lockowner has any blockers */ static bool nfs4_lockowner_has_blockers(struct nfs4_lockowner *lo) { struct file_lock_context *ctx; struct nfs4_ol_stateid *stp; struct nfs4_file *nf; list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) { nf = stp->st_stid.sc_file; ctx = locks_inode_context(nf->fi_inode); if (!ctx) continue; if (locks_owner_has_blockers(ctx, lo)) return true; } return false; } static bool nfs4_anylock_blockers(struct nfs4_client *clp) { int i; struct nfs4_stateowner *so; struct nfs4_lockowner *lo; if (atomic_read(&clp->cl_delegs_in_recall)) return true; spin_lock(&clp->cl_lock); for (i = 0; i < OWNER_HASH_SIZE; i++) { list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[i], so_strhash) { if (so->so_is_open_owner) continue; lo = lockowner(so); if (nfs4_lockowner_has_blockers(lo)) { spin_unlock(&clp->cl_lock); return true; } } } spin_unlock(&clp->cl_lock); return false; } static void nfs4_get_client_reaplist(struct nfsd_net *nn, struct list_head *reaplist, struct laundry_time *lt) { unsigned int maxreap, reapcnt = 0; struct list_head *pos, *next; struct nfs4_client *clp; maxreap = (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) ? 
NFSD_CLIENT_MAX_TRIM_PER_RUN : 0; INIT_LIST_HEAD(reaplist); spin_lock(&nn->client_lock); list_for_each_safe(pos, next, &nn->client_lru) { clp = list_entry(pos, struct nfs4_client, cl_lru); if (clp->cl_state == NFSD4_EXPIRABLE) goto exp_client; if (!state_expired(lt, clp->cl_time)) break; if (!atomic_read(&clp->cl_rpc_users)) { if (clp->cl_state == NFSD4_ACTIVE) atomic_inc(&nn->nfsd_courtesy_clients); clp->cl_state = NFSD4_COURTESY; } if (!client_has_state(clp)) goto exp_client; if (!nfs4_anylock_blockers(clp)) if (reapcnt >= maxreap) continue; exp_client: if (!mark_client_expired_locked(clp)) { list_add(&clp->cl_lru, reaplist); reapcnt++; } } spin_unlock(&nn->client_lock); } static void nfs4_get_courtesy_client_reaplist(struct nfsd_net *nn, struct list_head *reaplist) { unsigned int maxreap = 0, reapcnt = 0; struct list_head *pos, *next; struct nfs4_client *clp; maxreap = NFSD_CLIENT_MAX_TRIM_PER_RUN; INIT_LIST_HEAD(reaplist); spin_lock(&nn->client_lock); list_for_each_safe(pos, next, &nn->client_lru) { clp = list_entry(pos, struct nfs4_client, cl_lru); if (clp->cl_state == NFSD4_ACTIVE) break; if (reapcnt >= maxreap) break; if (!mark_client_expired_locked(clp)) { list_add(&clp->cl_lru, reaplist); reapcnt++; } } spin_unlock(&nn->client_lock); } static void nfs4_process_client_reaplist(struct list_head *reaplist) { struct list_head *pos, *next; struct nfs4_client *clp; list_for_each_safe(pos, next, reaplist) { clp = list_entry(pos, struct nfs4_client, cl_lru); trace_nfsd_clid_purged(&clp->cl_clientid); list_del_init(&clp->cl_lru); expire_client(clp); } } static time64_t nfs4_laundromat(struct nfsd_net *nn) { struct nfs4_openowner *oo; struct nfs4_delegation *dp; struct nfs4_ol_stateid *stp; struct nfsd4_blocked_lock *nbl; struct list_head *pos, *next, reaplist; struct laundry_time lt = { .cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease, .new_timeo = nn->nfsd4_lease }; struct nfs4_cpntf_state *cps; copy_stateid_t *cps_t; int i; if (clients_still_reclaiming(nn)) { lt.new_timeo = 0; goto out; } nfsd4_end_grace(nn); spin_lock(&nn->s2s_cp_lock); idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) { cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid); if (cps->cp_stateid.cs_type == NFS4_COPYNOTIFY_STID && state_expired(<, cps->cpntf_time)) _free_cpntf_state_locked(nn, cps); } spin_unlock(&nn->s2s_cp_lock); nfs4_get_client_reaplist(nn, &reaplist, <); nfs4_process_client_reaplist(&reaplist); spin_lock(&state_lock); list_for_each_safe(pos, next, &nn->del_recall_lru) { dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); if (!state_expired(<, dp->dl_time)) break; WARN_ON(!unhash_delegation_locked(dp)); list_add(&dp->dl_recall_lru, &reaplist); } spin_unlock(&state_lock); while (!list_empty(&reaplist)) { dp = list_first_entry(&reaplist, struct nfs4_delegation, dl_recall_lru); list_del_init(&dp->dl_recall_lru); revoke_delegation(dp); } spin_lock(&nn->client_lock); while (!list_empty(&nn->close_lru)) { oo = list_first_entry(&nn->close_lru, struct nfs4_openowner, oo_close_lru); if (!state_expired(<, oo->oo_time)) break; list_del_init(&oo->oo_close_lru); stp = oo->oo_last_closed_stid; oo->oo_last_closed_stid = NULL; spin_unlock(&nn->client_lock); nfs4_put_stid(&stp->st_stid); spin_lock(&nn->client_lock); } spin_unlock(&nn->client_lock); /* * It's possible for a client to try and acquire an already held lock * that is being held for a long time, and then lose interest in it. 
* So, we clean out any un-revisited request after a lease period * under the assumption that the client is no longer interested. * * RFC5661, sec. 9.6 states that the client must not rely on getting * notifications and must continue to poll for locks, even when the * server supports them. Thus this shouldn't lead to clients blocking * indefinitely once the lock does become free. */ BUG_ON(!list_empty(&reaplist)); spin_lock(&nn->blocked_locks_lock); while (!list_empty(&nn->blocked_locks_lru)) { nbl = list_first_entry(&nn->blocked_locks_lru, struct nfsd4_blocked_lock, nbl_lru); if (!state_expired(<, nbl->nbl_time)) break; list_move(&nbl->nbl_lru, &reaplist); list_del_init(&nbl->nbl_list); } spin_unlock(&nn->blocked_locks_lock); while (!list_empty(&reaplist)) { nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock, nbl_lru); list_del_init(&nbl->nbl_lru); free_blocked_lock(nbl); } #ifdef CONFIG_NFSD_V4_2_INTER_SSC /* service the server-to-server copy delayed unmount list */ nfsd4_ssc_expire_umount(nn); #endif out: return max_t(time64_t, lt.new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT); } static void laundromat_main(struct work_struct *); static void laundromat_main(struct work_struct *laundry) { time64_t t; struct delayed_work *dwork = to_delayed_work(laundry); struct nfsd_net *nn = container_of(dwork, struct nfsd_net, laundromat_work); t = nfs4_laundromat(nn); queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ); } static void courtesy_client_reaper(struct nfsd_net *nn) { struct list_head reaplist; nfs4_get_courtesy_client_reaplist(nn, &reaplist); nfs4_process_client_reaplist(&reaplist); } static void deleg_reaper(struct nfsd_net *nn) { struct list_head *pos, *next; struct nfs4_client *clp; struct list_head cblist; INIT_LIST_HEAD(&cblist); spin_lock(&nn->client_lock); list_for_each_safe(pos, next, &nn->client_lru) { clp = list_entry(pos, struct nfs4_client, cl_lru); if (clp->cl_state != NFSD4_ACTIVE || list_empty(&clp->cl_delegations) || atomic_read(&clp->cl_delegs_in_recall) || test_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags) || (ktime_get_boottime_seconds() - clp->cl_ra_time < 5)) { continue; } list_add(&clp->cl_ra_cblist, &cblist); /* release in nfsd4_cb_recall_any_release */ atomic_inc(&clp->cl_rpc_users); set_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags); clp->cl_ra_time = ktime_get_boottime_seconds(); } spin_unlock(&nn->client_lock); while (!list_empty(&cblist)) { clp = list_first_entry(&cblist, struct nfs4_client, cl_ra_cblist); list_del_init(&clp->cl_ra_cblist); clp->cl_ra->ra_keep = 0; clp->cl_ra->ra_bmval[0] = BIT(RCA4_TYPE_MASK_RDATA_DLG); trace_nfsd_cb_recall_any(clp->cl_ra); nfsd4_run_cb(&clp->cl_ra->ra_cb); } } static void nfsd4_state_shrinker_worker(struct work_struct *work) { struct nfsd_net *nn = container_of(work, struct nfsd_net, nfsd_shrinker_work); courtesy_client_reaper(nn); deleg_reaper(nn); } static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp) { if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle)) return nfserr_bad_stateid; return nfs_ok; } static __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags) { __be32 status = nfserr_openmode; /* For lock stateid's, we test the parent open, not the lock: */ if (stp->st_openstp) stp = stp->st_openstp; if ((flags & WR_STATE) && !access_permit_write(stp)) goto out; if ((flags & RD_STATE) && !access_permit_read(stp)) goto out; status = nfs_ok; out: return status; } static inline __be32 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags) { 
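	/*
	 * Handle the two reserved stateids: the all-ones stateid is
	 * accepted for READ without further checks; otherwise we either
	 * wait out the grace period or test for a conflicting share
	 * DENY reservation on the file.
	 */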
if (ONE_STATEID(stateid) && (flags & RD_STATE)) return nfs_ok; else if (opens_in_grace(net)) { /* Answer in remaining cases depends on existence of * conflicting state; so we must wait out the grace period. */ return nfserr_grace; } else if (flags & WR_STATE) return nfs4_share_conflict(current_fh, NFS4_SHARE_DENY_WRITE); else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */ return nfs4_share_conflict(current_fh, NFS4_SHARE_DENY_READ); } static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session) { /* * When sessions are used the stateid generation number is ignored * when it is zero. */ if (has_session && in->si_generation == 0) return nfs_ok; if (in->si_generation == ref->si_generation) return nfs_ok; /* If the client sends us a stateid from the future, it's buggy: */ if (nfsd4_stateid_generation_after(in, ref)) return nfserr_bad_stateid; /* * However, we could see a stateid from the past, even from a * non-buggy client. For example, if the client sends a lock * while some IO is outstanding, the lock may bump si_generation * while the IO is still in flight. The client could avoid that * situation by waiting for responses on all the IO requests, * but better performance may result in retrying IO that * receives an old_stateid error if requests are rarely * reordered in flight: */ return nfserr_old_stateid; } static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session) { __be32 ret; spin_lock(&s->sc_lock); ret = nfsd4_verify_open_stid(s); if (ret == nfs_ok) ret = check_stateid_generation(in, &s->sc_stateid, has_session); spin_unlock(&s->sc_lock); return ret; } static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols) { if (ols->st_stateowner->so_is_open_owner && !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED)) return nfserr_bad_stateid; return nfs_ok; } static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) { struct nfs4_stid *s; __be32 status = nfserr_bad_stateid; if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) || CLOSE_STATEID(stateid)) return status; spin_lock(&cl->cl_lock); s = find_stateid_locked(cl, stateid); if (!s) goto out_unlock; status = nfsd4_stid_check_stateid_generation(stateid, s, 1); if (status) goto out_unlock; switch (s->sc_type) { case NFS4_DELEG_STID: status = nfs_ok; break; case NFS4_REVOKED_DELEG_STID: status = nfserr_deleg_revoked; break; case NFS4_OPEN_STID: case NFS4_LOCK_STID: status = nfsd4_check_openowner_confirmed(openlockstateid(s)); break; default: printk("unknown stateid type %x\n", s->sc_type); fallthrough; case NFS4_CLOSED_STID: case NFS4_CLOSED_DELEG_STID: status = nfserr_bad_stateid; } out_unlock: spin_unlock(&cl->cl_lock); return status; } __be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid, unsigned char typemask, struct nfs4_stid **s, struct nfsd_net *nn) { __be32 status; struct nfs4_stid *stid; bool return_revoked = false; /* * only return revoked delegations if explicitly asked. * otherwise we report revoked or bad_stateid status. 
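 *
 * For example, a caller asking only for NFS4_DELEG_STID has
 * NFS4_REVOKED_DELEG_STID added to its mask below, so that a revoked
 * delegation is still found and a 4.1+ client gets the more precise
 * nfserr_deleg_revoked rather than nfserr_bad_stateid.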
*/ if (typemask & NFS4_REVOKED_DELEG_STID) return_revoked = true; else if (typemask & NFS4_DELEG_STID) typemask |= NFS4_REVOKED_DELEG_STID; if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) || CLOSE_STATEID(stateid)) return nfserr_bad_stateid; status = set_client(&stateid->si_opaque.so_clid, cstate, nn); if (status == nfserr_stale_clientid) { if (cstate->session) return nfserr_bad_stateid; return nfserr_stale_stateid; } if (status) return status; stid = find_stateid_by_type(cstate->clp, stateid, typemask); if (!stid) return nfserr_bad_stateid; if ((stid->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) { nfs4_put_stid(stid); if (cstate->minorversion) return nfserr_deleg_revoked; return nfserr_bad_stateid; } *s = stid; return nfs_ok; } static struct nfsd_file * nfs4_find_file(struct nfs4_stid *s, int flags) { struct nfsd_file *ret = NULL; if (!s) return NULL; switch (s->sc_type) { case NFS4_DELEG_STID: spin_lock(&s->sc_file->fi_lock); ret = nfsd_file_get(s->sc_file->fi_deleg_file); spin_unlock(&s->sc_file->fi_lock); break; case NFS4_OPEN_STID: case NFS4_LOCK_STID: if (flags & RD_STATE) ret = find_readable_file(s->sc_file); else ret = find_writeable_file(s->sc_file); } return ret; } static __be32 nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags) { __be32 status; status = nfsd4_check_openowner_confirmed(ols); if (status) return status; return nfs4_check_openmode(ols, flags); } static __be32 nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s, struct nfsd_file **nfp, int flags) { int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE; struct nfsd_file *nf; __be32 status; nf = nfs4_find_file(s, flags); if (nf) { status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry, acc | NFSD_MAY_OWNER_OVERRIDE); if (status) { nfsd_file_put(nf); goto out; } } else { status = nfsd_file_acquire(rqstp, fhp, acc, &nf); if (status) return status; } *nfp = nf; out: return status; } static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps) { WARN_ON_ONCE(cps->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID); if (!refcount_dec_and_test(&cps->cp_stateid.cs_count)) return; list_del(&cps->cp_list); idr_remove(&nn->s2s_cp_stateids, cps->cp_stateid.cs_stid.si_opaque.so_id); kfree(cps); } /* * A READ from an inter server to server COPY will have a * copy stateid. Look up the copy notify stateid from the * idr structure and take a reference on it. 
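 *
 * When @clp is NULL the state is returned in @cps with its reference
 * count bumped; when @clp is non-NULL the state is released instead
 * of being returned.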
*/ __be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st, struct nfs4_client *clp, struct nfs4_cpntf_state **cps) { copy_stateid_t *cps_t; struct nfs4_cpntf_state *state = NULL; if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id) return nfserr_bad_stateid; spin_lock(&nn->s2s_cp_lock); cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id); if (cps_t) { state = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid); if (state->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID) { state = NULL; goto unlock; } if (!clp) refcount_inc(&state->cp_stateid.cs_count); else _free_cpntf_state_locked(nn, state); } unlock: spin_unlock(&nn->s2s_cp_lock); if (!state) return nfserr_bad_stateid; if (!clp && state) *cps = state; return 0; } static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st, struct nfs4_stid **stid) { __be32 status; struct nfs4_cpntf_state *cps = NULL; struct nfs4_client *found; status = manage_cpntf_state(nn, st, NULL, &cps); if (status) return status; cps->cpntf_time = ktime_get_boottime_seconds(); status = nfserr_expired; found = lookup_clientid(&cps->cp_p_clid, true, nn); if (!found) goto out; *stid = find_stateid_by_type(found, &cps->cp_p_stateid, NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID); if (*stid) status = nfs_ok; else status = nfserr_bad_stateid; put_client_renew(found); out: nfs4_put_cpntf_state(nn, cps); return status; } void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps) { spin_lock(&nn->s2s_cp_lock); _free_cpntf_state_locked(nn, cps); spin_unlock(&nn->s2s_cp_lock); } /** * nfs4_preprocess_stateid_op - find and prep stateid for an operation * @rqstp: incoming request from client * @cstate: current compound state * @fhp: filehandle associated with requested stateid * @stateid: stateid (provided by client) * @flags: flags describing type of operation to be done * @nfp: optional nfsd_file return pointer (may be NULL) * @cstid: optional returned nfs4_stid pointer (may be NULL) * * Given info from the client, look up a nfs4_stid for the operation. On * success, it returns a reference to the nfs4_stid and/or the nfsd_file * associated with it. 
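 *
 * A stid returned via @cstid and a file returned via @nfp each carry
 * a reference that the caller is expected to drop (nfs4_put_stid()
 * and nfsd_file_put() respectively) once it is done with them.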
*/ __be32 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct svc_fh *fhp, stateid_t *stateid, int flags, struct nfsd_file **nfp, struct nfs4_stid **cstid) { struct net *net = SVC_NET(rqstp); struct nfsd_net *nn = net_generic(net, nfsd_net_id); struct nfs4_stid *s = NULL; __be32 status; if (nfp) *nfp = NULL; if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) { if (cstid) status = nfserr_bad_stateid; else status = check_special_stateids(net, fhp, stateid, flags); goto done; } status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID, &s, nn); if (status == nfserr_bad_stateid) status = find_cpntf_state(nn, stateid, &s); if (status) return status; status = nfsd4_stid_check_stateid_generation(stateid, s, nfsd4_has_session(cstate)); if (status) goto out; switch (s->sc_type) { case NFS4_DELEG_STID: status = nfs4_check_delegmode(delegstateid(s), flags); break; case NFS4_OPEN_STID: case NFS4_LOCK_STID: status = nfs4_check_olstateid(openlockstateid(s), flags); break; default: status = nfserr_bad_stateid; break; } if (status) goto out; status = nfs4_check_fh(fhp, s); done: if (status == nfs_ok && nfp) status = nfs4_check_file(rqstp, fhp, s, nfp, flags); out: if (s) { if (!status && cstid) *cstid = s; else nfs4_put_stid(s); } return status; } /* * Test if the stateid is valid */ __be32 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { struct nfsd4_test_stateid *test_stateid = &u->test_stateid; struct nfsd4_test_stateid_id *stateid; struct nfs4_client *cl = cstate->clp; list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list) stateid->ts_id_status = nfsd4_validate_stateid(cl, &stateid->ts_id_stateid); return nfs_ok; } static __be32 nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s) { struct nfs4_ol_stateid *stp = openlockstateid(s); __be32 ret; ret = nfsd4_lock_ol_stateid(stp); if (ret) goto out_put_stid; ret = check_stateid_generation(stateid, &s->sc_stateid, 1); if (ret) goto out; ret = nfserr_locks_held; if (check_for_locks(stp->st_stid.sc_file, lockowner(stp->st_stateowner))) goto out; release_lock_stateid(stp); ret = nfs_ok; out: mutex_unlock(&stp->st_mutex); out_put_stid: nfs4_put_stid(s); return ret; } __be32 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { struct nfsd4_free_stateid *free_stateid = &u->free_stateid; stateid_t *stateid = &free_stateid->fr_stateid; struct nfs4_stid *s; struct nfs4_delegation *dp; struct nfs4_client *cl = cstate->clp; __be32 ret = nfserr_bad_stateid; spin_lock(&cl->cl_lock); s = find_stateid_locked(cl, stateid); if (!s) goto out_unlock; spin_lock(&s->sc_lock); switch (s->sc_type) { case NFS4_DELEG_STID: ret = nfserr_locks_held; break; case NFS4_OPEN_STID: ret = check_stateid_generation(stateid, &s->sc_stateid, 1); if (ret) break; ret = nfserr_locks_held; break; case NFS4_LOCK_STID: spin_unlock(&s->sc_lock); refcount_inc(&s->sc_count); spin_unlock(&cl->cl_lock); ret = nfsd4_free_lock_stateid(stateid, s); goto out; case NFS4_REVOKED_DELEG_STID: spin_unlock(&s->sc_lock); dp = delegstateid(s); list_del_init(&dp->dl_recall_lru); spin_unlock(&cl->cl_lock); nfs4_put_stid(s); ret = nfs_ok; goto out; /* Default falls through and returns nfserr_bad_stateid */ } spin_unlock(&s->sc_lock); out_unlock: spin_unlock(&cl->cl_lock); out: return ret; } static inline int setlkflg (int type) { return (type == NFS4_READW_LT || type == NFS4_READ_LT) ? 
RD_STATE : WR_STATE; } static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp) { struct svc_fh *current_fh = &cstate->current_fh; struct nfs4_stateowner *sop = stp->st_stateowner; __be32 status; status = nfsd4_check_seqid(cstate, sop, seqid); if (status) return status; status = nfsd4_lock_ol_stateid(stp); if (status != nfs_ok) return status; status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); if (status == nfs_ok) status = nfs4_check_fh(current_fh, &stp->st_stid); if (status != nfs_ok) mutex_unlock(&stp->st_mutex); return status; } /** * nfs4_preprocess_seqid_op - find and prep an ol_stateid for a seqid-morphing op * @cstate: compund state * @seqid: seqid (provided by client) * @stateid: stateid (provided by client) * @typemask: mask of allowable types for this operation * @stpp: return pointer for the stateid found * @nn: net namespace for request * * Given a stateid+seqid from a client, look up an nfs4_ol_stateid and * return it in @stpp. On a nfs_ok return, the returned stateid will * have its st_mutex locked. */ static __be32 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, stateid_t *stateid, char typemask, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn) { __be32 status; struct nfs4_stid *s; struct nfs4_ol_stateid *stp = NULL; trace_nfsd_preprocess(seqid, stateid); *stpp = NULL; status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn); if (status) return status; stp = openlockstateid(s); nfsd4_cstate_assign_replay(cstate, stp->st_stateowner); status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp); if (!status) *stpp = stp; else nfs4_put_stid(&stp->st_stid); return status; } static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn) { __be32 status; struct nfs4_openowner *oo; struct nfs4_ol_stateid *stp; status = nfs4_preprocess_seqid_op(cstate, seqid, stateid, NFS4_OPEN_STID, &stp, nn); if (status) return status; oo = openowner(stp->st_stateowner); if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { mutex_unlock(&stp->st_mutex); nfs4_put_stid(&stp->st_stid); return nfserr_bad_stateid; } *stpp = stp; return nfs_ok; } __be32 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { struct nfsd4_open_confirm *oc = &u->open_confirm; __be32 status; struct nfs4_openowner *oo; struct nfs4_ol_stateid *stp; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); dprintk("NFSD: nfsd4_open_confirm on file %pd\n", cstate->current_fh.fh_dentry); status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0); if (status) return status; status = nfs4_preprocess_seqid_op(cstate, oc->oc_seqid, &oc->oc_req_stateid, NFS4_OPEN_STID, &stp, nn); if (status) goto out; oo = openowner(stp->st_stateowner); status = nfserr_bad_stateid; if (oo->oo_flags & NFS4_OO_CONFIRMED) { mutex_unlock(&stp->st_mutex); goto put_stateid; } oo->oo_flags |= NFS4_OO_CONFIRMED; nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid); mutex_unlock(&stp->st_mutex); trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid); nfsd4_client_record_create(oo->oo_owner.so_client); status = nfs_ok; put_stateid: nfs4_put_stid(&stp->st_stid); out: nfsd4_bump_seqid(cstate, status); return status; } static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access) { if (!test_access(access, stp)) return; 
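	/* This stateid held the given access mode: drop the nfs4_file's
	 * access reference and clear the bit in st_access_bmap. */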
nfs4_file_put_access(stp->st_stid.sc_file, access); clear_access(access, stp); } static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access) { switch (to_access) { case NFS4_SHARE_ACCESS_READ: nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE); nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH); break; case NFS4_SHARE_ACCESS_WRITE: nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ); nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH); break; case NFS4_SHARE_ACCESS_BOTH: break; default: WARN_ON_ONCE(1); } } __be32 nfsd4_open_downgrade(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { struct nfsd4_open_downgrade *od = &u->open_downgrade; __be32 status; struct nfs4_ol_stateid *stp; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); dprintk("NFSD: nfsd4_open_downgrade on file %pd\n", cstate->current_fh.fh_dentry); /* We don't yet support WANT bits: */ if (od->od_deleg_want) dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__, od->od_deleg_want); status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid, &od->od_stateid, &stp, nn); if (status) goto out; status = nfserr_inval; if (!test_access(od->od_share_access, stp)) { dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n", stp->st_access_bmap, od->od_share_access); goto put_stateid; } if (!test_deny(od->od_share_deny, stp)) { dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n", stp->st_deny_bmap, od->od_share_deny); goto put_stateid; } nfs4_stateid_downgrade(stp, od->od_share_access); reset_union_bmap_deny(od->od_share_deny, stp); nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid); status = nfs_ok; put_stateid: mutex_unlock(&stp->st_mutex); nfs4_put_stid(&stp->st_stid); out: nfsd4_bump_seqid(cstate, status); return status; } static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s) { struct nfs4_client *clp = s->st_stid.sc_client; bool unhashed; LIST_HEAD(reaplist); struct nfs4_ol_stateid *stp; spin_lock(&clp->cl_lock); unhashed = unhash_open_stateid(s, &reaplist); if (clp->cl_minorversion) { if (unhashed) put_ol_stateid_locked(s, &reaplist); spin_unlock(&clp->cl_lock); list_for_each_entry(stp, &reaplist, st_locks) nfs4_free_cpntf_statelist(clp->net, &stp->st_stid); free_ol_stateid_reaplist(&reaplist); } else { spin_unlock(&clp->cl_lock); free_ol_stateid_reaplist(&reaplist); if (unhashed) move_to_close_lru(s, clp->net); } } /* * nfs4_unlock_state() called after encode */ __be32 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { struct nfsd4_close *close = &u->close; __be32 status; struct nfs4_ol_stateid *stp; struct net *net = SVC_NET(rqstp); struct nfsd_net *nn = net_generic(net, nfsd_net_id); dprintk("NFSD: nfsd4_close on file %pd\n", cstate->current_fh.fh_dentry); status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid, &close->cl_stateid, NFS4_OPEN_STID|NFS4_CLOSED_STID, &stp, nn); nfsd4_bump_seqid(cstate, status); if (status) goto out; stp->st_stid.sc_type = NFS4_CLOSED_STID; /* * Technically we don't _really_ have to increment or copy it, since * it should just be gone after this operation and we clobber the * copied value below, but we continue to do so here just to ensure * that racing ops see that there was a state change. 
*/ nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid); nfsd4_close_open_stateid(stp); mutex_unlock(&stp->st_mutex); /* v4.1+ suggests that we send a special stateid in here, since the * clients should just ignore this anyway. Since this is not useful * for v4.0 clients either, we set it to the special close_stateid * universally. * * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5 */ memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid)); /* put reference from nfs4_preprocess_seqid_op */ nfs4_put_stid(&stp->st_stid); out: return status; } __be32 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { struct nfsd4_delegreturn *dr = &u->delegreturn; struct nfs4_delegation *dp; stateid_t *stateid = &dr->dr_stateid; struct nfs4_stid *s; __be32 status; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) return status; status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn); if (status) goto out; dp = delegstateid(s); status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate)); if (status) goto put_stateid; trace_nfsd_deleg_return(stateid); wake_up_var(d_inode(cstate->current_fh.fh_dentry)); destroy_delegation(dp); put_stateid: nfs4_put_stid(&dp->dl_stid); out: return status; } /* last octet in a range */ static inline u64 last_byte_offset(u64 start, u64 len) { u64 end; WARN_ON_ONCE(!len); end = start + len; return end > start ? end - 1: NFS4_MAX_UINT64; } /* * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that * we can't properly handle lock requests that go beyond the (2^63 - 1)-th * byte, because of sign extension problems. Since NFSv4 calls for 64-bit * locking, this prevents us from being completely protocol-compliant. The * real solution to this problem is to start using unsigned file offsets in * the VFS, but this is a very deep change! 
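 *
 * For example, a LOCK with offset 0 and length NFS4_MAX_UINT64 yields
 * an fl_end that is negative when viewed as a signed loff_t;
 * nfs4_transform_lock_offset() below clamps such values to OFFSET_MAX.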
*/ static inline void nfs4_transform_lock_offset(struct file_lock *lock) { if (lock->fl_start < 0) lock->fl_start = OFFSET_MAX; if (lock->fl_end < 0) lock->fl_end = OFFSET_MAX; } static fl_owner_t nfsd4_lm_get_owner(fl_owner_t owner) { struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner; nfs4_get_stateowner(&lo->lo_owner); return owner; } static void nfsd4_lm_put_owner(fl_owner_t owner) { struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner; if (lo) nfs4_put_stateowner(&lo->lo_owner); } /* return pointer to struct nfs4_client if client is expirable */ static bool nfsd4_lm_lock_expirable(struct file_lock *cfl) { struct nfs4_lockowner *lo = (struct nfs4_lockowner *)cfl->fl_owner; struct nfs4_client *clp = lo->lo_owner.so_client; struct nfsd_net *nn; if (try_to_expire_client(clp)) { nn = net_generic(clp->net, nfsd_net_id); mod_delayed_work(laundry_wq, &nn->laundromat_work, 0); return true; } return false; } /* schedule laundromat to run immediately and wait for it to complete */ static void nfsd4_lm_expire_lock(void) { flush_workqueue(laundry_wq); } static void nfsd4_lm_notify(struct file_lock *fl) { struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner; struct net *net = lo->lo_owner.so_client->net; struct nfsd_net *nn = net_generic(net, nfsd_net_id); struct nfsd4_blocked_lock *nbl = container_of(fl, struct nfsd4_blocked_lock, nbl_lock); bool queue = false; /* An empty list means that something else is going to be using it */ spin_lock(&nn->blocked_locks_lock); if (!list_empty(&nbl->nbl_list)) { list_del_init(&nbl->nbl_list); list_del_init(&nbl->nbl_lru); queue = true; } spin_unlock(&nn->blocked_locks_lock); if (queue) { trace_nfsd_cb_notify_lock(lo, nbl); nfsd4_run_cb(&nbl->nbl_cb); } } static const struct lock_manager_operations nfsd_posix_mng_ops = { .lm_mod_owner = THIS_MODULE, .lm_notify = nfsd4_lm_notify, .lm_get_owner = nfsd4_lm_get_owner, .lm_put_owner = nfsd4_lm_put_owner, .lm_lock_expirable = nfsd4_lm_lock_expirable, .lm_expire_lock = nfsd4_lm_expire_lock, }; static inline void nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny) { struct nfs4_lockowner *lo; if (fl->fl_lmops == &nfsd_posix_mng_ops) { lo = (struct nfs4_lockowner *) fl->fl_owner; xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner, GFP_KERNEL); if (!deny->ld_owner.data) /* We just don't care that much */ goto nevermind; deny->ld_clientid = lo->lo_owner.so_client->cl_clientid; } else { nevermind: deny->ld_owner.len = 0; deny->ld_owner.data = NULL; deny->ld_clientid.cl_boot = 0; deny->ld_clientid.cl_id = 0; } deny->ld_start = fl->fl_start; deny->ld_length = NFS4_MAX_UINT64; if (fl->fl_end != NFS4_MAX_UINT64) deny->ld_length = fl->fl_end - fl->fl_start + 1; deny->ld_type = NFS4_READ_LT; if (fl->fl_type != F_RDLCK) deny->ld_type = NFS4_WRITE_LT; } static struct nfs4_lockowner * find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner) { unsigned int strhashval = ownerstr_hashval(owner); struct nfs4_stateowner *so; lockdep_assert_held(&clp->cl_lock); list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval], so_strhash) { if (so->so_is_open_owner) continue; if (same_owner_str(so, owner)) return lockowner(nfs4_get_stateowner(so)); } return NULL; } static struct nfs4_lockowner * find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner) { struct nfs4_lockowner *lo; spin_lock(&clp->cl_lock); lo = find_lockowner_str_locked(clp, owner); spin_unlock(&clp->cl_lock); return lo; } static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop) { 
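	/* ->so_unhash callback; the caller is expected to hold
	 * clp->cl_lock, as the _locked helper below relies on it. */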
unhash_lockowner_locked(lockowner(sop)); } static void nfs4_free_lockowner(struct nfs4_stateowner *sop) { struct nfs4_lockowner *lo = lockowner(sop); kmem_cache_free(lockowner_slab, lo); } static const struct nfs4_stateowner_operations lockowner_ops = { .so_unhash = nfs4_unhash_lockowner, .so_free = nfs4_free_lockowner, }; /* * Alloc a lock owner structure. * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has * occurred. * * strhashval = ownerstr_hashval */ static struct nfs4_lockowner * alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp, struct nfsd4_lock *lock) { struct nfs4_lockowner *lo, *ret; lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp); if (!lo) return NULL; INIT_LIST_HEAD(&lo->lo_blocked); INIT_LIST_HEAD(&lo->lo_owner.so_stateids); lo->lo_owner.so_is_open_owner = 0; lo->lo_owner.so_seqid = lock->lk_new_lock_seqid; lo->lo_owner.so_ops = &lockowner_ops; spin_lock(&clp->cl_lock); ret = find_lockowner_str_locked(clp, &lock->lk_new_owner); if (ret == NULL) { list_add(&lo->lo_owner.so_strhash, &clp->cl_ownerstr_hashtbl[strhashval]); ret = lo; } else nfs4_free_stateowner(&lo->lo_owner); spin_unlock(&clp->cl_lock); return ret; } static struct nfs4_ol_stateid * find_lock_stateid(const struct nfs4_lockowner *lo, const struct nfs4_ol_stateid *ost) { struct nfs4_ol_stateid *lst; lockdep_assert_held(&ost->st_stid.sc_client->cl_lock); /* If ost is not hashed, ost->st_locks will not be valid */ if (!nfs4_ol_stateid_unhashed(ost)) list_for_each_entry(lst, &ost->st_locks, st_locks) { if (lst->st_stateowner == &lo->lo_owner) { refcount_inc(&lst->st_stid.sc_count); return lst; } } return NULL; } static struct nfs4_ol_stateid * init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo, struct nfs4_file *fp, struct inode *inode, struct nfs4_ol_stateid *open_stp) { struct nfs4_client *clp = lo->lo_owner.so_client; struct nfs4_ol_stateid *retstp; mutex_init(&stp->st_mutex); mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX); retry: spin_lock(&clp->cl_lock); if (nfs4_ol_stateid_unhashed(open_stp)) goto out_close; retstp = find_lock_stateid(lo, open_stp); if (retstp) goto out_found; refcount_inc(&stp->st_stid.sc_count); stp->st_stid.sc_type = NFS4_LOCK_STID; stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner); get_nfs4_file(fp); stp->st_stid.sc_file = fp; stp->st_access_bmap = 0; stp->st_deny_bmap = open_stp->st_deny_bmap; stp->st_openstp = open_stp; spin_lock(&fp->fi_lock); list_add(&stp->st_locks, &open_stp->st_locks); list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids); list_add(&stp->st_perfile, &fp->fi_stateids); spin_unlock(&fp->fi_lock); spin_unlock(&clp->cl_lock); return stp; out_found: spin_unlock(&clp->cl_lock); if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) { nfs4_put_stid(&retstp->st_stid); goto retry; } /* To keep mutex tracking happy */ mutex_unlock(&stp->st_mutex); return retstp; out_close: spin_unlock(&clp->cl_lock); mutex_unlock(&stp->st_mutex); return NULL; } static struct nfs4_ol_stateid * find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi, struct inode *inode, struct nfs4_ol_stateid *ost, bool *new) { struct nfs4_stid *ns = NULL; struct nfs4_ol_stateid *lst; struct nfs4_openowner *oo = openowner(ost->st_stateowner); struct nfs4_client *clp = oo->oo_owner.so_client; *new = false; spin_lock(&clp->cl_lock); lst = find_lock_stateid(lo, ost); spin_unlock(&clp->cl_lock); if (lst != NULL) { if (nfsd4_lock_ol_stateid(lst) == nfs_ok) goto out; 
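		/* The existing lock stateid was closed before we could
		 * lock it; drop our reference and allocate a new one. */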
nfs4_put_stid(&lst->st_stid); } ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid); if (ns == NULL) return NULL; lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost); if (lst == openlockstateid(ns)) *new = true; else nfs4_put_stid(ns); out: return lst; } static int check_lock_length(u64 offset, u64 length) { return ((length == 0) || ((length != NFS4_MAX_UINT64) && (length > ~offset))); } static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access) { struct nfs4_file *fp = lock_stp->st_stid.sc_file; lockdep_assert_held(&fp->fi_lock); if (test_access(access, lock_stp)) return; __nfs4_file_get_access(fp, access); set_access(access, lock_stp); } static __be32 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, struct nfs4_ol_stateid *ost, struct nfsd4_lock *lock, struct nfs4_ol_stateid **plst, bool *new) { __be32 status; struct nfs4_file *fi = ost->st_stid.sc_file; struct nfs4_openowner *oo = openowner(ost->st_stateowner); struct nfs4_client *cl = oo->oo_owner.so_client; struct inode *inode = d_inode(cstate->current_fh.fh_dentry); struct nfs4_lockowner *lo; struct nfs4_ol_stateid *lst; unsigned int strhashval; lo = find_lockowner_str(cl, &lock->lk_new_owner); if (!lo) { strhashval = ownerstr_hashval(&lock->lk_new_owner); lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock); if (lo == NULL) return nfserr_jukebox; } else { /* with an existing lockowner, seqids must be the same */ status = nfserr_bad_seqid; if (!cstate->minorversion && lock->lk_new_lock_seqid != lo->lo_owner.so_seqid) goto out; } lst = find_or_create_lock_stateid(lo, fi, inode, ost, new); if (lst == NULL) { status = nfserr_jukebox; goto out; } status = nfs_ok; *plst = lst; out: nfs4_put_stateowner(&lo->lo_owner); return status; } /* * LOCK operation */ __be32 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { struct nfsd4_lock *lock = &u->lock; struct nfs4_openowner *open_sop = NULL; struct nfs4_lockowner *lock_sop = NULL; struct nfs4_ol_stateid *lock_stp = NULL; struct nfs4_ol_stateid *open_stp = NULL; struct nfs4_file *fp; struct nfsd_file *nf = NULL; struct nfsd4_blocked_lock *nbl = NULL; struct file_lock *file_lock = NULL; struct file_lock *conflock = NULL; struct super_block *sb; __be32 status = 0; int lkflg; int err; bool new = false; unsigned char fl_type; unsigned int fl_flags = FL_POSIX; struct net *net = SVC_NET(rqstp); struct nfsd_net *nn = net_generic(net, nfsd_net_id); dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n", (long long) lock->lk_offset, (long long) lock->lk_length); if (check_lock_length(lock->lk_offset, lock->lk_length)) return nfserr_inval; if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, NFSD_MAY_LOCK))) { dprintk("NFSD: nfsd4_lock: permission denied!\n"); return status; } sb = cstate->current_fh.fh_dentry->d_sb; if (lock->lk_is_new) { if (nfsd4_has_session(cstate)) /* See rfc 5661 18.10.3: given clientid is ignored: */ memcpy(&lock->lk_new_clientid, &cstate->clp->cl_clientid, sizeof(clientid_t)); /* validate and update open stateid and open seqid */ status = nfs4_preprocess_confirmed_seqid_op(cstate, lock->lk_new_open_seqid, &lock->lk_new_open_stateid, &open_stp, nn); if (status) goto out; mutex_unlock(&open_stp->st_mutex); open_sop = openowner(open_stp->st_stateowner); status = nfserr_bad_stateid; if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid, &lock->lk_new_clientid)) goto out; status = lookup_or_create_lock_state(cstate, open_stp, lock, &lock_stp, &new); } else { status = 
nfs4_preprocess_seqid_op(cstate, lock->lk_old_lock_seqid, &lock->lk_old_lock_stateid, NFS4_LOCK_STID, &lock_stp, nn); } if (status) goto out; lock_sop = lockowner(lock_stp->st_stateowner); lkflg = setlkflg(lock->lk_type); status = nfs4_check_openmode(lock_stp, lkflg); if (status) goto out; status = nfserr_grace; if (locks_in_grace(net) && !lock->lk_reclaim) goto out; status = nfserr_no_grace; if (!locks_in_grace(net) && lock->lk_reclaim) goto out; if (lock->lk_reclaim) fl_flags |= FL_RECLAIM; fp = lock_stp->st_stid.sc_file; switch (lock->lk_type) { case NFS4_READW_LT: if (nfsd4_has_session(cstate) || exportfs_lock_op_is_async(sb->s_export_op)) fl_flags |= FL_SLEEP; fallthrough; case NFS4_READ_LT: spin_lock(&fp->fi_lock); nf = find_readable_file_locked(fp); if (nf) get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ); spin_unlock(&fp->fi_lock); fl_type = F_RDLCK; break; case NFS4_WRITEW_LT: if (nfsd4_has_session(cstate) || exportfs_lock_op_is_async(sb->s_export_op)) fl_flags |= FL_SLEEP; fallthrough; case NFS4_WRITE_LT: spin_lock(&fp->fi_lock); nf = find_writeable_file_locked(fp); if (nf) get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE); spin_unlock(&fp->fi_lock); fl_type = F_WRLCK; break; default: status = nfserr_inval; goto out; } if (!nf) { status = nfserr_openmode; goto out; } /* * Most filesystems with their own ->lock operations will block * the nfsd thread waiting to acquire the lock. That leads to * deadlocks (we don't want every nfsd thread tied up waiting * for file locks), so don't attempt blocking lock notifications * on those filesystems: */ if (!exportfs_lock_op_is_async(sb->s_export_op)) fl_flags &= ~FL_SLEEP; nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn); if (!nbl) { dprintk("NFSD: %s: unable to allocate block!\n", __func__); status = nfserr_jukebox; goto out; } file_lock = &nbl->nbl_lock; file_lock->fl_type = fl_type; file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner)); file_lock->fl_pid = current->tgid; file_lock->fl_file = nf->nf_file; file_lock->fl_flags = fl_flags; file_lock->fl_lmops = &nfsd_posix_mng_ops; file_lock->fl_start = lock->lk_offset; file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length); nfs4_transform_lock_offset(file_lock); conflock = locks_alloc_lock(); if (!conflock) { dprintk("NFSD: %s: unable to allocate lock!\n", __func__); status = nfserr_jukebox; goto out; } if (fl_flags & FL_SLEEP) { nbl->nbl_time = ktime_get_boottime_seconds(); spin_lock(&nn->blocked_locks_lock); list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked); list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru); kref_get(&nbl->nbl_kref); spin_unlock(&nn->blocked_locks_lock); } err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock); switch (err) { case 0: /* success! */ nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid); status = 0; if (lock->lk_reclaim) nn->somebody_reclaimed = true; break; case FILE_LOCK_DEFERRED: kref_put(&nbl->nbl_kref, free_nbl); nbl = NULL; fallthrough; case -EAGAIN: /* conflock holds conflicting lock */ status = nfserr_denied; dprintk("NFSD: nfsd4_lock: conflicting lock found!\n"); nfs4_set_lock_denied(conflock, &lock->lk_denied); break; case -EDEADLK: status = nfserr_deadlock; break; default: dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! 
status %d\n",err); status = nfserrno(err); break; } out: if (nbl) { /* dequeue it if we queued it before */ if (fl_flags & FL_SLEEP) { spin_lock(&nn->blocked_locks_lock); if (!list_empty(&nbl->nbl_list) && !list_empty(&nbl->nbl_lru)) { list_del_init(&nbl->nbl_list); list_del_init(&nbl->nbl_lru); kref_put(&nbl->nbl_kref, free_nbl); } /* nbl can use one of lists to be linked to reaplist */ spin_unlock(&nn->blocked_locks_lock); } free_blocked_lock(nbl); } if (nf) nfsd_file_put(nf); if (lock_stp) { /* Bump seqid manually if the 4.0 replay owner is openowner */ if (cstate->replay_owner && cstate->replay_owner != &lock_sop->lo_owner && seqid_mutating_err(ntohl(status))) lock_sop->lo_owner.so_seqid++; /* * If this is a new, never-before-used stateid, and we are * returning an error, then just go ahead and release it. */ if (status && new) release_lock_stateid(lock_stp); mutex_unlock(&lock_stp->st_mutex); nfs4_put_stid(&lock_stp->st_stid); } if (open_stp) nfs4_put_stid(&open_stp->st_stid); nfsd4_bump_seqid(cstate, status); if (conflock) locks_free_lock(conflock); return status; } void nfsd4_lock_release(union nfsd4_op_u *u) { struct nfsd4_lock *lock = &u->lock; struct nfsd4_lock_denied *deny = &lock->lk_denied; kfree(deny->ld_owner.data); } /* * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN, * so we do a temporary open here just to get an open file to pass to * vfs_test_lock. */ static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock) { struct nfsd_file *nf; struct inode *inode; __be32 err; err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf); if (err) return err; inode = fhp->fh_dentry->d_inode; inode_lock(inode); /* to block new leases till after test_lock: */ err = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ)); if (err) goto out; lock->fl_file = nf->nf_file; err = nfserrno(vfs_test_lock(nf->nf_file, lock)); lock->fl_file = NULL; out: inode_unlock(inode); nfsd_file_put(nf); return err; } /* * LOCKT operation */ __be32 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { struct nfsd4_lockt *lockt = &u->lockt; struct file_lock *file_lock = NULL; struct nfs4_lockowner *lo = NULL; __be32 status; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); if (locks_in_grace(SVC_NET(rqstp))) return nfserr_grace; if (check_lock_length(lockt->lt_offset, lockt->lt_length)) return nfserr_inval; if (!nfsd4_has_session(cstate)) { status = set_client(&lockt->lt_clientid, cstate, nn); if (status) goto out; } if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) goto out; file_lock = locks_alloc_lock(); if (!file_lock) { dprintk("NFSD: %s: unable to allocate lock!\n", __func__); status = nfserr_jukebox; goto out; } switch (lockt->lt_type) { case NFS4_READ_LT: case NFS4_READW_LT: file_lock->fl_type = F_RDLCK; break; case NFS4_WRITE_LT: case NFS4_WRITEW_LT: file_lock->fl_type = F_WRLCK; break; default: dprintk("NFSD: nfs4_lockt: bad lock type!\n"); status = nfserr_inval; goto out; } lo = find_lockowner_str(cstate->clp, &lockt->lt_owner); if (lo) file_lock->fl_owner = (fl_owner_t)lo; file_lock->fl_pid = current->tgid; file_lock->fl_flags = FL_POSIX; file_lock->fl_start = lockt->lt_offset; file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length); nfs4_transform_lock_offset(file_lock); status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock); if (status) goto out; if (file_lock->fl_type != F_UNLCK) { status = nfserr_denied; nfs4_set_lock_denied(file_lock, 
&lockt->lt_denied); } out: if (lo) nfs4_put_stateowner(&lo->lo_owner); if (file_lock) locks_free_lock(file_lock); return status; } void nfsd4_lockt_release(union nfsd4_op_u *u) { struct nfsd4_lockt *lockt = &u->lockt; struct nfsd4_lock_denied *deny = &lockt->lt_denied; kfree(deny->ld_owner.data); } __be32 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { struct nfsd4_locku *locku = &u->locku; struct nfs4_ol_stateid *stp; struct nfsd_file *nf = NULL; struct file_lock *file_lock = NULL; __be32 status; int err; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n", (long long) locku->lu_offset, (long long) locku->lu_length); if (check_lock_length(locku->lu_offset, locku->lu_length)) return nfserr_inval; status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid, &locku->lu_stateid, NFS4_LOCK_STID, &stp, nn); if (status) goto out; nf = find_any_file(stp->st_stid.sc_file); if (!nf) { status = nfserr_lock_range; goto put_stateid; } file_lock = locks_alloc_lock(); if (!file_lock) { dprintk("NFSD: %s: unable to allocate lock!\n", __func__); status = nfserr_jukebox; goto put_file; } file_lock->fl_type = F_UNLCK; file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner)); file_lock->fl_pid = current->tgid; file_lock->fl_file = nf->nf_file; file_lock->fl_flags = FL_POSIX; file_lock->fl_lmops = &nfsd_posix_mng_ops; file_lock->fl_start = locku->lu_offset; file_lock->fl_end = last_byte_offset(locku->lu_offset, locku->lu_length); nfs4_transform_lock_offset(file_lock); err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL); if (err) { dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n"); goto out_nfserr; } nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid); put_file: nfsd_file_put(nf); put_stateid: mutex_unlock(&stp->st_mutex); nfs4_put_stid(&stp->st_stid); out: nfsd4_bump_seqid(cstate, status); if (file_lock) locks_free_lock(file_lock); return status; out_nfserr: status = nfserrno(err); goto put_file; } /* * returns * true: locks held by lockowner * false: no locks held by lockowner */ static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner) { struct file_lock *fl; int status = false; struct nfsd_file *nf = find_any_file(fp); struct inode *inode; struct file_lock_context *flctx; if (!nf) { /* Any valid lock stateid should have some sort of access */ WARN_ON_ONCE(1); return status; } inode = file_inode(nf->nf_file); flctx = locks_inode_context(inode); if (flctx && !list_empty_careful(&flctx->flc_posix)) { spin_lock(&flctx->flc_lock); list_for_each_entry(fl, &flctx->flc_posix, fl_list) { if (fl->fl_owner == (fl_owner_t)lowner) { status = true; break; } } spin_unlock(&flctx->flc_lock); } nfsd_file_put(nf); return status; } /** * nfsd4_release_lockowner - process NFSv4.0 RELEASE_LOCKOWNER operations * @rqstp: RPC transaction * @cstate: NFSv4 COMPOUND state * @u: RELEASE_LOCKOWNER arguments * * The lockowner's so_count is bumped when a lock record is added * or when copying a conflicting lock. The latter case is brief, * but can lead to fleeting false positives when looking for * locks-in-use. 
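 * That is why the check below compares so_count against 2 (the
 * lockowner's base reference plus the one taken by the lookup here):
 * anything more is treated as locks still in use.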
* * Return values: * %nfs_ok: lockowner released or not found * %nfserr_locks_held: lockowner still in use * %nfserr_stale_clientid: clientid no longer active * %nfserr_expired: clientid not recognized */ __be32 nfsd4_release_lockowner(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); clientid_t *clid = &rlockowner->rl_clientid; struct nfs4_ol_stateid *stp; struct nfs4_lockowner *lo; struct nfs4_client *clp; LIST_HEAD(reaplist); __be32 status; dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n", clid->cl_boot, clid->cl_id); status = set_client(clid, cstate, nn); if (status) return status; clp = cstate->clp; spin_lock(&clp->cl_lock); lo = find_lockowner_str_locked(clp, &rlockowner->rl_owner); if (!lo) { spin_unlock(&clp->cl_lock); return nfs_ok; } if (atomic_read(&lo->lo_owner.so_count) != 2) { spin_unlock(&clp->cl_lock); nfs4_put_stateowner(&lo->lo_owner); return nfserr_locks_held; } unhash_lockowner_locked(lo); while (!list_empty(&lo->lo_owner.so_stateids)) { stp = list_first_entry(&lo->lo_owner.so_stateids, struct nfs4_ol_stateid, st_perstateowner); WARN_ON(!unhash_lock_stateid(stp)); put_ol_stateid_locked(stp, &reaplist); } spin_unlock(&clp->cl_lock); free_ol_stateid_reaplist(&reaplist); remove_blocked_locks(lo); nfs4_put_stateowner(&lo->lo_owner); return nfs_ok; } static inline struct nfs4_client_reclaim * alloc_reclaim(void) { return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL); } bool nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn) { struct nfs4_client_reclaim *crp; crp = nfsd4_find_reclaim_client(name, nn); return (crp && crp->cr_clp); } /* * failure => all reset bets are off, nfserr_no_grace... * * The caller is responsible for freeing name.data if NULL is returned (it * will be freed in nfs4_remove_reclaim_record in the normal case). */ struct nfs4_client_reclaim * nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash, struct nfsd_net *nn) { unsigned int strhashval; struct nfs4_client_reclaim *crp; crp = alloc_reclaim(); if (crp) { strhashval = clientstr_hashval(name); INIT_LIST_HEAD(&crp->cr_strhash); list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]); crp->cr_name.data = name.data; crp->cr_name.len = name.len; crp->cr_princhash.data = princhash.data; crp->cr_princhash.len = princhash.len; crp->cr_clp = NULL; nn->reclaim_str_hashtbl_size++; } return crp; } void nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn) { list_del(&crp->cr_strhash); kfree(crp->cr_name.data); kfree(crp->cr_princhash.data); kfree(crp); nn->reclaim_str_hashtbl_size--; } void nfs4_release_reclaim(struct nfsd_net *nn) { struct nfs4_client_reclaim *crp = NULL; int i; for (i = 0; i < CLIENT_HASH_SIZE; i++) { while (!list_empty(&nn->reclaim_str_hashtbl[i])) { crp = list_entry(nn->reclaim_str_hashtbl[i].next, struct nfs4_client_reclaim, cr_strhash); nfs4_remove_reclaim_record(crp, nn); } } WARN_ON_ONCE(nn->reclaim_str_hashtbl_size); } /* * called from OPEN, CLAIM_PREVIOUS with a new clientid. 
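 *
 * Returns the matching nfs4_client_reclaim record, or NULL if the
 * client has no entry in the reclaim table.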
*/ struct nfs4_client_reclaim * nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn) { unsigned int strhashval; struct nfs4_client_reclaim *crp = NULL; strhashval = clientstr_hashval(name); list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) { if (compare_blob(&crp->cr_name, &name) == 0) { return crp; } } return NULL; } __be32 nfs4_check_open_reclaim(struct nfs4_client *clp) { if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags)) return nfserr_no_grace; if (nfsd4_client_record_check(clp)) return nfserr_reclaim_bad; return nfs_ok; } /* * Since the lifetime of a delegation isn't limited to that of an open, a * client may quite reasonably hang on to a delegation as long as it has * the inode cached. This becomes an obvious problem the first time a * client's inode cache approaches the size of the server's total memory. * * For now we avoid this problem by imposing a hard limit on the number * of delegations, which varies according to the server's memory size. */ static void set_max_delegations(void) { /* * Allow at most 4 delegations per megabyte of RAM. Quick * estimates suggest that in the worst case (where every delegation * is for a different inode), a delegation could take about 1.5K, * giving a worst case usage of about 6% of memory. */ max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT); } static int nfs4_state_create_net(struct net *net) { struct nfsd_net *nn = net_generic(net, nfsd_net_id); int i; nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE, sizeof(struct list_head), GFP_KERNEL); if (!nn->conf_id_hashtbl) goto err; nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE, sizeof(struct list_head), GFP_KERNEL); if (!nn->unconf_id_hashtbl) goto err_unconf_id; nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE, sizeof(struct list_head), GFP_KERNEL); if (!nn->sessionid_hashtbl) goto err_sessionid; for (i = 0; i < CLIENT_HASH_SIZE; i++) { INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]); INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]); } for (i = 0; i < SESSION_HASH_SIZE; i++) INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]); nn->conf_name_tree = RB_ROOT; nn->unconf_name_tree = RB_ROOT; nn->boot_time = ktime_get_real_seconds(); nn->grace_ended = false; nn->nfsd4_manager.block_opens = true; INIT_LIST_HEAD(&nn->nfsd4_manager.list); INIT_LIST_HEAD(&nn->client_lru); INIT_LIST_HEAD(&nn->close_lru); INIT_LIST_HEAD(&nn->del_recall_lru); spin_lock_init(&nn->client_lock); spin_lock_init(&nn->s2s_cp_lock); idr_init(&nn->s2s_cp_stateids); spin_lock_init(&nn->blocked_locks_lock); INIT_LIST_HEAD(&nn->blocked_locks_lru); INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main); INIT_WORK(&nn->nfsd_shrinker_work, nfsd4_state_shrinker_worker); get_net(net); nn->nfsd_client_shrinker = shrinker_alloc(0, "nfsd-client"); if (!nn->nfsd_client_shrinker) goto err_shrinker; nn->nfsd_client_shrinker->scan_objects = nfsd4_state_shrinker_scan; nn->nfsd_client_shrinker->count_objects = nfsd4_state_shrinker_count; nn->nfsd_client_shrinker->private_data = nn; shrinker_register(nn->nfsd_client_shrinker); return 0; err_shrinker: put_net(net); kfree(nn->sessionid_hashtbl); err_sessionid: kfree(nn->unconf_id_hashtbl); err_unconf_id: kfree(nn->conf_id_hashtbl); err: return -ENOMEM; } static void nfs4_state_destroy_net(struct net *net) { int i; struct nfs4_client *clp = NULL; struct nfsd_net *nn = net_generic(net, nfsd_net_id); for (i = 0; i < CLIENT_HASH_SIZE; i++) { while (!list_empty(&nn->conf_id_hashtbl[i])) { clp = list_entry(nn->conf_id_hashtbl[i].next, struct 
nfs4_client, cl_idhash); destroy_client(clp); } } WARN_ON(!list_empty(&nn->blocked_locks_lru)); for (i = 0; i < CLIENT_HASH_SIZE; i++) { while (!list_empty(&nn->unconf_id_hashtbl[i])) { clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash); destroy_client(clp); } } kfree(nn->sessionid_hashtbl); kfree(nn->unconf_id_hashtbl); kfree(nn->conf_id_hashtbl); put_net(net); } int nfs4_state_start_net(struct net *net) { struct nfsd_net *nn = net_generic(net, nfsd_net_id); int ret; ret = nfs4_state_create_net(net); if (ret) return ret; locks_start_grace(net, &nn->nfsd4_manager); nfsd4_client_tracking_init(net); if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0) goto skip_grace; printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n", nn->nfsd4_grace, net->ns.inum); trace_nfsd_grace_start(nn); queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ); return 0; skip_grace: printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n", net->ns.inum); queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ); nfsd4_end_grace(nn); return 0; } /* initialization to perform when the nfsd service is started: */ int nfs4_state_start(void) { int ret; ret = rhltable_init(&nfs4_file_rhltable, &nfs4_file_rhash_params); if (ret) return ret; ret = nfsd4_create_callback_queue(); if (ret) { rhltable_destroy(&nfs4_file_rhltable); return ret; } set_max_delegations(); return 0; } void nfs4_state_shutdown_net(struct net *net) { struct nfs4_delegation *dp = NULL; struct list_head *pos, *next, reaplist; struct nfsd_net *nn = net_generic(net, nfsd_net_id); shrinker_free(nn->nfsd_client_shrinker); cancel_work(&nn->nfsd_shrinker_work); cancel_delayed_work_sync(&nn->laundromat_work); locks_end_grace(&nn->nfsd4_manager); INIT_LIST_HEAD(&reaplist); spin_lock(&state_lock); list_for_each_safe(pos, next, &nn->del_recall_lru) { dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); WARN_ON(!unhash_delegation_locked(dp)); list_add(&dp->dl_recall_lru, &reaplist); } spin_unlock(&state_lock); list_for_each_safe(pos, next, &reaplist) { dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); list_del_init(&dp->dl_recall_lru); destroy_unhashed_deleg(dp); } nfsd4_client_tracking_exit(net); nfs4_state_destroy_net(net); #ifdef CONFIG_NFSD_V4_2_INTER_SSC nfsd4_ssc_shutdown_umount(nn); #endif } void nfs4_state_shutdown(void) { nfsd4_destroy_callback_queue(); rhltable_destroy(&nfs4_file_rhltable); } static void get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid) { if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid)) memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t)); } static void put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid) { if (cstate->minorversion) { memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t)); SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG); } } void clear_current_stateid(struct nfsd4_compound_state *cstate) { CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG); } /* * functions to set current state id */ void nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { put_stateid(cstate, &u->open_downgrade.od_stateid); } void nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { put_stateid(cstate, &u->open.op_stateid); } void nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { put_stateid(cstate, &u->close.cl_stateid); } void 
nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { put_stateid(cstate, &u->lock.lk_resp_stateid); } /* * functions to consume current state id */ void nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { get_stateid(cstate, &u->open_downgrade.od_stateid); } void nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { get_stateid(cstate, &u->delegreturn.dr_stateid); } void nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { get_stateid(cstate, &u->free_stateid.fr_stateid); } void nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { get_stateid(cstate, &u->setattr.sa_stateid); } void nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { get_stateid(cstate, &u->close.cl_stateid); } void nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { get_stateid(cstate, &u->locku.lu_stateid); } void nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { get_stateid(cstate, &u->read.rd_stateid); } void nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { get_stateid(cstate, &u->write.wr_stateid); } /** * nfsd4_deleg_getattr_conflict - Recall if GETATTR causes conflict * @rqstp: RPC transaction context * @inode: file to be checked for a conflict * * This function is called when there is a conflict between a write * delegation and a change/size GETATTR from another client. The server * must either use the CB_GETATTR to get the current values of the * attributes from the client that holds the delegation or recall the * delegation before replying to the GETATTR. See RFC 8881 section * 18.7.4. * * The current implementation does not support CB_GETATTR yet. However * this can avoid recalling the delegation could be added in follow up * work. * * Returns 0 if there is no conflict; otherwise an nfs_stat * code is returned. */ __be32 nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode) { __be32 status; struct file_lock_context *ctx; struct file_lock *fl; struct nfs4_delegation *dp; ctx = locks_inode_context(inode); if (!ctx) return 0; spin_lock(&ctx->flc_lock); list_for_each_entry(fl, &ctx->flc_lease, fl_list) { if (fl->fl_flags == FL_LAYOUT) continue; if (fl->fl_lmops != &nfsd_lease_mng_ops) { /* * non-nfs lease, if it's a lease with F_RDLCK then * we are done; there isn't any write delegation * on this inode */ if (fl->fl_type == F_RDLCK) break; goto break_lease; } if (fl->fl_type == F_WRLCK) { dp = fl->fl_owner; if (dp->dl_recall.cb_clp == *(rqstp->rq_lease_breaker)) { spin_unlock(&ctx->flc_lock); return 0; } break_lease: spin_unlock(&ctx->flc_lock); nfsd_stats_wdeleg_getattr_inc(); status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ)); if (status != nfserr_jukebox || !nfsd_wait_for_delegreturn(rqstp, inode)) return status; return 0; } break; } spin_unlock(&ctx->flc_lock); return 0; } |
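/*
 * Illustrative sketch (not part of the original nfs4state.c): the
 * set/get current-stateid helpers above are hooked into the NFSv4
 * operation table in nfs4proc.c, roughly like this, so that e.g. a
 * READ carrying the special "current" stateid picks up the stateid
 * saved by an earlier OPEN in the same COMPOUND:
 *
 *	[OP_OPEN] = {
 *		...
 *		.op_set_currentstateid = nfsd4_set_openstateid,
 *	},
 *	[OP_READ] = {
 *		...
 *		.op_get_currentstateid = nfsd4_get_readstateid,
 *	},
 */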
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2010 Red Hat, Inc. * Copyright (C) 2016-2019 Christoph Hellwig. */ #include <linux/module.h> #include <linux/compiler.h> #include <linux/fs.h> #include <linux/iomap.h> #include <linux/pagemap.h> #include <linux/uio.h> #include <linux/buffer_head.h> #include <linux/dax.h> #include <linux/writeback.h> #include <linux/list_sort.h> #include <linux/swap.h> #include <linux/bio.h> #include <linux/sched/signal.h> #include <linux/migrate.h> #include "trace.h" #include "../internal.h" #define IOEND_BATCH_SIZE 4096 typedef int (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length); /* * Structure allocated for each folio to track per-block uptodate, dirty state * and I/O completions. */ struct iomap_folio_state { spinlock_t state_lock; unsigned int read_bytes_pending; atomic_t write_bytes_pending; /* * Each block has two bits in this bitmap: * Bits [0..blocks_per_folio) has the uptodate status. * Bits [b_p_f...(2*b_p_f)) has the dirty status.
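* For example, with 4k filesystem blocks in a 16k folio (blocks_per_folio == 4), bit 1 is the uptodate bit of block 1 and bit 5 (1 + blocks_per_folio) is its dirty bit.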
*/ unsigned long state[]; }; static struct bio_set iomap_ioend_bioset; static inline bool ifs_is_fully_uptodate(struct folio *folio, struct iomap_folio_state *ifs) { struct inode *inode = folio->mapping->host; return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio)); } static inline bool ifs_block_is_uptodate(struct iomap_folio_state *ifs, unsigned int block) { return test_bit(block, ifs->state); } static bool ifs_set_range_uptodate(struct folio *folio, struct iomap_folio_state *ifs, size_t off, size_t len) { struct inode *inode = folio->mapping->host; unsigned int first_blk = off >> inode->i_blkbits; unsigned int last_blk = (off + len - 1) >> inode->i_blkbits; unsigned int nr_blks = last_blk - first_blk + 1; bitmap_set(ifs->state, first_blk, nr_blks); return ifs_is_fully_uptodate(folio, ifs); } static void iomap_set_range_uptodate(struct folio *folio, size_t off, size_t len) { struct iomap_folio_state *ifs = folio->private; unsigned long flags; bool uptodate = true; if (ifs) { spin_lock_irqsave(&ifs->state_lock, flags); uptodate = ifs_set_range_uptodate(folio, ifs, off, len); spin_unlock_irqrestore(&ifs->state_lock, flags); } if (uptodate) folio_mark_uptodate(folio); } static inline bool ifs_block_is_dirty(struct folio *folio, struct iomap_folio_state *ifs, int block) { struct inode *inode = folio->mapping->host; unsigned int blks_per_folio = i_blocks_per_folio(inode, folio); return test_bit(block + blks_per_folio, ifs->state); } static void ifs_clear_range_dirty(struct folio *folio, struct iomap_folio_state *ifs, size_t off, size_t len) { struct inode *inode = folio->mapping->host; unsigned int blks_per_folio = i_blocks_per_folio(inode, folio); unsigned int first_blk = (off >> inode->i_blkbits); unsigned int last_blk = (off + len - 1) >> inode->i_blkbits; unsigned int nr_blks = last_blk - first_blk + 1; unsigned long flags; spin_lock_irqsave(&ifs->state_lock, flags); bitmap_clear(ifs->state, first_blk + blks_per_folio, nr_blks); spin_unlock_irqrestore(&ifs->state_lock, flags); } static void iomap_clear_range_dirty(struct folio *folio, size_t off, size_t len) { struct iomap_folio_state *ifs = folio->private; if (ifs) ifs_clear_range_dirty(folio, ifs, off, len); } static void ifs_set_range_dirty(struct folio *folio, struct iomap_folio_state *ifs, size_t off, size_t len) { struct inode *inode = folio->mapping->host; unsigned int blks_per_folio = i_blocks_per_folio(inode, folio); unsigned int first_blk = (off >> inode->i_blkbits); unsigned int last_blk = (off + len - 1) >> inode->i_blkbits; unsigned int nr_blks = last_blk - first_blk + 1; unsigned long flags; spin_lock_irqsave(&ifs->state_lock, flags); bitmap_set(ifs->state, first_blk + blks_per_folio, nr_blks); spin_unlock_irqrestore(&ifs->state_lock, flags); } static void iomap_set_range_dirty(struct folio *folio, size_t off, size_t len) { struct iomap_folio_state *ifs = folio->private; if (ifs) ifs_set_range_dirty(folio, ifs, off, len); } static struct iomap_folio_state *ifs_alloc(struct inode *inode, struct folio *folio, unsigned int flags) { struct iomap_folio_state *ifs = folio->private; unsigned int nr_blocks = i_blocks_per_folio(inode, folio); gfp_t gfp; if (ifs || nr_blocks <= 1) return ifs; if (flags & IOMAP_NOWAIT) gfp = GFP_NOWAIT; else gfp = GFP_NOFS | __GFP_NOFAIL; /* * ifs->state tracks two sets of state flags when the * filesystem block size is smaller than the folio size. * The first state tracks per-block uptodate and the * second tracks per-block dirty state. 
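* That is also why ifs->state is allocated below with room for 2 * nr_blocks bits.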
*/ ifs = kzalloc(struct_size(ifs, state, BITS_TO_LONGS(2 * nr_blocks)), gfp); if (!ifs) return ifs; spin_lock_init(&ifs->state_lock); if (folio_test_uptodate(folio)) bitmap_set(ifs->state, 0, nr_blocks); if (folio_test_dirty(folio)) bitmap_set(ifs->state, nr_blocks, nr_blocks); folio_attach_private(folio, ifs); return ifs; } static void ifs_free(struct folio *folio) { struct iomap_folio_state *ifs = folio_detach_private(folio); if (!ifs) return; WARN_ON_ONCE(ifs->read_bytes_pending != 0); WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending)); WARN_ON_ONCE(ifs_is_fully_uptodate(folio, ifs) != folio_test_uptodate(folio)); kfree(ifs); } /* * Calculate the range inside the folio that we actually need to read. */ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio, loff_t *pos, loff_t length, size_t *offp, size_t *lenp) { struct iomap_folio_state *ifs = folio->private; loff_t orig_pos = *pos; loff_t isize = i_size_read(inode); unsigned block_bits = inode->i_blkbits; unsigned block_size = (1 << block_bits); size_t poff = offset_in_folio(folio, *pos); size_t plen = min_t(loff_t, folio_size(folio) - poff, length); unsigned first = poff >> block_bits; unsigned last = (poff + plen - 1) >> block_bits; /* * If the block size is smaller than the page size, we need to check the * per-block uptodate status and adjust the offset and length if needed * to avoid reading in already uptodate ranges. */ if (ifs) { unsigned int i; /* move forward for each leading block marked uptodate */ for (i = first; i <= last; i++) { if (!ifs_block_is_uptodate(ifs, i)) break; *pos += block_size; poff += block_size; plen -= block_size; first++; } /* truncate len if we find any trailing uptodate block(s) */ for ( ; i <= last; i++) { if (ifs_block_is_uptodate(ifs, i)) { plen -= (last - i + 1) * block_size; last = i - 1; break; } } } /* * If the extent spans the block that contains the i_size, we need to * handle both halves separately so that we properly zero data in the * page cache for blocks that are entirely outside of i_size. */ if (orig_pos <= isize && orig_pos + length > isize) { unsigned end = offset_in_folio(folio, isize - 1) >> block_bits; if (first <= end && last > end) plen -= (last - end) * block_size; } *offp = poff; *lenp = plen; } static void iomap_finish_folio_read(struct folio *folio, size_t off, size_t len, int error) { struct iomap_folio_state *ifs = folio->private; bool uptodate = !error; bool finished = true; if (ifs) { unsigned long flags; spin_lock_irqsave(&ifs->state_lock, flags); if (!error) uptodate = ifs_set_range_uptodate(folio, ifs, off, len); ifs->read_bytes_pending -= len; finished = !ifs->read_bytes_pending; spin_unlock_irqrestore(&ifs->state_lock, flags); } if (error) folio_set_error(folio); if (finished) folio_end_read(folio, uptodate); } static void iomap_read_end_io(struct bio *bio) { int error = blk_status_to_errno(bio->bi_status); struct folio_iter fi; bio_for_each_folio_all(fi, bio) iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error); bio_put(bio); } struct iomap_readpage_ctx { struct folio *cur_folio; bool cur_folio_in_bio; struct bio *bio; struct readahead_control *rac; }; /** * iomap_read_inline_data - copy inline data into the page cache * @iter: iteration structure * @folio: folio to copy to * * Copy the inline data in @iter into @folio and zero out the rest of the folio. * Only a single IOMAP_INLINE extent is allowed at the end of each file. * Returns zero for success to complete the read, or the usual negative errno. 
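* Note that folio_fill_tail() zeroes whatever part of the folio lies past the copied inline data, so the whole tail of the folio can then be marked uptodate.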
*/ static int iomap_read_inline_data(const struct iomap_iter *iter, struct folio *folio) { const struct iomap *iomap = iomap_iter_srcmap(iter); size_t size = i_size_read(iter->inode) - iomap->offset; size_t offset = offset_in_folio(folio, iomap->offset); if (folio_test_uptodate(folio)) return 0; if (WARN_ON_ONCE(size > iomap->length)) return -EIO; if (offset > 0) ifs_alloc(iter->inode, folio, iter->flags); folio_fill_tail(folio, offset, iomap->inline_data, size); iomap_set_range_uptodate(folio, offset, folio_size(folio) - offset); return 0; } static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter, loff_t pos) { const struct iomap *srcmap = iomap_iter_srcmap(iter); return srcmap->type != IOMAP_MAPPED || (srcmap->flags & IOMAP_F_NEW) || pos >= i_size_read(iter->inode); } static loff_t iomap_readpage_iter(const struct iomap_iter *iter, struct iomap_readpage_ctx *ctx, loff_t offset) { const struct iomap *iomap = &iter->iomap; loff_t pos = iter->pos + offset; loff_t length = iomap_length(iter) - offset; struct folio *folio = ctx->cur_folio; struct iomap_folio_state *ifs; loff_t orig_pos = pos; size_t poff, plen; sector_t sector; if (iomap->type == IOMAP_INLINE) return iomap_read_inline_data(iter, folio); /* zero post-eof blocks as the page may be mapped */ ifs = ifs_alloc(iter->inode, folio, iter->flags); iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen); if (plen == 0) goto done; if (iomap_block_needs_zeroing(iter, pos)) { folio_zero_range(folio, poff, plen); iomap_set_range_uptodate(folio, poff, plen); goto done; } ctx->cur_folio_in_bio = true; if (ifs) { spin_lock_irq(&ifs->state_lock); ifs->read_bytes_pending += plen; spin_unlock_irq(&ifs->state_lock); } sector = iomap_sector(iomap, pos); if (!ctx->bio || bio_end_sector(ctx->bio) != sector || !bio_add_folio(ctx->bio, folio, plen, poff)) { gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL); gfp_t orig_gfp = gfp; unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE); if (ctx->bio) submit_bio(ctx->bio); if (ctx->rac) /* same as readahead_gfp_mask */ gfp |= __GFP_NORETRY | __GFP_NOWARN; ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs), REQ_OP_READ, gfp); /* * If the bio_alloc fails, try it again for a single page to * avoid having to deal with partial page reads. This emulates * what do_mpage_read_folio does. */ if (!ctx->bio) { ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, orig_gfp); } if (ctx->rac) ctx->bio->bi_opf |= REQ_RAHEAD; ctx->bio->bi_iter.bi_sector = sector; ctx->bio->bi_end_io = iomap_read_end_io; bio_add_folio_nofail(ctx->bio, folio, plen, poff); } done: /* * Move the caller beyond our range so that it keeps making progress. * For that, we have to include any leading non-uptodate ranges, but * we can skip trailing ones as they will be handled in the next * iteration. 
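* The value returned below is therefore (pos - orig_pos), the leading bytes that were already uptodate and skipped, plus plen, the bytes handled in this pass.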
*/ return pos - orig_pos + plen; } int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops) { struct iomap_iter iter = { .inode = folio->mapping->host, .pos = folio_pos(folio), .len = folio_size(folio), }; struct iomap_readpage_ctx ctx = { .cur_folio = folio, }; int ret; trace_iomap_readpage(iter.inode, 1); while ((ret = iomap_iter(&iter, ops)) > 0) iter.processed = iomap_readpage_iter(&iter, &ctx, 0); if (ret < 0) folio_set_error(folio); if (ctx.bio) { submit_bio(ctx.bio); WARN_ON_ONCE(!ctx.cur_folio_in_bio); } else { WARN_ON_ONCE(ctx.cur_folio_in_bio); folio_unlock(folio); } /* * Just like mpage_readahead and block_read_full_folio, we always * return 0 and just set the folio error flag on errors. This * should be cleaned up throughout the stack eventually. */ return 0; } EXPORT_SYMBOL_GPL(iomap_read_folio); static loff_t iomap_readahead_iter(const struct iomap_iter *iter, struct iomap_readpage_ctx *ctx) { loff_t length = iomap_length(iter); loff_t done, ret; for (done = 0; done < length; done += ret) { if (ctx->cur_folio && offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) { if (!ctx->cur_folio_in_bio) folio_unlock(ctx->cur_folio); ctx->cur_folio = NULL; } if (!ctx->cur_folio) { ctx->cur_folio = readahead_folio(ctx->rac); ctx->cur_folio_in_bio = false; } ret = iomap_readpage_iter(iter, ctx, done); if (ret <= 0) return ret; } return done; } /** * iomap_readahead - Attempt to read pages from a file. * @rac: Describes the pages to be read. * @ops: The operations vector for the filesystem. * * This function is for filesystems to call to implement their readahead * address_space operation. * * Context: The @ops callbacks may submit I/O (eg to read the addresses of * blocks from disc), and may wait for it. The caller may be trying to * access a different page, and so sleeping excessively should be avoided. * It may allocate memory, but should avoid costly allocations. This * function is called with memalloc_nofs set, so allocations will not cause * the filesystem to be reentered. */ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops) { struct iomap_iter iter = { .inode = rac->mapping->host, .pos = readahead_pos(rac), .len = readahead_length(rac), }; struct iomap_readpage_ctx ctx = { .rac = rac, }; trace_iomap_readahead(rac->mapping->host, readahead_count(rac)); while (iomap_iter(&iter, ops) > 0) iter.processed = iomap_readahead_iter(&iter, &ctx); if (ctx.bio) submit_bio(ctx.bio); if (ctx.cur_folio) { if (!ctx.cur_folio_in_bio) folio_unlock(ctx.cur_folio); } } EXPORT_SYMBOL_GPL(iomap_readahead); /* * iomap_is_partially_uptodate checks whether blocks within a folio are * uptodate or not. * * Returns true if all blocks which correspond to the specified part * of the folio are uptodate. */ bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count) { struct iomap_folio_state *ifs = folio->private; struct inode *inode = folio->mapping->host; unsigned first, last, i; if (!ifs) return false; /* Caller's range may extend past the end of this folio */ count = min(folio_size(folio) - from, count); /* First and last blocks in range within folio */ first = from >> inode->i_blkbits; last = (from + count - 1) >> inode->i_blkbits; for (i = first; i <= last; i++) if (!ifs_block_is_uptodate(ifs, i)) return false; return true; } EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate); /** * iomap_get_folio - get a folio reference for writing * @iter: iteration structure * @pos: start offset of write * @len: Suggested size of folio to create. 
* * Returns a locked reference to the folio at @pos, or an error pointer if the * folio could not be obtained. */ struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len) { fgf_t fgp = FGP_WRITEBEGIN | FGP_NOFS; if (iter->flags & IOMAP_NOWAIT) fgp |= FGP_NOWAIT; fgp |= fgf_set_order(len); return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT, fgp, mapping_gfp_mask(iter->inode->i_mapping)); } EXPORT_SYMBOL_GPL(iomap_get_folio); bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags) { trace_iomap_release_folio(folio->mapping->host, folio_pos(folio), folio_size(folio)); /* * If the folio is dirty, we refuse to release our metadata because * it may be partially dirty. Once we track per-block dirty state, * we can release the metadata if every block is dirty. */ if (folio_test_dirty(folio)) return false; ifs_free(folio); return true; } EXPORT_SYMBOL_GPL(iomap_release_folio); void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len) { trace_iomap_invalidate_folio(folio->mapping->host, folio_pos(folio) + offset, len); /* * If we're invalidating the entire folio, clear the dirty state * from it and release it to avoid unnecessary buildup of the LRU. */ if (offset == 0 && len == folio_size(folio)) { WARN_ON_ONCE(folio_test_writeback(folio)); folio_cancel_dirty(folio); ifs_free(folio); } } EXPORT_SYMBOL_GPL(iomap_invalidate_folio); bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio) { struct inode *inode = mapping->host; size_t len = folio_size(folio); ifs_alloc(inode, folio, 0); iomap_set_range_dirty(folio, 0, len); return filemap_dirty_folio(mapping, folio); } EXPORT_SYMBOL_GPL(iomap_dirty_folio); static void iomap_write_failed(struct inode *inode, loff_t pos, unsigned len) { loff_t i_size = i_size_read(inode); /* * Only truncate newly allocated pages beyoned EOF, even if the * write started inside the existing inode size. */ if (pos + len > i_size) truncate_pagecache_range(inode, max(pos, i_size), pos + len - 1); } static int iomap_read_folio_sync(loff_t block_start, struct folio *folio, size_t poff, size_t plen, const struct iomap *iomap) { struct bio_vec bvec; struct bio bio; bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ); bio.bi_iter.bi_sector = iomap_sector(iomap, block_start); bio_add_folio_nofail(&bio, folio, plen, poff); return submit_bio_wait(&bio); } static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos, size_t len, struct folio *folio) { const struct iomap *srcmap = iomap_iter_srcmap(iter); struct iomap_folio_state *ifs; loff_t block_size = i_blocksize(iter->inode); loff_t block_start = round_down(pos, block_size); loff_t block_end = round_up(pos + len, block_size); unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio); size_t from = offset_in_folio(folio, pos), to = from + len; size_t poff, plen; /* * If the write or zeroing completely overlaps the current folio, then * entire folio will be dirtied so there is no need for * per-block state tracking structures to be attached to this folio. * For the unshare case, we must read in the ondisk contents because we * are not changing pagecache contents. 
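* For example, a write that covers the folio from folio_pos() to folio_pos() + folio_size() returns early below, while a write into the middle of the folio falls through and reads in (or zeroes) any blocks it does not completely cover and that are not already uptodate.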
*/ if (!(iter->flags & IOMAP_UNSHARE) && pos <= folio_pos(folio) && pos + len >= folio_pos(folio) + folio_size(folio)) return 0; ifs = ifs_alloc(iter->inode, folio, iter->flags); if ((iter->flags & IOMAP_NOWAIT) && !ifs && nr_blocks > 1) return -EAGAIN; if (folio_test_uptodate(folio)) return 0; folio_clear_error(folio); do { iomap_adjust_read_range(iter->inode, folio, &block_start, block_end - block_start, &poff, &plen); if (plen == 0) break; if (!(iter->flags & IOMAP_UNSHARE) && (from <= poff || from >= poff + plen) && (to <= poff || to >= poff + plen)) continue; if (iomap_block_needs_zeroing(iter, block_start)) { if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE)) return -EIO; folio_zero_segments(folio, poff, from, to, poff + plen); } else { int status; if (iter->flags & IOMAP_NOWAIT) return -EAGAIN; status = iomap_read_folio_sync(block_start, folio, poff, plen, srcmap); if (status) return status; } iomap_set_range_uptodate(folio, poff, plen); } while ((block_start += plen) < block_end); return 0; } static struct folio *__iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len) { const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops; if (folio_ops && folio_ops->get_folio) return folio_ops->get_folio(iter, pos, len); else return iomap_get_folio(iter, pos, len); } static void __iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret, struct folio *folio) { const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops; if (folio_ops && folio_ops->put_folio) { folio_ops->put_folio(iter->inode, pos, ret, folio); } else { folio_unlock(folio); folio_put(folio); } } static int iomap_write_begin_inline(const struct iomap_iter *iter, struct folio *folio) { /* needs more work for the tailpacking case; disable for now */ if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0)) return -EIO; return iomap_read_inline_data(iter, folio); } static int iomap_write_begin(struct iomap_iter *iter, loff_t pos, size_t len, struct folio **foliop) { const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops; const struct iomap *srcmap = iomap_iter_srcmap(iter); struct folio *folio; int status = 0; BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length); if (srcmap != &iter->iomap) BUG_ON(pos + len > srcmap->offset + srcmap->length); if (fatal_signal_pending(current)) return -EINTR; if (!mapping_large_folio_support(iter->inode->i_mapping)) len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos)); folio = __iomap_get_folio(iter, pos, len); if (IS_ERR(folio)) return PTR_ERR(folio); /* * Now we have a locked folio, before we do anything with it we need to * check that the iomap we have cached is not stale. The inode extent * mapping can change due to concurrent IO in flight (e.g. * IOMAP_UNWRITTEN state can change and memory reclaim could have * reclaimed a previously partially written page at this index after IO * completion before this write reaches this file offset) and hence we * could do the wrong thing here (zero a page range incorrectly or fail * to zero) and corrupt data. 
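* Filesystems typically implement ->iomap_valid by comparing a sequence number sampled in ->iomap_begin with the current one; if the check below fails, IOMAP_F_STALE makes the iomap_iter loop go back and remap the remaining range.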
*/ if (folio_ops && folio_ops->iomap_valid) { bool iomap_valid = folio_ops->iomap_valid(iter->inode, &iter->iomap); if (!iomap_valid) { iter->iomap.flags |= IOMAP_F_STALE; status = 0; goto out_unlock; } } if (pos + len > folio_pos(folio) + folio_size(folio)) len = folio_pos(folio) + folio_size(folio) - pos; if (srcmap->type == IOMAP_INLINE) status = iomap_write_begin_inline(iter, folio); else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) status = __block_write_begin_int(folio, pos, len, NULL, srcmap); else status = __iomap_write_begin(iter, pos, len, folio); if (unlikely(status)) goto out_unlock; *foliop = folio; return 0; out_unlock: __iomap_put_folio(iter, pos, 0, folio); iomap_write_failed(iter->inode, pos, len); return status; } static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len, size_t copied, struct folio *folio) { flush_dcache_folio(folio); /* * The blocks that were entirely written will now be uptodate, so we * don't have to worry about a read_folio reading them and overwriting a * partial write. However, if we've encountered a short write and only * partially written into a block, it will not be marked uptodate, so a * read_folio might come in and destroy our partial write. * * Do the simplest thing and just treat any short write to a * non-uptodate page as a zero-length write, and force the caller to * redo the whole thing. */ if (unlikely(copied < len && !folio_test_uptodate(folio))) return 0; iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len); iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied); filemap_dirty_folio(inode->i_mapping, folio); return copied; } static size_t iomap_write_end_inline(const struct iomap_iter *iter, struct folio *folio, loff_t pos, size_t copied) { const struct iomap *iomap = &iter->iomap; void *addr; WARN_ON_ONCE(!folio_test_uptodate(folio)); BUG_ON(!iomap_inline_data_valid(iomap)); flush_dcache_folio(folio); addr = kmap_local_folio(folio, pos); memcpy(iomap_inline_data(iomap, pos), addr, copied); kunmap_local(addr); mark_inode_dirty(iter->inode); return copied; } /* Returns the number of bytes copied. May be 0. Cannot be an errno. */ static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len, size_t copied, struct folio *folio) { const struct iomap *srcmap = iomap_iter_srcmap(iter); loff_t old_size = iter->inode->i_size; size_t ret; if (srcmap->type == IOMAP_INLINE) { ret = iomap_write_end_inline(iter, folio, pos, copied); } else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) { ret = block_write_end(NULL, iter->inode->i_mapping, pos, len, copied, &folio->page, NULL); } else { ret = __iomap_write_end(iter->inode, pos, len, copied, folio); } /* * Update the in-memory inode size after copying the data into the page * cache. It's up to the file system to write the updated size to disk, * preferably after I/O completion so that no stale data is exposed. */ if (pos + ret > old_size) { i_size_write(iter->inode, pos + ret); iter->iomap.flags |= IOMAP_F_SIZE_CHANGED; } __iomap_put_folio(iter, pos, ret, folio); if (old_size < pos) pagecache_isize_extended(iter->inode, old_size, pos); if (ret < len) iomap_write_failed(iter->inode, pos + ret, len - ret); return ret; } static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i) { loff_t length = iomap_length(iter); size_t chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER; loff_t pos = iter->pos; ssize_t written = 0; long status = 0; struct address_space *mapping = iter->inode->i_mapping; unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? 
BDP_ASYNC : 0; do { struct folio *folio; size_t offset; /* Offset into folio */ size_t bytes; /* Bytes to write to folio */ size_t copied; /* Bytes copied from user */ bytes = iov_iter_count(i); retry: offset = pos & (chunk - 1); bytes = min(chunk - offset, bytes); status = balance_dirty_pages_ratelimited_flags(mapping, bdp_flags); if (unlikely(status)) break; if (bytes > length) bytes = length; /* * Bring in the user page that we'll copy from _first_. * Otherwise there's a nasty deadlock on copying from the * same page as we're writing to, without it being marked * up-to-date. * * For async buffered writes the assumption is that the user * page has already been faulted in. This can be optimized by * faulting the user page. */ if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) { status = -EFAULT; break; } status = iomap_write_begin(iter, pos, bytes, &folio); if (unlikely(status)) break; if (iter->iomap.flags & IOMAP_F_STALE) break; offset = offset_in_folio(folio, pos); if (bytes > folio_size(folio) - offset) bytes = folio_size(folio) - offset; if (mapping_writably_mapped(mapping)) flush_dcache_folio(folio); copied = copy_folio_from_iter_atomic(folio, offset, bytes, i); status = iomap_write_end(iter, pos, bytes, copied, folio); if (unlikely(copied != status)) iov_iter_revert(i, copied - status); cond_resched(); if (unlikely(status == 0)) { /* * A short copy made iomap_write_end() reject the * thing entirely. Might be memory poisoning * halfway through, might be a race with munmap, * might be severe memory pressure. */ if (chunk > PAGE_SIZE) chunk /= 2; if (copied) { bytes = copied; goto retry; } } else { pos += status; written += status; length -= status; } } while (iov_iter_count(i) && length); if (status == -EAGAIN) { iov_iter_revert(i, written); return -EAGAIN; } return written ? written : status; } ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i, const struct iomap_ops *ops) { struct iomap_iter iter = { .inode = iocb->ki_filp->f_mapping->host, .pos = iocb->ki_pos, .len = iov_iter_count(i), .flags = IOMAP_WRITE, }; ssize_t ret; if (iocb->ki_flags & IOCB_NOWAIT) iter.flags |= IOMAP_NOWAIT; while ((ret = iomap_iter(&iter, ops)) > 0) iter.processed = iomap_write_iter(&iter, i); if (unlikely(iter.pos == iocb->ki_pos)) return ret; ret = iter.pos - iocb->ki_pos; iocb->ki_pos = iter.pos; return ret; } EXPORT_SYMBOL_GPL(iomap_file_buffered_write); static int iomap_write_delalloc_ifs_punch(struct inode *inode, struct folio *folio, loff_t start_byte, loff_t end_byte, iomap_punch_t punch) { unsigned int first_blk, last_blk, i; loff_t last_byte; u8 blkbits = inode->i_blkbits; struct iomap_folio_state *ifs; int ret = 0; /* * When we have per-block dirty tracking, there can be * blocks within a folio which are marked uptodate * but not dirty. In that case it is necessary to punch * out such blocks to avoid leaking any delalloc blocks. 
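* For example, a folio that was fully read in and then only partially overwritten is dirty as a whole, but only the overwritten blocks are dirty in the ifs bitmap; the delalloc reservation behind the remaining clean blocks is punched out below.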
*/ ifs = folio->private; if (!ifs) return ret; last_byte = min_t(loff_t, end_byte - 1, folio_pos(folio) + folio_size(folio) - 1); first_blk = offset_in_folio(folio, start_byte) >> blkbits; last_blk = offset_in_folio(folio, last_byte) >> blkbits; for (i = first_blk; i <= last_blk; i++) { if (!ifs_block_is_dirty(folio, ifs, i)) { ret = punch(inode, folio_pos(folio) + (i << blkbits), 1 << blkbits); if (ret) return ret; } } return ret; } static int iomap_write_delalloc_punch(struct inode *inode, struct folio *folio, loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte, iomap_punch_t punch) { int ret = 0; if (!folio_test_dirty(folio)) return ret; /* if dirty, punch up to offset */ if (start_byte > *punch_start_byte) { ret = punch(inode, *punch_start_byte, start_byte - *punch_start_byte); if (ret) return ret; } /* Punch non-dirty blocks within folio */ ret = iomap_write_delalloc_ifs_punch(inode, folio, start_byte, end_byte, punch); if (ret) return ret; /* * Make sure the next punch start is correctly bound to * the end of this data range, not the end of the folio. */ *punch_start_byte = min_t(loff_t, end_byte, folio_pos(folio) + folio_size(folio)); return ret; } /* * Scan the data range passed to us for dirty page cache folios. If we find a * dirty folio, punch out the preceding range and update the offset from which * the next punch will start from. * * We can punch out storage reservations under clean pages because they either * contain data that has been written back - in which case the delalloc punch * over that range is a no-op - or they have been read faults in which case they * contain zeroes and we can remove the delalloc backing range and any new * writes to those pages will do the normal hole filling operation... * * This makes the logic simple: we only need to keep the delalloc extents only * over the dirty ranges of the page cache. * * This function uses [start_byte, end_byte) intervals (i.e. open ended) to * simplify range iterations. */ static int iomap_write_delalloc_scan(struct inode *inode, loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte, iomap_punch_t punch) { while (start_byte < end_byte) { struct folio *folio; int ret; /* grab locked page */ folio = filemap_lock_folio(inode->i_mapping, start_byte >> PAGE_SHIFT); if (IS_ERR(folio)) { start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) + PAGE_SIZE; continue; } ret = iomap_write_delalloc_punch(inode, folio, punch_start_byte, start_byte, end_byte, punch); if (ret) { folio_unlock(folio); folio_put(folio); return ret; } /* move offset to start of next folio in range */ start_byte = folio_next_index(folio) << PAGE_SHIFT; folio_unlock(folio); folio_put(folio); } return 0; } /* * Punch out all the delalloc blocks in the range given except for those that * have dirty data still pending in the page cache - those are going to be * written and so must still retain the delalloc backing for writeback. * * As we are scanning the page cache for data, we don't need to reimplement the * wheel - mapping_seek_hole_data() does exactly what we need to identify the * start and end of data ranges correctly even for sub-folio block sizes. This * byte range based iteration is especially convenient because it means we * don't have to care about variable size folios, nor where the start or end of * the data range lies within a folio, if they lie within the same folio or even * if there are multiple discontiguous data ranges within the folio. 
* * It should be noted that mapping_seek_hole_data() is not aware of EOF, and so * can return data ranges that exist in the cache beyond EOF. e.g. a page fault * spanning EOF will initialise the post-EOF data to zeroes and mark it up to * date. A write page fault can then mark it dirty. If we then fail a write() * beyond EOF into that up to date cached range, we allocate a delalloc block * beyond EOF and then have to punch it out. Because the range is up to date, * mapping_seek_hole_data() will return it, and we will skip the punch because * the folio is dirty. THis is incorrect - we always need to punch out delalloc * beyond EOF in this case as writeback will never write back and covert that * delalloc block beyond EOF. Hence we limit the cached data scan range to EOF, * resulting in always punching out the range from the EOF to the end of the * range the iomap spans. * * Intervals are of the form [start_byte, end_byte) (i.e. open ended) because it * matches the intervals returned by mapping_seek_hole_data(). i.e. SEEK_DATA * returns the start of a data range (start_byte), and SEEK_HOLE(start_byte) * returns the end of the data range (data_end). Using closed intervals would * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose * the code to subtle off-by-one bugs.... */ static int iomap_write_delalloc_release(struct inode *inode, loff_t start_byte, loff_t end_byte, iomap_punch_t punch) { loff_t punch_start_byte = start_byte; loff_t scan_end_byte = min(i_size_read(inode), end_byte); int error = 0; /* * Lock the mapping to avoid races with page faults re-instantiating * folios and dirtying them via ->page_mkwrite whilst we walk the * cache and perform delalloc extent removal. Failing to do this can * leave dirty pages with no space reservation in the cache. */ filemap_invalidate_lock(inode->i_mapping); while (start_byte < scan_end_byte) { loff_t data_end; start_byte = mapping_seek_hole_data(inode->i_mapping, start_byte, scan_end_byte, SEEK_DATA); /* * If there is no more data to scan, all that is left is to * punch out the remaining range. */ if (start_byte == -ENXIO || start_byte == scan_end_byte) break; if (start_byte < 0) { error = start_byte; goto out_unlock; } WARN_ON_ONCE(start_byte < punch_start_byte); WARN_ON_ONCE(start_byte > scan_end_byte); /* * We find the end of this contiguous cached data range by * seeking from start_byte to the beginning of the next hole. */ data_end = mapping_seek_hole_data(inode->i_mapping, start_byte, scan_end_byte, SEEK_HOLE); if (data_end < 0) { error = data_end; goto out_unlock; } WARN_ON_ONCE(data_end <= start_byte); WARN_ON_ONCE(data_end > scan_end_byte); error = iomap_write_delalloc_scan(inode, &punch_start_byte, start_byte, data_end, punch); if (error) goto out_unlock; /* The next data search starts at the end of this one. */ start_byte = data_end; } if (punch_start_byte < end_byte) error = punch(inode, punch_start_byte, end_byte - punch_start_byte); out_unlock: filemap_invalidate_unlock(inode->i_mapping); return error; } /* * When a short write occurs, the filesystem may need to remove reserved space * that was allocated in ->iomap_begin from it's ->iomap_end method. For * filesystems that use delayed allocation, we need to punch out delalloc * extents from the range that are not dirty in the page cache. As the write can * race with page faults, there can be dirty pages over the delalloc extent * outside the range of a short write but still within the delalloc extent * allocated for this iomap. 
* * This function uses [start_byte, end_byte) intervals (i.e. open ended) to * simplify range iterations. * * The punch() callback *must* only punch delalloc extents in the range passed * to it. It must skip over all other types of extents in the range and leave * them completely unchanged. It must do this punch atomically with respect to * other extent modifications. * * The punch() callback may be called with a folio locked to prevent writeback * extent allocation racing at the edge of the range we are currently punching. * The locked folio may or may not cover the range being punched, so it is not * safe for the punch() callback to lock folios itself. * * Lock order is: * * inode->i_rwsem (shared or exclusive) * inode->i_mapping->invalidate_lock (exclusive) * folio_lock() * ->punch * internal filesystem allocation lock */ int iomap_file_buffered_write_punch_delalloc(struct inode *inode, struct iomap *iomap, loff_t pos, loff_t length, ssize_t written, iomap_punch_t punch) { loff_t start_byte; loff_t end_byte; unsigned int blocksize = i_blocksize(inode); if (iomap->type != IOMAP_DELALLOC) return 0; /* If we didn't reserve the blocks, we're not allowed to punch them. */ if (!(iomap->flags & IOMAP_F_NEW)) return 0; /* * start_byte refers to the first unused block after a short write. If * nothing was written, round offset down to point at the first block in * the range. */ if (unlikely(!written)) start_byte = round_down(pos, blocksize); else start_byte = round_up(pos + written, blocksize); end_byte = round_up(pos + length, blocksize); /* Nothing to do if we've written the entire delalloc extent */ if (start_byte >= end_byte) return 0; return iomap_write_delalloc_release(inode, start_byte, end_byte, punch); } EXPORT_SYMBOL_GPL(iomap_file_buffered_write_punch_delalloc); static loff_t iomap_unshare_iter(struct iomap_iter *iter) { struct iomap *iomap = &iter->iomap; const struct iomap *srcmap = iomap_iter_srcmap(iter); loff_t pos = iter->pos; loff_t length = iomap_length(iter); loff_t written = 0; /* don't bother with blocks that are not shared to start with */ if (!(iomap->flags & IOMAP_F_SHARED)) return length; /* don't bother with holes or unwritten extents */ if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN) return length; do { struct folio *folio; int status; size_t offset; size_t bytes = min_t(u64, SIZE_MAX, length); status = iomap_write_begin(iter, pos, bytes, &folio); if (unlikely(status)) return status; if (iomap->flags & IOMAP_F_STALE) break; offset = offset_in_folio(folio, pos); if (bytes > folio_size(folio) - offset) bytes = folio_size(folio) - offset; bytes = iomap_write_end(iter, pos, bytes, bytes, folio); if (WARN_ON_ONCE(bytes == 0)) return -EIO; cond_resched(); pos += bytes; written += bytes; length -= bytes; balance_dirty_pages_ratelimited(iter->inode->i_mapping); } while (length > 0); return written; } int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len, const struct iomap_ops *ops) { struct iomap_iter iter = { .inode = inode, .pos = pos, .len = len, .flags = IOMAP_WRITE | IOMAP_UNSHARE, }; int ret; while ((ret = iomap_iter(&iter, ops)) > 0) iter.processed = iomap_unshare_iter(&iter); return ret; } EXPORT_SYMBOL_GPL(iomap_file_unshare); static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero) { const struct iomap *srcmap = iomap_iter_srcmap(iter); loff_t pos = iter->pos; loff_t length = iomap_length(iter); loff_t written = 0; /* already zeroed? we're done. 
*/ if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN) return length; do { struct folio *folio; int status; size_t offset; size_t bytes = min_t(u64, SIZE_MAX, length); status = iomap_write_begin(iter, pos, bytes, &folio); if (status) return status; if (iter->iomap.flags & IOMAP_F_STALE) break; offset = offset_in_folio(folio, pos); if (bytes > folio_size(folio) - offset) bytes = folio_size(folio) - offset; folio_zero_range(folio, offset, bytes); folio_mark_accessed(folio); bytes = iomap_write_end(iter, pos, bytes, bytes, folio); if (WARN_ON_ONCE(bytes == 0)) return -EIO; pos += bytes; length -= bytes; written += bytes; } while (length > 0); if (did_zero) *did_zero = true; return written; } int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, const struct iomap_ops *ops) { struct iomap_iter iter = { .inode = inode, .pos = pos, .len = len, .flags = IOMAP_ZERO, }; int ret; while ((ret = iomap_iter(&iter, ops)) > 0) iter.processed = iomap_zero_iter(&iter, did_zero); return ret; } EXPORT_SYMBOL_GPL(iomap_zero_range); int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, const struct iomap_ops *ops) { unsigned int blocksize = i_blocksize(inode); unsigned int off = pos & (blocksize - 1); /* Block boundary? Nothing to do */ if (!off) return 0; return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops); } EXPORT_SYMBOL_GPL(iomap_truncate_page); static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter, struct folio *folio) { loff_t length = iomap_length(iter); int ret; if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) { ret = __block_write_begin_int(folio, iter->pos, length, NULL, &iter->iomap); if (ret) return ret; block_commit_write(&folio->page, 0, length); } else { WARN_ON_ONCE(!folio_test_uptodate(folio)); folio_mark_dirty(folio); } return length; } vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops) { struct iomap_iter iter = { .inode = file_inode(vmf->vma->vm_file), .flags = IOMAP_WRITE | IOMAP_FAULT, }; struct folio *folio = page_folio(vmf->page); ssize_t ret; folio_lock(folio); ret = folio_mkwrite_check_truncate(folio, iter.inode); if (ret < 0) goto out_unlock; iter.pos = folio_pos(folio); iter.len = ret; while ((ret = iomap_iter(&iter, ops)) > 0) iter.processed = iomap_folio_mkwrite_iter(&iter, folio); if (ret < 0) goto out_unlock; folio_wait_stable(folio); return VM_FAULT_LOCKED; out_unlock: folio_unlock(folio); return vmf_fs_error(ret); } EXPORT_SYMBOL_GPL(iomap_page_mkwrite); static void iomap_finish_folio_write(struct inode *inode, struct folio *folio, size_t len, int error) { struct iomap_folio_state *ifs = folio->private; if (error) { folio_set_error(folio); mapping_set_error(inode->i_mapping, error); } WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs); WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0); if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending)) folio_end_writeback(folio); } /* * We're now finished for good with this ioend structure. Update the page * state, release holds on bios, and finally free up memory. Do not use the * ioend after this. 
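* Returns the number of folios completed so that iomap_finish_ioends() can batch completions and cond_resched() between batches.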
*/ static u32 iomap_finish_ioend(struct iomap_ioend *ioend, int error) { struct inode *inode = ioend->io_inode; struct bio *bio = &ioend->io_inline_bio; struct bio *last = ioend->io_bio, *next; u64 start = bio->bi_iter.bi_sector; loff_t offset = ioend->io_offset; bool quiet = bio_flagged(bio, BIO_QUIET); u32 folio_count = 0; for (bio = &ioend->io_inline_bio; bio; bio = next) { struct folio_iter fi; /* * For the last bio, bi_private points to the ioend, so we * need to explicitly end the iteration here. */ if (bio == last) next = NULL; else next = bio->bi_private; /* walk all folios in bio, ending page IO on them */ bio_for_each_folio_all(fi, bio) { iomap_finish_folio_write(inode, fi.folio, fi.length, error); folio_count++; } bio_put(bio); } /* The ioend has been freed by bio_put() */ if (unlikely(error && !quiet)) { printk_ratelimited(KERN_ERR "%s: writeback error on inode %lu, offset %lld, sector %llu", inode->i_sb->s_id, inode->i_ino, offset, start); } return folio_count; } /* * Ioend completion routine for merged bios. This can only be called from task * contexts as merged ioends can be of unbound length. Hence we have to break up * the writeback completions into manageable chunks to avoid long scheduler * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get * good batch processing throughput without creating adverse scheduler latency * conditions. */ void iomap_finish_ioends(struct iomap_ioend *ioend, int error) { struct list_head tmp; u32 completions; might_sleep(); list_replace_init(&ioend->io_list, &tmp); completions = iomap_finish_ioend(ioend, error); while (!list_empty(&tmp)) { if (completions > IOEND_BATCH_SIZE * 8) { cond_resched(); completions = 0; } ioend = list_first_entry(&tmp, struct iomap_ioend, io_list); list_del_init(&ioend->io_list); completions += iomap_finish_ioend(ioend, error); } } EXPORT_SYMBOL_GPL(iomap_finish_ioends); /* * We can merge two adjacent ioends if they have the same set of work to do. */ static bool iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next) { if (ioend->io_bio->bi_status != next->io_bio->bi_status) return false; if ((ioend->io_flags & IOMAP_F_SHARED) ^ (next->io_flags & IOMAP_F_SHARED)) return false; if ((ioend->io_type == IOMAP_UNWRITTEN) ^ (next->io_type == IOMAP_UNWRITTEN)) return false; if (ioend->io_offset + ioend->io_size != next->io_offset) return false; /* * Do not merge physically discontiguous ioends. The filesystem * completion functions will have to iterate the physical * discontiguities even if we merge the ioends at a logical level, so * we don't gain anything by merging physical discontiguities here. * * We cannot use bio->bi_iter.bi_sector here as it is modified during * submission so does not point to the start sector of the bio at * completion. 
*/ if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector) return false; return true; } void iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends) { struct iomap_ioend *next; INIT_LIST_HEAD(&ioend->io_list); while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend, io_list))) { if (!iomap_ioend_can_merge(ioend, next)) break; list_move_tail(&next->io_list, &ioend->io_list); ioend->io_size += next->io_size; } } EXPORT_SYMBOL_GPL(iomap_ioend_try_merge); static int iomap_ioend_compare(void *priv, const struct list_head *a, const struct list_head *b) { struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list); struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list); if (ia->io_offset < ib->io_offset) return -1; if (ia->io_offset > ib->io_offset) return 1; return 0; } void iomap_sort_ioends(struct list_head *ioend_list) { list_sort(NULL, ioend_list, iomap_ioend_compare); } EXPORT_SYMBOL_GPL(iomap_sort_ioends); static void iomap_writepage_end_bio(struct bio *bio) { struct iomap_ioend *ioend = bio->bi_private; iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status)); } /* * Submit the final bio for an ioend. * * If @error is non-zero, it means that we have a situation where some part of * the submission process has failed after we've marked pages for writeback * and unlocked them. In this situation, we need to fail the bio instead of * submitting it. This typically only happens on a filesystem shutdown. */ static int iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend, int error) { ioend->io_bio->bi_private = ioend; ioend->io_bio->bi_end_io = iomap_writepage_end_bio; if (wpc->ops->prepare_ioend) error = wpc->ops->prepare_ioend(ioend, error); if (error) { /* * If we're failing the IO now, just mark the ioend with an * error and finish it. This will run IO completion immediately * as there is only one reference to the ioend at this point in * time. */ ioend->io_bio->bi_status = errno_to_blk_status(error); bio_endio(ioend->io_bio); return error; } submit_bio(ioend->io_bio); return 0; } static struct iomap_ioend * iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc, loff_t offset, sector_t sector, struct writeback_control *wbc) { struct iomap_ioend *ioend; struct bio *bio; bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS, REQ_OP_WRITE | wbc_to_write_flags(wbc), GFP_NOFS, &iomap_ioend_bioset); bio->bi_iter.bi_sector = sector; wbc_init_bio(wbc, bio); ioend = container_of(bio, struct iomap_ioend, io_inline_bio); INIT_LIST_HEAD(&ioend->io_list); ioend->io_type = wpc->iomap.type; ioend->io_flags = wpc->iomap.flags; ioend->io_inode = inode; ioend->io_size = 0; ioend->io_folios = 0; ioend->io_offset = offset; ioend->io_bio = bio; ioend->io_sector = sector; return ioend; } /* * Allocate a new bio, and chain the old bio to the new one. * * Note that we have to perform the chaining in this unintuitive order * so that the bi_private linkage is set up in the right direction for the * traversal in iomap_finish_ioend(). 
*/ static struct bio * iomap_chain_bio(struct bio *prev) { struct bio *new; new = bio_alloc(prev->bi_bdev, BIO_MAX_VECS, prev->bi_opf, GFP_NOFS); bio_clone_blkg_association(new, prev); new->bi_iter.bi_sector = bio_end_sector(prev); bio_chain(prev, new); bio_get(prev); /* for iomap_finish_ioend */ submit_bio(prev); return new; } static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset, sector_t sector) { if ((wpc->iomap.flags & IOMAP_F_SHARED) != (wpc->ioend->io_flags & IOMAP_F_SHARED)) return false; if (wpc->iomap.type != wpc->ioend->io_type) return false; if (offset != wpc->ioend->io_offset + wpc->ioend->io_size) return false; if (sector != bio_end_sector(wpc->ioend->io_bio)) return false; /* * Limit ioend bio chain lengths to minimise IO completion latency. This * also prevents long tight loops ending page writeback on all the * folios in the ioend. */ if (wpc->ioend->io_folios >= IOEND_BATCH_SIZE) return false; return true; } /* * Test to see if we have an existing ioend structure that we could append to * first; otherwise finish off the current ioend and start another. */ static void iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio, struct iomap_folio_state *ifs, struct iomap_writepage_ctx *wpc, struct writeback_control *wbc, struct list_head *iolist) { sector_t sector = iomap_sector(&wpc->iomap, pos); unsigned len = i_blocksize(inode); size_t poff = offset_in_folio(folio, pos); if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, sector)) { if (wpc->ioend) list_add(&wpc->ioend->io_list, iolist); wpc->ioend = iomap_alloc_ioend(inode, wpc, pos, sector, wbc); } if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) { wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio); bio_add_folio_nofail(wpc->ioend->io_bio, folio, len, poff); } if (ifs) atomic_add(len, &ifs->write_bytes_pending); wpc->ioend->io_size += len; wbc_account_cgroup_owner(wbc, &folio->page, len); } /* * We implement an immediate ioend submission policy here to avoid needing to * chain multiple ioends and hence nest mempool allocations which can violate * the forward progress guarantees we need to provide. The current ioend we're * adding blocks to is cached in the writepage context, and if the new block * doesn't append to the cached ioend, it will create a new ioend and cache that * instead. * * If a new ioend is created and cached, the old ioend is returned and queued * locally for submission once the entire page is processed or an error has been * detected. While ioends are submitted immediately after they are completed, * batching optimisations are provided by higher level block plugging. * * At the end of a writeback pass, there will be a cached ioend remaining on the * writepage context that the caller will need to submit. */ static int iomap_writepage_map(struct iomap_writepage_ctx *wpc, struct writeback_control *wbc, struct inode *inode, struct folio *folio, u64 end_pos) { struct iomap_folio_state *ifs = folio->private; struct iomap_ioend *ioend, *next; unsigned len = i_blocksize(inode); unsigned nblocks = i_blocks_per_folio(inode, folio); u64 pos = folio_pos(folio); int error = 0, count = 0, i; LIST_HEAD(submit_list); WARN_ON_ONCE(end_pos <= pos); if (!ifs && nblocks > 1) { ifs = ifs_alloc(inode, folio, 0); iomap_set_range_dirty(folio, 0, end_pos - pos); } WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) != 0); /* * Walk through the folio to find areas to write back. 
If we * run off the end of the current map or find the current map * invalid, grab a new one. */ for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) { if (ifs && !ifs_block_is_dirty(folio, ifs, i)) continue; error = wpc->ops->map_blocks(wpc, inode, pos); if (error) break; trace_iomap_writepage_map(inode, &wpc->iomap); if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE)) continue; if (wpc->iomap.type == IOMAP_HOLE) continue; iomap_add_to_ioend(inode, pos, folio, ifs, wpc, wbc, &submit_list); count++; } if (count) wpc->ioend->io_folios++; WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list)); WARN_ON_ONCE(!folio_test_locked(folio)); WARN_ON_ONCE(folio_test_writeback(folio)); WARN_ON_ONCE(folio_test_dirty(folio)); /* * We cannot cancel the ioend directly here on error. We may have * already set other pages under writeback and hence we have to run I/O * completion to mark the error state of the pages under writeback * appropriately. */ if (unlikely(error)) { /* * Let the filesystem know what portion of the current page * failed to map. If the page hasn't been added to ioend, it * won't be affected by I/O completion and we must unlock it * now. */ if (wpc->ops->discard_folio) wpc->ops->discard_folio(folio, pos); if (!count) { folio_unlock(folio); goto done; } } /* * We can have dirty bits set past end of file in page_mkwrite path * while mapping the last partial folio. Hence it's better to clear * all the dirty bits in the folio here. */ iomap_clear_range_dirty(folio, 0, folio_size(folio)); folio_start_writeback(folio); folio_unlock(folio); /* * Preserve the original error if there was one; catch * submission errors here and propagate into subsequent ioend * submissions. */ list_for_each_entry_safe(ioend, next, &submit_list, io_list) { int error2; list_del_init(&ioend->io_list); error2 = iomap_submit_ioend(wpc, ioend, error); if (error2 && !error) error = error2; } /* * We can end up here with no error and nothing to write only if we race * with a partial page truncate on a sub-page block sized filesystem. */ if (!count) folio_end_writeback(folio); done: mapping_set_error(inode->i_mapping, error); return error; } /* * Write out a dirty page. * * For delalloc space on the page, we need to allocate space and flush it. * For unwritten space on the page, we need to start the conversion to * regular allocated space. */ static int iomap_do_writepage(struct folio *folio, struct writeback_control *wbc, void *data) { struct iomap_writepage_ctx *wpc = data; struct inode *inode = folio->mapping->host; u64 end_pos, isize; trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio)); /* * Refuse to write the folio out if we're called from reclaim context. * * This avoids stack overflows when called from deeply used stacks in * random callers for direct reclaim or memcg reclaim. We explicitly * allow reclaim from kswapd as the stack usage there is relatively low. * * This should never happen except in the case of a VM regression so * warn about it. */ if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == PF_MEMALLOC)) goto redirty; /* * Is this folio beyond the end of the file? * * The folio index is less than the end_index, adjust the end_pos * to the highest offset that this folio should represent. * ----------------------------------------------------- * | file mapping | <EOF> | * ----------------------------------------------------- * | Page ... 
| Page N-2 | Page N-1 | Page N | | * ^--------------------------------^----------|-------- * | desired writeback range | see else | * ---------------------------------^------------------| */ isize = i_size_read(inode); end_pos = folio_pos(folio) + folio_size(folio); if (end_pos > isize) { /* * Check whether the page to write out is beyond or straddles * i_size or not. * ------------------------------------------------------- * | file mapping | <EOF> | * ------------------------------------------------------- * | Page ... | Page N-2 | Page N-1 | Page N | Beyond | * ^--------------------------------^-----------|--------- * | | Straddles | * ---------------------------------^-----------|--------| */ size_t poff = offset_in_folio(folio, isize); pgoff_t end_index = isize >> PAGE_SHIFT; /* * Skip the page if it's fully outside i_size, e.g. * due to a truncate operation that's in progress. We've * cleaned this page and truncate will finish things off for * us. * * Note that the end_index is unsigned long. If the given * offset is greater than 16TB on a 32-bit system then if we * checked if the page is fully outside i_size with * "if (page->index >= end_index + 1)", "end_index + 1" would * overflow and evaluate to 0. Hence this page would be * redirtied and written out repeatedly, which would result in * an infinite loop; the user program performing this operation * would hang. Instead, we can detect this situation by * checking if the page is totally beyond i_size or if its * offset is just equal to the EOF. */ if (folio->index > end_index || (folio->index == end_index && poff == 0)) goto unlock; /* * The page straddles i_size. It must be zeroed out on each * and every writepage invocation because it may be mmapped. * "A file is mapped in multiples of the page size. For a file * that is not a multiple of the page size, the remaining * memory is zeroed when mapped, and writes to that region are * not written out to the file." */ folio_zero_segment(folio, poff, folio_size(folio)); end_pos = isize; } return iomap_writepage_map(wpc, wbc, inode, folio, end_pos); redirty: folio_redirty_for_writepage(wbc, folio); unlock: folio_unlock(folio); return 0; } int iomap_writepages(struct address_space *mapping, struct writeback_control *wbc, struct iomap_writepage_ctx *wpc, const struct iomap_writeback_ops *ops) { int ret; wpc->ops = ops; ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc); if (!wpc->ioend) return ret; return iomap_submit_ioend(wpc, wpc->ioend, ret); } EXPORT_SYMBOL_GPL(iomap_writepages); static int __init iomap_init(void) { return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE), offsetof(struct iomap_ioend, io_inline_bio), BIOSET_NEED_BVECS); } fs_initcall(iomap_init); |
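/*
 * Illustrative sketch only: how a filesystem might wire iomap_writepages()
 * and the iomap_writeback_ops callbacks above into its ->writepages method.
 * The "myfs_" names and the embedded context struct are assumptions made for
 * the example, not part of iomap; real users (e.g. XFS) follow this pattern.
 */
struct myfs_writepage_ctx {
	struct iomap_writepage_ctx ctx;	/* iomap's context must come first */
	/* filesystem-private writeback state could live here */
};

static int myfs_map_blocks(struct iomap_writepage_ctx *wpc,
			   struct inode *inode, loff_t offset)
{
	/*
	 * Fill wpc->iomap with an allocated mapping that covers @offset,
	 * converting delalloc reservations as needed. Hypothetical stub.
	 */
	return -EOPNOTSUPP;
}

static const struct iomap_writeback_ops myfs_writeback_ops = {
	.map_blocks	= myfs_map_blocks,
};

static int myfs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct myfs_writepage_ctx wpc = { };

	return iomap_writepages(mapping, wbc, &wpc.ctx, &myfs_writeback_ops);
}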
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
*/ #include <linux/fs.h> #include <linux/filelock.h> #include <linux/miscdevice.h> #include <linux/poll.h> #include <linux/dlm.h> #include <linux/dlm_plock.h> #include <linux/slab.h> #include <trace/events/dlm.h> #include "dlm_internal.h" #include "lockspace.h" static DEFINE_SPINLOCK(ops_lock); static LIST_HEAD(send_list); static LIST_HEAD(recv_list); static DECLARE_WAIT_QUEUE_HEAD(send_wq); static DECLARE_WAIT_QUEUE_HEAD(recv_wq); struct plock_async_data { void *fl; void *file; struct file_lock flc; int (*callback)(struct file_lock *fl, int result); }; struct plock_op { struct list_head list; int done; struct dlm_plock_info info; /* if set indicates async handling */ struct plock_async_data *data; }; static inline void set_version(struct dlm_plock_info *info) { info->version[0] = DLM_PLOCK_VERSION_MAJOR; info->version[1] = DLM_PLOCK_VERSION_MINOR; info->version[2] = DLM_PLOCK_VERSION_PATCH; } static struct plock_op *plock_lookup_waiter(const struct dlm_plock_info *info) { struct plock_op *op = NULL, *iter; list_for_each_entry(iter, &recv_list, list) { if (iter->info.fsid == info->fsid && iter->info.number == info->number && iter->info.owner == info->owner && iter->info.pid == info->pid && iter->info.start == info->start && iter->info.end == info->end && iter->info.ex == info->ex && iter->info.wait) { op = iter; break; } } return op; } static int check_version(struct dlm_plock_info *info) { if ((DLM_PLOCK_VERSION_MAJOR != info->version[0]) || (DLM_PLOCK_VERSION_MINOR < info->version[1])) { log_print("plock device version mismatch: " "kernel (%u.%u.%u), user (%u.%u.%u)", DLM_PLOCK_VERSION_MAJOR, DLM_PLOCK_VERSION_MINOR, DLM_PLOCK_VERSION_PATCH, info->version[0], info->version[1], info->version[2]); return -EINVAL; } return 0; } static void dlm_release_plock_op(struct plock_op *op) { kfree(op->data); kfree(op); } static void send_op(struct plock_op *op) { set_version(&op->info); spin_lock(&ops_lock); list_add_tail(&op->list, &send_list); spin_unlock(&ops_lock); wake_up(&send_wq); } static int do_lock_cancel(const struct dlm_plock_info *orig_info) { struct plock_op *op; int rv; op = kzalloc(sizeof(*op), GFP_NOFS); if (!op) return -ENOMEM; op->info = *orig_info; op->info.optype = DLM_PLOCK_OP_CANCEL; op->info.wait = 0; send_op(op); wait_event(recv_wq, (op->done != 0)); rv = op->info.rv; dlm_release_plock_op(op); return rv; } int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file, int cmd, struct file_lock *fl) { struct plock_async_data *op_data; struct dlm_ls *ls; struct plock_op *op; int rv; ls = dlm_find_lockspace_local(lockspace); if (!ls) return -EINVAL; op = kzalloc(sizeof(*op), GFP_NOFS); if (!op) { rv = -ENOMEM; goto out; } op->info.optype = DLM_PLOCK_OP_LOCK; op->info.pid = fl->fl_pid; op->info.ex = (fl->fl_type == F_WRLCK); op->info.wait = IS_SETLKW(cmd); op->info.fsid = ls->ls_global_id; op->info.number = number; op->info.start = fl->fl_start; op->info.end = fl->fl_end; /* async handling */ if (fl->fl_lmops && fl->fl_lmops->lm_grant) { op_data = kzalloc(sizeof(*op_data), GFP_NOFS); if (!op_data) { dlm_release_plock_op(op); rv = -ENOMEM; goto out; } /* fl_owner is lockd which doesn't distinguish processes on the nfs client */ op->info.owner = (__u64) fl->fl_pid; op_data->callback = fl->fl_lmops->lm_grant; locks_init_lock(&op_data->flc); locks_copy_lock(&op_data->flc, fl); op_data->fl = fl; op_data->file = file; op->data = op_data; send_op(op); rv = FILE_LOCK_DEFERRED; goto out; } else { op->info.owner = (__u64)(long) fl->fl_owner; } send_op(op); if 
(op->info.wait) { rv = wait_event_interruptible(recv_wq, (op->done != 0)); if (rv == -ERESTARTSYS) { spin_lock(&ops_lock); /* recheck under ops_lock if we got a done != 0, * if so this interrupt case should be ignored */ if (op->done != 0) { spin_unlock(&ops_lock); goto do_lock_wait; } spin_unlock(&ops_lock); rv = do_lock_cancel(&op->info); switch (rv) { case 0: /* waiter was deleted in user space, answer will never come * remove original request. The original request must be * on recv_list because the answer of do_lock_cancel() * synchronized it. */ spin_lock(&ops_lock); list_del(&op->list); spin_unlock(&ops_lock); rv = -EINTR; break; case -ENOENT: /* cancellation wasn't successful but op should be done */ fallthrough; default: /* internal error doing cancel we need to wait */ goto wait; } log_debug(ls, "%s: wait interrupted %x %llx pid %d", __func__, ls->ls_global_id, (unsigned long long)number, op->info.pid); dlm_release_plock_op(op); goto out; } } else { wait: wait_event(recv_wq, (op->done != 0)); } do_lock_wait: WARN_ON(!list_empty(&op->list)); rv = op->info.rv; if (!rv) { if (locks_lock_file_wait(file, fl) < 0) log_error(ls, "dlm_posix_lock: vfs lock error %llx", (unsigned long long)number); } dlm_release_plock_op(op); out: dlm_put_lockspace(ls); return rv; } EXPORT_SYMBOL_GPL(dlm_posix_lock); /* Returns failure iff a successful lock operation should be canceled */ static int dlm_plock_callback(struct plock_op *op) { struct plock_async_data *op_data = op->data; struct file *file; struct file_lock *fl; struct file_lock *flc; int (*notify)(struct file_lock *fl, int result) = NULL; int rv = 0; WARN_ON(!list_empty(&op->list)); /* check if the following 2 are still valid or make a copy */ file = op_data->file; flc = &op_data->flc; fl = op_data->fl; notify = op_data->callback; if (op->info.rv) { notify(fl, op->info.rv); goto out; } /* got fs lock; bookkeep locally as well: */ flc->fl_flags &= ~FL_SLEEP; if (posix_lock_file(file, flc, NULL)) { /* * This can only happen in the case of kmalloc() failure. * The filesystem's own lock is the authoritative lock, * so a failure to get the lock locally is not a disaster. * As long as the fs cannot reliably cancel locks (especially * in a low-memory situation), we're better off ignoring * this failure than trying to recover. 
*/ log_print("dlm_plock_callback: vfs lock error %llx file %p fl %p", (unsigned long long)op->info.number, file, fl); } rv = notify(fl, 0); if (rv) { /* XXX: We need to cancel the fs lock here: */ log_print("%s: lock granted after lock request failed; dangling lock!", __func__); goto out; } out: dlm_release_plock_op(op); return rv; } int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file, struct file_lock *fl) { struct dlm_ls *ls; struct plock_op *op; int rv; unsigned char fl_flags = fl->fl_flags; ls = dlm_find_lockspace_local(lockspace); if (!ls) return -EINVAL; op = kzalloc(sizeof(*op), GFP_NOFS); if (!op) { rv = -ENOMEM; goto out; } /* cause the vfs unlock to return ENOENT if lock is not found */ fl->fl_flags |= FL_EXISTS; rv = locks_lock_file_wait(file, fl); if (rv == -ENOENT) { rv = 0; goto out_free; } if (rv < 0) { log_error(ls, "dlm_posix_unlock: vfs unlock error %d %llx", rv, (unsigned long long)number); } op->info.optype = DLM_PLOCK_OP_UNLOCK; op->info.pid = fl->fl_pid; op->info.fsid = ls->ls_global_id; op->info.number = number; op->info.start = fl->fl_start; op->info.end = fl->fl_end; if (fl->fl_lmops && fl->fl_lmops->lm_grant) op->info.owner = (__u64) fl->fl_pid; else op->info.owner = (__u64)(long) fl->fl_owner; if (fl->fl_flags & FL_CLOSE) { op->info.flags |= DLM_PLOCK_FL_CLOSE; send_op(op); rv = 0; goto out; } send_op(op); wait_event(recv_wq, (op->done != 0)); WARN_ON(!list_empty(&op->list)); rv = op->info.rv; if (rv == -ENOENT) rv = 0; out_free: dlm_release_plock_op(op); out: dlm_put_lockspace(ls); fl->fl_flags = fl_flags; return rv; } EXPORT_SYMBOL_GPL(dlm_posix_unlock); /* * NOTE: This implementation can only handle async lock requests as nfs * do it. It cannot handle cancellation of a pending lock request sitting * in wait_event(), but for now only nfs is the only user local kernel * user. */ int dlm_posix_cancel(dlm_lockspace_t *lockspace, u64 number, struct file *file, struct file_lock *fl) { struct dlm_plock_info info; struct plock_op *op; struct dlm_ls *ls; int rv; /* this only works for async request for now and nfs is the only * kernel user right now. */ if (WARN_ON_ONCE(!fl->fl_lmops || !fl->fl_lmops->lm_grant)) return -EOPNOTSUPP; ls = dlm_find_lockspace_local(lockspace); if (!ls) return -EINVAL; memset(&info, 0, sizeof(info)); info.pid = fl->fl_pid; info.ex = (fl->fl_type == F_WRLCK); info.fsid = ls->ls_global_id; dlm_put_lockspace(ls); info.number = number; info.start = fl->fl_start; info.end = fl->fl_end; info.owner = (__u64)fl->fl_pid; rv = do_lock_cancel(&info); switch (rv) { case 0: spin_lock(&ops_lock); /* lock request to cancel must be on recv_list because * do_lock_cancel() synchronizes it. */ op = plock_lookup_waiter(&info); if (WARN_ON_ONCE(!op)) { spin_unlock(&ops_lock); rv = -ENOLCK; break; } list_del(&op->list); spin_unlock(&ops_lock); WARN_ON(op->info.optype != DLM_PLOCK_OP_LOCK); op->data->callback(op->data->fl, -EINTR); dlm_release_plock_op(op); rv = -EINTR; break; case -ENOENT: /* if cancel wasn't successful we probably were to late * or it was a non-blocking lock request, so just unlock it. 
*/ rv = dlm_posix_unlock(lockspace, number, file, fl); break; default: break; } return rv; } EXPORT_SYMBOL_GPL(dlm_posix_cancel); int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file, struct file_lock *fl) { struct dlm_ls *ls; struct plock_op *op; int rv; ls = dlm_find_lockspace_local(lockspace); if (!ls) return -EINVAL; op = kzalloc(sizeof(*op), GFP_NOFS); if (!op) { rv = -ENOMEM; goto out; } op->info.optype = DLM_PLOCK_OP_GET; op->info.pid = fl->fl_pid; op->info.ex = (fl->fl_type == F_WRLCK); op->info.fsid = ls->ls_global_id; op->info.number = number; op->info.start = fl->fl_start; op->info.end = fl->fl_end; if (fl->fl_lmops && fl->fl_lmops->lm_grant) op->info.owner = (__u64) fl->fl_pid; else op->info.owner = (__u64)(long) fl->fl_owner; send_op(op); wait_event(recv_wq, (op->done != 0)); WARN_ON(!list_empty(&op->list)); /* info.rv from userspace is 1 for conflict, 0 for no-conflict, -ENOENT if there are no locks on the file */ rv = op->info.rv; fl->fl_type = F_UNLCK; if (rv == -ENOENT) rv = 0; else if (rv > 0) { locks_init_lock(fl); fl->fl_type = (op->info.ex) ? F_WRLCK : F_RDLCK; fl->fl_flags = FL_POSIX; fl->fl_pid = op->info.pid; if (op->info.nodeid != dlm_our_nodeid()) fl->fl_pid = -fl->fl_pid; fl->fl_start = op->info.start; fl->fl_end = op->info.end; rv = 0; } dlm_release_plock_op(op); out: dlm_put_lockspace(ls); return rv; } EXPORT_SYMBOL_GPL(dlm_posix_get); /* a read copies out one plock request from the send list */ static ssize_t dev_read(struct file *file, char __user *u, size_t count, loff_t *ppos) { struct dlm_plock_info info; struct plock_op *op = NULL; if (count < sizeof(info)) return -EINVAL; spin_lock(&ops_lock); if (!list_empty(&send_list)) { op = list_first_entry(&send_list, struct plock_op, list); if (op->info.flags & DLM_PLOCK_FL_CLOSE) list_del(&op->list); else list_move_tail(&op->list, &recv_list); memcpy(&info, &op->info, sizeof(info)); } spin_unlock(&ops_lock); if (!op) return -EAGAIN; trace_dlm_plock_read(&info); /* there is no need to get a reply from userspace for unlocks that were generated by the vfs cleaning up for a close (the process did not make an unlock call). */ if (op->info.flags & DLM_PLOCK_FL_CLOSE) dlm_release_plock_op(op); if (copy_to_user(u, &info, sizeof(info))) return -EFAULT; return sizeof(info); } /* a write copies in one plock result that should match a plock_op on the recv list */ static ssize_t dev_write(struct file *file, const char __user *u, size_t count, loff_t *ppos) { struct plock_op *op = NULL, *iter; struct dlm_plock_info info; int do_callback = 0; if (count != sizeof(info)) return -EINVAL; if (copy_from_user(&info, u, sizeof(info))) return -EFAULT; trace_dlm_plock_write(&info); if (check_version(&info)) return -EINVAL; /* * The results for waiting ops (SETLKW) can be returned in any * order, so match all fields to find the op. The results for * non-waiting ops are returned in the order that they were sent * to userspace, so match the result with the first non-waiting op. */ spin_lock(&ops_lock); if (info.wait) { op = plock_lookup_waiter(&info); } else { list_for_each_entry(iter, &recv_list, list) { if (!iter->info.wait && iter->info.fsid == info.fsid) { op = iter; break; } } } if (op) { /* Sanity check that op and info match. 
*/ if (info.wait) WARN_ON(op->info.optype != DLM_PLOCK_OP_LOCK); else WARN_ON(op->info.number != info.number || op->info.owner != info.owner || op->info.optype != info.optype); list_del_init(&op->list); memcpy(&op->info, &info, sizeof(info)); if (op->data) do_callback = 1; else op->done = 1; } spin_unlock(&ops_lock); if (op) { if (do_callback) dlm_plock_callback(op); else wake_up(&recv_wq); } else pr_debug("%s: no op %x %llx", __func__, info.fsid, (unsigned long long)info.number); return count; } static __poll_t dev_poll(struct file *file, poll_table *wait) { __poll_t mask = 0; poll_wait(file, &send_wq, wait); spin_lock(&ops_lock); if (!list_empty(&send_list)) mask = EPOLLIN | EPOLLRDNORM; spin_unlock(&ops_lock); return mask; } static const struct file_operations dev_fops = { .read = dev_read, .write = dev_write, .poll = dev_poll, .owner = THIS_MODULE, .llseek = noop_llseek, }; static struct miscdevice plock_dev_misc = { .minor = MISC_DYNAMIC_MINOR, .name = DLM_PLOCK_MISC_NAME, .fops = &dev_fops }; int dlm_plock_init(void) { int rv; rv = misc_register(&plock_dev_misc); if (rv) log_print("dlm_plock_init: misc_register failed %d", rv); return rv; } void dlm_plock_exit(void) { misc_deregister(&plock_dev_misc); WARN_ON(!list_empty(&send_list)); WARN_ON(!list_empty(&recv_list)); } |
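/*
 * Illustrative userspace sketch only: the request/reply protocol that
 * dev_read() and dev_write() above implement, as seen by a daemon such as
 * dlm_controld. The device node (normally /dev/<DLM_PLOCK_MISC_NAME>, i.e.
 * /dev/dlm_plock) and the helper name are assumptions; error handling and
 * the actual cluster-wide lock resolution are omitted.
 */
#include <fcntl.h>
#include <unistd.h>
#include <linux/dlm_plock.h>

static void service_one_plock_request(int fd)
{
	struct dlm_plock_info info;

	/* dev_read(): each read() hands out one queued plock request */
	if (read(fd, &info, sizeof(info)) != (ssize_t)sizeof(info))
		return;

	/* ... resolve the posix lock cluster-wide here ... */
	info.rv = 0;	/* 0 = granted, -errno = failure, 1 = conflict (GET) */

	/* dev_write(): the reply is matched back to the waiting plock_op */
	if (write(fd, &info, sizeof(info)) != (ssize_t)sizeof(info))
		return;
}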
/* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM sctp #if !defined(_TRACE_SCTP_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_SCTP_H #include <net/sctp/structs.h> #include <linux/tracepoint.h> TRACE_EVENT(sctp_probe_path, TP_PROTO(struct sctp_transport *sp, const struct sctp_association *asoc), TP_ARGS(sp, asoc), TP_STRUCT__entry( __field(__u64, asoc) __field(__u32, primary) __array(__u8, ipaddr, sizeof(union sctp_addr)) __field(__u32, state) __field(__u32, cwnd) __field(__u32, ssthresh) __field(__u32, flight_size) __field(__u32, partial_bytes_acked) __field(__u32, pathmtu) ), TP_fast_assign( __entry->asoc = (unsigned long)asoc; __entry->primary = (sp == asoc->peer.primary_path); memcpy(__entry->ipaddr, &sp->ipaddr, sizeof(union sctp_addr)); __entry->state = sp->state; __entry->cwnd = sp->cwnd; __entry->ssthresh = sp->ssthresh; __entry->flight_size = sp->flight_size; __entry->partial_bytes_acked = sp->partial_bytes_acked; __entry->pathmtu = sp->pathmtu; ), TP_printk("asoc=%#llx%s ipaddr=%pISpc state=%u cwnd=%u ssthresh=%u " "flight_size=%u partial_bytes_acked=%u pathmtu=%u", __entry->asoc, __entry->primary ? "(*)" : "", __entry->ipaddr, __entry->state, __entry->cwnd, __entry->ssthresh, __entry->flight_size, __entry->partial_bytes_acked, __entry->pathmtu) ); TRACE_EVENT(sctp_probe, TP_PROTO(const struct sctp_endpoint *ep, const struct sctp_association *asoc, struct sctp_chunk *chunk), TP_ARGS(ep, asoc, chunk), TP_STRUCT__entry( __field(__u64, asoc) __field(__u32, mark) __field(__u16, bind_port) __field(__u16, peer_port) __field(__u32, pathmtu) __field(__u32, rwnd) __field(__u16, unack_data) ), TP_fast_assign( struct sk_buff *skb = chunk->skb; __entry->asoc = (unsigned long)asoc; __entry->mark = skb->mark; __entry->bind_port = ep->base.bind_addr.port; __entry->peer_port = asoc->peer.port; __entry->pathmtu = asoc->pathmtu; __entry->rwnd = asoc->peer.rwnd; __entry->unack_data = asoc->unack_data; ), TP_printk("asoc=%#llx mark=%#x bind_port=%d peer_port=%d pathmtu=%d " "rwnd=%u unack_data=%d", __entry->asoc, __entry->mark, __entry->bind_port, __entry->peer_port, __entry->pathmtu, __entry->rwnd, __entry->unack_data) ); #endif /* _TRACE_SCTP_H */ /* This part must be outside protection */ #include <trace/define_trace.h>
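/*
 * Illustrative sketch only: the TRACE_EVENT() definitions above generate
 * trace_sctp_probe_path()/trace_sctp_probe() call sites once a .c file
 * defines CREATE_TRACE_POINTS before including this header. The wrapper
 * below and its loop are an assumption for illustration, not the actual
 * caller in net/sctp; it relies on the sctp structures pulled in by
 * <net/sctp/structs.h> above.
 */
static inline void example_probe_all_paths(struct sctp_association *asoc)
{
	struct sctp_transport *t;

	/* record cwnd/ssthresh/flight_size for every peer address */
	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
		trace_sctp_probe_path(t, asoc);
}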
/* inffast.c -- fast decoding * Copyright (C) 1995-2004 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ #include <linux/zutil.h> #include "inftrees.h" #include "inflate.h" #include "inffast.h" #ifndef ASMINF union uu { unsigned short us; unsigned char b[2]; }; /* Endian independent version */ static inline unsigned short get_unaligned16(const unsigned short *p) { union uu mm; unsigned char *b = (unsigned char *)p; mm.b[0] = b[0]; mm.b[1] = b[1]; return mm.us; } /* Decode literal, length, and distance codes and write out the resulting literal and match bytes until either not enough input or output is available, an end-of-block is encountered, or a data error is encountered. When large enough input and output buffers are supplied to inflate(), for example, a 16K input buffer and a 64K output buffer, more than 95% of the inflate execution time is spent in this routine. Entry assumptions: state->mode == LEN strm->avail_in >= 6 strm->avail_out >= 258 start >= strm->avail_out state->bits < 8 On return, state->mode is one of: LEN -- ran out of enough output space or enough available input TYPE -- reached end of block code, inflate() to interpret next block BAD -- error in block data Notes: - The maximum input bits used by a length/distance pair is 15 bits for the length code, 5 bits for the length extra, 15 bits for the distance code, and 13 bits for the distance extra. This totals 48 bits, or six bytes. Therefore if strm->avail_in >= 6, then there is enough input to avoid checking for available input while decoding. - The maximum bytes that a single length/distance pair can output is 258 bytes, which is the maximum length that can be coded. inflate_fast() requires strm->avail_out >= 258 for each loop to avoid checking for output space.
- @start: inflate()'s starting value for strm->avail_out */ void inflate_fast(z_streamp strm, unsigned start) { struct inflate_state *state; const unsigned char *in; /* local strm->next_in */ const unsigned char *last; /* while in < last, enough input available */ unsigned char *out; /* local strm->next_out */ unsigned char *beg; /* inflate()'s initial strm->next_out */ unsigned char *end; /* while out < end, enough space available */ #ifdef INFLATE_STRICT unsigned dmax; /* maximum distance from zlib header */ #endif unsigned wsize; /* window size or zero if not using window */ unsigned whave; /* valid bytes in the window */ unsigned write; /* window write index */ unsigned char *window; /* allocated sliding window, if wsize != 0 */ unsigned long hold; /* local strm->hold */ unsigned bits; /* local strm->bits */ code const *lcode; /* local strm->lencode */ code const *dcode; /* local strm->distcode */ unsigned lmask; /* mask for first level of length codes */ unsigned dmask; /* mask for first level of distance codes */ code this; /* retrieved table entry */ unsigned op; /* code bits, operation, extra bits, or */ /* window position, window bytes to copy */ unsigned len; /* match length, unused bytes */ unsigned dist; /* match distance */ unsigned char *from; /* where to copy match from */ /* copy state to local variables */ state = (struct inflate_state *)strm->state; in = strm->next_in; last = in + (strm->avail_in - 5); out = strm->next_out; beg = out - (start - strm->avail_out); end = out + (strm->avail_out - 257); #ifdef INFLATE_STRICT dmax = state->dmax; #endif wsize = state->wsize; whave = state->whave; write = state->write; window = state->window; hold = state->hold; bits = state->bits; lcode = state->lencode; dcode = state->distcode; lmask = (1U << state->lenbits) - 1; dmask = (1U << state->distbits) - 1; /* decode literals and length/distances until end-of-block or not enough input data or output space */ do { if (bits < 15) { hold += (unsigned long)(*in++) << bits; bits += 8; hold += (unsigned long)(*in++) << bits; bits += 8; } this = lcode[hold & lmask]; dolen: op = (unsigned)(this.bits); hold >>= op; bits -= op; op = (unsigned)(this.op); if (op == 0) { /* literal */ *out++ = (unsigned char)(this.val); } else if (op & 16) { /* length base */ len = (unsigned)(this.val); op &= 15; /* number of extra bits */ if (op) { if (bits < op) { hold += (unsigned long)(*in++) << bits; bits += 8; } len += (unsigned)hold & ((1U << op) - 1); hold >>= op; bits -= op; } if (bits < 15) { hold += (unsigned long)(*in++) << bits; bits += 8; hold += (unsigned long)(*in++) << bits; bits += 8; } this = dcode[hold & dmask]; dodist: op = (unsigned)(this.bits); hold >>= op; bits -= op; op = (unsigned)(this.op); if (op & 16) { /* distance base */ dist = (unsigned)(this.val); op &= 15; /* number of extra bits */ if (bits < op) { hold += (unsigned long)(*in++) << bits; bits += 8; if (bits < op) { hold += (unsigned long)(*in++) << bits; bits += 8; } } dist += (unsigned)hold & ((1U << op) - 1); #ifdef INFLATE_STRICT if (dist > dmax) { strm->msg = (char *)"invalid distance too far back"; state->mode = BAD; break; } #endif hold >>= op; bits -= op; op = (unsigned)(out - beg); /* max distance in output */ if (dist > op) { /* see if copy from window */ op = dist - op; /* distance back in window */ if (op > whave) { strm->msg = (char *)"invalid distance too far back"; state->mode = BAD; break; } from = window; if (write == 0) { /* very common case */ from += wsize - op; if (op < len) { /* some from window */ len -= op; 
do { *out++ = *from++; } while (--op); from = out - dist; /* rest from output */ } } else if (write < op) { /* wrap around window */ from += wsize + write - op; op -= write; if (op < len) { /* some from end of window */ len -= op; do { *out++ = *from++; } while (--op); from = window; if (write < len) { /* some from start of window */ op = write; len -= op; do { *out++ = *from++; } while (--op); from = out - dist; /* rest from output */ } } } else { /* contiguous in window */ from += write - op; if (op < len) { /* some from window */ len -= op; do { *out++ = *from++; } while (--op); from = out - dist; /* rest from output */ } } while (len > 2) { *out++ = *from++; *out++ = *from++; *out++ = *from++; len -= 3; } if (len) { *out++ = *from++; if (len > 1) *out++ = *from++; } } else { unsigned short *sout; unsigned long loops; from = out - dist; /* copy direct from output */ /* minimum length is three */ /* Align out addr */ if (!((long)(out - 1) & 1)) { *out++ = *from++; len--; } sout = (unsigned short *)(out); if (dist > 2) { unsigned short *sfrom; sfrom = (unsigned short *)(from); loops = len >> 1; do { if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) *sout++ = *sfrom++; else *sout++ = get_unaligned16(sfrom++); } while (--loops); out = (unsigned char *)sout; from = (unsigned char *)sfrom; } else { /* dist == 1 or dist == 2 */ unsigned short pat16; pat16 = *(sout-1); if (dist == 1) { union uu mm; /* copy one char pattern to both bytes */ mm.us = pat16; mm.b[0] = mm.b[1]; pat16 = mm.us; } loops = len >> 1; do *sout++ = pat16; while (--loops); out = (unsigned char *)sout; } if (len & 1) *out++ = *from++; } } else if ((op & 64) == 0) { /* 2nd level distance code */ this = dcode[this.val + (hold & ((1U << op) - 1))]; goto dodist; } else { strm->msg = (char *)"invalid distance code"; state->mode = BAD; break; } } else if ((op & 64) == 0) { /* 2nd level length code */ this = lcode[this.val + (hold & ((1U << op) - 1))]; goto dolen; } else if (op & 32) { /* end-of-block */ state->mode = TYPE; break; } else { strm->msg = (char *)"invalid literal/length code"; state->mode = BAD; break; } } while (in < last && out < end); /* return unused bytes (on entry, bits < 8, so in won't go too far back) */ len = bits >> 3; in -= len; bits -= len << 3; hold &= (1U << bits) - 1; /* update state and return */ strm->next_in = in; strm->next_out = out; strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last)); strm->avail_out = (unsigned)(out < end ? 257 + (end - out) : 257 - (out - end)); state->hold = hold; state->bits = bits; return; } /* inflate_fast() speedups that turned out slower (on a PowerPC G3 750CXe): - Using bit fields for code structure - Different op definition to avoid & for extra bits (do & for table bits) - Three separate decoding do-loops for direct, window, and write == 0 - Special case for distance > 1 copies to do overlapped load and store copy - Explicit branch predictions (based on measured branch probabilities) - Deferring match copy and interspersed it with decoding subsequent codes - Swapping literal/length else - Swapping window/direct else - Larger unrolled copy loops (three is about right) - Moving len -= 3 statement into middle of loop */ #endif /* !ASMINF */ |
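/*
 * Illustrative sketch only: the "Entry assumptions" documented above
 * inflate_fast(), written out as the kind of guard the caller (inflate() in
 * inflate.c) is expected to apply before taking the fast path. The helper
 * name is hypothetical.
 */
static inline int zlib_can_use_fast_path(const z_streamp strm, unsigned start)
{
	const struct inflate_state *state =
		(const struct inflate_state *)strm->state;

	/* 6 input bytes cover the worst-case 48 bits per length/distance
	   pair; 258 output bytes cover the longest possible match. */
	return state->mode == LEN && state->bits < 8 &&
	       strm->avail_in >= 6 && strm->avail_out >= 258 &&
	       start >= strm->avail_out;
}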
// SPDX-License-Identifier: GPL-2.0 #include <linux/netdevice.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <net/wext.h> #include "dev.h" #define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1) #define get_bucket(x) ((x) >> BUCKET_SPACE) #define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1)) #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o)) static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos) { struct net *net = seq_file_net(seq); struct net_device *dev; struct hlist_head *h; unsigned int count = 0, offset = get_offset(*pos); h = &net->dev_index_head[get_bucket(*pos)]; hlist_for_each_entry_rcu(dev, h, index_hlist) { if (++count == offset) return dev; } return NULL; } static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos) { struct net_device *dev; unsigned int bucket; do { dev = dev_from_same_bucket(seq, pos); if (dev) return dev; bucket = get_bucket(*pos) + 1; *pos = set_bucket_offset(bucket, 1); } while (bucket < NETDEV_HASHENTRIES); return NULL; } /* * This is invoked by the /proc filesystem handler to display a device * in detail.
*/ static void *dev_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { rcu_read_lock(); if (!*pos) return SEQ_START_TOKEN; if (get_bucket(*pos) >= NETDEV_HASHENTRIES) return NULL; return dev_from_bucket(seq, pos); } static void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; return dev_from_bucket(seq, pos); } static void dev_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { rcu_read_unlock(); } static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev) { struct rtnl_link_stats64 temp; const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp); seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu " "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n", dev->name, stats->rx_bytes, stats->rx_packets, stats->rx_errors, stats->rx_dropped + stats->rx_missed_errors, stats->rx_fifo_errors, stats->rx_length_errors + stats->rx_over_errors + stats->rx_crc_errors + stats->rx_frame_errors, stats->rx_compressed, stats->multicast, stats->tx_bytes, stats->tx_packets, stats->tx_errors, stats->tx_dropped, stats->tx_fifo_errors, stats->collisions, stats->tx_carrier_errors + stats->tx_aborted_errors + stats->tx_window_errors + stats->tx_heartbeat_errors, stats->tx_compressed); } /* * Called from the PROCfs module. This now uses the new arbitrary sized * /proc/net interface to create /proc/net/dev */ static int dev_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_puts(seq, "Inter-| Receive " " | Transmit\n" " face |bytes packets errs drop fifo frame " "compressed multicast|bytes packets errs " "drop fifo colls carrier compressed\n"); else dev_seq_printf_stats(seq, v); return 0; } static u32 softnet_input_pkt_queue_len(struct softnet_data *sd) { return skb_queue_len_lockless(&sd->input_pkt_queue); } static u32 softnet_process_queue_len(struct softnet_data *sd) { return skb_queue_len_lockless(&sd->process_queue); } static struct softnet_data *softnet_get_online(loff_t *pos) { struct softnet_data *sd = NULL; while (*pos < nr_cpu_ids) if (cpu_online(*pos)) { sd = &per_cpu(softnet_data, *pos); break; } else ++*pos; return sd; } static void *softnet_seq_start(struct seq_file *seq, loff_t *pos) { return softnet_get_online(pos); } static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; return softnet_get_online(pos); } static void softnet_seq_stop(struct seq_file *seq, void *v) { } static int softnet_seq_show(struct seq_file *seq, void *v) { struct softnet_data *sd = v; u32 input_qlen = softnet_input_pkt_queue_len(sd); u32 process_qlen = softnet_process_queue_len(sd); unsigned int flow_limit_count = 0; #ifdef CONFIG_NET_FLOW_LIMIT struct sd_flow_limit *fl; rcu_read_lock(); fl = rcu_dereference(sd->flow_limit); if (fl) flow_limit_count = fl->count; rcu_read_unlock(); #endif /* the index is the CPU id owing this sd. 
Since offline CPUs are not * displayed, it would be othrwise not trivial for the user-space * mapping the data a specific CPU */ seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x " "%08x %08x\n", sd->processed, sd->dropped, sd->time_squeeze, 0, 0, 0, 0, 0, /* was fastroute */ 0, /* was cpu_collision */ sd->received_rps, flow_limit_count, input_qlen + process_qlen, (int)seq->index, input_qlen, process_qlen); return 0; } static const struct seq_operations dev_seq_ops = { .start = dev_seq_start, .next = dev_seq_next, .stop = dev_seq_stop, .show = dev_seq_show, }; static const struct seq_operations softnet_seq_ops = { .start = softnet_seq_start, .next = softnet_seq_next, .stop = softnet_seq_stop, .show = softnet_seq_show, }; static void *ptype_get_idx(struct seq_file *seq, loff_t pos) { struct list_head *ptype_list = NULL; struct packet_type *pt = NULL; struct net_device *dev; loff_t i = 0; int t; for_each_netdev_rcu(seq_file_net(seq), dev) { ptype_list = &dev->ptype_all; list_for_each_entry_rcu(pt, ptype_list, list) { if (i == pos) return pt; ++i; } } list_for_each_entry_rcu(pt, &ptype_all, list) { if (i == pos) return pt; ++i; } for (t = 0; t < PTYPE_HASH_SIZE; t++) { list_for_each_entry_rcu(pt, &ptype_base[t], list) { if (i == pos) return pt; ++i; } } return NULL; } static void *ptype_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { rcu_read_lock(); return *pos ? ptype_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; } static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct net_device *dev; struct packet_type *pt; struct list_head *nxt; int hash; ++*pos; if (v == SEQ_START_TOKEN) return ptype_get_idx(seq, 0); pt = v; nxt = pt->list.next; if (pt->dev) { if (nxt != &pt->dev->ptype_all) goto found; dev = pt->dev; for_each_netdev_continue_rcu(seq_file_net(seq), dev) { if (!list_empty(&dev->ptype_all)) { nxt = dev->ptype_all.next; goto found; } } nxt = ptype_all.next; goto ptype_all; } if (pt->type == htons(ETH_P_ALL)) { ptype_all: if (nxt != &ptype_all) goto found; hash = 0; nxt = ptype_base[0].next; } else hash = ntohs(pt->type) & PTYPE_HASH_MASK; while (nxt == &ptype_base[hash]) { if (++hash >= PTYPE_HASH_SIZE) return NULL; nxt = ptype_base[hash].next; } found: return list_entry(nxt, struct packet_type, list); } static void ptype_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { rcu_read_unlock(); } static int ptype_seq_show(struct seq_file *seq, void *v) { struct packet_type *pt = v; if (v == SEQ_START_TOKEN) seq_puts(seq, "Type Device Function\n"); else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) && (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) { if (pt->type == htons(ETH_P_ALL)) seq_puts(seq, "ALL "); else seq_printf(seq, "%04x", ntohs(pt->type)); seq_printf(seq, " %-8s %ps\n", pt->dev ? 
pt->dev->name : "", pt->func); } return 0; } static const struct seq_operations ptype_seq_ops = { .start = ptype_seq_start, .next = ptype_seq_next, .stop = ptype_seq_stop, .show = ptype_seq_show, }; static int __net_init dev_proc_net_init(struct net *net) { int rc = -ENOMEM; if (!proc_create_net("dev", 0444, net->proc_net, &dev_seq_ops, sizeof(struct seq_net_private))) goto out; if (!proc_create_seq("softnet_stat", 0444, net->proc_net, &softnet_seq_ops)) goto out_dev; if (!proc_create_net("ptype", 0444, net->proc_net, &ptype_seq_ops, sizeof(struct seq_net_private))) goto out_softnet; if (wext_proc_init(net)) goto out_ptype; rc = 0; out: return rc; out_ptype: remove_proc_entry("ptype", net->proc_net); out_softnet: remove_proc_entry("softnet_stat", net->proc_net); out_dev: remove_proc_entry("dev", net->proc_net); goto out; } static void __net_exit dev_proc_net_exit(struct net *net) { wext_proc_exit(net); remove_proc_entry("ptype", net->proc_net); remove_proc_entry("softnet_stat", net->proc_net); remove_proc_entry("dev", net->proc_net); } static struct pernet_operations __net_initdata dev_proc_ops = { .init = dev_proc_net_init, .exit = dev_proc_net_exit, }; static int dev_mc_seq_show(struct seq_file *seq, void *v) { struct netdev_hw_addr *ha; struct net_device *dev = v; if (v == SEQ_START_TOKEN) return 0; netif_addr_lock_bh(dev); netdev_for_each_mc_addr(ha, dev) { seq_printf(seq, "%-4d %-15s %-5d %-5d %*phN\n", dev->ifindex, dev->name, ha->refcount, ha->global_use, (int)dev->addr_len, ha->addr); } netif_addr_unlock_bh(dev); return 0; } static const struct seq_operations dev_mc_seq_ops = { .start = dev_seq_start, .next = dev_seq_next, .stop = dev_seq_stop, .show = dev_mc_seq_show, }; static int __net_init dev_mc_net_init(struct net *net) { if (!proc_create_net("dev_mcast", 0, net->proc_net, &dev_mc_seq_ops, sizeof(struct seq_net_private))) return -ENOMEM; return 0; } static void __net_exit dev_mc_net_exit(struct net *net) { remove_proc_entry("dev_mcast", net->proc_net); } static struct pernet_operations __net_initdata dev_mc_net_ops = { .init = dev_mc_net_init, .exit = dev_mc_net_exit, }; int __init dev_proc_init(void) { int ret = register_pernet_subsys(&dev_proc_ops); if (!ret) return register_pernet_subsys(&dev_mc_net_ops); return ret; } |
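/*
 * Illustrative userspace sketch only: parsing one line of
 * /proc/net/softnet_stat in the column order emitted by softnet_seq_show()
 * above. The struct and helper are hypothetical; columns 4 through 9 are
 * historical placeholders that the kernel prints as zero.
 */
#include <stdio.h>

struct softnet_line {
	unsigned int processed, dropped, time_squeeze;
	unsigned int received_rps, flow_limit_count;
	unsigned int backlog_len, cpu_id, input_qlen, process_qlen;
};

static int parse_softnet_line(const char *line, struct softnet_line *s)
{
	unsigned int skip;

	return sscanf(line, "%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x",
		      &s->processed, &s->dropped, &s->time_squeeze,
		      &skip, &skip, &skip, &skip, &skip, &skip,
		      &s->received_rps, &s->flow_limit_count,
		      &s->backlog_len, &s->cpu_id,
		      &s->input_qlen, &s->process_qlen) == 15 ? 0 : -1;
}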
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/attr.c * * Copyright (C) 1991, 1992 Linus Torvalds * changes by Thomas Schoebel-Theuer */ #include <linux/export.h> #include <linux/time.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/sched/signal.h> #include <linux/capability.h> #include <linux/fsnotify.h> #include <linux/fcntl.h> #include <linux/filelock.h> #include <linux/security.h> #include <linux/evm.h> #include <linux/ima.h> #include "internal.h" /** * setattr_should_drop_sgid - determine whether the setgid bit needs to be * removed * @idmap: idmap of the mount @inode was found from * @inode: inode to check * * This function determines whether the setgid bit needs to be removed. * We retain backwards compatibility and require setgid bit to be removed * unconditionally if S_IXGRP is set. Otherwise we have the exact same * requirements as setattr_prepare() and setattr_copy(). * * Return: ATTR_KILL_SGID if setgid bit needs to be removed, 0 otherwise.
*/ int setattr_should_drop_sgid(struct mnt_idmap *idmap, const struct inode *inode) { umode_t mode = inode->i_mode; if (!(mode & S_ISGID)) return 0; if (mode & S_IXGRP) return ATTR_KILL_SGID; if (!in_group_or_capable(idmap, inode, i_gid_into_vfsgid(idmap, inode))) return ATTR_KILL_SGID; return 0; } EXPORT_SYMBOL(setattr_should_drop_sgid); /** * setattr_should_drop_suidgid - determine whether the set{g,u}id bit needs to * be dropped * @idmap: idmap of the mount @inode was found from * @inode: inode to check * * This function determines whether the set{g,u}id bits need to be removed. * If the setuid bit needs to be removed ATTR_KILL_SUID is returned. If the * setgid bit needs to be removed ATTR_KILL_SGID is returned. If both * set{g,u}id bits need to be removed the corresponding mask of both flags is * returned. * * Return: A mask of ATTR_KILL_S{G,U}ID indicating which - if any - setid bits * to remove, 0 otherwise. */ int setattr_should_drop_suidgid(struct mnt_idmap *idmap, struct inode *inode) { umode_t mode = inode->i_mode; int kill = 0; /* suid always must be killed */ if (unlikely(mode & S_ISUID)) kill = ATTR_KILL_SUID; kill |= setattr_should_drop_sgid(idmap, inode); if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode))) return kill; return 0; } EXPORT_SYMBOL(setattr_should_drop_suidgid); /** * chown_ok - verify permissions to chown inode * @idmap: idmap of the mount @inode was found from * @inode: inode to check permissions on * @ia_vfsuid: uid to chown @inode to * * If the inode has been found through an idmapped mount the idmap of * the vfsmount must be passed through @idmap. This function will then * take care to map the inode according to @idmap before checking * permissions. On non-idmapped mounts or if permission checking is to be * performed on the raw inode simply pass @nop_mnt_idmap. */ static bool chown_ok(struct mnt_idmap *idmap, const struct inode *inode, vfsuid_t ia_vfsuid) { vfsuid_t vfsuid = i_uid_into_vfsuid(idmap, inode); if (vfsuid_eq_kuid(vfsuid, current_fsuid()) && vfsuid_eq(ia_vfsuid, vfsuid)) return true; if (capable_wrt_inode_uidgid(idmap, inode, CAP_CHOWN)) return true; if (!vfsuid_valid(vfsuid) && ns_capable(inode->i_sb->s_user_ns, CAP_CHOWN)) return true; return false; } /** * chgrp_ok - verify permissions to chgrp inode * @idmap: idmap of the mount @inode was found from * @inode: inode to check permissions on * @ia_vfsgid: gid to chown @inode to * * If the inode has been found through an idmapped mount the idmap of * the vfsmount must be passed through @idmap. This function will then * take care to map the inode according to @idmap before checking * permissions. On non-idmapped mounts or if permission checking is to be * performed on the raw inode simply pass @nop_mnt_idmap. 
*/ static bool chgrp_ok(struct mnt_idmap *idmap, const struct inode *inode, vfsgid_t ia_vfsgid) { vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode); vfsuid_t vfsuid = i_uid_into_vfsuid(idmap, inode); if (vfsuid_eq_kuid(vfsuid, current_fsuid())) { if (vfsgid_eq(ia_vfsgid, vfsgid)) return true; if (vfsgid_in_group_p(ia_vfsgid)) return true; } if (capable_wrt_inode_uidgid(idmap, inode, CAP_CHOWN)) return true; if (!vfsgid_valid(vfsgid) && ns_capable(inode->i_sb->s_user_ns, CAP_CHOWN)) return true; return false; } /** * setattr_prepare - check if attribute changes to a dentry are allowed * @idmap: idmap of the mount the inode was found from * @dentry: dentry to check * @attr: attributes to change * * Check if we are allowed to change the attributes contained in @attr * in the given dentry. This includes the normal unix access permission * checks, as well as checks for rlimits and others. The function also clears * SGID bit from mode if user is not allowed to set it. Also file capabilities * and IMA extended attributes are cleared if ATTR_KILL_PRIV is set. * * If the inode has been found through an idmapped mount the idmap of * the vfsmount must be passed through @idmap. This function will then * take care to map the inode according to @idmap before checking * permissions. On non-idmapped mounts or if permission checking is to be * performed on the raw inode simply pass @nop_mnt_idmap. * * Should be called as the first thing in ->setattr implementations, * possibly after taking additional locks. */ int setattr_prepare(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) { struct inode *inode = d_inode(dentry); unsigned int ia_valid = attr->ia_valid; /* * First check size constraints. These can't be overriden using * ATTR_FORCE. */ if (ia_valid & ATTR_SIZE) { int error = inode_newsize_ok(inode, attr->ia_size); if (error) return error; } /* If force is set do it anyway. */ if (ia_valid & ATTR_FORCE) goto kill_priv; /* Make sure a caller can chown. */ if ((ia_valid & ATTR_UID) && !chown_ok(idmap, inode, attr->ia_vfsuid)) return -EPERM; /* Make sure caller can chgrp. */ if ((ia_valid & ATTR_GID) && !chgrp_ok(idmap, inode, attr->ia_vfsgid)) return -EPERM; /* Make sure a caller can chmod. */ if (ia_valid & ATTR_MODE) { vfsgid_t vfsgid; if (!inode_owner_or_capable(idmap, inode)) return -EPERM; if (ia_valid & ATTR_GID) vfsgid = attr->ia_vfsgid; else vfsgid = i_gid_into_vfsgid(idmap, inode); /* Also check the setgid bit! */ if (!in_group_or_capable(idmap, inode, vfsgid)) attr->ia_mode &= ~S_ISGID; } /* Check for setting the inode time. */ if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)) { if (!inode_owner_or_capable(idmap, inode)) return -EPERM; } kill_priv: /* User has permission for the change */ if (ia_valid & ATTR_KILL_PRIV) { int error; error = security_inode_killpriv(idmap, dentry); if (error) return error; } return 0; } EXPORT_SYMBOL(setattr_prepare); /** * inode_newsize_ok - may this inode be truncated to a given size * @inode: the inode to be truncated * @offset: the new size to assign to the inode * * inode_newsize_ok must be called with i_mutex held. * * inode_newsize_ok will check filesystem limits and ulimits to check that the * new inode size is within limits. inode_newsize_ok will also send SIGXFSZ * when necessary. Caller must not proceed with inode size change if failure is * returned. @inode must be a file (not directory), with appropriate * permissions to allow truncate (inode_newsize_ok does NOT check these * conditions). 
* * Return: 0 on success, -ve errno on failure */ int inode_newsize_ok(const struct inode *inode, loff_t offset) { if (offset < 0) return -EINVAL; if (inode->i_size < offset) { unsigned long limit; limit = rlimit(RLIMIT_FSIZE); if (limit != RLIM_INFINITY && offset > limit) goto out_sig; if (offset > inode->i_sb->s_maxbytes) goto out_big; } else { /* * truncation of in-use swapfiles is disallowed - it would * cause subsequent swapout to scribble on the now-freed * blocks. */ if (IS_SWAPFILE(inode)) return -ETXTBSY; } return 0; out_sig: send_sig(SIGXFSZ, current, 0); out_big: return -EFBIG; } EXPORT_SYMBOL(inode_newsize_ok); /** * setattr_copy - copy simple metadata updates into the generic inode * @idmap: idmap of the mount the inode was found from * @inode: the inode to be updated * @attr: the new attributes * * setattr_copy must be called with i_mutex held. * * setattr_copy updates the inode's metadata with that specified * in attr on idmapped mounts. Necessary permission checks to determine * whether or not the S_ISGID property needs to be removed are performed with * the correct idmapped mount permission helpers. * Noticeably missing is inode size update, which is more complex * as it requires pagecache updates. * * If the inode has been found through an idmapped mount the idmap of * the vfsmount must be passed through @idmap. This function will then * take care to map the inode according to @idmap before checking * permissions. On non-idmapped mounts or if permission checking is to be * performed on the raw inode simply pass @nop_mnt_idmap. * * The inode is not marked as dirty after this operation. The rationale is * that for "simple" filesystems, the struct inode is the inode storage. * The caller is free to mark the inode dirty afterwards if needed. */ void setattr_copy(struct mnt_idmap *idmap, struct inode *inode, const struct iattr *attr) { unsigned int ia_valid = attr->ia_valid; i_uid_update(idmap, attr, inode); i_gid_update(idmap, attr, inode); if (ia_valid & ATTR_ATIME) inode_set_atime_to_ts(inode, attr->ia_atime); if (ia_valid & ATTR_MTIME) inode_set_mtime_to_ts(inode, attr->ia_mtime); if (ia_valid & ATTR_CTIME) inode_set_ctime_to_ts(inode, attr->ia_ctime); if (ia_valid & ATTR_MODE) { umode_t mode = attr->ia_mode; if (!in_group_or_capable(idmap, inode, i_gid_into_vfsgid(idmap, inode))) mode &= ~S_ISGID; inode->i_mode = mode; } } EXPORT_SYMBOL(setattr_copy); int may_setattr(struct mnt_idmap *idmap, struct inode *inode, unsigned int ia_valid) { int error; if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_TIMES_SET)) { if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) return -EPERM; } /* * If utimes(2) and friends are called with times == NULL (or both * times are UTIME_NOW), then we need to check for write permission */ if (ia_valid & ATTR_TOUCH) { if (IS_IMMUTABLE(inode)) return -EPERM; if (!inode_owner_or_capable(idmap, inode)) { error = inode_permission(idmap, inode, MAY_WRITE); if (error) return error; } } return 0; } EXPORT_SYMBOL(may_setattr); /** * notify_change - modify attributes of a filesytem object * @idmap: idmap of the mount the inode was found from * @dentry: object affected * @attr: new attributes * @delegated_inode: returns inode, if the inode is delegated * * The caller must hold the i_mutex on the affected object. * * If notify_change discovers a delegation in need of breaking, * it will return -EWOULDBLOCK and return a reference to the inode in * delegated_inode. The caller should then break the delegation and * retry. 
Because breaking a delegation may take a long time, the * caller should drop the i_mutex before doing so. * * Alternatively, a caller may pass NULL for delegated_inode. This may * be appropriate for callers that expect the underlying filesystem not * to be NFS exported. Also, passing NULL is fine for callers holding * the file open for write, as there can be no conflicting delegation in * that case. * * If the inode has been found through an idmapped mount the idmap of * the vfsmount must be passed through @idmap. This function will then * take care to map the inode according to @idmap before checking * permissions. On non-idmapped mounts or if permission checking is to be * performed on the raw inode simply pass @nop_mnt_idmap. */ int notify_change(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr, struct inode **delegated_inode) { struct inode *inode = dentry->d_inode; umode_t mode = inode->i_mode; int error; struct timespec64 now; unsigned int ia_valid = attr->ia_valid; WARN_ON_ONCE(!inode_is_locked(inode)); error = may_setattr(idmap, inode, ia_valid); if (error) return error; if ((ia_valid & ATTR_MODE)) { /* * Don't allow changing the mode of symlinks: * * (1) The vfs doesn't take the mode of symlinks into account * during permission checking. * (2) This has never worked correctly. Most major filesystems * did return EOPNOTSUPP due to interactions with POSIX ACLs * but did still updated the mode of the symlink. * This inconsistency led system call wrapper providers such * as libc to block changing the mode of symlinks with * EOPNOTSUPP already. * (3) To even do this in the first place one would have to use * specific file descriptors and quite some effort. */ if (S_ISLNK(inode->i_mode)) return -EOPNOTSUPP; /* Flag setting protected by i_mutex */ if (is_sxid(attr->ia_mode)) inode->i_flags &= ~S_NOSEC; } now = current_time(inode); attr->ia_ctime = now; if (!(ia_valid & ATTR_ATIME_SET)) attr->ia_atime = now; else attr->ia_atime = timestamp_truncate(attr->ia_atime, inode); if (!(ia_valid & ATTR_MTIME_SET)) attr->ia_mtime = now; else attr->ia_mtime = timestamp_truncate(attr->ia_mtime, inode); if (ia_valid & ATTR_KILL_PRIV) { error = security_inode_need_killpriv(dentry); if (error < 0) return error; if (error == 0) ia_valid = attr->ia_valid &= ~ATTR_KILL_PRIV; } /* * We now pass ATTR_KILL_S*ID to the lower level setattr function so * that the function has the ability to reinterpret a mode change * that's due to these bits. This adds an implicit restriction that * no function will ever call notify_change with both ATTR_MODE and * ATTR_KILL_S*ID set. */ if ((ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID)) && (ia_valid & ATTR_MODE)) BUG(); if (ia_valid & ATTR_KILL_SUID) { if (mode & S_ISUID) { ia_valid = attr->ia_valid |= ATTR_MODE; attr->ia_mode = (inode->i_mode & ~S_ISUID); } } if (ia_valid & ATTR_KILL_SGID) { if (mode & S_ISGID) { if (!(ia_valid & ATTR_MODE)) { ia_valid = attr->ia_valid |= ATTR_MODE; attr->ia_mode = inode->i_mode; } attr->ia_mode &= ~S_ISGID; } } if (!(attr->ia_valid & ~(ATTR_KILL_SUID | ATTR_KILL_SGID))) return 0; /* * Verify that uid/gid changes are valid in the target * namespace of the superblock. */ if (ia_valid & ATTR_UID && !vfsuid_has_fsmapping(idmap, inode->i_sb->s_user_ns, attr->ia_vfsuid)) return -EOVERFLOW; if (ia_valid & ATTR_GID && !vfsgid_has_fsmapping(idmap, inode->i_sb->s_user_ns, attr->ia_vfsgid)) return -EOVERFLOW; /* Don't allow modifications of files with invalid uids or * gids unless those uids & gids are being made valid. 
*/ if (!(ia_valid & ATTR_UID) && !vfsuid_valid(i_uid_into_vfsuid(idmap, inode))) return -EOVERFLOW; if (!(ia_valid & ATTR_GID) && !vfsgid_valid(i_gid_into_vfsgid(idmap, inode))) return -EOVERFLOW; error = security_inode_setattr(idmap, dentry, attr); if (error) return error; error = try_break_deleg(inode, delegated_inode); if (error) return error; if (inode->i_op->setattr) error = inode->i_op->setattr(idmap, dentry, attr); else error = simple_setattr(idmap, dentry, attr); if (!error) { fsnotify_change(dentry, ia_valid); ima_inode_post_setattr(idmap, dentry); evm_inode_post_setattr(dentry, ia_valid); } return error; } EXPORT_SYMBOL(notify_change);
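/*
 * Editor's sketch (not part of fs/attr.c): how a simple in-memory
 * filesystem typically wires the helpers above into its ->setattr
 * method -- essentially what simple_setattr() already does, plus a
 * truncate step.  examplefs_setattr is an invented name.
 */
static int examplefs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
                             struct iattr *attr)
{
        struct inode *inode = d_inode(dentry);
        int error;

        /* ownership/mode/time permission checks, rlimits, kill-priv */
        error = setattr_prepare(idmap, dentry, attr);
        if (error)
                return error;

        if (attr->ia_valid & ATTR_SIZE)
                truncate_setsize(inode, attr->ia_size);

        /* copy uid/gid/mode/timestamps into the in-core inode */
        setattr_copy(idmap, inode, attr);
        mark_inode_dirty(inode);
        return 0;
}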
/* * linux/fs/nls/nls_iso8859-7.c * * Charset iso8859-7 translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00*/ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10*/ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20*/ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30*/ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40*/ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50*/ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60*/ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70*/ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80*/ 0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087, 0x0088, 0x0089, 0x008a, 0x008b, 0x008c, 0x008d, 0x008e, 0x008f, /* 0x90*/ 0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097, 0x0098, 0x0099, 0x009a, 0x009b, 0x009c, 0x009d, 0x009e, 0x009f, /* 0xa0*/ 0x00a0, 0x02bd, 0x02bc, 0x00a3, 0x0000, 0x0000, 0x00a6, 0x00a7, 0x00a8, 0x00a9, 0x0000, 0x00ab, 0x00ac, 0x00ad, 0x0000, 0x2015, /* 0xb0*/ 0x00b0, 0x00b1, 0x00b2, 0x00b3, 0x0384, 0x0385, 0x0386, 0x00b7, 0x0388, 0x0389, 0x038a, 0x00bb, 0x038c, 0x00bd, 0x038e, 0x038f, /* 0xc0*/ 0x0390, 0x0391, 0x0392, 0x0393, 0x0394, 0x0395, 0x0396, 0x0397, 0x0398, 0x0399, 0x039a, 0x039b, 0x039c, 0x039d, 0x039e, 0x039f, /* 0xd0*/ 0x03a0, 0x03a1, 0x0000, 0x03a3, 0x03a4, 0x03a5, 0x03a6, 0x03a7, 0x03a8, 0x03a9, 0x03aa, 0x03ab, 0x03ac, 0x03ad, 0x03ae, 0x03af, /* 0xe0*/ 0x03b0, 0x03b1, 0x03b2,
0x03b3, 0x03b4, 0x03b5, 0x03b6, 0x03b7, 0x03b8, 0x03b9, 0x03ba, 0x03bb, 0x03bc, 0x03bd, 0x03be, 0x03bf, /* 0xf0*/ 0x03c0, 0x03c1, 0x03c2, 0x03c3, 0x03c4, 0x03c5, 0x03c6, 0x03c7, 0x03c8, 0x03c9, 0x03ca, 0x03cb, 0x03cc, 0x03cd, 0x03ce, 0x0000, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0x00, 0x00, 0xa3, 0x00, 0x00, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0x00, 0xab, 0xac, 0xad, 0x00, 0x00, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0x00, 0x00, 0x00, 0xb7, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0xbb, 0x00, 0xbd, 0x00, 0x00, /* 0xb8-0xbf */ }; static const unsigned char page02[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0xa2, 0xa1, 0x00, 0x00, /* 0xb8-0xbf */ }; static const unsigned char page03[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0xb4, 0xb5, 0xb6, 0x00, /* 0x80-0x87 */ 0xb8, 0xb9, 0xba, 0x00, 0xbc, 0x00, 0xbe, 0xbf, /* 0x88-0x8f */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0x90-0x97 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0x98-0x9f */ 0xd0, 0xd1, 0x00, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xa0-0xa7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xa8-0xaf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xb0-0xb7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xb8-0xbf */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xc0-0xc7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0x00, /* 0xc8-0xcf */ }; static const unsigned char page20[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0xaf, 0x00, 0x00, /* 0x10-0x17 */ }; static const unsigned char *const page_uni2charset[256] = { page00, NULL, page02, page03, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page20, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */ 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0x00, 0x00, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0x00, 0xab, 0xac, 0xad, 0x00, 
0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xdc, 0xb7, /* 0xb0-0xb7 */ 0xdd, 0xde, 0xdf, 0xbb, 0xfc, 0xbd, 0xfd, 0xfe, /* 0xb8-0xbf */ 0xc0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */ 0xf0, 0xf1, 0x00, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xd0-0xd7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0x00, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */ 0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */ 0xa0, 0xa1, 0xa2, 0xa3, 0x00, 0x00, 0xa6, 0xa7, /* 0xa0-0xa7 */ 0xa8, 0xa9, 0x00, 0xab, 0xac, 0xad, 0x00, 0xaf, /* 0xa8-0xaf */ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */ 0xd0, 0xd1, 0x00, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xb6, 0xb8, 0xb9, 0xba, /* 0xd8-0xdf */ 0xe0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */ 0xd0, 0xd1, 0xd3, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xf0-0xf7 */ 0xd8, 0xd9, 0xda, 0xdb, 0xbc, 0xbe, 0xbf, 0x00, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "iso8859-7", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, }; static int __init init_nls_iso8859_7(void) { return register_nls(&table); } static void __exit 
exit_nls_iso8859_7(void) { unregister_nls(&table); } module_init(init_nls_iso8859_7) module_exit(exit_nls_iso8859_7) MODULE_LICENSE("Dual BSD/GPL");
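/*
 * Editor's usage sketch (not part of this file): how a filesystem
 * normally consumes a registered NLS table.  Converts one ISO 8859-7
 * byte to Unicode and back again; example_nls_roundtrip is an invented
 * name and error handling is reduced to the essentials.
 */
static int example_nls_roundtrip(unsigned char c)
{
        struct nls_table *nls = load_nls("iso8859-7");
        unsigned char out;
        wchar_t uni;
        int ret = -EINVAL;

        if (!nls)
                return -ENOENT;
        /* char2uni()/uni2char() return the number of bytes consumed/produced */
        if (nls->char2uni(&c, 1, &uni) > 0 &&
            nls->uni2char(uni, &out, 1) > 0 &&
            out == c)
                ret = 0;
        unload_nls(nls);
        return ret;
}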
// SPDX-License-Identifier: GPL-2.0 #include <linux/export.h> #include <linux/spinlock.h> #include <linux/atomic.h> /* * This is an implementation of the notion of "decrement a * reference count, and return locked if it decremented to zero". * * NOTE NOTE NOTE! This is _not_ equivalent to * * if (atomic_dec_and_test(&atomic)) { * spin_lock(&lock); * return 1; * } * return 0; * * because the spin-lock and the decrement must be * "atomic". */ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) { /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */ if (atomic_add_unless(atomic, -1, 1)) return 0; /* Otherwise do it the slow way */ spin_lock(lock); if (atomic_dec_and_test(atomic)) return 1; spin_unlock(lock); return 0; } EXPORT_SYMBOL(_atomic_dec_and_lock); int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock, unsigned long *flags) { /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */ if (atomic_add_unless(atomic, -1, 1)) return 0; /* Otherwise do it the slow way */ spin_lock_irqsave(lock, *flags); if (atomic_dec_and_test(atomic)) return 1; spin_unlock_irqrestore(lock, *flags); return 0; } EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave); int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock) { /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */ if (atomic_add_unless(atomic, -1, 1)) return 0; /* Otherwise do it the slow way */ raw_spin_lock(lock); if (atomic_dec_and_test(atomic)) return 1; raw_spin_unlock(lock); return 0; } EXPORT_SYMBOL(_atomic_dec_and_raw_lock); int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock, unsigned long *flags) { /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */ if (atomic_add_unless(atomic, -1, 1)) return 0; /* Otherwise do it the slow way */ raw_spin_lock_irqsave(lock, *flags); if (atomic_dec_and_test(atomic)) return 1; raw_spin_unlock_irqrestore(lock, *flags); return 0; } EXPORT_SYMBOL(_atomic_dec_and_raw_lock_irqsave);
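/*
 * Editor's usage sketch (invented, not part of lib/dec_and_lock.c): the
 * canonical "drop a reference, and only take the list lock when the
 * count hits zero" pattern these helpers exist for.  All example_*
 * names are made up.
 */
#include <linux/list.h>
#include <linux/slab.h>

struct example_obj {
        atomic_t refcnt;
        struct list_head node;
};

static DEFINE_SPINLOCK(example_list_lock);

static void example_put(struct example_obj *obj)
{
        /* fast path: count stays >= 1, no lock is taken */
        if (!atomic_dec_and_lock(&obj->refcnt, &example_list_lock))
                return;
        /* count reached zero: the lock is held here, unlink and free */
        list_del(&obj->node);
        spin_unlock(&example_list_lock);
        kfree(obj);
}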
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/filesystems.c * * Copyright (C) 1991, 1992 Linus Torvalds * * table of configured filesystems */ #include <linux/syscalls.h> #include <linux/fs.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/kmod.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/fs_parser.h> /* * Handling of filesystem drivers list. * Rules: * Inclusion to/removals from/scanning of list are protected by spinlock. * During the unload module must call unregister_filesystem(). * We can access the fields of list element if: * 1) spinlock is held or * 2) we hold the reference to the module. * The latter can be guaranteed by call of try_module_get(); if it * returned 0 we must skip the element, otherwise we got the reference. * Once the reference is obtained we can drop the spinlock. */ static struct file_system_type *file_systems; static DEFINE_RWLOCK(file_systems_lock); /* WARNING: This can be used only if we _already_ own a reference */ struct file_system_type *get_filesystem(struct file_system_type *fs) { __module_get(fs->owner); return fs; } void put_filesystem(struct file_system_type *fs) { module_put(fs->owner); } static struct file_system_type **find_filesystem(const char *name, unsigned len) { struct file_system_type **p; for (p = &file_systems; *p; p = &(*p)->next) if (strncmp((*p)->name, name, len) == 0 && !(*p)->name[len]) break; return p; } /** * register_filesystem - register a new filesystem * @fs: the file system structure * * Adds the file system passed to the list of file systems the kernel * is aware of for mount and other syscalls. Returns 0 on success, * or a negative errno code on an error. * * The &struct file_system_type that is passed is linked into the kernel * structures and must not be freed until the file system has been * unregistered.
*/ int register_filesystem(struct file_system_type * fs) { int res = 0; struct file_system_type ** p; if (fs->parameters && !fs_validate_description(fs->name, fs->parameters)) return -EINVAL; BUG_ON(strchr(fs->name, '.')); if (fs->next) return -EBUSY; write_lock(&file_systems_lock); p = find_filesystem(fs->name, strlen(fs->name)); if (*p) res = -EBUSY; else *p = fs; write_unlock(&file_systems_lock); return res; } EXPORT_SYMBOL(register_filesystem); /** * unregister_filesystem - unregister a file system * @fs: filesystem to unregister * * Remove a file system that was previously successfully registered * with the kernel. An error is returned if the file system is not found. * Zero is returned on a success. * * Once this function has returned the &struct file_system_type structure * may be freed or reused. */ int unregister_filesystem(struct file_system_type * fs) { struct file_system_type ** tmp; write_lock(&file_systems_lock); tmp = &file_systems; while (*tmp) { if (fs == *tmp) { *tmp = fs->next; fs->next = NULL; write_unlock(&file_systems_lock); synchronize_rcu(); return 0; } tmp = &(*tmp)->next; } write_unlock(&file_systems_lock); return -EINVAL; } EXPORT_SYMBOL(unregister_filesystem); #ifdef CONFIG_SYSFS_SYSCALL static int fs_index(const char __user * __name) { struct file_system_type * tmp; struct filename *name; int err, index; name = getname(__name); err = PTR_ERR(name); if (IS_ERR(name)) return err; err = -EINVAL; read_lock(&file_systems_lock); for (tmp=file_systems, index=0 ; tmp ; tmp=tmp->next, index++) { if (strcmp(tmp->name, name->name) == 0) { err = index; break; } } read_unlock(&file_systems_lock); putname(name); return err; } static int fs_name(unsigned int index, char __user * buf) { struct file_system_type * tmp; int len, res; read_lock(&file_systems_lock); for (tmp = file_systems; tmp; tmp = tmp->next, index--) if (index <= 0 && try_module_get(tmp->owner)) break; read_unlock(&file_systems_lock); if (!tmp) return -EINVAL; /* OK, we got the reference, so we can safely block */ len = strlen(tmp->name) + 1; res = copy_to_user(buf, tmp->name, len) ? -EFAULT : 0; put_filesystem(tmp); return res; } static int fs_maxindex(void) { struct file_system_type * tmp; int index; read_lock(&file_systems_lock); for (tmp = file_systems, index = 0 ; tmp ; tmp = tmp->next, index++) ; read_unlock(&file_systems_lock); return index; } /* * Whee.. Weird sysv syscall. */ SYSCALL_DEFINE3(sysfs, int, option, unsigned long, arg1, unsigned long, arg2) { int retval = -EINVAL; switch (option) { case 1: retval = fs_index((const char __user *) arg1); break; case 2: retval = fs_name(arg1, (char __user *) arg2); break; case 3: retval = fs_maxindex(); break; } return retval; } #endif int __init list_bdev_fs_names(char *buf, size_t size) { struct file_system_type *p; size_t len; int count = 0; read_lock(&file_systems_lock); for (p = file_systems; p; p = p->next) { if (!(p->fs_flags & FS_REQUIRES_DEV)) continue; len = strlen(p->name) + 1; if (len > size) { pr_warn("%s: truncating file system list\n", __func__); break; } memcpy(buf, p->name, len); buf += len; size -= len; count++; } read_unlock(&file_systems_lock); return count; } #ifdef CONFIG_PROC_FS static int filesystems_proc_show(struct seq_file *m, void *v) { struct file_system_type * tmp; read_lock(&file_systems_lock); tmp = file_systems; while (tmp) { seq_printf(m, "%s\t%s\n", (tmp->fs_flags & FS_REQUIRES_DEV) ? 
"" : "nodev", tmp->name); tmp = tmp->next; } read_unlock(&file_systems_lock); return 0; } static int __init proc_filesystems_init(void) { proc_create_single("filesystems", 0, NULL, filesystems_proc_show); return 0; } module_init(proc_filesystems_init); #endif static struct file_system_type *__get_fs_type(const char *name, int len) { struct file_system_type *fs; read_lock(&file_systems_lock); fs = *(find_filesystem(name, len)); if (fs && !try_module_get(fs->owner)) fs = NULL; read_unlock(&file_systems_lock); return fs; } struct file_system_type *get_fs_type(const char *name) { struct file_system_type *fs; const char *dot = strchr(name, '.'); int len = dot ? dot - name : strlen(name); fs = __get_fs_type(name, len); if (!fs && (request_module("fs-%.*s", len, name) == 0)) { fs = __get_fs_type(name, len); if (!fs) pr_warn_once("request_module fs-%.*s succeeded, but still no fs?\n", len, name); } if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) { put_filesystem(fs); fs = NULL; } return fs; } EXPORT_SYMBOL(get_fs_type); |
// SPDX-License-Identifier: GPL-2.0-only /* * mac80211 ethtool hooks for cfg80211 * * Copied from cfg.c - originally * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> * Copyright 2014 Intel Corporation (Author: Johannes Berg) * Copyright (C) 2018, 2022-2023 Intel Corporation */ #include <linux/types.h> #include <net/cfg80211.h> #include "ieee80211_i.h" #include "sta_info.h" #include "driver-ops.h" static int ieee80211_set_ringparam(struct net_device *dev, struct ethtool_ringparam *rp, struct kernel_ethtool_ringparam *kernel_rp, struct netlink_ext_ack *extack) { struct ieee80211_local *local = wiphy_priv(dev->ieee80211_ptr->wiphy); int ret; if (rp->rx_mini_pending != 0 || rp->rx_jumbo_pending != 0) return -EINVAL; wiphy_lock(local->hw.wiphy); ret = drv_set_ringparam(local, rp->tx_pending, rp->rx_pending); wiphy_unlock(local->hw.wiphy); return ret; } static void ieee80211_get_ringparam(struct net_device *dev, struct ethtool_ringparam *rp, struct kernel_ethtool_ringparam *kernel_rp, struct netlink_ext_ack *extack) { struct ieee80211_local *local = wiphy_priv(dev->ieee80211_ptr->wiphy); memset(rp, 0, sizeof(*rp)); wiphy_lock(local->hw.wiphy); drv_get_ringparam(local, &rp->tx_pending, &rp->tx_max_pending, &rp->rx_pending, &rp->rx_max_pending); wiphy_unlock(local->hw.wiphy); } static const char ieee80211_gstrings_sta_stats[][ETH_GSTRING_LEN] = { "rx_packets", "rx_bytes", "rx_duplicates", "rx_fragments", "rx_dropped", "tx_packets", "tx_bytes", "tx_filtered", "tx_retry_failed", "tx_retries", "sta_state", "txrate", "rxrate", "signal", "channel", "noise", "ch_time", "ch_time_busy", "ch_time_ext_busy", "ch_time_rx", "ch_time_tx" }; #define STA_STATS_LEN ARRAY_SIZE(ieee80211_gstrings_sta_stats) static int ieee80211_get_sset_count(struct net_device *dev, int sset) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); int rv = 0; if (sset == ETH_SS_STATS) rv += STA_STATS_LEN; rv += drv_get_et_sset_count(sdata, sset); if (rv == 0) return -EOPNOTSUPP; return rv; } static void ieee80211_get_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_channel *channel; struct sta_info *sta; struct ieee80211_local *local = sdata->local; struct station_info sinfo; struct survey_info survey; int i, q; #define STA_STATS_SURVEY_LEN 7 memset(data, 0, sizeof(u64) * STA_STATS_LEN); #define ADD_STA_STATS(sta) \ do { \ data[i++] += sinfo.rx_packets; \ data[i++] += sinfo.rx_bytes; \ data[i++] += (sta)->rx_stats.num_duplicates; \
data[i++] += (sta)->rx_stats.fragments; \ data[i++] += sinfo.rx_dropped_misc; \ \ data[i++] += sinfo.tx_packets; \ data[i++] += sinfo.tx_bytes; \ data[i++] += (sta)->status_stats.filtered; \ data[i++] += sinfo.tx_failed; \ data[i++] += sinfo.tx_retries; \ } while (0) /* For Managed stations, find the single station based on BSSID * and use that. For interface types, iterate through all available * stations and add stats for any station that is assigned to this * network device. */ wiphy_lock(local->hw.wiphy); if (sdata->vif.type == NL80211_IFTYPE_STATION) { sta = sta_info_get_bss(sdata, sdata->deflink.u.mgd.bssid); if (!(sta && !WARN_ON(sta->sdata->dev != dev))) goto do_survey; memset(&sinfo, 0, sizeof(sinfo)); sta_set_sinfo(sta, &sinfo, false); i = 0; ADD_STA_STATS(&sta->deflink); data[i++] = sta->sta_state; if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)) data[i] = 100000ULL * cfg80211_calculate_bitrate(&sinfo.txrate); i++; if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_RX_BITRATE)) data[i] = 100000ULL * cfg80211_calculate_bitrate(&sinfo.rxrate); i++; if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG)) data[i] = (u8)sinfo.signal_avg; i++; } else { list_for_each_entry(sta, &local->sta_list, list) { /* Make sure this station belongs to the proper dev */ if (sta->sdata->dev != dev) continue; memset(&sinfo, 0, sizeof(sinfo)); sta_set_sinfo(sta, &sinfo, false); i = 0; ADD_STA_STATS(&sta->deflink); } } do_survey: i = STA_STATS_LEN - STA_STATS_SURVEY_LEN; /* Get survey stats for current channel */ survey.filled = 0; rcu_read_lock(); chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf); if (chanctx_conf) channel = chanctx_conf->def.chan; else channel = NULL; rcu_read_unlock(); if (channel) { q = 0; do { survey.filled = 0; if (drv_get_survey(local, q, &survey) != 0) { survey.filled = 0; break; } q++; } while (channel != survey.channel); } if (survey.filled) data[i++] = survey.channel->center_freq; else data[i++] = 0; if (survey.filled & SURVEY_INFO_NOISE_DBM) data[i++] = (u8)survey.noise; else data[i++] = -1LL; if (survey.filled & SURVEY_INFO_TIME) data[i++] = survey.time; else data[i++] = -1LL; if (survey.filled & SURVEY_INFO_TIME_BUSY) data[i++] = survey.time_busy; else data[i++] = -1LL; if (survey.filled & SURVEY_INFO_TIME_EXT_BUSY) data[i++] = survey.time_ext_busy; else data[i++] = -1LL; if (survey.filled & SURVEY_INFO_TIME_RX) data[i++] = survey.time_rx; else data[i++] = -1LL; if (survey.filled & SURVEY_INFO_TIME_TX) data[i++] = survey.time_tx; else data[i++] = -1LL; if (WARN_ON(i != STA_STATS_LEN)) { wiphy_unlock(local->hw.wiphy); return; } drv_get_et_stats(sdata, stats, &(data[STA_STATS_LEN])); wiphy_unlock(local->hw.wiphy); } static void ieee80211_get_strings(struct net_device *dev, u32 sset, u8 *data) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); int sz_sta_stats = 0; if (sset == ETH_SS_STATS) { sz_sta_stats = sizeof(ieee80211_gstrings_sta_stats); memcpy(data, ieee80211_gstrings_sta_stats, sz_sta_stats); } drv_get_et_strings(sdata, sset, &(data[sz_sta_stats])); } static int ieee80211_get_regs_len(struct net_device *dev) { return 0; } static void ieee80211_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *data) { struct wireless_dev *wdev = dev->ieee80211_ptr; regs->version = wdev->wiphy->hw_version; regs->len = 0; } const struct ethtool_ops ieee80211_ethtool_ops = { .get_drvinfo = cfg80211_get_drvinfo, .get_regs_len = ieee80211_get_regs_len, .get_regs = ieee80211_get_regs, .get_link = ethtool_op_get_link, .get_ringparam = 
ieee80211_get_ringparam, .set_ringparam = ieee80211_set_ringparam, .get_strings = ieee80211_get_strings, .get_ethtool_stats = ieee80211_get_stats, .get_sset_count = ieee80211_get_sset_count, };
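/*
 * Editor's note (invented sketch, not from this file): the ops table above
 * only takes effect once it is attached to the net_device during interface
 * setup, after which userspace reaches it through the regular ethtool
 * interfaces (e.g. "ethtool -S wlan0" ends up in ieee80211_get_stats()).
 */
static void example_attach_ethtool_ops(struct net_device *ndev)
{
        ndev->ethtool_ops = &ieee80211_ethtool_ops;     /* table defined above */
}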
// SPDX-License-Identifier: GPL-2.0-only /* * IEEE 802.1Q GARP VLAN Registration Protocol (GVRP) * * Copyright (c) 2008 Patrick McHardy <kaber@trash.net> */ #include <linux/types.h> #include <linux/if_vlan.h> #include <net/garp.h> #include "vlan.h" #define GARP_GVRP_ADDRESS { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x21 } enum gvrp_attributes { GVRP_ATTR_INVALID, GVRP_ATTR_VID, __GVRP_ATTR_MAX }; #define GVRP_ATTR_MAX (__GVRP_ATTR_MAX - 1) static struct garp_application vlan_gvrp_app __read_mostly = { .proto.group_address = GARP_GVRP_ADDRESS, .maxattr = GVRP_ATTR_MAX, .type = GARP_APPLICATION_GVRP, }; int vlan_gvrp_request_join(const struct net_device *dev) { const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); __be16 vlan_id = htons(vlan->vlan_id); if (vlan->vlan_proto != htons(ETH_P_8021Q)) return 0; return garp_request_join(vlan->real_dev, &vlan_gvrp_app, &vlan_id, sizeof(vlan_id), GVRP_ATTR_VID); } void vlan_gvrp_request_leave(const struct net_device *dev) { const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); __be16 vlan_id = htons(vlan->vlan_id); if (vlan->vlan_proto != htons(ETH_P_8021Q)) return; garp_request_leave(vlan->real_dev, &vlan_gvrp_app, &vlan_id, sizeof(vlan_id), GVRP_ATTR_VID); } int vlan_gvrp_init_applicant(struct net_device *dev) { return garp_init_applicant(dev, &vlan_gvrp_app); } void vlan_gvrp_uninit_applicant(struct net_device *dev) { garp_uninit_applicant(dev, &vlan_gvrp_app); } int __init vlan_gvrp_init(void) { return garp_register_application(&vlan_gvrp_app); } void vlan_gvrp_uninit(void) { garp_unregister_application(&vlan_gvrp_app); }
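/*
 * Editor's call-site sketch (invented, not part of vlan_gvrp.c): the VLAN
 * device code invokes the join/leave helpers above around interface
 * up/down, roughly as below (the real code additionally checks the
 * per-VLAN VLAN_FLAG_GVRP flag before joining).
 */
static int example_vlan_open(struct net_device *dev)
{
        int err = vlan_gvrp_request_join(dev);

        if (err)
                return err;
        netif_start_queue(dev);
        return 0;
}

static int example_vlan_stop(struct net_device *dev)
{
        netif_stop_queue(dev);
        vlan_gvrp_request_leave(dev);
        return 0;
}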
/* * Copyright (c) 2005-2006 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE.
*/ #include <linux/completion.h> #include <linux/file.h> #include <linux/mutex.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/idr.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/sysctl.h> #include <linux/module.h> #include <linux/nsproxy.h> #include <linux/nospec.h> #include <rdma/rdma_user_cm.h> #include <rdma/ib_marshall.h> #include <rdma/rdma_cm.h> #include <rdma/rdma_cm_ib.h> #include <rdma/ib_addr.h> #include <rdma/ib.h> #include <rdma/ib_cm.h> #include <rdma/rdma_netlink.h> #include "core_priv.h" MODULE_AUTHOR("Sean Hefty"); MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access"); MODULE_LICENSE("Dual BSD/GPL"); static unsigned int max_backlog = 1024; static struct ctl_table_header *ucma_ctl_table_hdr; static struct ctl_table ucma_ctl_table[] = { { .procname = "max_backlog", .data = &max_backlog, .maxlen = sizeof max_backlog, .mode = 0644, .proc_handler = proc_dointvec, }, }; struct ucma_file { struct mutex mut; struct file *filp; struct list_head ctx_list; struct list_head event_list; wait_queue_head_t poll_wait; }; struct ucma_context { u32 id; struct completion comp; refcount_t ref; int events_reported; atomic_t backlog; struct ucma_file *file; struct rdma_cm_id *cm_id; struct mutex mutex; u64 uid; struct list_head list; struct list_head mc_list; struct work_struct close_work; }; struct ucma_multicast { struct ucma_context *ctx; u32 id; int events_reported; u64 uid; u8 join_state; struct list_head list; struct sockaddr_storage addr; }; struct ucma_event { struct ucma_context *ctx; struct ucma_context *conn_req_ctx; struct ucma_multicast *mc; struct list_head list; struct rdma_ucm_event_resp resp; }; static DEFINE_XARRAY_ALLOC(ctx_table); static DEFINE_XARRAY_ALLOC(multicast_table); static const struct file_operations ucma_fops; static int ucma_destroy_private_ctx(struct ucma_context *ctx); static inline struct ucma_context *_ucma_find_context(int id, struct ucma_file *file) { struct ucma_context *ctx; ctx = xa_load(&ctx_table, id); if (!ctx) ctx = ERR_PTR(-ENOENT); else if (ctx->file != file) ctx = ERR_PTR(-EINVAL); return ctx; } static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id) { struct ucma_context *ctx; xa_lock(&ctx_table); ctx = _ucma_find_context(id, file); if (!IS_ERR(ctx)) if (!refcount_inc_not_zero(&ctx->ref)) ctx = ERR_PTR(-ENXIO); xa_unlock(&ctx_table); return ctx; } static void ucma_put_ctx(struct ucma_context *ctx) { if (refcount_dec_and_test(&ctx->ref)) complete(&ctx->comp); } /* * Same as ucm_get_ctx but requires that ->cm_id->device is valid, eg that the * CM_ID is bound. */ static struct ucma_context *ucma_get_ctx_dev(struct ucma_file *file, int id) { struct ucma_context *ctx = ucma_get_ctx(file, id); if (IS_ERR(ctx)) return ctx; if (!ctx->cm_id->device) { ucma_put_ctx(ctx); return ERR_PTR(-EINVAL); } return ctx; } static void ucma_close_id(struct work_struct *work) { struct ucma_context *ctx = container_of(work, struct ucma_context, close_work); /* once all inflight tasks are finished, we close all underlying * resources. The context is still alive till its explicit destryoing * by its creator. This puts back the xarray's reference. */ ucma_put_ctx(ctx); wait_for_completion(&ctx->comp); /* No new events will be generated after destroying the id. 
*/ rdma_destroy_id(ctx->cm_id); /* Reading the cm_id without holding a positive ref is not allowed */ ctx->cm_id = NULL; } static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file) { struct ucma_context *ctx; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return NULL; INIT_WORK(&ctx->close_work, ucma_close_id); init_completion(&ctx->comp); INIT_LIST_HEAD(&ctx->mc_list); /* So list_del() will work if we don't do ucma_finish_ctx() */ INIT_LIST_HEAD(&ctx->list); ctx->file = file; mutex_init(&ctx->mutex); if (xa_alloc(&ctx_table, &ctx->id, NULL, xa_limit_32b, GFP_KERNEL)) { kfree(ctx); return NULL; } return ctx; } static void ucma_set_ctx_cm_id(struct ucma_context *ctx, struct rdma_cm_id *cm_id) { refcount_set(&ctx->ref, 1); ctx->cm_id = cm_id; } static void ucma_finish_ctx(struct ucma_context *ctx) { lockdep_assert_held(&ctx->file->mut); list_add_tail(&ctx->list, &ctx->file->ctx_list); xa_store(&ctx_table, ctx->id, ctx, GFP_KERNEL); } static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst, struct rdma_conn_param *src) { if (src->private_data_len) memcpy(dst->private_data, src->private_data, src->private_data_len); dst->private_data_len = src->private_data_len; dst->responder_resources = src->responder_resources; dst->initiator_depth = src->initiator_depth; dst->flow_control = src->flow_control; dst->retry_count = src->retry_count; dst->rnr_retry_count = src->rnr_retry_count; dst->srq = src->srq; dst->qp_num = src->qp_num; } static void ucma_copy_ud_event(struct ib_device *device, struct rdma_ucm_ud_param *dst, struct rdma_ud_param *src) { if (src->private_data_len) memcpy(dst->private_data, src->private_data, src->private_data_len); dst->private_data_len = src->private_data_len; ib_copy_ah_attr_to_user(device, &dst->ah_attr, &src->ah_attr); dst->qp_num = src->qp_num; dst->qkey = src->qkey; } static struct ucma_event *ucma_create_uevent(struct ucma_context *ctx, struct rdma_cm_event *event) { struct ucma_event *uevent; uevent = kzalloc(sizeof(*uevent), GFP_KERNEL); if (!uevent) return NULL; uevent->ctx = ctx; switch (event->event) { case RDMA_CM_EVENT_MULTICAST_JOIN: case RDMA_CM_EVENT_MULTICAST_ERROR: uevent->mc = (struct ucma_multicast *) event->param.ud.private_data; uevent->resp.uid = uevent->mc->uid; uevent->resp.id = uevent->mc->id; break; default: uevent->resp.uid = ctx->uid; uevent->resp.id = ctx->id; break; } uevent->resp.event = event->event; uevent->resp.status = event->status; if (ctx->cm_id->qp_type == IB_QPT_UD) ucma_copy_ud_event(ctx->cm_id->device, &uevent->resp.param.ud, &event->param.ud); else ucma_copy_conn_event(&uevent->resp.param.conn, &event->param.conn); uevent->resp.ece.vendor_id = event->ece.vendor_id; uevent->resp.ece.attr_mod = event->ece.attr_mod; return uevent; } static int ucma_connect_event_handler(struct rdma_cm_id *cm_id, struct rdma_cm_event *event) { struct ucma_context *listen_ctx = cm_id->context; struct ucma_context *ctx; struct ucma_event *uevent; if (!atomic_add_unless(&listen_ctx->backlog, -1, 0)) return -ENOMEM; ctx = ucma_alloc_ctx(listen_ctx->file); if (!ctx) goto err_backlog; ucma_set_ctx_cm_id(ctx, cm_id); uevent = ucma_create_uevent(listen_ctx, event); if (!uevent) goto err_alloc; uevent->conn_req_ctx = ctx; uevent->resp.id = ctx->id; ctx->cm_id->context = ctx; mutex_lock(&ctx->file->mut); ucma_finish_ctx(ctx); list_add_tail(&uevent->list, &ctx->file->event_list); mutex_unlock(&ctx->file->mut); wake_up_interruptible(&ctx->file->poll_wait); return 0; err_alloc: ucma_destroy_private_ctx(ctx); err_backlog: 
atomic_inc(&listen_ctx->backlog); /* Returning error causes the new ID to be destroyed */ return -ENOMEM; } static int ucma_event_handler(struct rdma_cm_id *cm_id, struct rdma_cm_event *event) { struct ucma_event *uevent; struct ucma_context *ctx = cm_id->context; if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) return ucma_connect_event_handler(cm_id, event); /* * We ignore events for new connections until userspace has set their * context. This can only happen if an error occurs on a new connection * before the user accepts it. This is okay, since the accept will just * fail later. However, we do need to release the underlying HW * resources in case of a device removal event. */ if (ctx->uid) { uevent = ucma_create_uevent(ctx, event); if (!uevent) return 0; mutex_lock(&ctx->file->mut); list_add_tail(&uevent->list, &ctx->file->event_list); mutex_unlock(&ctx->file->mut); wake_up_interruptible(&ctx->file->poll_wait); } if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) { xa_lock(&ctx_table); if (xa_load(&ctx_table, ctx->id) == ctx) queue_work(system_unbound_wq, &ctx->close_work); xa_unlock(&ctx_table); } return 0; } static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_get_event cmd; struct ucma_event *uevent; /* * Old 32 bit user space does not send the 4 byte padding in the * reserved field. We don't care, allow it to keep working. */ if (out_len < sizeof(uevent->resp) - sizeof(uevent->resp.reserved) - sizeof(uevent->resp.ece)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; mutex_lock(&file->mut); while (list_empty(&file->event_list)) { mutex_unlock(&file->mut); if (file->filp->f_flags & O_NONBLOCK) return -EAGAIN; if (wait_event_interruptible(file->poll_wait, !list_empty(&file->event_list))) return -ERESTARTSYS; mutex_lock(&file->mut); } uevent = list_first_entry(&file->event_list, struct ucma_event, list); if (copy_to_user(u64_to_user_ptr(cmd.response), &uevent->resp, min_t(size_t, out_len, sizeof(uevent->resp)))) { mutex_unlock(&file->mut); return -EFAULT; } list_del(&uevent->list); uevent->ctx->events_reported++; if (uevent->mc) uevent->mc->events_reported++; if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) atomic_inc(&uevent->ctx->backlog); mutex_unlock(&file->mut); kfree(uevent); return 0; } static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type) { switch (cmd->ps) { case RDMA_PS_TCP: *qp_type = IB_QPT_RC; return 0; case RDMA_PS_UDP: case RDMA_PS_IPOIB: *qp_type = IB_QPT_UD; return 0; case RDMA_PS_IB: *qp_type = cmd->qp_type; return 0; default: return -EINVAL; } } static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_create_id cmd; struct rdma_ucm_create_id_resp resp; struct ucma_context *ctx; struct rdma_cm_id *cm_id; enum ib_qp_type qp_type; int ret; if (out_len < sizeof(resp)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ret = ucma_get_qp_type(&cmd, &qp_type); if (ret) return ret; ctx = ucma_alloc_ctx(file); if (!ctx) return -ENOMEM; ctx->uid = cmd.uid; cm_id = rdma_create_user_id(ucma_event_handler, ctx, cmd.ps, qp_type); if (IS_ERR(cm_id)) { ret = PTR_ERR(cm_id); goto err1; } ucma_set_ctx_cm_id(ctx, cm_id); resp.id = ctx->id; if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) { ret = -EFAULT; goto err1; } mutex_lock(&file->mut); ucma_finish_ctx(ctx); mutex_unlock(&file->mut); return 0; err1: ucma_destroy_private_ctx(ctx); 
return ret; } static void ucma_cleanup_multicast(struct ucma_context *ctx) { struct ucma_multicast *mc, *tmp; xa_lock(&multicast_table); list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) { list_del(&mc->list); /* * At this point mc->ctx->ref is 0 so the mc cannot leave the * lock on the reader and this is enough serialization */ __xa_erase(&multicast_table, mc->id); kfree(mc); } xa_unlock(&multicast_table); } static void ucma_cleanup_mc_events(struct ucma_multicast *mc) { struct ucma_event *uevent, *tmp; rdma_lock_handler(mc->ctx->cm_id); mutex_lock(&mc->ctx->file->mut); list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) { if (uevent->mc != mc) continue; list_del(&uevent->list); kfree(uevent); } mutex_unlock(&mc->ctx->file->mut); rdma_unlock_handler(mc->ctx->cm_id); } static int ucma_cleanup_ctx_events(struct ucma_context *ctx) { int events_reported; struct ucma_event *uevent, *tmp; LIST_HEAD(list); /* Cleanup events not yet reported to the user.*/ mutex_lock(&ctx->file->mut); list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) { if (uevent->ctx != ctx) continue; if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST && xa_cmpxchg(&ctx_table, uevent->conn_req_ctx->id, uevent->conn_req_ctx, XA_ZERO_ENTRY, GFP_KERNEL) == uevent->conn_req_ctx) { list_move_tail(&uevent->list, &list); continue; } list_del(&uevent->list); kfree(uevent); } list_del(&ctx->list); events_reported = ctx->events_reported; mutex_unlock(&ctx->file->mut); /* * If this was a listening ID then any connections spawned from it that * have not been delivered to userspace are cleaned up too. Must be done * outside any locks. */ list_for_each_entry_safe(uevent, tmp, &list, list) { ucma_destroy_private_ctx(uevent->conn_req_ctx); kfree(uevent); } return events_reported; } /* * When this is called the xarray must have a XA_ZERO_ENTRY in the ctx->id (ie * the ctx is not public to the user). This either because: * - ucma_finish_ctx() hasn't been called * - xa_cmpxchg() succeed to remove the entry (only one thread can succeed) */ static int ucma_destroy_private_ctx(struct ucma_context *ctx) { int events_reported; /* * Destroy the underlying cm_id. New work queuing is prevented now by * the removal from the xarray. Once the work is cancled ref will either * be 0 because the work ran to completion and consumed the ref from the * xarray, or it will be positive because we still have the ref from the * xarray. 
This can also be 0 in cases where cm_id was never set */ cancel_work_sync(&ctx->close_work); if (refcount_read(&ctx->ref)) ucma_close_id(&ctx->close_work); events_reported = ucma_cleanup_ctx_events(ctx); ucma_cleanup_multicast(ctx); WARN_ON(xa_cmpxchg(&ctx_table, ctx->id, XA_ZERO_ENTRY, NULL, GFP_KERNEL) != NULL); mutex_destroy(&ctx->mutex); kfree(ctx); return events_reported; } static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_destroy_id cmd; struct rdma_ucm_destroy_id_resp resp; struct ucma_context *ctx; int ret = 0; if (out_len < sizeof(resp)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; xa_lock(&ctx_table); ctx = _ucma_find_context(cmd.id, file); if (!IS_ERR(ctx)) { if (__xa_cmpxchg(&ctx_table, ctx->id, ctx, XA_ZERO_ENTRY, GFP_KERNEL) != ctx) ctx = ERR_PTR(-ENOENT); } xa_unlock(&ctx_table); if (IS_ERR(ctx)) return PTR_ERR(ctx); resp.events_reported = ucma_destroy_private_ctx(ctx); if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) ret = -EFAULT; return ret; } static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_bind_ip cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if (!rdma_addr_size_in6(&cmd.addr)) return -EINVAL; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); mutex_lock(&ctx->mutex); ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr); mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_bind cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if (cmd.reserved || !cmd.addr_size || cmd.addr_size != rdma_addr_size_kss(&cmd.addr)) return -EINVAL; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); mutex_lock(&ctx->mutex); ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr); mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_resolve_ip(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_resolve_ip cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) || !rdma_addr_size_in6(&cmd.dst_addr)) return -EINVAL; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); mutex_lock(&ctx->mutex); ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms); mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_resolve_addr(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_resolve_addr cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) || !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr))) return -EINVAL; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); mutex_lock(&ctx->mutex); ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms); mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_resolve_route(struct ucma_file 
*file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_resolve_route cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); mutex_lock(&ctx->mutex); ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms); mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp, struct rdma_route *route) { struct rdma_dev_addr *dev_addr; resp->num_paths = route->num_pri_alt_paths; switch (route->num_pri_alt_paths) { case 0: dev_addr = &route->addr.dev_addr; rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid); rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid); resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); break; case 2: ib_copy_path_rec_to_user(&resp->ib_route[1], &route->path_rec[1]); fallthrough; case 1: ib_copy_path_rec_to_user(&resp->ib_route[0], &route->path_rec[0]); break; default: break; } } static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp, struct rdma_route *route) { resp->num_paths = route->num_pri_alt_paths; switch (route->num_pri_alt_paths) { case 0: rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr, (union ib_gid *)&resp->ib_route[0].dgid); rdma_ip2gid((struct sockaddr *)&route->addr.src_addr, (union ib_gid *)&resp->ib_route[0].sgid); resp->ib_route[0].pkey = cpu_to_be16(0xffff); break; case 2: ib_copy_path_rec_to_user(&resp->ib_route[1], &route->path_rec[1]); fallthrough; case 1: ib_copy_path_rec_to_user(&resp->ib_route[0], &route->path_rec[0]); break; default: break; } } static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp, struct rdma_route *route) { struct rdma_dev_addr *dev_addr; dev_addr = &route->addr.dev_addr; rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid); rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid); } static ssize_t ucma_query_route(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_query cmd; struct rdma_ucm_query_route_resp resp; struct ucma_context *ctx; struct sockaddr *addr; int ret = 0; if (out_len < offsetof(struct rdma_ucm_query_route_resp, ibdev_index)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); mutex_lock(&ctx->mutex); memset(&resp, 0, sizeof resp); addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr; memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6)); addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr; memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ? 
sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6)); if (!ctx->cm_id->device) goto out; resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid; resp.ibdev_index = ctx->cm_id->device->index; resp.port_num = ctx->cm_id->port_num; if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num)) ucma_copy_ib_route(&resp, &ctx->cm_id->route); else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num)) ucma_copy_iboe_route(&resp, &ctx->cm_id->route); else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num)) ucma_copy_iw_route(&resp, &ctx->cm_id->route); out: mutex_unlock(&ctx->mutex); if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, min_t(size_t, out_len, sizeof(resp)))) ret = -EFAULT; ucma_put_ctx(ctx); return ret; } static void ucma_query_device_addr(struct rdma_cm_id *cm_id, struct rdma_ucm_query_addr_resp *resp) { if (!cm_id->device) return; resp->node_guid = (__force __u64) cm_id->device->node_guid; resp->ibdev_index = cm_id->device->index; resp->port_num = cm_id->port_num; resp->pkey = (__force __u16) cpu_to_be16( ib_addr_get_pkey(&cm_id->route.addr.dev_addr)); } static ssize_t ucma_query_addr(struct ucma_context *ctx, void __user *response, int out_len) { struct rdma_ucm_query_addr_resp resp; struct sockaddr *addr; int ret = 0; if (out_len < offsetof(struct rdma_ucm_query_addr_resp, ibdev_index)) return -ENOSPC; memset(&resp, 0, sizeof resp); addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr; resp.src_size = rdma_addr_size(addr); memcpy(&resp.src_addr, addr, resp.src_size); addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr; resp.dst_size = rdma_addr_size(addr); memcpy(&resp.dst_addr, addr, resp.dst_size); ucma_query_device_addr(ctx->cm_id, &resp); if (copy_to_user(response, &resp, min_t(size_t, out_len, sizeof(resp)))) ret = -EFAULT; return ret; } static ssize_t ucma_query_path(struct ucma_context *ctx, void __user *response, int out_len) { struct rdma_ucm_query_path_resp *resp; int i, ret = 0; if (out_len < sizeof(*resp)) return -ENOSPC; resp = kzalloc(out_len, GFP_KERNEL); if (!resp) return -ENOMEM; resp->num_paths = ctx->cm_id->route.num_pri_alt_paths; for (i = 0, out_len -= sizeof(*resp); i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data); i++, out_len -= sizeof(struct ib_path_rec_data)) { struct sa_path_rec *rec = &ctx->cm_id->route.path_rec[i]; resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY | IB_PATH_BIDIRECTIONAL; if (rec->rec_type == SA_PATH_REC_TYPE_OPA) { struct sa_path_rec ib; sa_convert_path_opa_to_ib(&ib, rec); ib_sa_pack_path(&ib, &resp->path_data[i].path_rec); } else { ib_sa_pack_path(rec, &resp->path_data[i].path_rec); } } if (copy_to_user(response, resp, struct_size(resp, path_data, i))) ret = -EFAULT; kfree(resp); return ret; } static ssize_t ucma_query_gid(struct ucma_context *ctx, void __user *response, int out_len) { struct rdma_ucm_query_addr_resp resp; struct sockaddr_ib *addr; int ret = 0; if (out_len < offsetof(struct rdma_ucm_query_addr_resp, ibdev_index)) return -ENOSPC; memset(&resp, 0, sizeof resp); ucma_query_device_addr(ctx->cm_id, &resp); addr = (struct sockaddr_ib *) &resp.src_addr; resp.src_size = sizeof(*addr); if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) { memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size); } else { addr->sib_family = AF_IB; addr->sib_pkey = (__force __be16) resp.pkey; rdma_read_gids(ctx->cm_id, (union ib_gid *)&addr->sib_addr, NULL); addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *) 
&ctx->cm_id->route.addr.src_addr); } addr = (struct sockaddr_ib *) &resp.dst_addr; resp.dst_size = sizeof(*addr); if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) { memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size); } else { addr->sib_family = AF_IB; addr->sib_pkey = (__force __be16) resp.pkey; rdma_read_gids(ctx->cm_id, NULL, (union ib_gid *)&addr->sib_addr); addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr); } if (copy_to_user(response, &resp, min_t(size_t, out_len, sizeof(resp)))) ret = -EFAULT; return ret; } static ssize_t ucma_query(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_query cmd; struct ucma_context *ctx; void __user *response; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; response = u64_to_user_ptr(cmd.response); ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); mutex_lock(&ctx->mutex); switch (cmd.option) { case RDMA_USER_CM_QUERY_ADDR: ret = ucma_query_addr(ctx, response, out_len); break; case RDMA_USER_CM_QUERY_PATH: ret = ucma_query_path(ctx, response, out_len); break; case RDMA_USER_CM_QUERY_GID: ret = ucma_query_gid(ctx, response, out_len); break; default: ret = -ENOSYS; break; } mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } static void ucma_copy_conn_param(struct rdma_cm_id *id, struct rdma_conn_param *dst, struct rdma_ucm_conn_param *src) { dst->private_data = src->private_data; dst->private_data_len = src->private_data_len; dst->responder_resources = src->responder_resources; dst->initiator_depth = src->initiator_depth; dst->flow_control = src->flow_control; dst->retry_count = src->retry_count; dst->rnr_retry_count = src->rnr_retry_count; dst->srq = src->srq; dst->qp_num = src->qp_num & 0xFFFFFF; dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? 
src->qkey : 0; } static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_conn_param conn_param; struct rdma_ucm_ece ece = {}; struct rdma_ucm_connect cmd; struct ucma_context *ctx; size_t in_size; int ret; if (in_len < offsetofend(typeof(cmd), reserved)) return -EINVAL; in_size = min_t(size_t, in_len, sizeof(cmd)); if (copy_from_user(&cmd, inbuf, in_size)) return -EFAULT; if (!cmd.conn_param.valid) return -EINVAL; ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param); if (offsetofend(typeof(cmd), ece) <= in_size) { ece.vendor_id = cmd.ece.vendor_id; ece.attr_mod = cmd.ece.attr_mod; } mutex_lock(&ctx->mutex); ret = rdma_connect_ece(ctx->cm_id, &conn_param, &ece); mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_listen cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); if (cmd.backlog <= 0 || cmd.backlog > max_backlog) cmd.backlog = max_backlog; atomic_set(&ctx->backlog, cmd.backlog); mutex_lock(&ctx->mutex); ret = rdma_listen(ctx->cm_id, cmd.backlog); mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_accept cmd; struct rdma_conn_param conn_param; struct rdma_ucm_ece ece = {}; struct ucma_context *ctx; size_t in_size; int ret; if (in_len < offsetofend(typeof(cmd), reserved)) return -EINVAL; in_size = min_t(size_t, in_len, sizeof(cmd)); if (copy_from_user(&cmd, inbuf, in_size)) return -EFAULT; ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); if (offsetofend(typeof(cmd), ece) <= in_size) { ece.vendor_id = cmd.ece.vendor_id; ece.attr_mod = cmd.ece.attr_mod; } if (cmd.conn_param.valid) { ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param); mutex_lock(&ctx->mutex); rdma_lock_handler(ctx->cm_id); ret = rdma_accept_ece(ctx->cm_id, &conn_param, &ece); if (!ret) { /* The uid must be set atomically with the handler */ ctx->uid = cmd.uid; } rdma_unlock_handler(ctx->cm_id); mutex_unlock(&ctx->mutex); } else { mutex_lock(&ctx->mutex); rdma_lock_handler(ctx->cm_id); ret = rdma_accept_ece(ctx->cm_id, NULL, &ece); rdma_unlock_handler(ctx->cm_id); mutex_unlock(&ctx->mutex); } ucma_put_ctx(ctx); return ret; } static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_reject cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if (!cmd.reason) cmd.reason = IB_CM_REJ_CONSUMER_DEFINED; switch (cmd.reason) { case IB_CM_REJ_CONSUMER_DEFINED: case IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED: break; default: return -EINVAL; } ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); mutex_lock(&ctx->mutex); ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len, cmd.reason); mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_disconnect cmd; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) 
return PTR_ERR(ctx); mutex_lock(&ctx->mutex); ret = rdma_disconnect(ctx->cm_id); mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_init_qp_attr(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_init_qp_attr cmd; struct ib_uverbs_qp_attr resp; struct ucma_context *ctx; struct ib_qp_attr qp_attr; int ret; if (out_len < sizeof(resp)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if (cmd.qp_state > IB_QPS_ERR) return -EINVAL; ctx = ucma_get_ctx_dev(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); resp.qp_attr_mask = 0; memset(&qp_attr, 0, sizeof qp_attr); qp_attr.qp_state = cmd.qp_state; mutex_lock(&ctx->mutex); ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask); mutex_unlock(&ctx->mutex); if (ret) goto out; ib_copy_qp_attr_to_user(ctx->cm_id->device, &resp, &qp_attr); if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) ret = -EFAULT; out: ucma_put_ctx(ctx); return ret; } static int ucma_set_option_id(struct ucma_context *ctx, int optname, void *optval, size_t optlen) { int ret = 0; switch (optname) { case RDMA_OPTION_ID_TOS: if (optlen != sizeof(u8)) { ret = -EINVAL; break; } rdma_set_service_type(ctx->cm_id, *((u8 *) optval)); break; case RDMA_OPTION_ID_REUSEADDR: if (optlen != sizeof(int)) { ret = -EINVAL; break; } ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0); break; case RDMA_OPTION_ID_AFONLY: if (optlen != sizeof(int)) { ret = -EINVAL; break; } ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0); break; case RDMA_OPTION_ID_ACK_TIMEOUT: if (optlen != sizeof(u8)) { ret = -EINVAL; break; } ret = rdma_set_ack_timeout(ctx->cm_id, *((u8 *)optval)); break; default: ret = -ENOSYS; } return ret; } static int ucma_set_ib_path(struct ucma_context *ctx, struct ib_path_rec_data *path_data, size_t optlen) { struct sa_path_rec sa_path; struct rdma_cm_event event; int ret; if (optlen % sizeof(*path_data)) return -EINVAL; for (; optlen; optlen -= sizeof(*path_data), path_data++) { if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY | IB_PATH_BIDIRECTIONAL)) break; } if (!optlen) return -EINVAL; if (!ctx->cm_id->device) return -EINVAL; memset(&sa_path, 0, sizeof(sa_path)); sa_path.rec_type = SA_PATH_REC_TYPE_IB; ib_sa_unpack_path(path_data->path_rec, &sa_path); if (rdma_cap_opa_ah(ctx->cm_id->device, ctx->cm_id->port_num)) { struct sa_path_rec opa; sa_convert_path_ib_to_opa(&opa, &sa_path); mutex_lock(&ctx->mutex); ret = rdma_set_ib_path(ctx->cm_id, &opa); mutex_unlock(&ctx->mutex); } else { mutex_lock(&ctx->mutex); ret = rdma_set_ib_path(ctx->cm_id, &sa_path); mutex_unlock(&ctx->mutex); } if (ret) return ret; memset(&event, 0, sizeof event); event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; return ucma_event_handler(ctx->cm_id, &event); } static int ucma_set_option_ib(struct ucma_context *ctx, int optname, void *optval, size_t optlen) { int ret; switch (optname) { case RDMA_OPTION_IB_PATH: ret = ucma_set_ib_path(ctx, optval, optlen); break; default: ret = -ENOSYS; } return ret; } static int ucma_set_option_level(struct ucma_context *ctx, int level, int optname, void *optval, size_t optlen) { int ret; switch (level) { case RDMA_OPTION_ID: mutex_lock(&ctx->mutex); ret = ucma_set_option_id(ctx, optname, optval, optlen); mutex_unlock(&ctx->mutex); break; case RDMA_OPTION_IB: ret = ucma_set_option_ib(ctx, optname, optval, optlen); break; default: ret = -ENOSYS; } return ret; } static ssize_t ucma_set_option(struct ucma_file *file, const char 
__user *inbuf, int in_len, int out_len) { struct rdma_ucm_set_option cmd; struct ucma_context *ctx; void *optval; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE)) return -EINVAL; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); optval = memdup_user(u64_to_user_ptr(cmd.optval), cmd.optlen); if (IS_ERR(optval)) { ret = PTR_ERR(optval); goto out; } ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval, cmd.optlen); kfree(optval); out: ucma_put_ctx(ctx); return ret; } static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_notify cmd; struct ucma_context *ctx; int ret = -EINVAL; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); mutex_lock(&ctx->mutex); if (ctx->cm_id->device) ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event); mutex_unlock(&ctx->mutex); ucma_put_ctx(ctx); return ret; } static ssize_t ucma_process_join(struct ucma_file *file, struct rdma_ucm_join_mcast *cmd, int out_len) { struct rdma_ucm_create_id_resp resp; struct ucma_context *ctx; struct ucma_multicast *mc; struct sockaddr *addr; int ret; u8 join_state; if (out_len < sizeof(resp)) return -ENOSPC; addr = (struct sockaddr *) &cmd->addr; if (cmd->addr_size != rdma_addr_size(addr)) return -EINVAL; if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER) join_state = BIT(FULLMEMBER_JOIN); else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER) join_state = BIT(SENDONLY_FULLMEMBER_JOIN); else return -EINVAL; ctx = ucma_get_ctx_dev(file, cmd->id); if (IS_ERR(ctx)) return PTR_ERR(ctx); mc = kzalloc(sizeof(*mc), GFP_KERNEL); if (!mc) { ret = -ENOMEM; goto err_put_ctx; } mc->ctx = ctx; mc->join_state = join_state; mc->uid = cmd->uid; memcpy(&mc->addr, addr, cmd->addr_size); xa_lock(&multicast_table); if (__xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b, GFP_KERNEL)) { ret = -ENOMEM; goto err_free_mc; } list_add_tail(&mc->list, &ctx->mc_list); xa_unlock(&multicast_table); mutex_lock(&ctx->mutex); ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr, join_state, mc); mutex_unlock(&ctx->mutex); if (ret) goto err_xa_erase; resp.id = mc->id; if (copy_to_user(u64_to_user_ptr(cmd->response), &resp, sizeof(resp))) { ret = -EFAULT; goto err_leave_multicast; } xa_store(&multicast_table, mc->id, mc, 0); ucma_put_ctx(ctx); return 0; err_leave_multicast: mutex_lock(&ctx->mutex); rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr); mutex_unlock(&ctx->mutex); ucma_cleanup_mc_events(mc); err_xa_erase: xa_lock(&multicast_table); list_del(&mc->list); __xa_erase(&multicast_table, mc->id); err_free_mc: xa_unlock(&multicast_table); kfree(mc); err_put_ctx: ucma_put_ctx(ctx); return ret; } static ssize_t ucma_join_ip_multicast(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_join_ip_mcast cmd; struct rdma_ucm_join_mcast join_cmd; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; join_cmd.response = cmd.response; join_cmd.uid = cmd.uid; join_cmd.id = cmd.id; join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr); if (!join_cmd.addr_size) return -EINVAL; join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER; memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size); return ucma_process_join(file, &join_cmd, out_len); } static ssize_t ucma_join_multicast(struct ucma_file *file, const char __user *inbuf, int in_len, int 
out_len) { struct rdma_ucm_join_mcast cmd; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; if (!rdma_addr_size_kss(&cmd.addr)) return -EINVAL; return ucma_process_join(file, &cmd, out_len); } static ssize_t ucma_leave_multicast(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_destroy_id cmd; struct rdma_ucm_destroy_id_resp resp; struct ucma_multicast *mc; int ret = 0; if (out_len < sizeof(resp)) return -ENOSPC; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; xa_lock(&multicast_table); mc = xa_load(&multicast_table, cmd.id); if (!mc) mc = ERR_PTR(-ENOENT); else if (READ_ONCE(mc->ctx->file) != file) mc = ERR_PTR(-EINVAL); else if (!refcount_inc_not_zero(&mc->ctx->ref)) mc = ERR_PTR(-ENXIO); if (IS_ERR(mc)) { xa_unlock(&multicast_table); ret = PTR_ERR(mc); goto out; } list_del(&mc->list); __xa_erase(&multicast_table, mc->id); xa_unlock(&multicast_table); mutex_lock(&mc->ctx->mutex); rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr); mutex_unlock(&mc->ctx->mutex); ucma_cleanup_mc_events(mc); ucma_put_ctx(mc->ctx); resp.events_reported = mc->events_reported; kfree(mc); if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) ret = -EFAULT; out: return ret; } static ssize_t ucma_migrate_id(struct ucma_file *new_file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_migrate_id cmd; struct rdma_ucm_migrate_resp resp; struct ucma_event *uevent, *tmp; struct ucma_context *ctx; LIST_HEAD(event_list); struct fd f; struct ucma_file *cur_file; int ret = 0; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; /* Get current fd to protect against it being closed */ f = fdget(cmd.fd); if (!f.file) return -ENOENT; if (f.file->f_op != &ucma_fops) { ret = -EINVAL; goto file_put; } cur_file = f.file->private_data; /* Validate current fd and prevent destruction of id. */ ctx = ucma_get_ctx(cur_file, cmd.id); if (IS_ERR(ctx)) { ret = PTR_ERR(ctx); goto file_put; } rdma_lock_handler(ctx->cm_id); /* * ctx->file can only be changed under the handler & xa_lock. xa_load() * must be checked again to ensure the ctx hasn't begun destruction * since the ucma_get_ctx(). */ xa_lock(&ctx_table); if (_ucma_find_context(cmd.id, cur_file) != ctx) { xa_unlock(&ctx_table); ret = -ENOENT; goto err_unlock; } ctx->file = new_file; xa_unlock(&ctx_table); mutex_lock(&cur_file->mut); list_del(&ctx->list); /* * At this point lock_handler() prevents addition of new uevents for * this ctx. 
*/ list_for_each_entry_safe(uevent, tmp, &cur_file->event_list, list) if (uevent->ctx == ctx) list_move_tail(&uevent->list, &event_list); resp.events_reported = ctx->events_reported; mutex_unlock(&cur_file->mut); mutex_lock(&new_file->mut); list_add_tail(&ctx->list, &new_file->ctx_list); list_splice_tail(&event_list, &new_file->event_list); mutex_unlock(&new_file->mut); if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) ret = -EFAULT; err_unlock: rdma_unlock_handler(ctx->cm_id); ucma_put_ctx(ctx); file_put: fdput(f); return ret; } static ssize_t (*ucma_cmd_table[])(struct ucma_file *file, const char __user *inbuf, int in_len, int out_len) = { [RDMA_USER_CM_CMD_CREATE_ID] = ucma_create_id, [RDMA_USER_CM_CMD_DESTROY_ID] = ucma_destroy_id, [RDMA_USER_CM_CMD_BIND_IP] = ucma_bind_ip, [RDMA_USER_CM_CMD_RESOLVE_IP] = ucma_resolve_ip, [RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route, [RDMA_USER_CM_CMD_QUERY_ROUTE] = ucma_query_route, [RDMA_USER_CM_CMD_CONNECT] = ucma_connect, [RDMA_USER_CM_CMD_LISTEN] = ucma_listen, [RDMA_USER_CM_CMD_ACCEPT] = ucma_accept, [RDMA_USER_CM_CMD_REJECT] = ucma_reject, [RDMA_USER_CM_CMD_DISCONNECT] = ucma_disconnect, [RDMA_USER_CM_CMD_INIT_QP_ATTR] = ucma_init_qp_attr, [RDMA_USER_CM_CMD_GET_EVENT] = ucma_get_event, [RDMA_USER_CM_CMD_GET_OPTION] = NULL, [RDMA_USER_CM_CMD_SET_OPTION] = ucma_set_option, [RDMA_USER_CM_CMD_NOTIFY] = ucma_notify, [RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast, [RDMA_USER_CM_CMD_LEAVE_MCAST] = ucma_leave_multicast, [RDMA_USER_CM_CMD_MIGRATE_ID] = ucma_migrate_id, [RDMA_USER_CM_CMD_QUERY] = ucma_query, [RDMA_USER_CM_CMD_BIND] = ucma_bind, [RDMA_USER_CM_CMD_RESOLVE_ADDR] = ucma_resolve_addr, [RDMA_USER_CM_CMD_JOIN_MCAST] = ucma_join_multicast }; static ssize_t ucma_write(struct file *filp, const char __user *buf, size_t len, loff_t *pos) { struct ucma_file *file = filp->private_data; struct rdma_ucm_cmd_hdr hdr; ssize_t ret; if (!ib_safe_file_access(filp)) { pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n", __func__, task_tgid_vnr(current), current->comm); return -EACCES; } if (len < sizeof(hdr)) return -EINVAL; if (copy_from_user(&hdr, buf, sizeof(hdr))) return -EFAULT; if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table)) return -EINVAL; hdr.cmd = array_index_nospec(hdr.cmd, ARRAY_SIZE(ucma_cmd_table)); if (hdr.in + sizeof(hdr) > len) return -EINVAL; if (!ucma_cmd_table[hdr.cmd]) return -ENOSYS; ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out); if (!ret) ret = len; return ret; } static __poll_t ucma_poll(struct file *filp, struct poll_table_struct *wait) { struct ucma_file *file = filp->private_data; __poll_t mask = 0; poll_wait(filp, &file->poll_wait, wait); if (!list_empty(&file->event_list)) mask = EPOLLIN | EPOLLRDNORM; return mask; } /* * ucma_open() does not need the BKL: * * - no global state is referred to; * - there is no ioctl method to race against; * - no further module initialization is required for open to work * after the device is registered. 
*/ static int ucma_open(struct inode *inode, struct file *filp) { struct ucma_file *file; file = kmalloc(sizeof *file, GFP_KERNEL); if (!file) return -ENOMEM; INIT_LIST_HEAD(&file->event_list); INIT_LIST_HEAD(&file->ctx_list); init_waitqueue_head(&file->poll_wait); mutex_init(&file->mut); filp->private_data = file; file->filp = filp; return stream_open(inode, filp); } static int ucma_close(struct inode *inode, struct file *filp) { struct ucma_file *file = filp->private_data; /* * All paths that touch ctx_list or ctx_list starting from write() are * prevented by this being a FD release function. The list_add_tail() in * ucma_connect_event_handler() can run concurrently, however it only * adds to the list *after* a listening ID. By only reading the first of * the list, and relying on ucma_destroy_private_ctx() to block * ucma_connect_event_handler(), no additional locking is needed. */ while (!list_empty(&file->ctx_list)) { struct ucma_context *ctx = list_first_entry( &file->ctx_list, struct ucma_context, list); WARN_ON(xa_cmpxchg(&ctx_table, ctx->id, ctx, XA_ZERO_ENTRY, GFP_KERNEL) != ctx); ucma_destroy_private_ctx(ctx); } kfree(file); return 0; } static const struct file_operations ucma_fops = { .owner = THIS_MODULE, .open = ucma_open, .release = ucma_close, .write = ucma_write, .poll = ucma_poll, .llseek = no_llseek, }; static struct miscdevice ucma_misc = { .minor = MISC_DYNAMIC_MINOR, .name = "rdma_cm", .nodename = "infiniband/rdma_cm", .mode = 0666, .fops = &ucma_fops, }; static int ucma_get_global_nl_info(struct ib_client_nl_info *res) { res->abi = RDMA_USER_CM_ABI_VERSION; res->cdev = ucma_misc.this_device; return 0; } static struct ib_client rdma_cma_client = { .name = "rdma_cm", .get_global_nl_info = ucma_get_global_nl_info, }; MODULE_ALIAS_RDMA_CLIENT("rdma_cm"); static ssize_t abi_version_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n", RDMA_USER_CM_ABI_VERSION); } static DEVICE_ATTR_RO(abi_version); static int __init ucma_init(void) { int ret; ret = misc_register(&ucma_misc); if (ret) return ret; ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version); if (ret) { pr_err("rdma_ucm: couldn't create abi_version attr\n"); goto err1; } ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table); if (!ucma_ctl_table_hdr) { pr_err("rdma_ucm: couldn't register sysctl paths\n"); ret = -ENOMEM; goto err2; } ret = ib_register_client(&rdma_cma_client); if (ret) goto err3; return 0; err3: unregister_net_sysctl_table(ucma_ctl_table_hdr); err2: device_remove_file(ucma_misc.this_device, &dev_attr_abi_version); err1: misc_deregister(&ucma_misc); return ret; } static void __exit ucma_cleanup(void) { ib_unregister_client(&rdma_cma_client); unregister_net_sysctl_table(ucma_ctl_table_hdr); device_remove_file(ucma_misc.this_device, &dev_attr_abi_version); misc_deregister(&ucma_misc); } module_init(ucma_init); module_exit(ucma_cleanup); |
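/*
 * Illustrative userspace sketch (not part of ucma.c): how a single command
 * is framed for the write() interface dispatched by ucma_write() above.
 * Each request is a struct rdma_ucm_cmd_hdr followed by hdr.in bytes of
 * command payload; the response (up to hdr.out bytes) is copied back
 * through the user pointer carried in the payload's 'response' field.
 * This is a minimal sketch assuming the uapi header <rdma/rdma_user_cm.h>
 * (which defines the structs, RDMA_USER_CM_CMD_CREATE_ID and RDMA_PS_TCP)
 * is installed; real applications should use librdmacm rather than writing
 * to /dev/infiniband/rdma_cm directly.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <rdma/rdma_user_cm.h>

int main(void)
{
	struct rdma_ucm_create_id_resp resp = { 0 };
	struct rdma_ucm_create_id cmd = { 0 };
	struct rdma_ucm_cmd_hdr hdr = { 0 };
	char buf[sizeof(hdr) + sizeof(cmd)];
	int fd;

	fd = open("/dev/infiniband/rdma_cm", O_RDWR);
	if (fd < 0)
		return 1;

	cmd.uid = 1;			 /* opaque cookie echoed back in events */
	cmd.ps = RDMA_PS_TCP;		 /* ucma_get_qp_type() maps this to IB_QPT_RC */
	cmd.response = (uintptr_t)&resp; /* ucma_create_id() writes resp.id here */

	hdr.cmd = RDMA_USER_CM_CMD_CREATE_ID;
	hdr.in = sizeof(cmd);		 /* payload length validated by ucma_write() */
	hdr.out = sizeof(resp);

	memcpy(buf, &hdr, sizeof(hdr));
	memcpy(buf + sizeof(hdr), &cmd, sizeof(cmd));

	/* On success ucma_write() returns the full length written. */
	if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf)) {
		close(fd);
		return 1;
	}
	printf("new cm_id handle: %u\n", resp.id);

	close(fd);
	return 0;
}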
// SPDX-License-Identifier: GPL-2.0-only
/*
 * klist.c - Routines for manipulating klists.
 *
 * Copyright (C) 2005 Patrick Mochel
 *
 * This klist interface provides a couple of structures that wrap around
 * struct list_head to provide explicit list "head" (struct klist) and list
 * "node" (struct klist_node) objects. For struct klist, a spinlock is
 * included that protects access to the actual list itself. struct
 * klist_node provides a pointer to the klist that owns it and a kref
 * reference count that indicates the number of current users of that node
 * in the list.
 *
 * The entire point is to provide an interface for iterating over a list
 * that is safe and allows for modification of the list during the
 * iteration (e.g. insertion and removal), including modification of the
 * current node on the list.
 *
 * It works using a 3rd object type - struct klist_iter - that is declared
 * and initialized before an iteration. klist_next() is used to acquire the
 * next element in the list. It returns NULL if there are no more items.
 * Internally, that routine takes the klist's lock, decrements the
 * reference count of the previous klist_node and increments the count of
 * the next klist_node. It then drops the lock and returns.
 *
 * There are primitives for adding and removing nodes to/from a klist.
 * When deleting, klist_del() will simply decrement the reference count.
 * Only when the count goes to 0 is the node removed from the list.
 * klist_remove() will try to delete the node from the list and block until
 * it is actually removed. This is useful for objects (like devices) that
 * have been removed from the system and must be freed (but must wait until
 * all accessors have finished).
 */

#include <linux/klist.h>
#include <linux/export.h>
#include <linux/sched.h>

/*
 * Use the lowest bit of n_klist to mark deleted nodes and exclude
 * dead ones from iteration.
*/ #define KNODE_DEAD 1LU #define KNODE_KLIST_MASK ~KNODE_DEAD static struct klist *knode_klist(struct klist_node *knode) { return (struct klist *) ((unsigned long)knode->n_klist & KNODE_KLIST_MASK); } static bool knode_dead(struct klist_node *knode) { return (unsigned long)knode->n_klist & KNODE_DEAD; } static void knode_set_klist(struct klist_node *knode, struct klist *klist) { knode->n_klist = klist; /* no knode deserves to start its life dead */ WARN_ON(knode_dead(knode)); } static void knode_kill(struct klist_node *knode) { /* and no knode should die twice ever either, see we're very humane */ WARN_ON(knode_dead(knode)); *(unsigned long *)&knode->n_klist |= KNODE_DEAD; } /** * klist_init - Initialize a klist structure. * @k: The klist we're initializing. * @get: The get function for the embedding object (NULL if none) * @put: The put function for the embedding object (NULL if none) * * Initialises the klist structure. If the klist_node structures are * going to be embedded in refcounted objects (necessary for safe * deletion) then the get/put arguments are used to initialise * functions that take and release references on the embedding * objects. */ void klist_init(struct klist *k, void (*get)(struct klist_node *), void (*put)(struct klist_node *)) { INIT_LIST_HEAD(&k->k_list); spin_lock_init(&k->k_lock); k->get = get; k->put = put; } EXPORT_SYMBOL_GPL(klist_init); static void add_head(struct klist *k, struct klist_node *n) { spin_lock(&k->k_lock); list_add(&n->n_node, &k->k_list); spin_unlock(&k->k_lock); } static void add_tail(struct klist *k, struct klist_node *n) { spin_lock(&k->k_lock); list_add_tail(&n->n_node, &k->k_list); spin_unlock(&k->k_lock); } static void klist_node_init(struct klist *k, struct klist_node *n) { INIT_LIST_HEAD(&n->n_node); kref_init(&n->n_ref); knode_set_klist(n, k); if (k->get) k->get(n); } /** * klist_add_head - Initialize a klist_node and add it to front. * @n: node we're adding. * @k: klist it's going on. */ void klist_add_head(struct klist_node *n, struct klist *k) { klist_node_init(k, n); add_head(k, n); } EXPORT_SYMBOL_GPL(klist_add_head); /** * klist_add_tail - Initialize a klist_node and add it to back. * @n: node we're adding. * @k: klist it's going on. */ void klist_add_tail(struct klist_node *n, struct klist *k) { klist_node_init(k, n); add_tail(k, n); } EXPORT_SYMBOL_GPL(klist_add_tail); /** * klist_add_behind - Init a klist_node and add it after an existing node * @n: node we're adding. * @pos: node to put @n after */ void klist_add_behind(struct klist_node *n, struct klist_node *pos) { struct klist *k = knode_klist(pos); klist_node_init(k, n); spin_lock(&k->k_lock); list_add(&n->n_node, &pos->n_node); spin_unlock(&k->k_lock); } EXPORT_SYMBOL_GPL(klist_add_behind); /** * klist_add_before - Init a klist_node and add it before an existing node * @n: node we're adding. 
* @pos: node to put @n after */ void klist_add_before(struct klist_node *n, struct klist_node *pos) { struct klist *k = knode_klist(pos); klist_node_init(k, n); spin_lock(&k->k_lock); list_add_tail(&n->n_node, &pos->n_node); spin_unlock(&k->k_lock); } EXPORT_SYMBOL_GPL(klist_add_before); struct klist_waiter { struct list_head list; struct klist_node *node; struct task_struct *process; int woken; }; static DEFINE_SPINLOCK(klist_remove_lock); static LIST_HEAD(klist_remove_waiters); static void klist_release(struct kref *kref) { struct klist_waiter *waiter, *tmp; struct klist_node *n = container_of(kref, struct klist_node, n_ref); WARN_ON(!knode_dead(n)); list_del(&n->n_node); spin_lock(&klist_remove_lock); list_for_each_entry_safe(waiter, tmp, &klist_remove_waiters, list) { if (waiter->node != n) continue; list_del(&waiter->list); waiter->woken = 1; mb(); wake_up_process(waiter->process); } spin_unlock(&klist_remove_lock); knode_set_klist(n, NULL); } static int klist_dec_and_del(struct klist_node *n) { return kref_put(&n->n_ref, klist_release); } static void klist_put(struct klist_node *n, bool kill) { struct klist *k = knode_klist(n); void (*put)(struct klist_node *) = k->put; spin_lock(&k->k_lock); if (kill) knode_kill(n); if (!klist_dec_and_del(n)) put = NULL; spin_unlock(&k->k_lock); if (put) put(n); } /** * klist_del - Decrement the reference count of node and try to remove. * @n: node we're deleting. */ void klist_del(struct klist_node *n) { klist_put(n, true); } EXPORT_SYMBOL_GPL(klist_del); /** * klist_remove - Decrement the refcount of node and wait for it to go away. * @n: node we're removing. */ void klist_remove(struct klist_node *n) { struct klist_waiter waiter; waiter.node = n; waiter.process = current; waiter.woken = 0; spin_lock(&klist_remove_lock); list_add(&waiter.list, &klist_remove_waiters); spin_unlock(&klist_remove_lock); klist_del(n); for (;;) { set_current_state(TASK_UNINTERRUPTIBLE); if (waiter.woken) break; schedule(); } __set_current_state(TASK_RUNNING); } EXPORT_SYMBOL_GPL(klist_remove); /** * klist_node_attached - Say whether a node is bound to a list or not. * @n: Node that we're testing. */ int klist_node_attached(struct klist_node *n) { return (n->n_klist != NULL); } EXPORT_SYMBOL_GPL(klist_node_attached); /** * klist_iter_init_node - Initialize a klist_iter structure. * @k: klist we're iterating. * @i: klist_iter we're filling. * @n: node to start with. * * Similar to klist_iter_init(), but starts the action off with @n, * instead of with the list head. */ void klist_iter_init_node(struct klist *k, struct klist_iter *i, struct klist_node *n) { i->i_klist = k; i->i_cur = NULL; if (n && kref_get_unless_zero(&n->n_ref)) i->i_cur = n; } EXPORT_SYMBOL_GPL(klist_iter_init_node); /** * klist_iter_init - Iniitalize a klist_iter structure. * @k: klist we're iterating. * @i: klist_iter structure we're filling. * * Similar to klist_iter_init_node(), but start with the list head. */ void klist_iter_init(struct klist *k, struct klist_iter *i) { klist_iter_init_node(k, i, NULL); } EXPORT_SYMBOL_GPL(klist_iter_init); /** * klist_iter_exit - Finish a list iteration. * @i: Iterator structure. * * Must be called when done iterating over list, as it decrements the * refcount of the current node. Necessary in case iteration exited before * the end of the list was reached, and always good form. 
*/ void klist_iter_exit(struct klist_iter *i) { if (i->i_cur) { klist_put(i->i_cur, false); i->i_cur = NULL; } } EXPORT_SYMBOL_GPL(klist_iter_exit); static struct klist_node *to_klist_node(struct list_head *n) { return container_of(n, struct klist_node, n_node); } /** * klist_prev - Ante up prev node in list. * @i: Iterator structure. * * First grab list lock. Decrement the reference count of the previous * node, if there was one. Grab the prev node, increment its reference * count, drop the lock, and return that prev node. */ struct klist_node *klist_prev(struct klist_iter *i) { void (*put)(struct klist_node *) = i->i_klist->put; struct klist_node *last = i->i_cur; struct klist_node *prev; unsigned long flags; spin_lock_irqsave(&i->i_klist->k_lock, flags); if (last) { prev = to_klist_node(last->n_node.prev); if (!klist_dec_and_del(last)) put = NULL; } else prev = to_klist_node(i->i_klist->k_list.prev); i->i_cur = NULL; while (prev != to_klist_node(&i->i_klist->k_list)) { if (likely(!knode_dead(prev))) { kref_get(&prev->n_ref); i->i_cur = prev; break; } prev = to_klist_node(prev->n_node.prev); } spin_unlock_irqrestore(&i->i_klist->k_lock, flags); if (put && last) put(last); return i->i_cur; } EXPORT_SYMBOL_GPL(klist_prev); /** * klist_next - Ante up next node in list. * @i: Iterator structure. * * First grab list lock. Decrement the reference count of the previous * node, if there was one. Grab the next node, increment its reference * count, drop the lock, and return that next node. */ struct klist_node *klist_next(struct klist_iter *i) { void (*put)(struct klist_node *) = i->i_klist->put; struct klist_node *last = i->i_cur; struct klist_node *next; unsigned long flags; spin_lock_irqsave(&i->i_klist->k_lock, flags); if (last) { next = to_klist_node(last->n_node.next); if (!klist_dec_and_del(last)) put = NULL; } else next = to_klist_node(i->i_klist->k_list.next); i->i_cur = NULL; while (next != to_klist_node(&i->i_klist->k_list)) { if (likely(!knode_dead(next))) { kref_get(&next->n_ref); i->i_cur = next; break; } next = to_klist_node(next->n_node.next); } spin_unlock_irqrestore(&i->i_klist->k_lock, flags); if (put && last) put(last); return i->i_cur; } EXPORT_SYMBOL_GPL(klist_next); |
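/*
 * Illustrative sketch (not part of klist.c): a minimal user of the API
 * documented in the header comment at the top of this file.  The container
 * type and names (demo_item, demo_list, demo_*) are hypothetical; the
 * pattern is similar to how the driver core links devices with klists.
 */
#include <linux/klist.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct demo_item {
	int value;
	struct klist_node knode;	/* linked into demo_list below */
};

static struct klist demo_list;

static void demo_init(void)
{
	/* No get/put callbacks: items are not refcounted beyond the klist. */
	klist_init(&demo_list, NULL, NULL);
}

static void demo_add(struct demo_item *item)
{
	/* Initializes item->knode and takes the list lock internally. */
	klist_add_tail(&item->knode, &demo_list);
}

static int demo_sum(void)
{
	struct klist_iter iter;
	struct klist_node *n;
	int sum = 0;

	klist_iter_init(&demo_list, &iter);
	/*
	 * klist_next() holds a reference on the node it returns, so the
	 * list may be modified concurrently while we process each item.
	 */
	while ((n = klist_next(&iter)))
		sum += container_of(n, struct demo_item, knode)->value;
	klist_iter_exit(&iter);
	return sum;
}

static void demo_del(struct demo_item *item)
{
	/* Blocks until all iterators have dropped their references. */
	klist_remove(&item->knode);
	kfree(item);
}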
872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 | // SPDX-License-Identifier: GPL-2.0 /* * Encryption policy functions for per-file encryption support. * * Copyright (C) 2015, Google, Inc. * Copyright (C) 2015, Motorola Mobility. * * Originally written by Michael Halcrow, 2015. * Modified by Jaegeuk Kim, 2015. * Modified by Eric Biggers, 2019 for v2 policy support. */ #include <linux/fs_context.h> #include <linux/random.h> #include <linux/seq_file.h> #include <linux/string.h> #include <linux/mount.h> #include "fscrypt_private.h" /** * fscrypt_policies_equal() - check whether two encryption policies are the same * @policy1: the first policy * @policy2: the second policy * * Return: %true if equal, else %false */ bool fscrypt_policies_equal(const union fscrypt_policy *policy1, const union fscrypt_policy *policy2) { if (policy1->version != policy2->version) return false; return !memcmp(policy1, policy2, fscrypt_policy_size(policy1)); } int fscrypt_policy_to_key_spec(const union fscrypt_policy *policy, struct fscrypt_key_specifier *key_spec) { switch (policy->version) { case FSCRYPT_POLICY_V1: key_spec->type = FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR; memcpy(key_spec->u.descriptor, policy->v1.master_key_descriptor, FSCRYPT_KEY_DESCRIPTOR_SIZE); return 0; case FSCRYPT_POLICY_V2: key_spec->type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER; memcpy(key_spec->u.identifier, policy->v2.master_key_identifier, FSCRYPT_KEY_IDENTIFIER_SIZE); return 0; default: WARN_ON_ONCE(1); return -EINVAL; } } const union fscrypt_policy *fscrypt_get_dummy_policy(struct super_block *sb) { if (!sb->s_cop->get_dummy_policy) return NULL; return sb->s_cop->get_dummy_policy(sb); } /* * Return %true if the given combination of encryption modes is supported for v1 * (and later) encryption policies. * * Do *not* add anything new here, since v1 encryption policies are deprecated. * New combinations of modes should go in fscrypt_valid_enc_modes_v2() only. */ static bool fscrypt_valid_enc_modes_v1(u32 contents_mode, u32 filenames_mode) { if (contents_mode == FSCRYPT_MODE_AES_256_XTS && filenames_mode == FSCRYPT_MODE_AES_256_CTS) return true; if (contents_mode == FSCRYPT_MODE_AES_128_CBC && filenames_mode == FSCRYPT_MODE_AES_128_CTS) return true; if (contents_mode == FSCRYPT_MODE_ADIANTUM && filenames_mode == FSCRYPT_MODE_ADIANTUM) return true; return false; } static bool fscrypt_valid_enc_modes_v2(u32 contents_mode, u32 filenames_mode) { if (contents_mode == FSCRYPT_MODE_AES_256_XTS && filenames_mode == FSCRYPT_MODE_AES_256_HCTR2) return true; if (contents_mode == FSCRYPT_MODE_SM4_XTS && filenames_mode == FSCRYPT_MODE_SM4_CTS) return true; return fscrypt_valid_enc_modes_v1(contents_mode, filenames_mode); } static bool supported_direct_key_modes(const struct inode *inode, u32 contents_mode, u32 filenames_mode) { const struct fscrypt_mode *mode; if (contents_mode != filenames_mode) { fscrypt_warn(inode, "Direct key flag not allowed with different contents and filenames modes"); return false; } mode = &fscrypt_modes[contents_mode]; if (mode->ivsize < offsetofend(union fscrypt_iv, nonce)) { fscrypt_warn(inode, "Direct key flag not allowed with %s", mode->friendly_name); return false; } return true; } static bool supported_iv_ino_lblk_policy(const struct fscrypt_policy_v2 *policy, const struct inode *inode) { const char *type = (policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) ? 
"IV_INO_LBLK_64" : "IV_INO_LBLK_32"; struct super_block *sb = inode->i_sb; /* * IV_INO_LBLK_* exist only because of hardware limitations, and * currently the only known use case for them involves AES-256-XTS. * That's also all we test currently. For these reasons, for now only * allow AES-256-XTS here. This can be relaxed later if a use case for * IV_INO_LBLK_* with other encryption modes arises. */ if (policy->contents_encryption_mode != FSCRYPT_MODE_AES_256_XTS) { fscrypt_warn(inode, "Can't use %s policy with contents mode other than AES-256-XTS", type); return false; } /* * It's unsafe to include inode numbers in the IVs if the filesystem can * potentially renumber inodes, e.g. via filesystem shrinking. */ if (!sb->s_cop->has_stable_inodes || !sb->s_cop->has_stable_inodes(sb)) { fscrypt_warn(inode, "Can't use %s policy on filesystem '%s' because it doesn't have stable inode numbers", type, sb->s_id); return false; } /* * IV_INO_LBLK_64 and IV_INO_LBLK_32 both require that inode numbers fit * in 32 bits. In principle, IV_INO_LBLK_32 could support longer inode * numbers because it hashes the inode number; however, currently the * inode number is gotten from inode::i_ino which is 'unsigned long'. * So for now the implementation limit is 32 bits. */ if (!sb->s_cop->has_32bit_inodes) { fscrypt_warn(inode, "Can't use %s policy on filesystem '%s' because its inode numbers are too long", type, sb->s_id); return false; } /* * IV_INO_LBLK_64 and IV_INO_LBLK_32 both require that file data unit * indices fit in 32 bits. */ if (fscrypt_max_file_dun_bits(sb, fscrypt_policy_v2_du_bits(policy, inode)) > 32) { fscrypt_warn(inode, "Can't use %s policy on filesystem '%s' because its maximum file size is too large", type, sb->s_id); return false; } return true; } static bool fscrypt_supported_v1_policy(const struct fscrypt_policy_v1 *policy, const struct inode *inode) { if (!fscrypt_valid_enc_modes_v1(policy->contents_encryption_mode, policy->filenames_encryption_mode)) { fscrypt_warn(inode, "Unsupported encryption modes (contents %d, filenames %d)", policy->contents_encryption_mode, policy->filenames_encryption_mode); return false; } if (policy->flags & ~(FSCRYPT_POLICY_FLAGS_PAD_MASK | FSCRYPT_POLICY_FLAG_DIRECT_KEY)) { fscrypt_warn(inode, "Unsupported encryption flags (0x%02x)", policy->flags); return false; } if ((policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) && !supported_direct_key_modes(inode, policy->contents_encryption_mode, policy->filenames_encryption_mode)) return false; if (IS_CASEFOLDED(inode)) { /* With v1, there's no way to derive dirhash keys. 
*/ fscrypt_warn(inode, "v1 policies can't be used on casefolded directories"); return false; } return true; } static bool fscrypt_supported_v2_policy(const struct fscrypt_policy_v2 *policy, const struct inode *inode) { int count = 0; if (!fscrypt_valid_enc_modes_v2(policy->contents_encryption_mode, policy->filenames_encryption_mode)) { fscrypt_warn(inode, "Unsupported encryption modes (contents %d, filenames %d)", policy->contents_encryption_mode, policy->filenames_encryption_mode); return false; } if (policy->flags & ~(FSCRYPT_POLICY_FLAGS_PAD_MASK | FSCRYPT_POLICY_FLAG_DIRECT_KEY | FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 | FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)) { fscrypt_warn(inode, "Unsupported encryption flags (0x%02x)", policy->flags); return false; } count += !!(policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY); count += !!(policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64); count += !!(policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32); if (count > 1) { fscrypt_warn(inode, "Mutually exclusive encryption flags (0x%02x)", policy->flags); return false; } if (policy->log2_data_unit_size) { if (!inode->i_sb->s_cop->supports_subblock_data_units) { fscrypt_warn(inode, "Filesystem does not support configuring crypto data unit size"); return false; } if (policy->log2_data_unit_size > inode->i_blkbits || policy->log2_data_unit_size < SECTOR_SHIFT /* 9 */) { fscrypt_warn(inode, "Unsupported log2_data_unit_size in encryption policy: %d", policy->log2_data_unit_size); return false; } if (policy->log2_data_unit_size != inode->i_blkbits && (policy->flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)) { /* * Not safe to enable yet, as we need to ensure that DUN * wraparound can only occur on a FS block boundary. */ fscrypt_warn(inode, "Sub-block data units not yet supported with IV_INO_LBLK_32"); return false; } } if ((policy->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) && !supported_direct_key_modes(inode, policy->contents_encryption_mode, policy->filenames_encryption_mode)) return false; if ((policy->flags & (FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 | FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)) && !supported_iv_ino_lblk_policy(policy, inode)) return false; if (memchr_inv(policy->__reserved, 0, sizeof(policy->__reserved))) { fscrypt_warn(inode, "Reserved bits set in encryption policy"); return false; } return true; } /** * fscrypt_supported_policy() - check whether an encryption policy is supported * @policy_u: the encryption policy * @inode: the inode on which the policy will be used * * Given an encryption policy, check whether all its encryption modes and other * settings are supported by this kernel on the given inode. (But we don't * currently check for crypto API support here, so attempting to use an * algorithm not configured into the crypto API will still fail later.) * * Return: %true if supported, else %false */ bool fscrypt_supported_policy(const union fscrypt_policy *policy_u, const struct inode *inode) { switch (policy_u->version) { case FSCRYPT_POLICY_V1: return fscrypt_supported_v1_policy(&policy_u->v1, inode); case FSCRYPT_POLICY_V2: return fscrypt_supported_v2_policy(&policy_u->v2, inode); } return false; } /** * fscrypt_new_context() - create a new fscrypt_context * @ctx_u: output context * @policy_u: input policy * @nonce: nonce to use * * Create an fscrypt_context for an inode that is being assigned the given * encryption policy. @nonce must be a new random nonce. * * Return: the size of the new context in bytes.
*/ static int fscrypt_new_context(union fscrypt_context *ctx_u, const union fscrypt_policy *policy_u, const u8 nonce[FSCRYPT_FILE_NONCE_SIZE]) { memset(ctx_u, 0, sizeof(*ctx_u)); switch (policy_u->version) { case FSCRYPT_POLICY_V1: { const struct fscrypt_policy_v1 *policy = &policy_u->v1; struct fscrypt_context_v1 *ctx = &ctx_u->v1; ctx->version = FSCRYPT_CONTEXT_V1; ctx->contents_encryption_mode = policy->contents_encryption_mode; ctx->filenames_encryption_mode = policy->filenames_encryption_mode; ctx->flags = policy->flags; memcpy(ctx->master_key_descriptor, policy->master_key_descriptor, sizeof(ctx->master_key_descriptor)); memcpy(ctx->nonce, nonce, FSCRYPT_FILE_NONCE_SIZE); return sizeof(*ctx); } case FSCRYPT_POLICY_V2: { const struct fscrypt_policy_v2 *policy = &policy_u->v2; struct fscrypt_context_v2 *ctx = &ctx_u->v2; ctx->version = FSCRYPT_CONTEXT_V2; ctx->contents_encryption_mode = policy->contents_encryption_mode; ctx->filenames_encryption_mode = policy->filenames_encryption_mode; ctx->flags = policy->flags; ctx->log2_data_unit_size = policy->log2_data_unit_size; memcpy(ctx->master_key_identifier, policy->master_key_identifier, sizeof(ctx->master_key_identifier)); memcpy(ctx->nonce, nonce, FSCRYPT_FILE_NONCE_SIZE); return sizeof(*ctx); } } BUG(); } /** * fscrypt_policy_from_context() - convert an fscrypt_context to * an fscrypt_policy * @policy_u: output policy * @ctx_u: input context * @ctx_size: size of input context in bytes * * Given an fscrypt_context, build the corresponding fscrypt_policy. * * Return: 0 on success, or -EINVAL if the fscrypt_context has an unrecognized * version number or size. * * This does *not* validate the settings within the policy itself, e.g. the * modes, flags, and reserved bits. Use fscrypt_supported_policy() for that. 
*/ int fscrypt_policy_from_context(union fscrypt_policy *policy_u, const union fscrypt_context *ctx_u, int ctx_size) { memset(policy_u, 0, sizeof(*policy_u)); if (!fscrypt_context_is_valid(ctx_u, ctx_size)) return -EINVAL; switch (ctx_u->version) { case FSCRYPT_CONTEXT_V1: { const struct fscrypt_context_v1 *ctx = &ctx_u->v1; struct fscrypt_policy_v1 *policy = &policy_u->v1; policy->version = FSCRYPT_POLICY_V1; policy->contents_encryption_mode = ctx->contents_encryption_mode; policy->filenames_encryption_mode = ctx->filenames_encryption_mode; policy->flags = ctx->flags; memcpy(policy->master_key_descriptor, ctx->master_key_descriptor, sizeof(policy->master_key_descriptor)); return 0; } case FSCRYPT_CONTEXT_V2: { const struct fscrypt_context_v2 *ctx = &ctx_u->v2; struct fscrypt_policy_v2 *policy = &policy_u->v2; policy->version = FSCRYPT_POLICY_V2; policy->contents_encryption_mode = ctx->contents_encryption_mode; policy->filenames_encryption_mode = ctx->filenames_encryption_mode; policy->flags = ctx->flags; policy->log2_data_unit_size = ctx->log2_data_unit_size; memcpy(policy->__reserved, ctx->__reserved, sizeof(policy->__reserved)); memcpy(policy->master_key_identifier, ctx->master_key_identifier, sizeof(policy->master_key_identifier)); return 0; } } /* unreachable */ return -EINVAL; } /* Retrieve an inode's encryption policy */ static int fscrypt_get_policy(struct inode *inode, union fscrypt_policy *policy) { const struct fscrypt_inode_info *ci; union fscrypt_context ctx; int ret; ci = fscrypt_get_inode_info(inode); if (ci) { /* key available, use the cached policy */ *policy = ci->ci_policy; return 0; } if (!IS_ENCRYPTED(inode)) return -ENODATA; ret = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); if (ret < 0) return (ret == -ERANGE) ? -EINVAL : ret; return fscrypt_policy_from_context(policy, &ctx, ret); } static int set_encryption_policy(struct inode *inode, const union fscrypt_policy *policy) { u8 nonce[FSCRYPT_FILE_NONCE_SIZE]; union fscrypt_context ctx; int ctxsize; int err; if (!fscrypt_supported_policy(policy, inode)) return -EINVAL; switch (policy->version) { case FSCRYPT_POLICY_V1: /* * The original encryption policy version provided no way of * verifying that the correct master key was supplied, which was * insecure in scenarios where multiple users have access to the * same encrypted files (even just read-only access). The new * encryption policy version fixes this and also implies use of * an improved key derivation function and allows non-root users * to securely remove keys. So as long as compatibility with * old kernels isn't required, it is recommended to use the new * policy version for all new encrypted directories. */ pr_warn_once("%s (pid %d) is setting deprecated v1 encryption policy; recommend upgrading to v2.\n", current->comm, current->pid); break; case FSCRYPT_POLICY_V2: err = fscrypt_verify_key_added(inode->i_sb, policy->v2.master_key_identifier); if (err) return err; if (policy->v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) pr_warn_once("%s (pid %d) is setting an IV_INO_LBLK_32 encryption policy. 
This should only be used if there are certain hardware limitations.\n", current->comm, current->pid); break; default: WARN_ON_ONCE(1); return -EINVAL; } get_random_bytes(nonce, FSCRYPT_FILE_NONCE_SIZE); ctxsize = fscrypt_new_context(&ctx, policy, nonce); return inode->i_sb->s_cop->set_context(inode, &ctx, ctxsize, NULL); } int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg) { union fscrypt_policy policy; union fscrypt_policy existing_policy; struct inode *inode = file_inode(filp); u8 version; int size; int ret; if (get_user(policy.version, (const u8 __user *)arg)) return -EFAULT; size = fscrypt_policy_size(&policy); if (size <= 0) return -EINVAL; /* * We should just copy the remaining 'size - 1' bytes here, but a * bizarre bug in gcc 7 and earlier (fixed by gcc r255731) causes gcc to * think that size can be 0 here (despite the check above!) *and* that * it's a compile-time constant. Thus it would think copy_from_user() * is passed compile-time constant ULONG_MAX, causing the compile-time * buffer overflow check to fail, breaking the build. This only occurred * when building an i386 kernel with -Os and branch profiling enabled. * * Work around it by just copying the first byte again... */ version = policy.version; if (copy_from_user(&policy, arg, size)) return -EFAULT; policy.version = version; if (!inode_owner_or_capable(&nop_mnt_idmap, inode)) return -EACCES; ret = mnt_want_write_file(filp); if (ret) return ret; inode_lock(inode); ret = fscrypt_get_policy(inode, &existing_policy); if (ret == -ENODATA) { if (!S_ISDIR(inode->i_mode)) ret = -ENOTDIR; else if (IS_DEADDIR(inode)) ret = -ENOENT; else if (!inode->i_sb->s_cop->empty_dir(inode)) ret = -ENOTEMPTY; else ret = set_encryption_policy(inode, &policy); } else if (ret == -EINVAL || (ret == 0 && !fscrypt_policies_equal(&policy, &existing_policy))) { /* The file already uses a different encryption policy. 
*/ ret = -EEXIST; } inode_unlock(inode); mnt_drop_write_file(filp); return ret; } EXPORT_SYMBOL(fscrypt_ioctl_set_policy); /* Original ioctl version; can only get the original policy version */ int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg) { union fscrypt_policy policy; int err; err = fscrypt_get_policy(file_inode(filp), &policy); if (err) return err; if (policy.version != FSCRYPT_POLICY_V1) return -EINVAL; if (copy_to_user(arg, &policy, sizeof(policy.v1))) return -EFAULT; return 0; } EXPORT_SYMBOL(fscrypt_ioctl_get_policy); /* Extended ioctl version; can get policies of any version */ int fscrypt_ioctl_get_policy_ex(struct file *filp, void __user *uarg) { struct fscrypt_get_policy_ex_arg arg; union fscrypt_policy *policy = (union fscrypt_policy *)&arg.policy; size_t policy_size; int err; /* arg is policy_size, then policy */ BUILD_BUG_ON(offsetof(typeof(arg), policy_size) != 0); BUILD_BUG_ON(offsetofend(typeof(arg), policy_size) != offsetof(typeof(arg), policy)); BUILD_BUG_ON(sizeof(arg.policy) != sizeof(*policy)); err = fscrypt_get_policy(file_inode(filp), policy); if (err) return err; policy_size = fscrypt_policy_size(policy); if (copy_from_user(&arg, uarg, sizeof(arg.policy_size))) return -EFAULT; if (policy_size > arg.policy_size) return -EOVERFLOW; arg.policy_size = policy_size; if (copy_to_user(uarg, &arg, sizeof(arg.policy_size) + policy_size)) return -EFAULT; return 0; } EXPORT_SYMBOL_GPL(fscrypt_ioctl_get_policy_ex); /* FS_IOC_GET_ENCRYPTION_NONCE: retrieve file's encryption nonce for testing */ int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg) { struct inode *inode = file_inode(filp); union fscrypt_context ctx; int ret; ret = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); if (ret < 0) return ret; if (!fscrypt_context_is_valid(&ctx, ret)) return -EINVAL; if (copy_to_user(arg, fscrypt_context_nonce(&ctx), FSCRYPT_FILE_NONCE_SIZE)) return -EFAULT; return 0; } EXPORT_SYMBOL_GPL(fscrypt_ioctl_get_nonce); /** * fscrypt_has_permitted_context() - is a file's encryption policy permitted * within its directory? * * @parent: inode for parent directory * @child: inode for file being looked up, opened, or linked into @parent * * Filesystems must call this before permitting access to an inode in a * situation where the parent directory is encrypted (either before allowing * ->lookup() to succeed, or for a regular file before allowing it to be opened) * and before any operation that involves linking an inode into an encrypted * directory, including link, rename, and cross rename. It enforces the * constraint that within a given encrypted directory tree, all files use the * same encryption policy. The pre-access check is needed to detect potentially * malicious offline violations of this constraint, while the link and rename * checks are needed to prevent online violations of this constraint. * * Return: 1 if permitted, 0 if forbidden. */ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child) { union fscrypt_policy parent_policy, child_policy; int err, err1, err2; /* No restrictions on file types which are never encrypted */ if (!S_ISREG(child->i_mode) && !S_ISDIR(child->i_mode) && !S_ISLNK(child->i_mode)) return 1; /* No restrictions if the parent directory is unencrypted */ if (!IS_ENCRYPTED(parent)) return 1; /* Encrypted directories must not contain unencrypted files */ if (!IS_ENCRYPTED(child)) return 0; /* * Both parent and child are encrypted, so verify they use the same * encryption policy. 
Compare the cached policies if the keys are * available, otherwise retrieve and compare the fscrypt_contexts. * * Note that the fscrypt_context retrieval will be required frequently * when accessing an encrypted directory tree without the key. * Performance-wise this is not a big deal because we already don't * really optimize for file access without the key (to the extent that * such access is even possible), given that any attempted access * already causes a fscrypt_context retrieval and keyring search. * * In any case, if an unexpected error occurs, fall back to "forbidden". */ err = fscrypt_get_encryption_info(parent, true); if (err) return 0; err = fscrypt_get_encryption_info(child, true); if (err) return 0; err1 = fscrypt_get_policy(parent, &parent_policy); err2 = fscrypt_get_policy(child, &child_policy); /* * Allow the case where the parent and child both have an unrecognized * encryption policy, so that files with an unrecognized encryption * policy can be deleted. */ if (err1 == -EINVAL && err2 == -EINVAL) return 1; if (err1 || err2) return 0; return fscrypt_policies_equal(&parent_policy, &child_policy); } EXPORT_SYMBOL(fscrypt_has_permitted_context); /* * Return the encryption policy that new files in the directory will inherit, or * NULL if none, or an ERR_PTR() on error. If the directory is encrypted, also * ensure that its key is set up, so that the new filename can be encrypted. */ const union fscrypt_policy *fscrypt_policy_to_inherit(struct inode *dir) { int err; if (IS_ENCRYPTED(dir)) { err = fscrypt_require_key(dir); if (err) return ERR_PTR(err); return &dir->i_crypt_info->ci_policy; } return fscrypt_get_dummy_policy(dir->i_sb); } /** * fscrypt_context_for_new_inode() - create an encryption context for a new inode * @ctx: where context should be written * @inode: inode from which to fetch policy and nonce * * Given an in-core "prepared" (via fscrypt_prepare_new_inode) inode, * generate a new context and write it to ctx. ctx _must_ be at least * FSCRYPT_SET_CONTEXT_MAX_SIZE bytes. * * Return: size of the resulting context or a negative error code. */ int fscrypt_context_for_new_inode(void *ctx, struct inode *inode) { struct fscrypt_inode_info *ci = inode->i_crypt_info; BUILD_BUG_ON(sizeof(union fscrypt_context) != FSCRYPT_SET_CONTEXT_MAX_SIZE); /* fscrypt_prepare_new_inode() should have set up the key already. */ if (WARN_ON_ONCE(!ci)) return -ENOKEY; return fscrypt_new_context(ctx, &ci->ci_policy, ci->ci_nonce); } EXPORT_SYMBOL_GPL(fscrypt_context_for_new_inode); /** * fscrypt_set_context() - Set the fscrypt context of a new inode * @inode: a new inode * @fs_data: private data given by FS and passed to ->set_context() * * This should be called after fscrypt_prepare_new_inode(), generally during a * filesystem transaction. Everything here must be %GFP_NOFS-safe. * * Return: 0 on success, -errno on failure */ int fscrypt_set_context(struct inode *inode, void *fs_data) { struct fscrypt_inode_info *ci = inode->i_crypt_info; union fscrypt_context ctx; int ctxsize; ctxsize = fscrypt_context_for_new_inode(&ctx, inode); if (ctxsize < 0) return ctxsize; /* * This may be the first time the inode number is available, so do any * delayed key setup that requires the inode number. 
*/ if (ci->ci_policy.version == FSCRYPT_POLICY_V2 && (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)) fscrypt_hash_inode_number(ci, ci->ci_master_key); return inode->i_sb->s_cop->set_context(inode, &ctx, ctxsize, fs_data); } EXPORT_SYMBOL_GPL(fscrypt_set_context); /** * fscrypt_parse_test_dummy_encryption() - parse the test_dummy_encryption mount option * @param: the mount option * @dummy_policy: (input/output) the place to write the dummy policy that will * result from parsing the option. Zero-initialize this. If a policy is * already set here (due to test_dummy_encryption being given multiple * times), then this function will verify that the policies are the same. * * Return: 0 on success; -EINVAL if the argument is invalid; -EEXIST if the * argument conflicts with one already specified; or -ENOMEM. */ int fscrypt_parse_test_dummy_encryption(const struct fs_parameter *param, struct fscrypt_dummy_policy *dummy_policy) { const char *arg = "v2"; union fscrypt_policy *policy; int err; if (param->type == fs_value_is_string && *param->string) arg = param->string; policy = kzalloc(sizeof(*policy), GFP_KERNEL); if (!policy) return -ENOMEM; if (!strcmp(arg, "v1")) { policy->version = FSCRYPT_POLICY_V1; policy->v1.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS; policy->v1.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS; memset(policy->v1.master_key_descriptor, 0x42, FSCRYPT_KEY_DESCRIPTOR_SIZE); } else if (!strcmp(arg, "v2")) { policy->version = FSCRYPT_POLICY_V2; policy->v2.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS; policy->v2.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS; err = fscrypt_get_test_dummy_key_identifier( policy->v2.master_key_identifier); if (err) goto out; } else { err = -EINVAL; goto out; } if (dummy_policy->policy) { if (fscrypt_policies_equal(policy, dummy_policy->policy)) err = 0; else err = -EEXIST; goto out; } dummy_policy->policy = policy; policy = NULL; err = 0; out: kfree(policy); return err; } EXPORT_SYMBOL_GPL(fscrypt_parse_test_dummy_encryption); /** * fscrypt_dummy_policies_equal() - check whether two dummy policies are equal * @p1: the first test dummy policy (may be unset) * @p2: the second test dummy policy (may be unset) * * Return: %true if the dummy policies are both set and equal, or both unset. */ bool fscrypt_dummy_policies_equal(const struct fscrypt_dummy_policy *p1, const struct fscrypt_dummy_policy *p2) { if (!p1->policy && !p2->policy) return true; if (!p1->policy || !p2->policy) return false; return fscrypt_policies_equal(p1->policy, p2->policy); } EXPORT_SYMBOL_GPL(fscrypt_dummy_policies_equal); /** * fscrypt_show_test_dummy_encryption() - show '-o test_dummy_encryption' * @seq: the seq_file to print the option to * @sep: the separator character to use * @sb: the filesystem whose options are being shown * * Show the test_dummy_encryption mount option, if it was specified. * This is mainly used for /proc/mounts. */ void fscrypt_show_test_dummy_encryption(struct seq_file *seq, char sep, struct super_block *sb) { const union fscrypt_policy *policy = fscrypt_get_dummy_policy(sb); int vers; if (!policy) return; vers = policy->version; if (vers == FSCRYPT_POLICY_V1) /* Handle numbering quirk */ vers = 1; seq_printf(seq, "%ctest_dummy_encryption=v%d", sep, vers); } EXPORT_SYMBOL_GPL(fscrypt_show_test_dummy_encryption); |
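/*
 * Illustrative userspace sketch, not part of the original file: how the
 * FS_IOC_SET_ENCRYPTION_POLICY and FS_IOC_GET_ENCRYPTION_POLICY_EX ioctls
 * implemented above are typically driven.  This assumes the UAPI
 * definitions from <linux/fscrypt.h>; the key named by "key_id" must
 * already have been added with FS_IOC_ADD_ENCRYPTION_KEY, and the helper
 * names are invented for the example.
 */
#include <linux/fscrypt.h>
#include <sys/ioctl.h>
#include <string.h>
#include <stdio.h>

/* Set a v2 policy on an empty directory; dispatches to fscrypt_ioctl_set_policy(). */
static int set_v2_policy(int dir_fd,
			 const __u8 key_id[FSCRYPT_KEY_IDENTIFIER_SIZE])
{
	struct fscrypt_policy_v2 policy;

	memset(&policy, 0, sizeof(policy));
	policy.version = FSCRYPT_POLICY_V2;
	policy.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS;
	policy.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS;
	policy.flags = FSCRYPT_POLICY_FLAGS_PAD_32;	/* pad filenames to 32 bytes */
	memcpy(policy.master_key_identifier, key_id,
	       FSCRYPT_KEY_IDENTIFIER_SIZE);

	return ioctl(dir_fd, FS_IOC_SET_ENCRYPTION_POLICY, &policy);
}

/*
 * Read back whichever policy version the file uses; dispatches to
 * fscrypt_ioctl_get_policy_ex() above.
 */
static int print_policy_version(int fd)
{
	struct fscrypt_get_policy_ex_arg arg;

	arg.policy_size = sizeof(arg.policy);
	if (ioctl(fd, FS_IOC_GET_ENCRYPTION_POLICY_EX, &arg) != 0)
		return -1;
	printf("encryption policy version: %u\n", arg.policy.version);
	return 0;
}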
// SPDX-License-Identifier: GPL-2.0-only /* * stackglue.c * * Code which implements an OCFS2 specific interface to underlying * cluster stacks. * * Copyright (C) 2007, 2009 Oracle. All rights reserved.
*/ #include <linux/list.h> #include <linux/spinlock.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/kmod.h> #include <linux/fs.h> #include <linux/kobject.h> #include <linux/sysfs.h> #include <linux/sysctl.h> #include "ocfs2_fs.h" #include "stackglue.h" #define OCFS2_STACK_PLUGIN_O2CB "o2cb" #define OCFS2_STACK_PLUGIN_USER "user" #define OCFS2_MAX_HB_CTL_PATH 256 static struct ocfs2_protocol_version locking_max_version; static DEFINE_SPINLOCK(ocfs2_stack_lock); static LIST_HEAD(ocfs2_stack_list); static char cluster_stack_name[OCFS2_STACK_LABEL_LEN + 1]; static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl"; /* * The stack currently in use. If not null, active_stack->sp_count > 0, * the module is pinned, and the locking protocol cannot be changed. */ static struct ocfs2_stack_plugin *active_stack; static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name) { struct ocfs2_stack_plugin *p; assert_spin_locked(&ocfs2_stack_lock); list_for_each_entry(p, &ocfs2_stack_list, sp_list) { if (!strcmp(p->sp_name, name)) return p; } return NULL; } static int ocfs2_stack_driver_request(const char *stack_name, const char *plugin_name) { int rc; struct ocfs2_stack_plugin *p; spin_lock(&ocfs2_stack_lock); /* * If the stack passed by the filesystem isn't the selected one, * we can't continue. */ if (strcmp(stack_name, cluster_stack_name)) { rc = -EBUSY; goto out; } if (active_stack) { /* * If the active stack isn't the one we want, it cannot * be selected right now. */ if (!strcmp(active_stack->sp_name, plugin_name)) rc = 0; else rc = -EBUSY; goto out; } p = ocfs2_stack_lookup(plugin_name); if (!p || !try_module_get(p->sp_owner)) { rc = -ENOENT; goto out; } active_stack = p; rc = 0; out: /* If we found it, pin it */ if (!rc) active_stack->sp_count++; spin_unlock(&ocfs2_stack_lock); return rc; } /* * This function looks up the appropriate stack and makes it active. If * there is no stack, it tries to load it. It will fail if the stack still * cannot be found. It will also fail if a different stack is in use. */ static int ocfs2_stack_driver_get(const char *stack_name) { int rc; char *plugin_name = OCFS2_STACK_PLUGIN_O2CB; /* * Classic stack does not pass in a stack name. This is * compatible with older tools as well. 
*/ if (!stack_name || !*stack_name) stack_name = OCFS2_STACK_PLUGIN_O2CB; if (strlen(stack_name) != OCFS2_STACK_LABEL_LEN) { printk(KERN_ERR "ocfs2 passed an invalid cluster stack label: \"%s\"\n", stack_name); return -EINVAL; } /* Anything that isn't the classic stack is a user stack */ if (strcmp(stack_name, OCFS2_STACK_PLUGIN_O2CB)) plugin_name = OCFS2_STACK_PLUGIN_USER; rc = ocfs2_stack_driver_request(stack_name, plugin_name); if (rc == -ENOENT) { request_module("ocfs2_stack_%s", plugin_name); rc = ocfs2_stack_driver_request(stack_name, plugin_name); } if (rc == -ENOENT) { printk(KERN_ERR "ocfs2: Cluster stack driver \"%s\" cannot be found\n", plugin_name); } else if (rc == -EBUSY) { printk(KERN_ERR "ocfs2: A different cluster stack is in use\n"); } return rc; } static void ocfs2_stack_driver_put(void) { spin_lock(&ocfs2_stack_lock); BUG_ON(active_stack == NULL); BUG_ON(active_stack->sp_count == 0); active_stack->sp_count--; if (!active_stack->sp_count) { module_put(active_stack->sp_owner); active_stack = NULL; } spin_unlock(&ocfs2_stack_lock); } int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin) { int rc; spin_lock(&ocfs2_stack_lock); if (!ocfs2_stack_lookup(plugin->sp_name)) { plugin->sp_count = 0; plugin->sp_max_proto = locking_max_version; list_add(&plugin->sp_list, &ocfs2_stack_list); printk(KERN_INFO "ocfs2: Registered cluster interface %s\n", plugin->sp_name); rc = 0; } else { printk(KERN_ERR "ocfs2: Stack \"%s\" already registered\n", plugin->sp_name); rc = -EEXIST; } spin_unlock(&ocfs2_stack_lock); return rc; } EXPORT_SYMBOL_GPL(ocfs2_stack_glue_register); void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin) { struct ocfs2_stack_plugin *p; spin_lock(&ocfs2_stack_lock); p = ocfs2_stack_lookup(plugin->sp_name); if (p) { BUG_ON(p != plugin); BUG_ON(plugin == active_stack); BUG_ON(plugin->sp_count != 0); list_del_init(&plugin->sp_list); printk(KERN_INFO "ocfs2: Unregistered cluster interface %s\n", plugin->sp_name); } else { printk(KERN_ERR "Stack \"%s\" is not registered\n", plugin->sp_name); } spin_unlock(&ocfs2_stack_lock); } EXPORT_SYMBOL_GPL(ocfs2_stack_glue_unregister); void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_proto) { struct ocfs2_stack_plugin *p; spin_lock(&ocfs2_stack_lock); if (memcmp(max_proto, &locking_max_version, sizeof(struct ocfs2_protocol_version))) { BUG_ON(locking_max_version.pv_major != 0); locking_max_version = *max_proto; list_for_each_entry(p, &ocfs2_stack_list, sp_list) { p->sp_max_proto = locking_max_version; } } spin_unlock(&ocfs2_stack_lock); } EXPORT_SYMBOL_GPL(ocfs2_stack_glue_set_max_proto_version); /* * The ocfs2_dlm_lock() and ocfs2_dlm_unlock() functions take no argument * for the ast and bast functions. They will pass the lksb to the ast * and bast. The caller can wrap the lksb with their own structure to * get more information. 
*/ int ocfs2_dlm_lock(struct ocfs2_cluster_connection *conn, int mode, struct ocfs2_dlm_lksb *lksb, u32 flags, void *name, unsigned int namelen) { if (!lksb->lksb_conn) lksb->lksb_conn = conn; else BUG_ON(lksb->lksb_conn != conn); return active_stack->sp_ops->dlm_lock(conn, mode, lksb, flags, name, namelen); } EXPORT_SYMBOL_GPL(ocfs2_dlm_lock); int ocfs2_dlm_unlock(struct ocfs2_cluster_connection *conn, struct ocfs2_dlm_lksb *lksb, u32 flags) { BUG_ON(lksb->lksb_conn == NULL); return active_stack->sp_ops->dlm_unlock(conn, lksb, flags); } EXPORT_SYMBOL_GPL(ocfs2_dlm_unlock); int ocfs2_dlm_lock_status(struct ocfs2_dlm_lksb *lksb) { return active_stack->sp_ops->lock_status(lksb); } EXPORT_SYMBOL_GPL(ocfs2_dlm_lock_status); int ocfs2_dlm_lvb_valid(struct ocfs2_dlm_lksb *lksb) { return active_stack->sp_ops->lvb_valid(lksb); } EXPORT_SYMBOL_GPL(ocfs2_dlm_lvb_valid); void *ocfs2_dlm_lvb(struct ocfs2_dlm_lksb *lksb) { return active_stack->sp_ops->lock_lvb(lksb); } EXPORT_SYMBOL_GPL(ocfs2_dlm_lvb); void ocfs2_dlm_dump_lksb(struct ocfs2_dlm_lksb *lksb) { active_stack->sp_ops->dump_lksb(lksb); } EXPORT_SYMBOL_GPL(ocfs2_dlm_dump_lksb); int ocfs2_stack_supports_plocks(void) { return active_stack && active_stack->sp_ops->plock; } EXPORT_SYMBOL_GPL(ocfs2_stack_supports_plocks); /* * ocfs2_plock() can only be safely called if * ocfs2_stack_supports_plocks() returned true */ int ocfs2_plock(struct ocfs2_cluster_connection *conn, u64 ino, struct file *file, int cmd, struct file_lock *fl) { WARN_ON_ONCE(active_stack->sp_ops->plock == NULL); if (active_stack->sp_ops->plock) return active_stack->sp_ops->plock(conn, ino, file, cmd, fl); return -EOPNOTSUPP; } EXPORT_SYMBOL_GPL(ocfs2_plock); int ocfs2_cluster_connect(const char *stack_name, const char *cluster_name, int cluster_name_len, const char *group, int grouplen, struct ocfs2_locking_protocol *lproto, void (*recovery_handler)(int node_num, void *recovery_data), void *recovery_data, struct ocfs2_cluster_connection **conn) { int rc = 0; struct ocfs2_cluster_connection *new_conn; BUG_ON(group == NULL); BUG_ON(conn == NULL); BUG_ON(recovery_handler == NULL); if (grouplen > GROUP_NAME_MAX) { rc = -EINVAL; goto out; } if (memcmp(&lproto->lp_max_version, &locking_max_version, sizeof(struct ocfs2_protocol_version))) { rc = -EINVAL; goto out; } new_conn = kzalloc(sizeof(struct ocfs2_cluster_connection), GFP_KERNEL); if (!new_conn) { rc = -ENOMEM; goto out; } strscpy(new_conn->cc_name, group, GROUP_NAME_MAX + 1); new_conn->cc_namelen = grouplen; if (cluster_name_len) strscpy(new_conn->cc_cluster_name, cluster_name, CLUSTER_NAME_MAX + 1); new_conn->cc_cluster_name_len = cluster_name_len; new_conn->cc_recovery_handler = recovery_handler; new_conn->cc_recovery_data = recovery_data; new_conn->cc_proto = lproto; /* Start the new connection at our maximum compatibility level */ new_conn->cc_version = lproto->lp_max_version; /* This will pin the stack driver if successful */ rc = ocfs2_stack_driver_get(stack_name); if (rc) goto out_free; rc = active_stack->sp_ops->connect(new_conn); if (rc) { ocfs2_stack_driver_put(); goto out_free; } *conn = new_conn; out_free: if (rc) kfree(new_conn); out: return rc; } EXPORT_SYMBOL_GPL(ocfs2_cluster_connect); /* The caller will ensure all nodes have the same cluster stack */ int ocfs2_cluster_connect_agnostic(const char *group, int grouplen, struct ocfs2_locking_protocol *lproto, void (*recovery_handler)(int node_num, void *recovery_data), void *recovery_data, struct ocfs2_cluster_connection **conn) { char *stack_name = NULL; if 
(cluster_stack_name[0]) stack_name = cluster_stack_name; return ocfs2_cluster_connect(stack_name, NULL, 0, group, grouplen, lproto, recovery_handler, recovery_data, conn); } EXPORT_SYMBOL_GPL(ocfs2_cluster_connect_agnostic); /* If hangup_pending is 0, the stack driver will be dropped */ int ocfs2_cluster_disconnect(struct ocfs2_cluster_connection *conn, int hangup_pending) { int ret; BUG_ON(conn == NULL); ret = active_stack->sp_ops->disconnect(conn); /* XXX Should we free it anyway? */ if (!ret) { kfree(conn); if (!hangup_pending) ocfs2_stack_driver_put(); } return ret; } EXPORT_SYMBOL_GPL(ocfs2_cluster_disconnect); /* * Leave the group for this filesystem. This is executed by a userspace * program (stored in ocfs2_hb_ctl_path). */ static void ocfs2_leave_group(const char *group) { int ret; char *argv[5], *envp[3]; argv[0] = ocfs2_hb_ctl_path; argv[1] = "-K"; argv[2] = "-u"; argv[3] = (char *)group; argv[4] = NULL; /* minimal command environment taken from cpu_run_sbin_hotplug */ envp[0] = "HOME=/"; envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; envp[2] = NULL; ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC); if (ret < 0) { printk(KERN_ERR "ocfs2: Error %d running user helper " "\"%s %s %s %s\"\n", ret, argv[0], argv[1], argv[2], argv[3]); } } /* * Hangup is a required post-umount. ocfs2-tools software expects the * filesystem to call "ocfs2_hb_ctl" during unmount. This happens * regardless of whether the DLM got started, so we can't do it * in ocfs2_cluster_disconnect(). The ocfs2_leave_group() function does * the actual work. */ void ocfs2_cluster_hangup(const char *group, int grouplen) { BUG_ON(group == NULL); BUG_ON(group[grouplen] != '\0'); ocfs2_leave_group(group); /* cluster_disconnect() was called with hangup_pending==1 */ ocfs2_stack_driver_put(); } EXPORT_SYMBOL_GPL(ocfs2_cluster_hangup); int ocfs2_cluster_this_node(struct ocfs2_cluster_connection *conn, unsigned int *node) { return active_stack->sp_ops->this_node(conn, node); } EXPORT_SYMBOL_GPL(ocfs2_cluster_this_node); /* * Sysfs bits */ static ssize_t ocfs2_max_locking_protocol_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { ssize_t ret = 0; spin_lock(&ocfs2_stack_lock); if (locking_max_version.pv_major) ret = snprintf(buf, PAGE_SIZE, "%u.%u\n", locking_max_version.pv_major, locking_max_version.pv_minor); spin_unlock(&ocfs2_stack_lock); return ret; } static struct kobj_attribute ocfs2_attr_max_locking_protocol = __ATTR(max_locking_protocol, S_IRUGO, ocfs2_max_locking_protocol_show, NULL); static ssize_t ocfs2_loaded_cluster_plugins_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { ssize_t ret = 0, total = 0, remain = PAGE_SIZE; struct ocfs2_stack_plugin *p; spin_lock(&ocfs2_stack_lock); list_for_each_entry(p, &ocfs2_stack_list, sp_list) { ret = snprintf(buf, remain, "%s\n", p->sp_name); if (ret >= remain) { /* snprintf() didn't fit */ total = -E2BIG; break; } total += ret; remain -= ret; } spin_unlock(&ocfs2_stack_lock); return total; } static struct kobj_attribute ocfs2_attr_loaded_cluster_plugins = __ATTR(loaded_cluster_plugins, S_IRUGO, ocfs2_loaded_cluster_plugins_show, NULL); static ssize_t ocfs2_active_cluster_plugin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { ssize_t ret = 0; spin_lock(&ocfs2_stack_lock); if (active_stack) { ret = snprintf(buf, PAGE_SIZE, "%s\n", active_stack->sp_name); if (ret >= PAGE_SIZE) ret = -E2BIG; } spin_unlock(&ocfs2_stack_lock); return ret; } static struct kobj_attribute ocfs2_attr_active_cluster_plugin = 
__ATTR(active_cluster_plugin, S_IRUGO, ocfs2_active_cluster_plugin_show, NULL); static ssize_t ocfs2_cluster_stack_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { ssize_t ret; spin_lock(&ocfs2_stack_lock); ret = snprintf(buf, PAGE_SIZE, "%s\n", cluster_stack_name); spin_unlock(&ocfs2_stack_lock); return ret; } static ssize_t ocfs2_cluster_stack_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { size_t len = count; ssize_t ret; if (len == 0) return len; if (buf[len - 1] == '\n') len--; if ((len != OCFS2_STACK_LABEL_LEN) || (strnlen(buf, len) != len)) return -EINVAL; spin_lock(&ocfs2_stack_lock); if (active_stack) { if (!strncmp(buf, cluster_stack_name, len)) ret = count; else ret = -EBUSY; } else { memcpy(cluster_stack_name, buf, len); ret = count; } spin_unlock(&ocfs2_stack_lock); return ret; } static struct kobj_attribute ocfs2_attr_cluster_stack = __ATTR(cluster_stack, S_IRUGO | S_IWUSR, ocfs2_cluster_stack_show, ocfs2_cluster_stack_store); static ssize_t ocfs2_dlm_recover_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "1\n"); } static struct kobj_attribute ocfs2_attr_dlm_recover_support = __ATTR(dlm_recover_callback_support, S_IRUGO, ocfs2_dlm_recover_show, NULL); static struct attribute *ocfs2_attrs[] = { &ocfs2_attr_max_locking_protocol.attr, &ocfs2_attr_loaded_cluster_plugins.attr, &ocfs2_attr_active_cluster_plugin.attr, &ocfs2_attr_cluster_stack.attr, &ocfs2_attr_dlm_recover_support.attr, NULL, }; static const struct attribute_group ocfs2_attr_group = { .attrs = ocfs2_attrs, }; struct kset *ocfs2_kset; EXPORT_SYMBOL_GPL(ocfs2_kset); static void ocfs2_sysfs_exit(void) { kset_unregister(ocfs2_kset); } static int ocfs2_sysfs_init(void) { int ret; ocfs2_kset = kset_create_and_add("ocfs2", NULL, fs_kobj); if (!ocfs2_kset) return -ENOMEM; ret = sysfs_create_group(&ocfs2_kset->kobj, &ocfs2_attr_group); if (ret) goto error; return 0; error: kset_unregister(ocfs2_kset); return ret; } /* * Sysctl bits * * The sysctl lives at /proc/sys/fs/ocfs2/nm/hb_ctl_path. The 'nm' doesn't * make as much sense in a multiple cluster stack world, but it's safer * and easier to preserve the name. */ static struct ctl_table ocfs2_nm_table[] = { { .procname = "hb_ctl_path", .data = ocfs2_hb_ctl_path, .maxlen = OCFS2_MAX_HB_CTL_PATH, .mode = 0644, .proc_handler = proc_dostring, }, { } }; static struct ctl_table_header *ocfs2_table_header; /* * Initialization */ static int __init ocfs2_stack_glue_init(void) { int ret; strcpy(cluster_stack_name, OCFS2_STACK_PLUGIN_O2CB); ocfs2_table_header = register_sysctl("fs/ocfs2/nm", ocfs2_nm_table); if (!ocfs2_table_header) { printk(KERN_ERR "ocfs2 stack glue: unable to register sysctl\n"); return -ENOMEM; /* or something. */ } ret = ocfs2_sysfs_init(); if (ret) unregister_sysctl_table(ocfs2_table_header); return ret; } static void __exit ocfs2_stack_glue_exit(void) { memset(&locking_max_version, 0, sizeof(struct ocfs2_protocol_version)); ocfs2_sysfs_exit(); if (ocfs2_table_header) unregister_sysctl_table(ocfs2_table_header); } MODULE_AUTHOR("Oracle"); MODULE_DESCRIPTION("ocfs2 cluster stack glue layer"); MODULE_LICENSE("GPL"); module_init(ocfs2_stack_glue_init); module_exit(ocfs2_stack_glue_exit); |
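/*
 * Illustrative sketch, not part of the original file: the ocfs2_dlm_lock()
 * kernel-doc above notes that the ast/bast callbacks receive only the lksb,
 * so callers embed the lksb in a larger lock resource and recover it with
 * container_of() (ocfs2 itself does this with struct ocfs2_lock_res).  The
 * "my_lockres" structure and helpers are invented for the example, and the
 * callback signatures are assumed to match struct ocfs2_locking_protocol in
 * stackglue.h.
 */
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/string.h>
#include "stackglue.h"

struct my_lockres {
	char			name[32];	/* lock name given to ocfs2_dlm_lock() */
	int			last_status;
	struct completion	granted;
	struct ocfs2_dlm_lksb	lksb;		/* handed to the stack glue */
};

/* Would be wired up via struct ocfs2_locking_protocol (lp_lock_ast). */
static void my_lock_ast(struct ocfs2_dlm_lksb *lksb)
{
	struct my_lockres *res = container_of(lksb, struct my_lockres, lksb);

	/* The wrapper supplies the context; the lksb alone carries the DLM status. */
	res->last_status = ocfs2_dlm_lock_status(lksb);
	complete(&res->granted);
}

/* Would be wired up via lp_blocking_ast. */
static void my_blocking_ast(struct ocfs2_dlm_lksb *lksb, int level)
{
	struct my_lockres *res = container_of(lksb, struct my_lockres, lksb);

	pr_info("lock %s is blocking another node wanting level %d\n",
		res->name, level);
}

static int my_take_lock(struct ocfs2_cluster_connection *conn,
			struct my_lockres *res, int mode)
{
	init_completion(&res->granted);
	/* lksb_conn is filled in by ocfs2_dlm_lock() on first use. */
	return ocfs2_dlm_lock(conn, mode, &res->lksb, 0 /* flags */,
			      res->name, strlen(res->name));
}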
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/cpu.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/sched/signal.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/page-isolation.h>
#include <linux/kasan.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/page_owner.h>
#include <linux/psi.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
/*
 * Fragmentation score check interval for proactive compaction purposes.
 */
#define HPAGE_FRAG_CHECK_INTERVAL_MSEC	(500)

static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

#define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))

/*
 * Page order with-respect-to which proactive compaction
 * calculates external fragmentation, which is used as
 * the "fragmentation score" of a node/zone.
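 * For example, with CONFIG_TRANSPARENT_HUGEPAGE on x86-64 with 4KB base
 * pages this is HPAGE_PMD_ORDER == 9, so the score reflects how hard it
 * is to assemble 2MB blocks.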
*/ #if defined CONFIG_TRANSPARENT_HUGEPAGE #define COMPACTION_HPAGE_ORDER HPAGE_PMD_ORDER #elif defined CONFIG_HUGETLBFS #define COMPACTION_HPAGE_ORDER HUGETLB_PAGE_ORDER #else #define COMPACTION_HPAGE_ORDER (PMD_SHIFT - PAGE_SHIFT) #endif static unsigned long release_freepages(struct list_head *freelist) { struct page *page, *next; unsigned long high_pfn = 0; list_for_each_entry_safe(page, next, freelist, lru) { unsigned long pfn = page_to_pfn(page); list_del(&page->lru); __free_page(page); if (pfn > high_pfn) high_pfn = pfn; } return high_pfn; } static void split_map_pages(struct list_head *list) { unsigned int i, order, nr_pages; struct page *page, *next; LIST_HEAD(tmp_list); list_for_each_entry_safe(page, next, list, lru) { list_del(&page->lru); order = page_private(page); nr_pages = 1 << order; post_alloc_hook(page, order, __GFP_MOVABLE); if (order) split_page(page, order); for (i = 0; i < nr_pages; i++) { list_add(&page->lru, &tmp_list); page++; } } list_splice(&tmp_list, list); } #ifdef CONFIG_COMPACTION bool PageMovable(struct page *page) { const struct movable_operations *mops; VM_BUG_ON_PAGE(!PageLocked(page), page); if (!__PageMovable(page)) return false; mops = page_movable_ops(page); if (mops) return true; return false; } void __SetPageMovable(struct page *page, const struct movable_operations *mops) { VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE((unsigned long)mops & PAGE_MAPPING_MOVABLE, page); page->mapping = (void *)((unsigned long)mops | PAGE_MAPPING_MOVABLE); } EXPORT_SYMBOL(__SetPageMovable); void __ClearPageMovable(struct page *page) { VM_BUG_ON_PAGE(!PageMovable(page), page); /* * This page still has the type of a movable page, but it's * actually not movable any more. */ page->mapping = (void *)PAGE_MAPPING_MOVABLE; } EXPORT_SYMBOL(__ClearPageMovable); /* Do not skip compaction more than 64 times */ #define COMPACT_MAX_DEFER_SHIFT 6 /* * Compaction is deferred when compaction fails to result in a page * allocation success. 1 << compact_defer_shift, compactions are skipped up * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT */ static void defer_compaction(struct zone *zone, int order) { zone->compact_considered = 0; zone->compact_defer_shift++; if (order < zone->compact_order_failed) zone->compact_order_failed = order; if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT) zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT; trace_mm_compaction_defer_compaction(zone, order); } /* Returns true if compaction should be skipped this time */ static bool compaction_deferred(struct zone *zone, int order) { unsigned long defer_limit = 1UL << zone->compact_defer_shift; if (order < zone->compact_order_failed) return false; /* Avoid possible overflow */ if (++zone->compact_considered >= defer_limit) { zone->compact_considered = defer_limit; return false; } trace_mm_compaction_deferred(zone, order); return true; } /* * Update defer tracking counters after successful compaction of given order, * which means an allocation either succeeded (alloc_success == true) or is * expected to succeed. 
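* E.g. after compaction succeeds for an order-9 request, compact_order_failed is raised above 9, so further order-9 requests are not considered deferred until a new failure is recorded.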
*/ void compaction_defer_reset(struct zone *zone, int order, bool alloc_success) { if (alloc_success) { zone->compact_considered = 0; zone->compact_defer_shift = 0; } if (order >= zone->compact_order_failed) zone->compact_order_failed = order + 1; trace_mm_compaction_defer_reset(zone, order); } /* Returns true if restarting compaction after many failures */ static bool compaction_restarting(struct zone *zone, int order) { if (order < zone->compact_order_failed) return false; return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT && zone->compact_considered >= 1UL << zone->compact_defer_shift; } /* Returns true if the pageblock should be scanned for pages to isolate. */ static inline bool isolation_suitable(struct compact_control *cc, struct page *page) { if (cc->ignore_skip_hint) return true; return !get_pageblock_skip(page); } static void reset_cached_positions(struct zone *zone) { zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn; zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn; zone->compact_cached_free_pfn = pageblock_start_pfn(zone_end_pfn(zone) - 1); } #ifdef CONFIG_SPARSEMEM /* * If the PFN falls into an offline section, return the start PFN of the * next online section. If the PFN falls into an online section or if * there is no next online section, return 0. */ static unsigned long skip_offline_sections(unsigned long start_pfn) { unsigned long start_nr = pfn_to_section_nr(start_pfn); if (online_section_nr(start_nr)) return 0; while (++start_nr <= __highest_present_section_nr) { if (online_section_nr(start_nr)) return section_nr_to_pfn(start_nr); } return 0; } /* * If the PFN falls into an offline section, return the end PFN of the * next online section in reverse. If the PFN falls into an online section * or if there is no next online section in reverse, return 0. */ static unsigned long skip_offline_sections_reverse(unsigned long start_pfn) { unsigned long start_nr = pfn_to_section_nr(start_pfn); if (!start_nr || online_section_nr(start_nr)) return 0; while (start_nr-- > 0) { if (online_section_nr(start_nr)) return section_nr_to_pfn(start_nr) + PAGES_PER_SECTION; } return 0; } #else static unsigned long skip_offline_sections(unsigned long start_pfn) { return 0; } static unsigned long skip_offline_sections_reverse(unsigned long start_pfn) { return 0; } #endif /* * Compound pages of >= pageblock_order should consistently be skipped until * released. It is always pointless to compact pages of such order (if they are * migratable), and the pageblocks they occupy cannot contain any free pages. */ static bool pageblock_skip_persistent(struct page *page) { if (!PageCompound(page)) return false; page = compound_head(page); if (compound_order(page) >= pageblock_order) return true; return false; } static bool __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source, bool check_target) { struct page *page = pfn_to_online_page(pfn); struct page *block_page; struct page *end_page; unsigned long block_pfn; if (!page) return false; if (zone != page_zone(page)) return false; if (pageblock_skip_persistent(page)) return false; /* * If skip is already cleared do no further checking once the * restart points have been set. */ if (check_source && check_target && !get_pageblock_skip(page)) return true; /* * If clearing skip for the target scanner, do not select a * non-movable pageblock as the starting point. 
*/ if (!check_source && check_target && get_pageblock_migratetype(page) != MIGRATE_MOVABLE) return false; /* Ensure the start of the pageblock or zone is online and valid */ block_pfn = pageblock_start_pfn(pfn); block_pfn = max(block_pfn, zone->zone_start_pfn); block_page = pfn_to_online_page(block_pfn); if (block_page) { page = block_page; pfn = block_pfn; } /* Ensure the end of the pageblock or zone is online and valid */ block_pfn = pageblock_end_pfn(pfn) - 1; block_pfn = min(block_pfn, zone_end_pfn(zone) - 1); end_page = pfn_to_online_page(block_pfn); if (!end_page) return false; /* * Only clear the hint if a sample indicates there is either a * free page or an LRU page in the block. One or other condition * is necessary for the block to be a migration source/target. */ do { if (check_source && PageLRU(page)) { clear_pageblock_skip(page); return true; } if (check_target && PageBuddy(page)) { clear_pageblock_skip(page); return true; } page += (1 << PAGE_ALLOC_COSTLY_ORDER); } while (page <= end_page); return false; } /* * This function is called to clear all cached information on pageblocks that * should be skipped for page isolation when the migrate and free page scanner * meet. */ static void __reset_isolation_suitable(struct zone *zone) { unsigned long migrate_pfn = zone->zone_start_pfn; unsigned long free_pfn = zone_end_pfn(zone) - 1; unsigned long reset_migrate = free_pfn; unsigned long reset_free = migrate_pfn; bool source_set = false; bool free_set = false; /* Only flush if a full compaction finished recently */ if (!zone->compact_blockskip_flush) return; zone->compact_blockskip_flush = false; /* * Walk the zone and update pageblock skip information. Source looks * for PageLRU while target looks for PageBuddy. When the scanner * is found, both PageBuddy and PageLRU are checked as the pageblock * is suitable as both source and target. */ for (; migrate_pfn < free_pfn; migrate_pfn += pageblock_nr_pages, free_pfn -= pageblock_nr_pages) { cond_resched(); /* Update the migrate PFN */ if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) && migrate_pfn < reset_migrate) { source_set = true; reset_migrate = migrate_pfn; zone->compact_init_migrate_pfn = reset_migrate; zone->compact_cached_migrate_pfn[0] = reset_migrate; zone->compact_cached_migrate_pfn[1] = reset_migrate; } /* Update the free PFN */ if (__reset_isolation_pfn(zone, free_pfn, free_set, true) && free_pfn > reset_free) { free_set = true; reset_free = free_pfn; zone->compact_init_free_pfn = reset_free; zone->compact_cached_free_pfn = reset_free; } } /* Leave no distance if no suitable block was reset */ if (reset_migrate >= reset_free) { zone->compact_cached_migrate_pfn[0] = migrate_pfn; zone->compact_cached_migrate_pfn[1] = migrate_pfn; zone->compact_cached_free_pfn = free_pfn; } } void reset_isolation_suitable(pg_data_t *pgdat) { int zoneid; for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { struct zone *zone = &pgdat->node_zones[zoneid]; if (!populated_zone(zone)) continue; __reset_isolation_suitable(zone); } } /* * Sets the pageblock skip bit if it was clear. Note that this is a hint as * locks are not required for read/writers. Returns true if it was already set. 
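* Two racing scanners may therefore both observe the bit clear; the worst case is that a pageblock is scanned redundantly.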
*/ static bool test_and_set_skip(struct compact_control *cc, struct page *page) { bool skip; /* Do not update if skip hint is being ignored */ if (cc->ignore_skip_hint) return false; skip = get_pageblock_skip(page); if (!skip && !cc->no_set_skip_hint) set_pageblock_skip(page); return skip; } static void update_cached_migrate(struct compact_control *cc, unsigned long pfn) { struct zone *zone = cc->zone; /* Set for isolation rather than compaction */ if (cc->no_set_skip_hint) return; pfn = pageblock_end_pfn(pfn); /* Update where async and sync compaction should restart */ if (pfn > zone->compact_cached_migrate_pfn[0]) zone->compact_cached_migrate_pfn[0] = pfn; if (cc->mode != MIGRATE_ASYNC && pfn > zone->compact_cached_migrate_pfn[1]) zone->compact_cached_migrate_pfn[1] = pfn; } /* * If no pages were isolated then mark this pageblock to be skipped in the * future. The information is later cleared by __reset_isolation_suitable(). */ static void update_pageblock_skip(struct compact_control *cc, struct page *page, unsigned long pfn) { struct zone *zone = cc->zone; if (cc->no_set_skip_hint) return; set_pageblock_skip(page); if (pfn < zone->compact_cached_free_pfn) zone->compact_cached_free_pfn = pfn; } #else static inline bool isolation_suitable(struct compact_control *cc, struct page *page) { return true; } static inline bool pageblock_skip_persistent(struct page *page) { return false; } static inline void update_pageblock_skip(struct compact_control *cc, struct page *page, unsigned long pfn) { } static void update_cached_migrate(struct compact_control *cc, unsigned long pfn) { } static bool test_and_set_skip(struct compact_control *cc, struct page *page) { return false; } #endif /* CONFIG_COMPACTION */ /* * Compaction requires the taking of some coarse locks that are potentially * very heavily contended. For async compaction, trylock and record if the * lock is contended. The lock will still be acquired but compaction will * abort when the current block is finished regardless of success rate. * Sync compaction acquires the lock. * * Always returns true which makes it easier to track lock state in callers. */ static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags, struct compact_control *cc) __acquires(lock) { /* Track if the lock is contended in async mode */ if (cc->mode == MIGRATE_ASYNC && !cc->contended) { if (spin_trylock_irqsave(lock, *flags)) return true; cc->contended = true; } spin_lock_irqsave(lock, *flags); return true; } /* * Compaction requires the taking of some coarse locks that are potentially * very heavily contended. The lock should be periodically unlocked to avoid * having disabled IRQs for a long time, even when there is nobody waiting on * the lock. It might also be that allowing the IRQs will result in * need_resched() becoming true. If scheduling is needed, compaction schedules. * Either compaction type will also abort if a fatal signal is pending. * In either case if the lock was locked, it is dropped and not regained. * * Returns true if compaction should abort due to fatal signal pending. * Returns false when compaction can continue. */ static bool compact_unlock_should_abort(spinlock_t *lock, unsigned long flags, bool *locked, struct compact_control *cc) { if (*locked) { spin_unlock_irqrestore(lock, flags); *locked = false; } if (fatal_signal_pending(current)) { cc->contended = true; return true; } cond_resched(); return false; } /* * Isolate free pages onto a private freelist. 
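* Each isolated page keeps its buddy order in page_private() so that split_map_pages() can later break it into order-0 pages.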
If @strict is true, will abort * returning 0 on any invalid PFNs or non-free pages inside of the pageblock * (even though it may still end up isolating some pages). */ static unsigned long isolate_freepages_block(struct compact_control *cc, unsigned long *start_pfn, unsigned long end_pfn, struct list_head *freelist, unsigned int stride, bool strict) { int nr_scanned = 0, total_isolated = 0; struct page *page; unsigned long flags = 0; bool locked = false; unsigned long blockpfn = *start_pfn; unsigned int order; /* Strict mode is for isolation, speed is secondary */ if (strict) stride = 1; page = pfn_to_page(blockpfn); /* Isolate free pages. */ for (; blockpfn < end_pfn; blockpfn += stride, page += stride) { int isolated; /* * Periodically drop the lock (if held) regardless of its * contention, to give chance to IRQs. Abort if fatal signal * pending. */ if (!(blockpfn % COMPACT_CLUSTER_MAX) && compact_unlock_should_abort(&cc->zone->lock, flags, &locked, cc)) break; nr_scanned++; /* * For compound pages such as THP and hugetlbfs, we can save * potentially a lot of iterations if we skip them at once. * The check is racy, but we can consider only valid values * and the only danger is skipping too much. */ if (PageCompound(page)) { const unsigned int order = compound_order(page); if (blockpfn + (1UL << order) <= end_pfn) { blockpfn += (1UL << order) - 1; page += (1UL << order) - 1; nr_scanned += (1UL << order) - 1; } goto isolate_fail; } if (!PageBuddy(page)) goto isolate_fail; /* If we already hold the lock, we can skip some rechecking. */ if (!locked) { locked = compact_lock_irqsave(&cc->zone->lock, &flags, cc); /* Recheck this is a buddy page under lock */ if (!PageBuddy(page)) goto isolate_fail; } /* Found a free page, will break it into order-0 pages */ order = buddy_order(page); isolated = __isolate_free_page(page, order); if (!isolated) break; set_page_private(page, order); nr_scanned += isolated - 1; total_isolated += isolated; cc->nr_freepages += isolated; list_add_tail(&page->lru, freelist); if (!strict && cc->nr_migratepages <= cc->nr_freepages) { blockpfn += isolated; break; } /* Advance to the end of split page */ blockpfn += isolated - 1; page += isolated - 1; continue; isolate_fail: if (strict) break; } if (locked) spin_unlock_irqrestore(&cc->zone->lock, flags); /* * Be careful to not go outside of the pageblock. */ if (unlikely(blockpfn > end_pfn)) blockpfn = end_pfn; trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn, nr_scanned, total_isolated); /* Record how far we have got within the block */ *start_pfn = blockpfn; /* * If strict isolation is requested by CMA then check that all the * pages requested were isolated. If there were any failures, 0 is * returned and CMA will fail. */ if (strict && blockpfn < end_pfn) total_isolated = 0; cc->total_free_scanned += nr_scanned; if (total_isolated) count_compact_events(COMPACTISOLATED, total_isolated); return total_isolated; } /** * isolate_freepages_range() - isolate free pages. * @cc: Compaction control structure. * @start_pfn: The first PFN to start isolating. * @end_pfn: The one-past-last PFN. * * Non-free pages, invalid PFNs, or zone boundaries within the * [start_pfn, end_pfn) range are considered errors, cause function to * undo its actions and return zero. * * Otherwise, function returns one-past-the-last PFN of isolated page * (which may be greater then end_pfn if end fell in a middle of * a free page). 
*/ unsigned long isolate_freepages_range(struct compact_control *cc, unsigned long start_pfn, unsigned long end_pfn) { unsigned long isolated, pfn, block_start_pfn, block_end_pfn; LIST_HEAD(freelist); pfn = start_pfn; block_start_pfn = pageblock_start_pfn(pfn); if (block_start_pfn < cc->zone->zone_start_pfn) block_start_pfn = cc->zone->zone_start_pfn; block_end_pfn = pageblock_end_pfn(pfn); for (; pfn < end_pfn; pfn += isolated, block_start_pfn = block_end_pfn, block_end_pfn += pageblock_nr_pages) { /* Protect pfn from changing by isolate_freepages_block */ unsigned long isolate_start_pfn = pfn; /* * pfn could pass the block_end_pfn if isolated freepage * is more than pageblock order. In this case, we adjust * scanning range to right one. */ if (pfn >= block_end_pfn) { block_start_pfn = pageblock_start_pfn(pfn); block_end_pfn = pageblock_end_pfn(pfn); } block_end_pfn = min(block_end_pfn, end_pfn); if (!pageblock_pfn_to_page(block_start_pfn, block_end_pfn, cc->zone)) break; isolated = isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn, &freelist, 0, true); /* * In strict mode, isolate_freepages_block() returns 0 if * there are any holes in the block (ie. invalid PFNs or * non-free pages). */ if (!isolated) break; /* * If we managed to isolate pages, it is always (1 << n) * * pageblock_nr_pages for some non-negative n. (Max order * page may span two pageblocks). */ } /* __isolate_free_page() does not map the pages */ split_map_pages(&freelist); if (pfn < end_pfn) { /* Loop terminated early, cleanup. */ release_freepages(&freelist); return 0; } /* We don't use freelists for anything. */ return pfn; } /* Similar to reclaim, but different enough that they don't share logic */ static bool too_many_isolated(struct compact_control *cc) { pg_data_t *pgdat = cc->zone->zone_pgdat; bool too_many; unsigned long active, inactive, isolated; inactive = node_page_state(pgdat, NR_INACTIVE_FILE) + node_page_state(pgdat, NR_INACTIVE_ANON); active = node_page_state(pgdat, NR_ACTIVE_FILE) + node_page_state(pgdat, NR_ACTIVE_ANON); isolated = node_page_state(pgdat, NR_ISOLATED_FILE) + node_page_state(pgdat, NR_ISOLATED_ANON); /* * Allow GFP_NOFS to isolate past the limit set for regular * compaction runs. This prevents an ABBA deadlock when other * compactors have already isolated to the limit, but are * blocked on filesystem locks held by the GFP_NOFS thread. */ if (cc->gfp_mask & __GFP_FS) { inactive >>= 3; active >>= 3; } too_many = isolated > (inactive + active) / 2; if (!too_many) wake_throttle_isolated(pgdat); return too_many; } /** * isolate_migratepages_block() - isolate all migrate-able pages within * a single pageblock * @cc: Compaction control structure. * @low_pfn: The first PFN to isolate * @end_pfn: The one-past-the-last PFN to isolate, within same pageblock * @mode: Isolation mode to be used. * * Isolate all pages that can be migrated from the range specified by * [low_pfn, end_pfn). The range is expected to be within same pageblock. * Returns errno, like -EAGAIN or -EINTR in case e.g signal pending or congestion, * -ENOMEM in case we could not allocate a page, or 0. * cc->migrate_pfn will contain the next pfn to scan. * * The pages are isolated on cc->migratepages list (not required to be empty), * and cc->nr_migratepages is updated accordingly. 
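* Callers typically hand this list to migrate_pages() and put back any folios that could not be migrated.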
*/ static int isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, unsigned long end_pfn, isolate_mode_t mode) { pg_data_t *pgdat = cc->zone->zone_pgdat; unsigned long nr_scanned = 0, nr_isolated = 0; struct lruvec *lruvec; unsigned long flags = 0; struct lruvec *locked = NULL; struct folio *folio = NULL; struct page *page = NULL, *valid_page = NULL; struct address_space *mapping; unsigned long start_pfn = low_pfn; bool skip_on_failure = false; unsigned long next_skip_pfn = 0; bool skip_updated = false; int ret = 0; cc->migrate_pfn = low_pfn; /* * Ensure that there are not too many pages isolated from the LRU * list by either parallel reclaimers or compaction. If there are, * delay for some time until fewer pages are isolated */ while (unlikely(too_many_isolated(cc))) { /* stop isolation if there are still pages not migrated */ if (cc->nr_migratepages) return -EAGAIN; /* async migration should just abort */ if (cc->mode == MIGRATE_ASYNC) return -EAGAIN; reclaim_throttle(pgdat, VMSCAN_THROTTLE_ISOLATED); if (fatal_signal_pending(current)) return -EINTR; } cond_resched(); if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) { skip_on_failure = true; next_skip_pfn = block_end_pfn(low_pfn, cc->order); } /* Time to isolate some pages for migration */ for (; low_pfn < end_pfn; low_pfn++) { if (skip_on_failure && low_pfn >= next_skip_pfn) { /* * We have isolated all migration candidates in the * previous order-aligned block, and did not skip it due * to failure. We should migrate the pages now and * hopefully succeed compaction. */ if (nr_isolated) break; /* * We failed to isolate in the previous order-aligned * block. Set the new boundary to the end of the * current block. Note we can't simply increase * next_skip_pfn by 1 << order, as low_pfn might have * been incremented by a higher number due to skipping * a compound or a high-order buddy page in the * previous loop iteration. */ next_skip_pfn = block_end_pfn(low_pfn, cc->order); } /* * Periodically drop the lock (if held) regardless of its * contention, to give chance to IRQs. Abort completely if * a fatal signal is pending. */ if (!(low_pfn % COMPACT_CLUSTER_MAX)) { if (locked) { unlock_page_lruvec_irqrestore(locked, flags); locked = NULL; } if (fatal_signal_pending(current)) { cc->contended = true; ret = -EINTR; goto fatal_pending; } cond_resched(); } nr_scanned++; page = pfn_to_page(low_pfn); /* * Check if the pageblock has already been marked skipped. * Only the first PFN is checked as the caller isolates * COMPACT_CLUSTER_MAX at a time so the second call must * not falsely conclude that the block should be skipped. */ if (!valid_page && (pageblock_aligned(low_pfn) || low_pfn == cc->zone->zone_start_pfn)) { if (!isolation_suitable(cc, page)) { low_pfn = end_pfn; folio = NULL; goto isolate_abort; } valid_page = page; } if (PageHuge(page) && cc->alloc_contig) { if (locked) { unlock_page_lruvec_irqrestore(locked, flags); locked = NULL; } ret = isolate_or_dissolve_huge_page(page, &cc->migratepages); /* * Fail isolation in case isolate_or_dissolve_huge_page() * reports an error. In case of -ENOMEM, abort right away. */ if (ret < 0) { /* Do not report -EBUSY down the chain */ if (ret == -EBUSY) ret = 0; low_pfn += compound_nr(page) - 1; nr_scanned += compound_nr(page) - 1; goto isolate_fail; } if (PageHuge(page)) { /* * Hugepage was successfully isolated and placed * on the cc->migratepages list. 
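* low_pfn is advanced past the tail pages below so they are not scanned again.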
*/ folio = page_folio(page); low_pfn += folio_nr_pages(folio) - 1; goto isolate_success_no_list; } /* * Ok, the hugepage was dissolved. Now these pages are * Buddy and cannot be re-allocated because they are * isolated. Fall-through as the check below handles * Buddy pages. */ } /* * Skip if free. We read page order here without zone lock * which is generally unsafe, but the race window is small and * the worst thing that can happen is that we skip some * potential isolation targets. */ if (PageBuddy(page)) { unsigned long freepage_order = buddy_order_unsafe(page); /* * Without lock, we cannot be sure that what we got is * a valid page order. Consider only values in the * valid order range to prevent low_pfn overflow. */ if (freepage_order > 0 && freepage_order <= MAX_PAGE_ORDER) { low_pfn += (1UL << freepage_order) - 1; nr_scanned += (1UL << freepage_order) - 1; } continue; } /* * Regardless of being on LRU, compound pages such as THP and * hugetlbfs are not to be compacted unless we are attempting * an allocation much larger than the huge page size (eg CMA). * We can potentially save a lot of iterations if we skip them * at once. The check is racy, but we can consider only valid * values and the only danger is skipping too much. */ if (PageCompound(page) && !cc->alloc_contig) { const unsigned int order = compound_order(page); if (likely(order <= MAX_PAGE_ORDER)) { low_pfn += (1UL << order) - 1; nr_scanned += (1UL << order) - 1; } goto isolate_fail; } /* * Check may be lockless but that's ok as we recheck later. * It's possible to migrate LRU and non-lru movable pages. * Skip any other type of page */ if (!PageLRU(page)) { /* * __PageMovable can return false positive so we need * to verify it under page_lock. */ if (unlikely(__PageMovable(page)) && !PageIsolated(page)) { if (locked) { unlock_page_lruvec_irqrestore(locked, flags); locked = NULL; } if (isolate_movable_page(page, mode)) { folio = page_folio(page); goto isolate_success; } } goto isolate_fail; } /* * Be careful not to clear PageLRU until after we're * sure the page is not being freed elsewhere -- the * page release code relies on it. */ folio = folio_get_nontail_page(page); if (unlikely(!folio)) goto isolate_fail; /* * Migration will fail if an anonymous page is pinned in memory, * so avoid taking lru_lock and isolating it unnecessarily in an * admittedly racy check. */ mapping = folio_mapping(folio); if (!mapping && (folio_ref_count(folio) - 1) > folio_mapcount(folio)) goto isolate_fail_put; /* * Only allow to migrate anonymous pages in GFP_NOFS context * because those do not depend on fs locks. */ if (!(cc->gfp_mask & __GFP_FS) && mapping) goto isolate_fail_put; /* Only take pages on LRU: a check now makes later tests safe */ if (!folio_test_lru(folio)) goto isolate_fail_put; /* Compaction might skip unevictable pages but CMA takes them */ if (!(mode & ISOLATE_UNEVICTABLE) && folio_test_unevictable(folio)) goto isolate_fail_put; /* * To minimise LRU disruption, the caller can indicate with * ISOLATE_ASYNC_MIGRATE that it only wants to isolate pages * it will be able to migrate without blocking - clean pages * for the most part. PageWriteback would require blocking. */ if ((mode & ISOLATE_ASYNC_MIGRATE) && folio_test_writeback(folio)) goto isolate_fail_put; if ((mode & ISOLATE_ASYNC_MIGRATE) && folio_test_dirty(folio)) { bool migrate_dirty; /* * Only folios without mappings or that have * a ->migrate_folio callback are possible to * migrate without blocking. 
However, we may * be racing with truncation, which can free * the mapping. Truncation holds the folio lock * until after the folio is removed from the page * cache so holding it ourselves is sufficient. */ if (!folio_trylock(folio)) goto isolate_fail_put; mapping = folio_mapping(folio); migrate_dirty = !mapping || mapping->a_ops->migrate_folio; folio_unlock(folio); if (!migrate_dirty) goto isolate_fail_put; } /* Try isolate the folio */ if (!folio_test_clear_lru(folio)) goto isolate_fail_put; lruvec = folio_lruvec(folio); /* If we already hold the lock, we can skip some rechecking */ if (lruvec != locked) { if (locked) unlock_page_lruvec_irqrestore(locked, flags); compact_lock_irqsave(&lruvec->lru_lock, &flags, cc); locked = lruvec; lruvec_memcg_debug(lruvec, folio); /* * Try get exclusive access under lock. If marked for * skip, the scan is aborted unless the current context * is a rescan to reach the end of the pageblock. */ if (!skip_updated && valid_page) { skip_updated = true; if (test_and_set_skip(cc, valid_page) && !cc->finish_pageblock) { low_pfn = end_pfn; goto isolate_abort; } } /* * folio become large since the non-locked check, * and it's on LRU. */ if (unlikely(folio_test_large(folio) && !cc->alloc_contig)) { low_pfn += folio_nr_pages(folio) - 1; nr_scanned += folio_nr_pages(folio) - 1; folio_set_lru(folio); goto isolate_fail_put; } } /* The folio is taken off the LRU */ if (folio_test_large(folio)) low_pfn += folio_nr_pages(folio) - 1; /* Successfully isolated */ lruvec_del_folio(lruvec, folio); node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio), folio_nr_pages(folio)); isolate_success: list_add(&folio->lru, &cc->migratepages); isolate_success_no_list: cc->nr_migratepages += folio_nr_pages(folio); nr_isolated += folio_nr_pages(folio); nr_scanned += folio_nr_pages(folio) - 1; /* * Avoid isolating too much unless this block is being * fully scanned (e.g. dirty/writeback pages, parallel allocation) * or a lock is contended. For contention, isolate quickly to * potentially remove one source of contention. */ if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX && !cc->finish_pageblock && !cc->contended) { ++low_pfn; break; } continue; isolate_fail_put: /* Avoid potential deadlock in freeing page under lru_lock */ if (locked) { unlock_page_lruvec_irqrestore(locked, flags); locked = NULL; } folio_put(folio); isolate_fail: if (!skip_on_failure && ret != -ENOMEM) continue; /* * We have isolated some pages, but then failed. Release them * instead of migrating, as we cannot form the cc->order buddy * page anyway. */ if (nr_isolated) { if (locked) { unlock_page_lruvec_irqrestore(locked, flags); locked = NULL; } putback_movable_pages(&cc->migratepages); cc->nr_migratepages = 0; nr_isolated = 0; } if (low_pfn < next_skip_pfn) { low_pfn = next_skip_pfn - 1; /* * The check near the loop beginning would have updated * next_skip_pfn too, but this is a bit simpler. */ next_skip_pfn += 1UL << cc->order; } if (ret == -ENOMEM) break; } /* * The PageBuddy() check could have potentially brought us outside * the range to be scanned. */ if (unlikely(low_pfn > end_pfn)) low_pfn = end_pfn; folio = NULL; isolate_abort: if (locked) unlock_page_lruvec_irqrestore(locked, flags); if (folio) { folio_set_lru(folio); folio_put(folio); } /* * Update the cached scanner pfn once the pageblock has been scanned. * Pages will either be migrated in which case there is no point * scanning in the near future or migration failed in which case the * failure reason may persist. 
The block is marked for skipping if * there were no pages isolated in the block or if the block is * rescanned twice in a row. */ if (low_pfn == end_pfn && (!nr_isolated || cc->finish_pageblock)) { if (!cc->no_set_skip_hint && valid_page && !skip_updated) set_pageblock_skip(valid_page); update_cached_migrate(cc, low_pfn); } trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn, nr_scanned, nr_isolated); fatal_pending: cc->total_migrate_scanned += nr_scanned; if (nr_isolated) count_compact_events(COMPACTISOLATED, nr_isolated); cc->migrate_pfn = low_pfn; return ret; } /** * isolate_migratepages_range() - isolate migrate-able pages in a PFN range * @cc: Compaction control structure. * @start_pfn: The first PFN to start isolating. * @end_pfn: The one-past-last PFN. * * Returns -EAGAIN when contented, -EINTR in case of a signal pending, -ENOMEM * in case we could not allocate a page, or 0. */ int isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn, unsigned long end_pfn) { unsigned long pfn, block_start_pfn, block_end_pfn; int ret = 0; /* Scan block by block. First and last block may be incomplete */ pfn = start_pfn; block_start_pfn = pageblock_start_pfn(pfn); if (block_start_pfn < cc->zone->zone_start_pfn) block_start_pfn = cc->zone->zone_start_pfn; block_end_pfn = pageblock_end_pfn(pfn); for (; pfn < end_pfn; pfn = block_end_pfn, block_start_pfn = block_end_pfn, block_end_pfn += pageblock_nr_pages) { block_end_pfn = min(block_end_pfn, end_pfn); if (!pageblock_pfn_to_page(block_start_pfn, block_end_pfn, cc->zone)) continue; ret = isolate_migratepages_block(cc, pfn, block_end_pfn, ISOLATE_UNEVICTABLE); if (ret) break; if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX) break; } return ret; } #endif /* CONFIG_COMPACTION || CONFIG_CMA */ #ifdef CONFIG_COMPACTION static bool suitable_migration_source(struct compact_control *cc, struct page *page) { int block_mt; if (pageblock_skip_persistent(page)) return false; if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction) return true; block_mt = get_pageblock_migratetype(page); if (cc->migratetype == MIGRATE_MOVABLE) return is_migrate_movable(block_mt); else return block_mt == cc->migratetype; } /* Returns true if the page is within a block suitable for migration to */ static bool suitable_migration_target(struct compact_control *cc, struct page *page) { /* If the page is a large free page, then disallow migration */ if (PageBuddy(page)) { /* * We are checking page_order without zone->lock taken. But * the only small danger is that we skip a potentially suitable * pageblock, so it's not worth to check order for valid range. */ if (buddy_order_unsafe(page) >= pageblock_order) return false; } if (cc->ignore_block_suitable) return true; /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */ if (is_migrate_movable(get_pageblock_migratetype(page))) return true; /* Otherwise skip the block */ return false; } static inline unsigned int freelist_scan_limit(struct compact_control *cc) { unsigned short shift = BITS_PER_LONG - 1; return (COMPACT_CLUSTER_MAX >> min(shift, cc->fast_search_fail)) + 1; } /* * Test whether the free scanner has reached the same or lower pageblock than * the migration scanner, and compaction should thus terminate. */ static inline bool compact_scanners_met(struct compact_control *cc) { return (cc->free_pfn >> pageblock_order) <= (cc->migrate_pfn >> pageblock_order); } /* * Used when scanning for a suitable migration target which scans freelists * in reverse. 
Reorders the list such as the unscanned pages are scanned * first on the next iteration of the free scanner */ static void move_freelist_head(struct list_head *freelist, struct page *freepage) { LIST_HEAD(sublist); if (!list_is_first(&freepage->buddy_list, freelist)) { list_cut_before(&sublist, freelist, &freepage->buddy_list); list_splice_tail(&sublist, freelist); } } /* * Similar to move_freelist_head except used by the migration scanner * when scanning forward. It's possible for these list operations to * move against each other if they search the free list exactly in * lockstep. */ static void move_freelist_tail(struct list_head *freelist, struct page *freepage) { LIST_HEAD(sublist); if (!list_is_last(&freepage->buddy_list, freelist)) { list_cut_position(&sublist, freelist, &freepage->buddy_list); list_splice_tail(&sublist, freelist); } } static void fast_isolate_around(struct compact_control *cc, unsigned long pfn) { unsigned long start_pfn, end_pfn; struct page *page; /* Do not search around if there are enough pages already */ if (cc->nr_freepages >= cc->nr_migratepages) return; /* Minimise scanning during async compaction */ if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC) return; /* Pageblock boundaries */ start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn); end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone)); page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone); if (!page) return; isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false); /* Skip this pageblock in the future as it's full or nearly full */ if (start_pfn == end_pfn && !cc->no_set_skip_hint) set_pageblock_skip(page); } /* Search orders in round-robin fashion */ static int next_search_order(struct compact_control *cc, int order) { order--; if (order < 0) order = cc->order - 1; /* Search wrapped around? */ if (order == cc->search_order) { cc->search_order--; if (cc->search_order < 0) cc->search_order = cc->order - 1; return -1; } return order; } static void fast_isolate_freepages(struct compact_control *cc) { unsigned int limit = max(1U, freelist_scan_limit(cc) >> 1); unsigned int nr_scanned = 0, total_isolated = 0; unsigned long low_pfn, min_pfn, highest = 0; unsigned long nr_isolated = 0; unsigned long distance; struct page *page = NULL; bool scan_start = false; int order; /* Full compaction passes in a negative order */ if (cc->order <= 0) return; /* * If starting the scan, use a deeper search and use the highest * PFN found if a suitable one is not found. */ if (cc->free_pfn >= cc->zone->compact_init_free_pfn) { limit = pageblock_nr_pages >> 1; scan_start = true; } /* * Preferred point is in the top quarter of the scan space but take * a pfn from the top half if the search is problematic. 
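* That is, with distance = free_pfn - migrate_pfn, a candidate at or above low_pfn (free_pfn - distance/4) is taken immediately, otherwise the highest pfn seen above min_pfn (free_pfn - distance/2) is used.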
*/ distance = (cc->free_pfn - cc->migrate_pfn); low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2)); min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1)); if (WARN_ON_ONCE(min_pfn > low_pfn)) low_pfn = min_pfn; /* * Search starts from the last successful isolation order or the next * order to search after a previous failure */ cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order); for (order = cc->search_order; !page && order >= 0; order = next_search_order(cc, order)) { struct free_area *area = &cc->zone->free_area[order]; struct list_head *freelist; struct page *freepage; unsigned long flags; unsigned int order_scanned = 0; unsigned long high_pfn = 0; if (!area->nr_free) continue; spin_lock_irqsave(&cc->zone->lock, flags); freelist = &area->free_list[MIGRATE_MOVABLE]; list_for_each_entry_reverse(freepage, freelist, buddy_list) { unsigned long pfn; order_scanned++; nr_scanned++; pfn = page_to_pfn(freepage); if (pfn >= highest) highest = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn); if (pfn >= low_pfn) { cc->fast_search_fail = 0; cc->search_order = order; page = freepage; break; } if (pfn >= min_pfn && pfn > high_pfn) { high_pfn = pfn; /* Shorten the scan if a candidate is found */ limit >>= 1; } if (order_scanned >= limit) break; } /* Use a maximum candidate pfn if a preferred one was not found */ if (!page && high_pfn) { page = pfn_to_page(high_pfn); /* Update freepage for the list reorder below */ freepage = page; } /* Reorder to so a future search skips recent pages */ move_freelist_head(freelist, freepage); /* Isolate the page if available */ if (page) { if (__isolate_free_page(page, order)) { set_page_private(page, order); nr_isolated = 1 << order; nr_scanned += nr_isolated - 1; total_isolated += nr_isolated; cc->nr_freepages += nr_isolated; list_add_tail(&page->lru, &cc->freepages); count_compact_events(COMPACTISOLATED, nr_isolated); } else { /* If isolation fails, abort the search */ order = cc->search_order + 1; page = NULL; } } spin_unlock_irqrestore(&cc->zone->lock, flags); /* Skip fast search if enough freepages isolated */ if (cc->nr_freepages >= cc->nr_migratepages) break; /* * Smaller scan on next order so the total scan is related * to freelist_scan_limit. */ if (order_scanned >= limit) limit = max(1U, limit >> 1); } trace_mm_compaction_fast_isolate_freepages(min_pfn, cc->free_pfn, nr_scanned, total_isolated); if (!page) { cc->fast_search_fail++; if (scan_start) { /* * Use the highest PFN found above min. If one was * not found, be pessimistic for direct compaction * and use the min mark. */ if (highest >= min_pfn) { page = pfn_to_page(highest); cc->free_pfn = highest; } else { if (cc->direct_compaction && pfn_valid(min_pfn)) { page = pageblock_pfn_to_page(min_pfn, min(pageblock_end_pfn(min_pfn), zone_end_pfn(cc->zone)), cc->zone); if (page && !suitable_migration_target(cc, page)) page = NULL; cc->free_pfn = min_pfn; } } } } if (highest && highest >= cc->zone->compact_cached_free_pfn) { highest -= pageblock_nr_pages; cc->zone->compact_cached_free_pfn = highest; } cc->total_free_scanned += nr_scanned; if (!page) return; low_pfn = page_to_pfn(page); fast_isolate_around(cc, low_pfn); } /* * Based on information in the current compact_control, find blocks * suitable for isolating free pages from and then isolate them. 
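* The scanner first tries fast_isolate_freepages() and otherwise walks pageblocks backwards from cc->free_pfn until it either isolates enough pages or meets the migration scanner.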
*/ static void isolate_freepages(struct compact_control *cc) { struct zone *zone = cc->zone; struct page *page; unsigned long block_start_pfn; /* start of current pageblock */ unsigned long isolate_start_pfn; /* exact pfn we start at */ unsigned long block_end_pfn; /* end of current pageblock */ unsigned long low_pfn; /* lowest pfn scanner is able to scan */ struct list_head *freelist = &cc->freepages; unsigned int stride; /* Try a small search of the free lists for a candidate */ fast_isolate_freepages(cc); if (cc->nr_freepages) goto splitmap; /* * Initialise the free scanner. The starting point is where we last * successfully isolated from, zone-cached value, or the end of the * zone when isolating for the first time. For looping we also need * this pfn aligned down to the pageblock boundary, because we do * block_start_pfn -= pageblock_nr_pages in the for loop. * For ending point, take care when isolating in last pageblock of a * zone which ends in the middle of a pageblock. * The low boundary is the end of the pageblock the migration scanner * is using. */ isolate_start_pfn = cc->free_pfn; block_start_pfn = pageblock_start_pfn(isolate_start_pfn); block_end_pfn = min(block_start_pfn + pageblock_nr_pages, zone_end_pfn(zone)); low_pfn = pageblock_end_pfn(cc->migrate_pfn); stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1; /* * Isolate free pages until enough are available to migrate the * pages on cc->migratepages. We stop searching if the migrate * and free page scanners meet or enough free pages are isolated. */ for (; block_start_pfn >= low_pfn; block_end_pfn = block_start_pfn, block_start_pfn -= pageblock_nr_pages, isolate_start_pfn = block_start_pfn) { unsigned long nr_isolated; /* * This can iterate a massively long zone without finding any * suitable migration targets, so periodically check resched. */ if (!(block_start_pfn % (COMPACT_CLUSTER_MAX * pageblock_nr_pages))) cond_resched(); page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn, zone); if (!page) { unsigned long next_pfn; next_pfn = skip_offline_sections_reverse(block_start_pfn); if (next_pfn) block_start_pfn = max(next_pfn, low_pfn); continue; } /* Check the block is suitable for migration */ if (!suitable_migration_target(cc, page)) continue; /* If isolation recently failed, do not retry */ if (!isolation_suitable(cc, page)) continue; /* Found a block suitable for isolating free pages from. */ nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn, freelist, stride, false); /* Update the skip hint if the full pageblock was scanned */ if (isolate_start_pfn == block_end_pfn) update_pageblock_skip(cc, page, block_start_pfn - pageblock_nr_pages); /* Are enough freepages isolated? */ if (cc->nr_freepages >= cc->nr_migratepages) { if (isolate_start_pfn >= block_end_pfn) { /* * Restart at previous pageblock if more * freepages can be isolated next time. */ isolate_start_pfn = block_start_pfn - pageblock_nr_pages; } break; } else if (isolate_start_pfn < block_end_pfn) { /* * If isolation failed early, do not continue * needlessly. */ break; } /* Adjust stride depending on isolation */ if (nr_isolated) { stride = 1; continue; } stride = min_t(unsigned int, COMPACT_CLUSTER_MAX, stride << 1); } /* * Record where the free scanner will restart next time. 
Either we * broke from the loop and set isolate_start_pfn based on the last * call to isolate_freepages_block(), or we met the migration scanner * and the loop terminated due to isolate_start_pfn < low_pfn */ cc->free_pfn = isolate_start_pfn; splitmap: /* __isolate_free_page() does not map the pages */ split_map_pages(freelist); } /* * This is a migrate-callback that "allocates" freepages by taking pages * from the isolated freelists in the block we are migrating to. */ static struct folio *compaction_alloc(struct folio *src, unsigned long data) { struct compact_control *cc = (struct compact_control *)data; struct folio *dst; if (list_empty(&cc->freepages)) { isolate_freepages(cc); if (list_empty(&cc->freepages)) return NULL; } dst = list_entry(cc->freepages.next, struct folio, lru); list_del(&dst->lru); cc->nr_freepages--; return dst; } /* * This is a migrate-callback that "frees" freepages back to the isolated * freelist. All pages on the freelist are from the same zone, so there is no * special handling needed for NUMA. */ static void compaction_free(struct folio *dst, unsigned long data) { struct compact_control *cc = (struct compact_control *)data; list_add(&dst->lru, &cc->freepages); cc->nr_freepages++; } /* possible outcome of isolate_migratepages */ typedef enum { ISOLATE_ABORT, /* Abort compaction now */ ISOLATE_NONE, /* No pages isolated, continue scanning */ ISOLATE_SUCCESS, /* Pages isolated, migrate */ } isolate_migrate_t; /* * Allow userspace to control policy on scanning the unevictable LRU for * compactable pages. */ static int sysctl_compact_unevictable_allowed __read_mostly = CONFIG_COMPACT_UNEVICTABLE_DEFAULT; /* * Tunable for proactive compaction. It determines how * aggressively the kernel should compact memory in the * background. It takes values in the range [0, 100]. */ static unsigned int __read_mostly sysctl_compaction_proactiveness = 20; static int sysctl_extfrag_threshold = 500; static int __read_mostly sysctl_compact_memory; static inline void update_fast_start_pfn(struct compact_control *cc, unsigned long pfn) { if (cc->fast_start_pfn == ULONG_MAX) return; if (!cc->fast_start_pfn) cc->fast_start_pfn = pfn; cc->fast_start_pfn = min(cc->fast_start_pfn, pfn); } static inline unsigned long reinit_migrate_pfn(struct compact_control *cc) { if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX) return cc->migrate_pfn; cc->migrate_pfn = cc->fast_start_pfn; cc->fast_start_pfn = ULONG_MAX; return cc->migrate_pfn; } /* * Briefly search the free lists for a migration source that already has * some free pages to reduce the number of pages that need migration * before a pageblock is free. */ static unsigned long fast_find_migrateblock(struct compact_control *cc) { unsigned int limit = freelist_scan_limit(cc); unsigned int nr_scanned = 0; unsigned long distance; unsigned long pfn = cc->migrate_pfn; unsigned long high_pfn; int order; bool found_block = false; /* Skip hints are relied on to avoid repeats on the fast search */ if (cc->ignore_skip_hint) return pfn; /* * If the pageblock should be finished then do not select a different * pageblock. */ if (cc->finish_pageblock) return pfn; /* * If the migrate_pfn is not at the start of a zone or the start * of a pageblock then assume this is a continuation of a previous * scan restarted due to COMPACT_CLUSTER_MAX. 
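* (COMPACT_CLUSTER_MAX is the per-call isolation batch size, normally SWAP_CLUSTER_MAX == 32 pages.)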
*/ if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn)) return pfn; /* * For smaller orders, just linearly scan as the number of pages * to migrate should be relatively small and does not necessarily * justify freeing up a large block for a small allocation. */ if (cc->order <= PAGE_ALLOC_COSTLY_ORDER) return pfn; /* * Only allow kcompactd and direct requests for movable pages to * quickly clear out a MOVABLE pageblock for allocation. This * reduces the risk that a large movable pageblock is freed for * an unmovable/reclaimable small allocation. */ if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE) return pfn; /* * When starting the migration scanner, pick any pageblock within the * first half of the search space. Otherwise try and pick a pageblock * within the first eighth to reduce the chances that a migration * target later becomes a source. */ distance = (cc->free_pfn - cc->migrate_pfn) >> 1; if (cc->migrate_pfn != cc->zone->zone_start_pfn) distance >>= 2; high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance); for (order = cc->order - 1; order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit; order--) { struct free_area *area = &cc->zone->free_area[order]; struct list_head *freelist; unsigned long flags; struct page *freepage; if (!area->nr_free) continue; spin_lock_irqsave(&cc->zone->lock, flags); freelist = &area->free_list[MIGRATE_MOVABLE]; list_for_each_entry(freepage, freelist, buddy_list) { unsigned long free_pfn; if (nr_scanned++ >= limit) { move_freelist_tail(freelist, freepage); break; } free_pfn = page_to_pfn(freepage); if (free_pfn < high_pfn) { /* * Avoid if skipped recently. Ideally it would * move to the tail but even safe iteration of * the list assumes an entry is deleted, not * reordered. */ if (get_pageblock_skip(freepage)) continue; /* Reorder to so a future search skips recent pages */ move_freelist_tail(freelist, freepage); update_fast_start_pfn(cc, free_pfn); pfn = pageblock_start_pfn(free_pfn); if (pfn < cc->zone->zone_start_pfn) pfn = cc->zone->zone_start_pfn; cc->fast_search_fail = 0; found_block = true; break; } } spin_unlock_irqrestore(&cc->zone->lock, flags); } cc->total_migrate_scanned += nr_scanned; /* * If fast scanning failed then use a cached entry for a page block * that had free pages as the basis for starting a linear scan. */ if (!found_block) { cc->fast_search_fail++; pfn = reinit_migrate_pfn(cc); } return pfn; } /* * Isolate all pages that can be migrated from the first suitable block, * starting at the block pointed to by the migrate scanner pfn within * compact_control. */ static isolate_migrate_t isolate_migratepages(struct compact_control *cc) { unsigned long block_start_pfn; unsigned long block_end_pfn; unsigned long low_pfn; struct page *page; const isolate_mode_t isolate_mode = (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) | (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0); bool fast_find_block; /* * Start at where we last stopped, or beginning of the zone as * initialized by compact_zone(). The first failure will use * the lowest PFN as the starting point for linear scanning. */ low_pfn = fast_find_migrateblock(cc); block_start_pfn = pageblock_start_pfn(low_pfn); if (block_start_pfn < cc->zone->zone_start_pfn) block_start_pfn = cc->zone->zone_start_pfn; /* * fast_find_migrateblock() has already ensured the pageblock is not * set with a skipped flag, so to avoid the isolation_suitable check * below again, check whether the fast search was successful. 
*/ fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail; /* Only scan within a pageblock boundary */ block_end_pfn = pageblock_end_pfn(low_pfn); /* * Iterate over whole pageblocks until we find the first suitable. * Do not cross the free scanner. */ for (; block_end_pfn <= cc->free_pfn; fast_find_block = false, cc->migrate_pfn = low_pfn = block_end_pfn, block_start_pfn = block_end_pfn, block_end_pfn += pageblock_nr_pages) { /* * This can potentially iterate a massively long zone with * many pageblocks unsuitable, so periodically check if we * need to schedule. */ if (!(low_pfn % (COMPACT_CLUSTER_MAX * pageblock_nr_pages))) cond_resched(); page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn, cc->zone); if (!page) { unsigned long next_pfn; next_pfn = skip_offline_sections(block_start_pfn); if (next_pfn) block_end_pfn = min(next_pfn, cc->free_pfn); continue; } /* * If isolation recently failed, do not retry. Only check the * pageblock once. COMPACT_CLUSTER_MAX causes a pageblock * to be visited multiple times. Assume skip was checked * before making it "skip" so other compaction instances do * not scan the same block. */ if ((pageblock_aligned(low_pfn) || low_pfn == cc->zone->zone_start_pfn) && !fast_find_block && !isolation_suitable(cc, page)) continue; /* * For async direct compaction, only scan the pageblocks of the * same migratetype without huge pages. Async direct compaction * is optimistic to see if the minimum amount of work satisfies * the allocation. The cached PFN is updated as it's possible * that all remaining blocks between source and target are * unsuitable and the compaction scanners fail to meet. */ if (!suitable_migration_source(cc, page)) { update_cached_migrate(cc, block_end_pfn); continue; } /* Perform the isolation */ if (isolate_migratepages_block(cc, low_pfn, block_end_pfn, isolate_mode)) return ISOLATE_ABORT; /* * Either we isolated something and proceed with migration. Or * we failed and compact_zone should decide if we should * continue or not. */ break; } return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE; } /* * order == -1 is expected when compacting proactively via * 1. /proc/sys/vm/compact_memory * 2. /sys/devices/system/node/nodex/compact * 3. /proc/sys/vm/compaction_proactiveness */ static inline bool is_via_compact_memory(int order) { return order == -1; } /* * Determine whether kswapd is (or recently was!) running on this node. * * pgdat_kswapd_lock() pins pgdat->kswapd, so a concurrent kswapd_stop() can't * zero it. */ static bool kswapd_is_running(pg_data_t *pgdat) { bool running; pgdat_kswapd_lock(pgdat); running = pgdat->kswapd && task_is_running(pgdat->kswapd); pgdat_kswapd_unlock(pgdat); return running; } /* * A zone's fragmentation score is the external fragmentation wrt to the * COMPACTION_HPAGE_ORDER. It returns a value in the range [0, 100]. */ static unsigned int fragmentation_score_zone(struct zone *zone) { return extfrag_for_order(zone, COMPACTION_HPAGE_ORDER); } /* * A weighted zone's fragmentation score is the external fragmentation * wrt to the COMPACTION_HPAGE_ORDER scaled by the zone's size. It * returns a value in the range [0, 100]. * * The scaling factor ensures that proactive compaction focuses on larger * zones like ZONE_NORMAL, rather than smaller, specialized zones like * ZONE_DMA32. For smaller zones, the score value remains close to zero, * and thus never exceeds the high threshold for proactive compaction. 
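* For example, a zone holding an eighth of the node's present pages contributes at most about 100/8 ~= 12 points to the node score, even when it is maximally fragmented.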
*/ static unsigned int fragmentation_score_zone_weighted(struct zone *zone) { unsigned long score; score = zone->present_pages * fragmentation_score_zone(zone); return div64_ul(score, zone->zone_pgdat->node_present_pages + 1); } /* * The per-node proactive (background) compaction process is started by its * corresponding kcompactd thread when the node's fragmentation score * exceeds the high threshold. The compaction process remains active till * the node's score falls below the low threshold, or one of the back-off * conditions is met. */ static unsigned int fragmentation_score_node(pg_data_t *pgdat) { unsigned int score = 0; int zoneid; for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { struct zone *zone; zone = &pgdat->node_zones[zoneid]; if (!populated_zone(zone)) continue; score += fragmentation_score_zone_weighted(zone); } return score; } static unsigned int fragmentation_score_wmark(bool low) { unsigned int wmark_low; /* * Cap the low watermark to avoid excessive compaction * activity in case a user sets the proactiveness tunable * close to 100 (maximum). */ wmark_low = max(100U - sysctl_compaction_proactiveness, 5U); return low ? wmark_low : min(wmark_low + 10, 100U); } static bool should_proactive_compact_node(pg_data_t *pgdat) { int wmark_high; if (!sysctl_compaction_proactiveness || kswapd_is_running(pgdat)) return false; wmark_high = fragmentation_score_wmark(false); return fragmentation_score_node(pgdat) > wmark_high; } static enum compact_result __compact_finished(struct compact_control *cc) { unsigned int order; const int migratetype = cc->migratetype; int ret; /* Compaction run completes if the migrate and free scanner meet */ if (compact_scanners_met(cc)) { /* Let the next compaction start anew. */ reset_cached_positions(cc->zone); /* * Mark that the PG_migrate_skip information should be cleared * by kswapd when it goes to sleep. kcompactd does not set the * flag itself as the decision to be clear should be directly * based on an allocation request. */ if (cc->direct_compaction) cc->zone->compact_blockskip_flush = true; if (cc->whole_zone) return COMPACT_COMPLETE; else return COMPACT_PARTIAL_SKIPPED; } if (cc->proactive_compaction) { int score, wmark_low; pg_data_t *pgdat; pgdat = cc->zone->zone_pgdat; if (kswapd_is_running(pgdat)) return COMPACT_PARTIAL_SKIPPED; score = fragmentation_score_zone(cc->zone); wmark_low = fragmentation_score_wmark(true); if (score > wmark_low) ret = COMPACT_CONTINUE; else ret = COMPACT_SUCCESS; goto out; } if (is_via_compact_memory(cc->order)) return COMPACT_CONTINUE; /* * Always finish scanning a pageblock to reduce the possibility of * fallbacks in the future. This is particularly important when * migration source is unmovable/reclaimable but it's not worth * special casing. */ if (!pageblock_aligned(cc->migrate_pfn)) return COMPACT_CONTINUE; /* Direct compactor: Is a suitable page free? */ ret = COMPACT_NO_SUITABLE_PAGE; for (order = cc->order; order < NR_PAGE_ORDERS; order++) { struct free_area *area = &cc->zone->free_area[order]; bool can_steal; /* Job done if page is free of the right migratetype */ if (!free_area_empty(area, migratetype)) return COMPACT_SUCCESS; #ifdef CONFIG_CMA /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */ if (migratetype == MIGRATE_MOVABLE && !free_area_empty(area, MIGRATE_CMA)) return COMPACT_SUCCESS; #endif /* * Job done if allocation would steal freepages from * other migratetype buddy lists. 
*/ if (find_suitable_fallback(area, order, migratetype, true, &can_steal) != -1) /* * Movable pages are OK in any pageblock. If we are * stealing for a non-movable allocation, make sure * we finish compacting the current pageblock first * (which is assured by the above migrate_pfn align * check) so it is as free as possible and we won't * have to steal another one soon. */ return COMPACT_SUCCESS; } out: if (cc->contended || fatal_signal_pending(current)) ret = COMPACT_CONTENDED; return ret; } static enum compact_result compact_finished(struct compact_control *cc) { int ret; ret = __compact_finished(cc); trace_mm_compaction_finished(cc->zone, cc->order, ret); if (ret == COMPACT_NO_SUITABLE_PAGE) ret = COMPACT_CONTINUE; return ret; } static bool __compaction_suitable(struct zone *zone, int order, int highest_zoneidx, unsigned long wmark_target) { unsigned long watermark; /* * Watermarks for order-0 must be met for compaction to be able to * isolate free pages for migration targets. This means that the * watermark and alloc_flags have to match, or be more pessimistic than * the check in __isolate_free_page(). We don't use the direct * compactor's alloc_flags, as they are not relevant for freepage * isolation. We however do use the direct compactor's highest_zoneidx * to skip over zones where lowmem reserves would prevent allocation * even if compaction succeeds. * For costly orders, we require low watermark instead of min for * compaction to proceed to increase its chances. * ALLOC_CMA is used, as pages in CMA pageblocks are considered * suitable migration targets */ watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? low_wmark_pages(zone) : min_wmark_pages(zone); watermark += compact_gap(order); return __zone_watermark_ok(zone, 0, watermark, highest_zoneidx, ALLOC_CMA, wmark_target); } /* * compaction_suitable: Is this suitable to run compaction on this zone now? */ bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx) { enum compact_result compact_result; bool suitable; suitable = __compaction_suitable(zone, order, highest_zoneidx, zone_page_state(zone, NR_FREE_PAGES)); /* * fragmentation index determines if allocation failures are due to * low memory or external fragmentation * * index of -1000 would imply allocations might succeed depending on * watermarks, but we already failed the high-order watermark check * index towards 0 implies failure is due to lack of memory * index towards 1000 implies failure is due to fragmentation * * Only compact if a failure would be due to fragmentation. Also * ignore fragindex for non-costly orders where the alternative to * a successful reclaim/compaction is OOM. Fragindex and the * vm.extfrag_threshold sysctl is meant as a heuristic to prevent * excessive compaction for costly orders, but it should not be at the * expense of system stability. */ if (suitable) { compact_result = COMPACT_CONTINUE; if (order > PAGE_ALLOC_COSTLY_ORDER) { int fragindex = fragmentation_index(zone, order); if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) { suitable = false; compact_result = COMPACT_NOT_SUITABLE_ZONE; } } } else { compact_result = COMPACT_SKIPPED; } trace_mm_compaction_suitable(zone, order, compact_result); return suitable; } bool compaction_zonelist_suitable(struct alloc_context *ac, int order, int alloc_flags) { struct zone *zone; struct zoneref *z; /* * Make sure at least one zone would pass __compaction_suitable if we continue * retrying the reclaim. 
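* The estimate used below is deliberately conservative: only a 1/order fraction of the reclaimable pages is assumed to become free, on top of the pages that are free already.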
*/ for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->highest_zoneidx, ac->nodemask) { unsigned long available; /* * Do not consider all the reclaimable memory because we do not * want to trash just for a single high order allocation which * is even not guaranteed to appear even if __compaction_suitable * is happy about the watermark check. */ available = zone_reclaimable_pages(zone) / order; available += zone_page_state_snapshot(zone, NR_FREE_PAGES); if (__compaction_suitable(zone, order, ac->highest_zoneidx, available)) return true; } return false; } /* * Should we do compaction for target allocation order. * Return COMPACT_SUCCESS if allocation for target order can be already * satisfied * Return COMPACT_SKIPPED if compaction for target order is likely to fail * Return COMPACT_CONTINUE if compaction for target order should be ran */ static enum compact_result compaction_suit_allocation_order(struct zone *zone, unsigned int order, int highest_zoneidx, unsigned int alloc_flags) { unsigned long watermark; watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); if (zone_watermark_ok(zone, order, watermark, highest_zoneidx, alloc_flags)) return COMPACT_SUCCESS; if (!compaction_suitable(zone, order, highest_zoneidx)) return COMPACT_SKIPPED; return COMPACT_CONTINUE; } static enum compact_result compact_zone(struct compact_control *cc, struct capture_control *capc) { enum compact_result ret; unsigned long start_pfn = cc->zone->zone_start_pfn; unsigned long end_pfn = zone_end_pfn(cc->zone); unsigned long last_migrated_pfn; const bool sync = cc->mode != MIGRATE_ASYNC; bool update_cached; unsigned int nr_succeeded = 0; /* * These counters track activities during zone compaction. Initialize * them before compacting a new zone. */ cc->total_migrate_scanned = 0; cc->total_free_scanned = 0; cc->nr_migratepages = 0; cc->nr_freepages = 0; INIT_LIST_HEAD(&cc->freepages); INIT_LIST_HEAD(&cc->migratepages); cc->migratetype = gfp_migratetype(cc->gfp_mask); if (!is_via_compact_memory(cc->order)) { ret = compaction_suit_allocation_order(cc->zone, cc->order, cc->highest_zoneidx, cc->alloc_flags); if (ret != COMPACT_CONTINUE) return ret; } /* * Clear pageblock skip if there were failures recently and compaction * is about to be retried after being deferred. */ if (compaction_restarting(cc->zone, cc->order)) __reset_isolation_suitable(cc->zone); /* * Setup to move all movable pages to the end of the zone. Used cached * information on where the scanners should start (unless we explicitly * want to compact the whole zone), but check that it is initialised * by ensuring the values are within zone boundaries. */ cc->fast_start_pfn = 0; if (cc->whole_zone) { cc->migrate_pfn = start_pfn; cc->free_pfn = pageblock_start_pfn(end_pfn - 1); } else { cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync]; cc->free_pfn = cc->zone->compact_cached_free_pfn; if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) { cc->free_pfn = pageblock_start_pfn(end_pfn - 1); cc->zone->compact_cached_free_pfn = cc->free_pfn; } if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) { cc->migrate_pfn = start_pfn; cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; } if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn) cc->whole_zone = true; } last_migrated_pfn = 0; /* * Migrate has separate cached PFNs for ASYNC and SYNC* migration on * the basis that some migrations will fail in ASYNC mode. 
However, * if the cached PFNs match and pageblocks are skipped due to having * no isolation candidates, then the sync state does not matter. * Until a pageblock with isolation candidates is found, keep the * cached PFNs in sync to avoid revisiting the same blocks. */ update_cached = !sync && cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1]; trace_mm_compaction_begin(cc, start_pfn, end_pfn, sync); /* lru_add_drain_all could be expensive with involving other CPUs */ lru_add_drain(); while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) { int err; unsigned long iteration_start_pfn = cc->migrate_pfn; /* * Avoid multiple rescans of the same pageblock which can * happen if a page cannot be isolated (dirty/writeback in * async mode) or if the migrated pages are being allocated * before the pageblock is cleared. The first rescan will * capture the entire pageblock for migration. If it fails, * it'll be marked skip and scanning will proceed as normal. */ cc->finish_pageblock = false; if (pageblock_start_pfn(last_migrated_pfn) == pageblock_start_pfn(iteration_start_pfn)) { cc->finish_pageblock = true; } rescan: switch (isolate_migratepages(cc)) { case ISOLATE_ABORT: ret = COMPACT_CONTENDED; putback_movable_pages(&cc->migratepages); cc->nr_migratepages = 0; goto out; case ISOLATE_NONE: if (update_cached) { cc->zone->compact_cached_migrate_pfn[1] = cc->zone->compact_cached_migrate_pfn[0]; } /* * We haven't isolated and migrated anything, but * there might still be unflushed migrations from * previous cc->order aligned block. */ goto check_drain; case ISOLATE_SUCCESS: update_cached = false; last_migrated_pfn = max(cc->zone->zone_start_pfn, pageblock_start_pfn(cc->migrate_pfn - 1)); } err = migrate_pages(&cc->migratepages, compaction_alloc, compaction_free, (unsigned long)cc, cc->mode, MR_COMPACTION, &nr_succeeded); trace_mm_compaction_migratepages(cc, nr_succeeded); /* All pages were either migrated or will be released */ cc->nr_migratepages = 0; if (err) { putback_movable_pages(&cc->migratepages); /* * migrate_pages() may return -ENOMEM when scanners meet * and we want compact_finished() to detect it */ if (err == -ENOMEM && !compact_scanners_met(cc)) { ret = COMPACT_CONTENDED; goto out; } /* * If an ASYNC or SYNC_LIGHT fails to migrate a page * within the pageblock_order-aligned block and * fast_find_migrateblock may be used then scan the * remainder of the pageblock. This will mark the * pageblock "skip" to avoid rescanning in the near * future. This will isolate more pages than necessary * for the request but avoid loops due to * fast_find_migrateblock revisiting blocks that were * recently partially scanned. */ if (!pageblock_aligned(cc->migrate_pfn) && !cc->ignore_skip_hint && !cc->finish_pageblock && (cc->mode < MIGRATE_SYNC)) { cc->finish_pageblock = true; /* * Draining pcplists does not help THP if * any page failed to migrate. Even after * drain, the pageblock will not be free. */ if (cc->order == COMPACTION_HPAGE_ORDER) last_migrated_pfn = 0; goto rescan; } } /* Stop if a page has been captured */ if (capc && capc->page) { ret = COMPACT_SUCCESS; break; } check_drain: /* * Has the migration scanner moved away from the previous * cc->order aligned block where we migrated from? If yes, * flush the pages that were freed, so that they can merge and * compact_finished() can detect immediately if allocation * would succeed. 
*/ if (cc->order > 0 && last_migrated_pfn) { unsigned long current_block_start = block_start_pfn(cc->migrate_pfn, cc->order); if (last_migrated_pfn < current_block_start) { lru_add_drain_cpu_zone(cc->zone); /* No more flushing until we migrate again */ last_migrated_pfn = 0; } } } out: /* * Release free pages and update where the free scanner should restart, * so we don't leave any returned pages behind in the next attempt. */ if (cc->nr_freepages > 0) { unsigned long free_pfn = release_freepages(&cc->freepages); cc->nr_freepages = 0; VM_BUG_ON(free_pfn == 0); /* The cached pfn is always the first in a pageblock */ free_pfn = pageblock_start_pfn(free_pfn); /* * Only go back, not forward. The cached pfn might have been * already reset to zone end in compact_finished() */ if (free_pfn > cc->zone->compact_cached_free_pfn) cc->zone->compact_cached_free_pfn = free_pfn; } count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned); count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned); trace_mm_compaction_end(cc, start_pfn, end_pfn, sync, ret); VM_BUG_ON(!list_empty(&cc->freepages)); VM_BUG_ON(!list_empty(&cc->migratepages)); return ret; } static enum compact_result compact_zone_order(struct zone *zone, int order, gfp_t gfp_mask, enum compact_priority prio, unsigned int alloc_flags, int highest_zoneidx, struct page **capture) { enum compact_result ret; struct compact_control cc = { .order = order, .search_order = order, .gfp_mask = gfp_mask, .zone = zone, .mode = (prio == COMPACT_PRIO_ASYNC) ? MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT, .alloc_flags = alloc_flags, .highest_zoneidx = highest_zoneidx, .direct_compaction = true, .whole_zone = (prio == MIN_COMPACT_PRIORITY), .ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY), .ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY) }; struct capture_control capc = { .cc = &cc, .page = NULL, }; /* * Make sure the structs are really initialized before we expose the * capture control, in case we are interrupted and the interrupt handler * frees a page. */ barrier(); WRITE_ONCE(current->capture_control, &capc); ret = compact_zone(&cc, &capc); /* * Make sure we hide capture control first before we read the captured * page pointer, otherwise an interrupt could free and capture a page * and we would leak it. */ WRITE_ONCE(current->capture_control, NULL); *capture = READ_ONCE(capc.page); /* * Technically, it is also possible that compaction is skipped but * the page is still captured out of luck(IRQ came and freed the page). * Returning COMPACT_SUCCESS in such cases helps in properly accounting * the COMPACT[STALL|FAIL] when compaction is skipped. */ if (*capture) ret = COMPACT_SUCCESS; return ret; } /** * try_to_compact_pages - Direct compact to satisfy a high-order allocation * @gfp_mask: The GFP mask of the current allocation * @order: The order of the current allocation * @alloc_flags: The allocation flags of the current allocation * @ac: The context of current allocation * @prio: Determines how hard direct compaction should try to succeed * @capture: Pointer to free page created by compaction will be stored here * * This is the main entry point for direct page compaction. 
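* It is normally reached from the page allocator slow path (via __alloc_pages_direct_compact()) after a high-order allocation attempt has failed.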
*/ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, enum compact_priority prio, struct page **capture) { int may_perform_io = (__force int)(gfp_mask & __GFP_IO); struct zoneref *z; struct zone *zone; enum compact_result rc = COMPACT_SKIPPED; /* * Check if the GFP flags allow compaction - GFP_NOIO is really * tricky context because the migration might require IO */ if (!may_perform_io) return COMPACT_SKIPPED; trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio); /* Compact each zone in the list */ for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->highest_zoneidx, ac->nodemask) { enum compact_result status; if (prio > MIN_COMPACT_PRIORITY && compaction_deferred(zone, order)) { rc = max_t(enum compact_result, COMPACT_DEFERRED, rc); continue; } status = compact_zone_order(zone, order, gfp_mask, prio, alloc_flags, ac->highest_zoneidx, capture); rc = max(status, rc); /* The allocation should succeed, stop compacting */ if (status == COMPACT_SUCCESS) { /* * We think the allocation will succeed in this zone, * but it is not certain, hence the false. The caller * will repeat this with true if allocation indeed * succeeds in this zone. */ compaction_defer_reset(zone, order, false); break; } if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE || status == COMPACT_PARTIAL_SKIPPED)) /* * We think that allocation won't succeed in this zone * so we defer compaction there. If it ends up * succeeding after all, it will be reset. */ defer_compaction(zone, order); /* * We might have stopped compacting due to need_resched() in * async compaction, or due to a fatal signal detected. In that * case do not try further zones */ if ((prio == COMPACT_PRIO_ASYNC && need_resched()) || fatal_signal_pending(current)) break; } return rc; } /* * Compact all zones within a node till each zone's fragmentation score * reaches within proactive compaction thresholds (as determined by the * proactiveness tunable). * * It is possible that the function returns before reaching score targets * due to various back-off conditions, such as, contention on per-node or * per-zone locks. 
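* For example, with a proactiveness of 20 the thresholds computed by fragmentation_score_wmark() are 80 (low) and 90 (high): proactive work starts once the node score exceeds 90, and each zone is then compacted until its score drops to 80 or a back-off (such as kswapd running) triggers.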
*/ static void proactive_compact_node(pg_data_t *pgdat) { int zoneid; struct zone *zone; struct compact_control cc = { .order = -1, .mode = MIGRATE_SYNC_LIGHT, .ignore_skip_hint = true, .whole_zone = true, .gfp_mask = GFP_KERNEL, .proactive_compaction = true, }; for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { zone = &pgdat->node_zones[zoneid]; if (!populated_zone(zone)) continue; cc.zone = zone; compact_zone(&cc, NULL); count_compact_events(KCOMPACTD_MIGRATE_SCANNED, cc.total_migrate_scanned); count_compact_events(KCOMPACTD_FREE_SCANNED, cc.total_free_scanned); } } /* Compact all zones within a node */ static void compact_node(int nid) { pg_data_t *pgdat = NODE_DATA(nid); int zoneid; struct zone *zone; struct compact_control cc = { .order = -1, .mode = MIGRATE_SYNC, .ignore_skip_hint = true, .whole_zone = true, .gfp_mask = GFP_KERNEL, }; for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { zone = &pgdat->node_zones[zoneid]; if (!populated_zone(zone)) continue; cc.zone = zone; compact_zone(&cc, NULL); } } /* Compact all nodes in the system */ static void compact_nodes(void) { int nid; /* Flush pending updates to the LRU lists */ lru_add_drain_all(); for_each_online_node(nid) compact_node(nid); } static int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { int rc, nid; rc = proc_dointvec_minmax(table, write, buffer, length, ppos); if (rc) return rc; if (write && sysctl_compaction_proactiveness) { for_each_online_node(nid) { pg_data_t *pgdat = NODE_DATA(nid); if (pgdat->proactive_compact_trigger) continue; pgdat->proactive_compact_trigger = true; trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, -1, pgdat->nr_zones - 1); wake_up_interruptible(&pgdat->kcompactd_wait); } } return 0; } /* * This is the entry point for compacting all nodes via * /proc/sys/vm/compact_memory */ static int sysctl_compaction_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { int ret; ret = proc_dointvec(table, write, buffer, length, ppos); if (ret) return ret; if (sysctl_compact_memory != 1) return -EINVAL; if (write) compact_nodes(); return 0; } #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) static ssize_t compact_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int nid = dev->id; if (nid >= 0 && nid < nr_node_ids && node_online(nid)) { /* Flush pending updates to the LRU lists */ lru_add_drain_all(); compact_node(nid); } return count; } static DEVICE_ATTR_WO(compact); int compaction_register_node(struct node *node) { return device_create_file(&node->dev, &dev_attr_compact); } void compaction_unregister_node(struct node *node) { device_remove_file(&node->dev, &dev_attr_compact); } #endif /* CONFIG_SYSFS && CONFIG_NUMA */ static inline bool kcompactd_work_requested(pg_data_t *pgdat) { return pgdat->kcompactd_max_order > 0 || kthread_should_stop() || pgdat->proactive_compact_trigger; } static bool kcompactd_node_suitable(pg_data_t *pgdat) { int zoneid; struct zone *zone; enum zone_type highest_zoneidx = pgdat->kcompactd_highest_zoneidx; enum compact_result ret; for (zoneid = 0; zoneid <= highest_zoneidx; zoneid++) { zone = &pgdat->node_zones[zoneid]; if (!populated_zone(zone)) continue; ret = compaction_suit_allocation_order(zone, pgdat->kcompactd_max_order, highest_zoneidx, ALLOC_WMARK_MIN); if (ret == COMPACT_CONTINUE) return true; } return false; } static void kcompactd_do_work(pg_data_t *pgdat) { /* * With no special task, compact all zones so that a page 
of requested * order is allocatable. */ int zoneid; struct zone *zone; struct compact_control cc = { .order = pgdat->kcompactd_max_order, .search_order = pgdat->kcompactd_max_order, .highest_zoneidx = pgdat->kcompactd_highest_zoneidx, .mode = MIGRATE_SYNC_LIGHT, .ignore_skip_hint = false, .gfp_mask = GFP_KERNEL, }; enum compact_result ret; trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, cc.highest_zoneidx); count_compact_event(KCOMPACTD_WAKE); for (zoneid = 0; zoneid <= cc.highest_zoneidx; zoneid++) { int status; zone = &pgdat->node_zones[zoneid]; if (!populated_zone(zone)) continue; if (compaction_deferred(zone, cc.order)) continue; ret = compaction_suit_allocation_order(zone, cc.order, zoneid, ALLOC_WMARK_MIN); if (ret != COMPACT_CONTINUE) continue; if (kthread_should_stop()) return; cc.zone = zone; status = compact_zone(&cc, NULL); if (status == COMPACT_SUCCESS) { compaction_defer_reset(zone, cc.order, false); } else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) { /* * Buddy pages may become stranded on pcps that could * otherwise coalesce on the zone's free area for * order >= cc.order. This is ratelimited by the * upcoming deferral. */ drain_all_pages(zone); /* * We use sync migration mode here, so we defer like * sync direct compaction does. */ defer_compaction(zone, cc.order); } count_compact_events(KCOMPACTD_MIGRATE_SCANNED, cc.total_migrate_scanned); count_compact_events(KCOMPACTD_FREE_SCANNED, cc.total_free_scanned); } /* * Regardless of success, we are done until woken up next. But remember * the requested order/highest_zoneidx in case it was higher/tighter * than our current ones */ if (pgdat->kcompactd_max_order <= cc.order) pgdat->kcompactd_max_order = 0; if (pgdat->kcompactd_highest_zoneidx >= cc.highest_zoneidx) pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1; } void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx) { if (!order) return; if (pgdat->kcompactd_max_order < order) pgdat->kcompactd_max_order = order; if (pgdat->kcompactd_highest_zoneidx > highest_zoneidx) pgdat->kcompactd_highest_zoneidx = highest_zoneidx; /* * Pairs with implicit barrier in wait_event_freezable() * such that wakeups are not missed. */ if (!wq_has_sleeper(&pgdat->kcompactd_wait)) return; if (!kcompactd_node_suitable(pgdat)) return; trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order, highest_zoneidx); wake_up_interruptible(&pgdat->kcompactd_wait); } /* * The background compaction daemon, started as a kernel thread * from the init process. */ static int kcompactd(void *p) { pg_data_t *pgdat = (pg_data_t *)p; struct task_struct *tsk = current; long default_timeout = msecs_to_jiffies(HPAGE_FRAG_CHECK_INTERVAL_MSEC); long timeout = default_timeout; const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); if (!cpumask_empty(cpumask)) set_cpus_allowed_ptr(tsk, cpumask); set_freezable(); pgdat->kcompactd_max_order = 0; pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1; while (!kthread_should_stop()) { unsigned long pflags; /* * Avoid the unnecessary wakeup for proactive compaction * when it is disabled. */ if (!sysctl_compaction_proactiveness) timeout = MAX_SCHEDULE_TIMEOUT; trace_mm_compaction_kcompactd_sleep(pgdat->node_id); if (wait_event_freezable_timeout(pgdat->kcompactd_wait, kcompactd_work_requested(pgdat), timeout) && !pgdat->proactive_compact_trigger) { psi_memstall_enter(&pflags); kcompactd_do_work(pgdat); psi_memstall_leave(&pflags); /* * Reset the timeout value. 
The defer timeout from * proactive compaction is lost here but that is fine * as the condition of the zone changing substantionally * then carrying on with the previous defer interval is * not useful. */ timeout = default_timeout; continue; } /* * Start the proactive work with default timeout. Based * on the fragmentation score, this timeout is updated. */ timeout = default_timeout; if (should_proactive_compact_node(pgdat)) { unsigned int prev_score, score; prev_score = fragmentation_score_node(pgdat); proactive_compact_node(pgdat); score = fragmentation_score_node(pgdat); /* * Defer proactive compaction if the fragmentation * score did not go down i.e. no progress made. */ if (unlikely(score >= prev_score)) timeout = default_timeout << COMPACT_MAX_DEFER_SHIFT; } if (unlikely(pgdat->proactive_compact_trigger)) pgdat->proactive_compact_trigger = false; } return 0; } /* * This kcompactd start function will be called by init and node-hot-add. * On node-hot-add, kcompactd will moved to proper cpus if cpus are hot-added. */ void __meminit kcompactd_run(int nid) { pg_data_t *pgdat = NODE_DATA(nid); if (pgdat->kcompactd) return; pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid); if (IS_ERR(pgdat->kcompactd)) { pr_err("Failed to start kcompactd on node %d\n", nid); pgdat->kcompactd = NULL; } } /* * Called by memory hotplug when all memory in a node is offlined. Caller must * be holding mem_hotplug_begin/done(). */ void __meminit kcompactd_stop(int nid) { struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd; if (kcompactd) { kthread_stop(kcompactd); NODE_DATA(nid)->kcompactd = NULL; } } /* * It's optimal to keep kcompactd on the same CPUs as their memory, but * not required for correctness. So if the last cpu in a node goes * away, we get changed to run anywhere: as the first one comes back, * restore their cpu bindings. 
*/ static int kcompactd_cpu_online(unsigned int cpu) { int nid; for_each_node_state(nid, N_MEMORY) { pg_data_t *pgdat = NODE_DATA(nid); const struct cpumask *mask; mask = cpumask_of_node(pgdat->node_id); if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) /* One of our CPUs online: restore mask */ if (pgdat->kcompactd) set_cpus_allowed_ptr(pgdat->kcompactd, mask); } return 0; } static int proc_dointvec_minmax_warn_RT_change(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int ret, old; if (!IS_ENABLED(CONFIG_PREEMPT_RT) || !write) return proc_dointvec_minmax(table, write, buffer, lenp, ppos); old = *(int *)table->data; ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (ret) return ret; if (old != *(int *)table->data) pr_warn_once("sysctl attribute %s changed by %s[%d]\n", table->procname, current->comm, task_pid_nr(current)); return ret; } static struct ctl_table vm_compaction[] = { { .procname = "compact_memory", .data = &sysctl_compact_memory, .maxlen = sizeof(int), .mode = 0200, .proc_handler = sysctl_compaction_handler, }, { .procname = "compaction_proactiveness", .data = &sysctl_compaction_proactiveness, .maxlen = sizeof(sysctl_compaction_proactiveness), .mode = 0644, .proc_handler = compaction_proactiveness_sysctl_handler, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE_HUNDRED, }, { .procname = "extfrag_threshold", .data = &sysctl_extfrag_threshold, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE_THOUSAND, }, { .procname = "compact_unevictable_allowed", .data = &sysctl_compact_unevictable_allowed, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax_warn_RT_change, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, { } }; static int __init kcompactd_init(void) { int nid; int ret; ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/compaction:online", kcompactd_cpu_online, NULL); if (ret < 0) { pr_err("kcompactd: failed to register hotplug callbacks.\n"); return ret; } for_each_node_state(nid, N_MEMORY) kcompactd_run(nid); register_sysctl_init("vm", vm_compaction); return 0; } subsys_initcall(kcompactd_init) #endif /* CONFIG_COMPACTION */ |
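/*
 * A minimal illustrative userspace sketch (not kernel code) showing how the
 * interfaces implemented above by sysctl_compaction_handler(),
 * compaction_proactiveness_sysctl_handler() and compact_store() can be
 * driven: /proc/sys/vm/compact_memory, /proc/sys/vm/compaction_proactiveness
 * and /sys/devices/system/node/node0/compact. The helper name write_str()
 * and the use of node0 are arbitrary; all writes require root.
 */
#include <stdio.h>

/* Write a string to a procfs/sysfs file; returns 0 on success. */
static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	if (fputs(val, f) == EOF) {
		fclose(f);
		return -1;
	}
	return fclose(f) ? -1 : 0;
}

int main(void)
{
	/* Compact every zone on every node (sysctl_compaction_handler). */
	if (write_str("/proc/sys/vm/compact_memory", "1"))
		perror("compact_memory");

	/* Compact only node 0 via the sysfs hook (compact_store). */
	if (write_str("/sys/devices/system/node/node0/compact", "1"))
		perror("node0/compact");

	/* Raise proactiveness so kcompactd compacts more aggressively. */
	if (write_str("/proc/sys/vm/compaction_proactiveness", "40"))
		perror("compaction_proactiveness");

	return 0;
}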
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2000-2006 Silicon Graphics, Inc. * All Rights Reserved. */ #include "xfs.h" #include <linux/backing-dev.h> #include <linux/dax.h> #include "xfs_shared.h" #include "xfs_format.h" #include "xfs_log_format.h" #include "xfs_trans_resv.h" #include "xfs_mount.h" #include "xfs_trace.h" #include "xfs_log.h" #include "xfs_log_recover.h" #include "xfs_log_priv.h" #include "xfs_trans.h" #include "xfs_buf_item.h" #include "xfs_errortag.h" #include "xfs_error.h" #include "xfs_ag.h" struct kmem_cache *xfs_buf_cache; /* * Locking orders * * xfs_buf_ioacct_inc: * xfs_buf_ioacct_dec: * b_sema (caller holds) * b_lock * * xfs_buf_stale: * b_sema (caller holds) * b_lock * lru_lock * * xfs_buf_rele: * b_lock * pag_buf_lock * lru_lock * * xfs_buftarg_drain_rele * lru_lock * b_lock (trylock due to inversion) * * xfs_buftarg_isolate * lru_lock * b_lock (trylock due to inversion) */ static int __xfs_buf_submit(struct xfs_buf *bp, bool wait); static inline int xfs_buf_submit( struct xfs_buf *bp) { return __xfs_buf_submit(bp, !(bp->b_flags & XBF_ASYNC)); } static inline int xfs_buf_is_vmapped( struct xfs_buf *bp) { /* * Return true if the buffer is vmapped. * * b_addr is null if the buffer is not mapped, but the code is clever * enough to know it doesn't have to map a single page, so the check has * to be both for b_addr and bp->b_page_count > 1. */ return bp->b_addr && bp->b_page_count > 1; } static inline int xfs_buf_vmap_len( struct xfs_buf *bp) { return (bp->b_page_count * PAGE_SIZE); } /* * Bump the I/O in flight count on the buftarg if we haven't yet done so for * this buffer. The count is incremented once per buffer (per hold cycle) * because the corresponding decrement is deferred to buffer release. Buffers * can undergo I/O multiple times in a hold-release cycle and per buffer I/O * tracking adds unnecessary overhead. This is used for synchronization purposes * with unmount (see xfs_buftarg_drain()), so all we really need is a count of * in-flight buffers. * * Buffers that are never released (e.g., superblock, iclog buffers) must set * the XBF_NO_IOACCT flag before I/O submission. Otherwise, the buftarg count * never reaches zero and unmount hangs indefinitely. */ static inline void xfs_buf_ioacct_inc( struct xfs_buf *bp) { if (bp->b_flags & XBF_NO_IOACCT) return; ASSERT(bp->b_flags & XBF_ASYNC); spin_lock(&bp->b_lock); if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) { bp->b_state |= XFS_BSTATE_IN_FLIGHT; percpu_counter_inc(&bp->b_target->bt_io_count); } spin_unlock(&bp->b_lock); } /* * Clear the in-flight state on a buffer about to be released to the LRU or * freed and unaccount from the buftarg.
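* xfs_buf_stale() and the cached-buffer release path in xfs_buf_rele() call the __ prefixed variant with b_lock already held; xfs_buf_ioacct_dec() wraps it with the lock for the uncached release path.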
*/ static inline void __xfs_buf_ioacct_dec( struct xfs_buf *bp) { lockdep_assert_held(&bp->b_lock); if (bp->b_state & XFS_BSTATE_IN_FLIGHT) { bp->b_state &= ~XFS_BSTATE_IN_FLIGHT; percpu_counter_dec(&bp->b_target->bt_io_count); } } static inline void xfs_buf_ioacct_dec( struct xfs_buf *bp) { spin_lock(&bp->b_lock); __xfs_buf_ioacct_dec(bp); spin_unlock(&bp->b_lock); } /* * When we mark a buffer stale, we remove the buffer from the LRU and clear the * b_lru_ref count so that the buffer is freed immediately when the buffer * reference count falls to zero. If the buffer is already on the LRU, we need * to remove the reference that LRU holds on the buffer. * * This prevents build-up of stale buffers on the LRU. */ void xfs_buf_stale( struct xfs_buf *bp) { ASSERT(xfs_buf_islocked(bp)); bp->b_flags |= XBF_STALE; /* * Clear the delwri status so that a delwri queue walker will not * flush this buffer to disk now that it is stale. The delwri queue has * a reference to the buffer, so this is safe to do. */ bp->b_flags &= ~_XBF_DELWRI_Q; /* * Once the buffer is marked stale and unlocked, a subsequent lookup * could reset b_flags. There is no guarantee that the buffer is * unaccounted (released to LRU) before that occurs. Drop in-flight * status now to preserve accounting consistency. */ spin_lock(&bp->b_lock); __xfs_buf_ioacct_dec(bp); atomic_set(&bp->b_lru_ref, 0); if (!(bp->b_state & XFS_BSTATE_DISPOSE) && (list_lru_del_obj(&bp->b_target->bt_lru, &bp->b_lru))) atomic_dec(&bp->b_hold); ASSERT(atomic_read(&bp->b_hold) >= 1); spin_unlock(&bp->b_lock); } static int xfs_buf_get_maps( struct xfs_buf *bp, int map_count) { ASSERT(bp->b_maps == NULL); bp->b_map_count = map_count; if (map_count == 1) { bp->b_maps = &bp->__b_map; return 0; } bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map), KM_NOFS); if (!bp->b_maps) return -ENOMEM; return 0; } /* * Frees b_pages if it was allocated. */ static void xfs_buf_free_maps( struct xfs_buf *bp) { if (bp->b_maps != &bp->__b_map) { kmem_free(bp->b_maps); bp->b_maps = NULL; } } static int _xfs_buf_alloc( struct xfs_buftarg *target, struct xfs_buf_map *map, int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp) { struct xfs_buf *bp; int error; int i; *bpp = NULL; bp = kmem_cache_zalloc(xfs_buf_cache, GFP_NOFS | __GFP_NOFAIL); /* * We don't want certain flags to appear in b_flags unless they are * specifically set by later operations on the buffer. */ flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD); atomic_set(&bp->b_hold, 1); atomic_set(&bp->b_lru_ref, 1); init_completion(&bp->b_iowait); INIT_LIST_HEAD(&bp->b_lru); INIT_LIST_HEAD(&bp->b_list); INIT_LIST_HEAD(&bp->b_li_list); sema_init(&bp->b_sema, 0); /* held, no waiters */ spin_lock_init(&bp->b_lock); bp->b_target = target; bp->b_mount = target->bt_mount; bp->b_flags = flags; /* * Set length and io_length to the same value initially. * I/O routines should use io_length, which will be the same in * most cases but may be reset (e.g. XFS recovery). 
*/ error = xfs_buf_get_maps(bp, nmaps); if (error) { kmem_cache_free(xfs_buf_cache, bp); return error; } bp->b_rhash_key = map[0].bm_bn; bp->b_length = 0; for (i = 0; i < nmaps; i++) { bp->b_maps[i].bm_bn = map[i].bm_bn; bp->b_maps[i].bm_len = map[i].bm_len; bp->b_length += map[i].bm_len; } atomic_set(&bp->b_pin_count, 0); init_waitqueue_head(&bp->b_waiters); XFS_STATS_INC(bp->b_mount, xb_create); trace_xfs_buf_init(bp, _RET_IP_); *bpp = bp; return 0; } static void xfs_buf_free_pages( struct xfs_buf *bp) { uint i; ASSERT(bp->b_flags & _XBF_PAGES); if (xfs_buf_is_vmapped(bp)) vm_unmap_ram(bp->b_addr, bp->b_page_count); for (i = 0; i < bp->b_page_count; i++) { if (bp->b_pages[i]) __free_page(bp->b_pages[i]); } mm_account_reclaimed_pages(bp->b_page_count); if (bp->b_pages != bp->b_page_array) kmem_free(bp->b_pages); bp->b_pages = NULL; bp->b_flags &= ~_XBF_PAGES; } static void xfs_buf_free_callback( struct callback_head *cb) { struct xfs_buf *bp = container_of(cb, struct xfs_buf, b_rcu); xfs_buf_free_maps(bp); kmem_cache_free(xfs_buf_cache, bp); } static void xfs_buf_free( struct xfs_buf *bp) { trace_xfs_buf_free(bp, _RET_IP_); ASSERT(list_empty(&bp->b_lru)); if (bp->b_flags & _XBF_PAGES) xfs_buf_free_pages(bp); else if (bp->b_flags & _XBF_KMEM) kmem_free(bp->b_addr); call_rcu(&bp->b_rcu, xfs_buf_free_callback); } static int xfs_buf_alloc_kmem( struct xfs_buf *bp, xfs_buf_flags_t flags) { xfs_km_flags_t kmflag_mask = KM_NOFS; size_t size = BBTOB(bp->b_length); /* Assure zeroed buffer for non-read cases. */ if (!(flags & XBF_READ)) kmflag_mask |= KM_ZERO; bp->b_addr = kmem_alloc(size, kmflag_mask); if (!bp->b_addr) return -ENOMEM; if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) != ((unsigned long)bp->b_addr & PAGE_MASK)) { /* b_addr spans two pages - use alloc_page instead */ kmem_free(bp->b_addr); bp->b_addr = NULL; return -ENOMEM; } bp->b_offset = offset_in_page(bp->b_addr); bp->b_pages = bp->b_page_array; bp->b_pages[0] = kmem_to_page(bp->b_addr); bp->b_page_count = 1; bp->b_flags |= _XBF_KMEM; return 0; } static int xfs_buf_alloc_pages( struct xfs_buf *bp, xfs_buf_flags_t flags) { gfp_t gfp_mask = __GFP_NOWARN; long filled = 0; if (flags & XBF_READ_AHEAD) gfp_mask |= __GFP_NORETRY; else gfp_mask |= GFP_NOFS; /* Make sure that we have a page list */ bp->b_page_count = DIV_ROUND_UP(BBTOB(bp->b_length), PAGE_SIZE); if (bp->b_page_count <= XB_PAGES) { bp->b_pages = bp->b_page_array; } else { bp->b_pages = kzalloc(sizeof(struct page *) * bp->b_page_count, gfp_mask); if (!bp->b_pages) return -ENOMEM; } bp->b_flags |= _XBF_PAGES; /* Assure zeroed buffer for non-read cases. */ if (!(flags & XBF_READ)) gfp_mask |= __GFP_ZERO; /* * Bulk filling of pages can take multiple calls. Not filling the entire * array is not an allocation failure, so don't back off if we get at * least one extra page. */ for (;;) { long last = filled; filled = alloc_pages_bulk_array(gfp_mask, bp->b_page_count, bp->b_pages); if (filled == bp->b_page_count) { XFS_STATS_INC(bp->b_mount, xb_page_found); break; } if (filled != last) continue; if (flags & XBF_READ_AHEAD) { xfs_buf_free_pages(bp); return -ENOMEM; } XFS_STATS_INC(bp->b_mount, xb_page_retries); memalloc_retry_wait(gfp_mask); } return 0; } /* * Map buffer into kernel address-space if necessary. 
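* A single-page buffer simply uses page_address(); larger buffers are stitched into one contiguous mapping with vm_map_ram() unless the caller asked for XBF_UNMAPPED, in which case b_addr stays NULL.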
*/ STATIC int _xfs_buf_map_pages( struct xfs_buf *bp, xfs_buf_flags_t flags) { ASSERT(bp->b_flags & _XBF_PAGES); if (bp->b_page_count == 1) { /* A single page buffer is always mappable */ bp->b_addr = page_address(bp->b_pages[0]); } else if (flags & XBF_UNMAPPED) { bp->b_addr = NULL; } else { int retried = 0; unsigned nofs_flag; /* * vm_map_ram() will allocate auxiliary structures (e.g. * pagetables) with GFP_KERNEL, yet we are likely to be under * GFP_NOFS context here. Hence we need to tell memory reclaim * that we are in such a context via PF_MEMALLOC_NOFS to prevent * memory reclaim re-entering the filesystem here and * potentially deadlocking. */ nofs_flag = memalloc_nofs_save(); do { bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count, -1); if (bp->b_addr) break; vm_unmap_aliases(); } while (retried++ <= 1); memalloc_nofs_restore(nofs_flag); if (!bp->b_addr) return -ENOMEM; } return 0; } /* * Finding and Reading Buffers */ static int _xfs_buf_obj_cmp( struct rhashtable_compare_arg *arg, const void *obj) { const struct xfs_buf_map *map = arg->key; const struct xfs_buf *bp = obj; /* * The key hashing in the lookup path depends on the key being the * first element of the compare_arg, make sure to assert this. */ BUILD_BUG_ON(offsetof(struct xfs_buf_map, bm_bn) != 0); if (bp->b_rhash_key != map->bm_bn) return 1; if (unlikely(bp->b_length != map->bm_len)) { /* * found a block number match. If the range doesn't * match, the only way this is allowed is if the buffer * in the cache is stale and the transaction that made * it stale has not yet committed. i.e. we are * reallocating a busy extent. Skip this buffer and * continue searching for an exact match. */ if (!(map->bm_flags & XBM_LIVESCAN)) ASSERT(bp->b_flags & XBF_STALE); return 1; } return 0; } static const struct rhashtable_params xfs_buf_hash_params = { .min_size = 32, /* empty AGs have minimal footprint */ .nelem_hint = 16, .key_len = sizeof(xfs_daddr_t), .key_offset = offsetof(struct xfs_buf, b_rhash_key), .head_offset = offsetof(struct xfs_buf, b_rhash_head), .automatic_shrinking = true, .obj_cmpfn = _xfs_buf_obj_cmp, }; int xfs_buf_hash_init( struct xfs_perag *pag) { spin_lock_init(&pag->pag_buf_lock); return rhashtable_init(&pag->pag_buf_hash, &xfs_buf_hash_params); } void xfs_buf_hash_destroy( struct xfs_perag *pag) { rhashtable_destroy(&pag->pag_buf_hash); } static int xfs_buf_map_verify( struct xfs_buftarg *btp, struct xfs_buf_map *map) { xfs_daddr_t eofs; /* Check for IOs smaller than the sector size / not sector aligned */ ASSERT(!(BBTOB(map->bm_len) < btp->bt_meta_sectorsize)); ASSERT(!(BBTOB(map->bm_bn) & (xfs_off_t)btp->bt_meta_sectormask)); /* * Corrupted block numbers can get through to here, unfortunately, so we * have to check that the buffer falls within the filesystem bounds. */ eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks); if (map->bm_bn < 0 || map->bm_bn >= eofs) { xfs_alert(btp->bt_mount, "%s: daddr 0x%llx out of range, EOFS 0x%llx", __func__, map->bm_bn, eofs); WARN_ON(1); return -EFSCORRUPTED; } return 0; } static int xfs_buf_find_lock( struct xfs_buf *bp, xfs_buf_flags_t flags) { if (flags & XBF_TRYLOCK) { if (!xfs_buf_trylock(bp)) { XFS_STATS_INC(bp->b_mount, xb_busy_locked); return -EAGAIN; } } else { xfs_buf_lock(bp); XFS_STATS_INC(bp->b_mount, xb_get_locked_waited); } /* * if the buffer is stale, clear all the external state associated with * it. We need to keep flags such as how we allocated the buffer memory * intact here. 
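* (After this, only _XBF_KMEM and _XBF_PAGES can remain set in b_flags, and b_ops is cleared so that the next reader attaches the verifier it expects.)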
*/ if (bp->b_flags & XBF_STALE) { if (flags & XBF_LIVESCAN) { xfs_buf_unlock(bp); return -ENOENT; } ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0); bp->b_flags &= _XBF_KMEM | _XBF_PAGES; bp->b_ops = NULL; } return 0; } static inline int xfs_buf_lookup( struct xfs_perag *pag, struct xfs_buf_map *map, xfs_buf_flags_t flags, struct xfs_buf **bpp) { struct xfs_buf *bp; int error; rcu_read_lock(); bp = rhashtable_lookup(&pag->pag_buf_hash, map, xfs_buf_hash_params); if (!bp || !atomic_inc_not_zero(&bp->b_hold)) { rcu_read_unlock(); return -ENOENT; } rcu_read_unlock(); error = xfs_buf_find_lock(bp, flags); if (error) { xfs_buf_rele(bp); return error; } trace_xfs_buf_find(bp, flags, _RET_IP_); *bpp = bp; return 0; } /* * Insert the new_bp into the hash table. This consumes the perag reference * taken for the lookup regardless of the result of the insert. */ static int xfs_buf_find_insert( struct xfs_buftarg *btp, struct xfs_perag *pag, struct xfs_buf_map *cmap, struct xfs_buf_map *map, int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp) { struct xfs_buf *new_bp; struct xfs_buf *bp; int error; error = _xfs_buf_alloc(btp, map, nmaps, flags, &new_bp); if (error) goto out_drop_pag; /* * For buffers that fit entirely within a single page, first attempt to * allocate the memory from the heap to minimise memory usage. If we * can't get heap memory for these small buffers, we fall back to using * the page allocator. */ if (BBTOB(new_bp->b_length) >= PAGE_SIZE || xfs_buf_alloc_kmem(new_bp, flags) < 0) { error = xfs_buf_alloc_pages(new_bp, flags); if (error) goto out_free_buf; } spin_lock(&pag->pag_buf_lock); bp = rhashtable_lookup_get_insert_fast(&pag->pag_buf_hash, &new_bp->b_rhash_head, xfs_buf_hash_params); if (IS_ERR(bp)) { error = PTR_ERR(bp); spin_unlock(&pag->pag_buf_lock); goto out_free_buf; } if (bp) { /* found an existing buffer */ atomic_inc(&bp->b_hold); spin_unlock(&pag->pag_buf_lock); error = xfs_buf_find_lock(bp, flags); if (error) xfs_buf_rele(bp); else *bpp = bp; goto out_free_buf; } /* The new buffer keeps the perag reference until it is freed. */ new_bp->b_pag = pag; spin_unlock(&pag->pag_buf_lock); *bpp = new_bp; return 0; out_free_buf: xfs_buf_free(new_bp); out_drop_pag: xfs_perag_put(pag); return error; } /* * Assembles a buffer covering the specified range. The code is optimised for * cache hits, as metadata intensive workloads will see 3 orders of magnitude * more hits than misses. */ int xfs_buf_get_map( struct xfs_buftarg *btp, struct xfs_buf_map *map, int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp) { struct xfs_perag *pag; struct xfs_buf *bp = NULL; struct xfs_buf_map cmap = { .bm_bn = map[0].bm_bn }; int error; int i; if (flags & XBF_LIVESCAN) cmap.bm_flags |= XBM_LIVESCAN; for (i = 0; i < nmaps; i++) cmap.bm_len += map[i].bm_len; error = xfs_buf_map_verify(btp, &cmap); if (error) return error; pag = xfs_perag_get(btp->bt_mount, xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn)); error = xfs_buf_lookup(pag, &cmap, flags, &bp); if (error && error != -ENOENT) goto out_put_perag; /* cache hits always outnumber misses by at least 10:1 */ if (unlikely(!bp)) { XFS_STATS_INC(btp->bt_mount, xb_miss_locked); if (flags & XBF_INCORE) goto out_put_perag; /* xfs_buf_find_insert() consumes the perag reference. */ error = xfs_buf_find_insert(btp, pag, &cmap, map, nmaps, flags, &bp); if (error) return error; } else { XFS_STATS_INC(btp->bt_mount, xb_get_locked); xfs_perag_put(pag); } /* We do not hold a perag reference anymore. 
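* (Either the cache-hit path above already dropped it via xfs_perag_put(), or xfs_buf_find_insert() consumed it on the miss path.)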
*/ if (!bp->b_addr) { error = _xfs_buf_map_pages(bp, flags); if (unlikely(error)) { xfs_warn_ratelimited(btp->bt_mount, "%s: failed to map %u pages", __func__, bp->b_page_count); xfs_buf_relse(bp); return error; } } /* * Clear b_error if this is a lookup from a caller that doesn't expect * valid data to be found in the buffer. */ if (!(flags & XBF_READ)) xfs_buf_ioerror(bp, 0); XFS_STATS_INC(btp->bt_mount, xb_get); trace_xfs_buf_get(bp, flags, _RET_IP_); *bpp = bp; return 0; out_put_perag: xfs_perag_put(pag); return error; } int _xfs_buf_read( struct xfs_buf *bp, xfs_buf_flags_t flags) { ASSERT(!(flags & XBF_WRITE)); ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL); bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD | XBF_DONE); bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD); return xfs_buf_submit(bp); } /* * Reverify a buffer found in cache without an attached ->b_ops. * * If the caller passed an ops structure and the buffer doesn't have ops * assigned, set the ops and use it to verify the contents. If verification * fails, clear XBF_DONE. We assume the buffer has no recorded errors and is * already in XBF_DONE state on entry. * * Under normal operations, every in-core buffer is verified on read I/O * completion. There are two scenarios that can lead to in-core buffers without * an assigned ->b_ops. The first is during log recovery of buffers on a V4 * filesystem, though these buffers are purged at the end of recovery. The * other is online repair, which intentionally reads with a NULL buffer ops to * run several verifiers across an in-core buffer in order to establish buffer * type. If repair can't establish that, the buffer will be left in memory * with NULL buffer ops. */ int xfs_buf_reverify( struct xfs_buf *bp, const struct xfs_buf_ops *ops) { ASSERT(bp->b_flags & XBF_DONE); ASSERT(bp->b_error == 0); if (!ops || bp->b_ops) return 0; bp->b_ops = ops; bp->b_ops->verify_read(bp); if (bp->b_error) bp->b_flags &= ~XBF_DONE; return bp->b_error; } int xfs_buf_read_map( struct xfs_buftarg *target, struct xfs_buf_map *map, int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp, const struct xfs_buf_ops *ops, xfs_failaddr_t fa) { struct xfs_buf *bp; int error; flags |= XBF_READ; *bpp = NULL; error = xfs_buf_get_map(target, map, nmaps, flags, &bp); if (error) return error; trace_xfs_buf_read(bp, flags, _RET_IP_); if (!(bp->b_flags & XBF_DONE)) { /* Initiate the buffer read and wait. */ XFS_STATS_INC(target->bt_mount, xb_get_read); bp->b_ops = ops; error = _xfs_buf_read(bp, flags); /* Readahead iodone already dropped the buffer, so exit. */ if (flags & XBF_ASYNC) return 0; } else { /* Buffer already read; all we need to do is check it. */ error = xfs_buf_reverify(bp, ops); /* Readahead already finished; drop the buffer and exit. */ if (flags & XBF_ASYNC) { xfs_buf_relse(bp); return 0; } /* We do not want read in the flags */ bp->b_flags &= ~XBF_READ; ASSERT(bp->b_ops != NULL || ops == NULL); } /* * If we've had a read error, then the contents of the buffer are * invalid and should not be used. To ensure that a followup read tries * to pull the buffer from disk again, we clear the XBF_DONE flag and * mark the buffer stale. This ensures that anyone who has a current * reference to the buffer will interpret it's contents correctly and * future cache lookups will also treat it as an empty, uninitialised * buffer. 
*/ if (error) { /* * Check against log shutdown for error reporting because * metadata writeback may require a read first and we need to * report errors in metadata writeback until the log is shut * down. High level transaction read functions already check * against mount shutdown, anyway, so we only need to be * concerned about low level IO interactions here. */ if (!xlog_is_shutdown(target->bt_mount->m_log)) xfs_buf_ioerror_alert(bp, fa); bp->b_flags &= ~XBF_DONE; xfs_buf_stale(bp); xfs_buf_relse(bp); /* bad CRC means corrupted metadata */ if (error == -EFSBADCRC) error = -EFSCORRUPTED; return error; } *bpp = bp; return 0; } /* * If we are not low on memory then do the readahead in a deadlock * safe manner. */ void xfs_buf_readahead_map( struct xfs_buftarg *target, struct xfs_buf_map *map, int nmaps, const struct xfs_buf_ops *ops) { struct xfs_buf *bp; xfs_buf_read_map(target, map, nmaps, XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD, &bp, ops, __this_address); } /* * Read an uncached buffer from disk. Allocates and returns a locked * buffer containing the disk contents or nothing. Uncached buffers always have * a cache index of XFS_BUF_DADDR_NULL so we can easily determine if the buffer * is cached or uncached during fault diagnosis. */ int xfs_buf_read_uncached( struct xfs_buftarg *target, xfs_daddr_t daddr, size_t numblks, xfs_buf_flags_t flags, struct xfs_buf **bpp, const struct xfs_buf_ops *ops) { struct xfs_buf *bp; int error; *bpp = NULL; error = xfs_buf_get_uncached(target, numblks, flags, &bp); if (error) return error; /* set up the buffer for a read IO */ ASSERT(bp->b_map_count == 1); bp->b_rhash_key = XFS_BUF_DADDR_NULL; bp->b_maps[0].bm_bn = daddr; bp->b_flags |= XBF_READ; bp->b_ops = ops; xfs_buf_submit(bp); if (bp->b_error) { error = bp->b_error; xfs_buf_relse(bp); return error; } *bpp = bp; return 0; } int xfs_buf_get_uncached( struct xfs_buftarg *target, size_t numblks, xfs_buf_flags_t flags, struct xfs_buf **bpp) { int error; struct xfs_buf *bp; DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks); *bpp = NULL; /* flags might contain irrelevant bits, pass only what we care about */ error = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT, &bp); if (error) return error; error = xfs_buf_alloc_pages(bp, flags); if (error) goto fail_free_buf; error = _xfs_buf_map_pages(bp, 0); if (unlikely(error)) { xfs_warn(target->bt_mount, "%s: failed to map pages", __func__); goto fail_free_buf; } trace_xfs_buf_get_uncached(bp, _RET_IP_); *bpp = bp; return 0; fail_free_buf: xfs_buf_free(bp); return error; } /* * Increment reference count on buffer, to hold the buffer concurrently * with another thread which may release (free) the buffer asynchronously. * Must hold the buffer already to call this function. */ void xfs_buf_hold( struct xfs_buf *bp) { trace_xfs_buf_hold(bp, _RET_IP_); atomic_inc(&bp->b_hold); } /* * Release a hold on the specified buffer. If the hold count is 1, the buffer is * placed on LRU or freed (depending on b_lru_ref). */ void xfs_buf_rele( struct xfs_buf *bp) { struct xfs_perag *pag = bp->b_pag; bool release; bool freebuf = false; trace_xfs_buf_rele(bp, _RET_IP_); if (!pag) { ASSERT(list_empty(&bp->b_lru)); if (atomic_dec_and_test(&bp->b_hold)) { xfs_buf_ioacct_dec(bp); xfs_buf_free(bp); } return; } ASSERT(atomic_read(&bp->b_hold) > 0); /* * We grab the b_lock here first to serialise racing xfs_buf_rele() * calls. The pag_buf_lock being taken on the last reference only * serialises against racing lookups in xfs_buf_find(). 
IOWs, the second * to last reference we drop here is not serialised against the last * reference until we take bp->b_lock. Hence if we don't grab b_lock * first, the last "release" reference can win the race to the lock and * free the buffer before the second-to-last reference is processed, * leading to a use-after-free scenario. */ spin_lock(&bp->b_lock); release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock); if (!release) { /* * Drop the in-flight state if the buffer is already on the LRU * and it holds the only reference. This is racy because we * haven't acquired the pag lock, but the use of _XBF_IN_FLIGHT * ensures the decrement occurs only once per-buf. */ if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru)) __xfs_buf_ioacct_dec(bp); goto out_unlock; } /* the last reference has been dropped ... */ __xfs_buf_ioacct_dec(bp); if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) { /* * If the buffer is added to the LRU take a new reference to the * buffer for the LRU and clear the (now stale) dispose list * state flag */ if (list_lru_add_obj(&bp->b_target->bt_lru, &bp->b_lru)) { bp->b_state &= ~XFS_BSTATE_DISPOSE; atomic_inc(&bp->b_hold); } spin_unlock(&pag->pag_buf_lock); } else { /* * most of the time buffers will already be removed from the * LRU, so optimise that case by checking for the * XFS_BSTATE_DISPOSE flag indicating the last list the buffer * was on was the disposal list */ if (!(bp->b_state & XFS_BSTATE_DISPOSE)) { list_lru_del_obj(&bp->b_target->bt_lru, &bp->b_lru); } else { ASSERT(list_empty(&bp->b_lru)); } ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); rhashtable_remove_fast(&pag->pag_buf_hash, &bp->b_rhash_head, xfs_buf_hash_params); spin_unlock(&pag->pag_buf_lock); xfs_perag_put(pag); freebuf = true; } out_unlock: spin_unlock(&bp->b_lock); if (freebuf) xfs_buf_free(bp); } /* * Lock a buffer object, if it is not already locked. * * If we come across a stale, pinned, locked buffer, we know that we are * being asked to lock a buffer that has been reallocated. Because it is * pinned, we know that the log has not been pushed to disk and hence it * will still be locked. Rather than continuing to have trylock attempts * fail until someone else pushes the log, push it ourselves before * returning. This means that the xfsaild will not get stuck trying * to push on stale inode buffers. */ int xfs_buf_trylock( struct xfs_buf *bp) { int locked; locked = down_trylock(&bp->b_sema) == 0; if (locked) trace_xfs_buf_trylock(bp, _RET_IP_); else trace_xfs_buf_trylock_fail(bp, _RET_IP_); return locked; } /* * Lock a buffer object. * * If we come across a stale, pinned, locked buffer, we know that we * are being asked to lock a buffer that has been reallocated. Because * it is pinned, we know that the log has not been pushed to disk and * hence it will still be locked. Rather than sleeping until someone * else pushes the log, push it ourselves before trying to get the lock. 
*/ void xfs_buf_lock( struct xfs_buf *bp) { trace_xfs_buf_lock(bp, _RET_IP_); if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE)) xfs_log_force(bp->b_mount, 0); down(&bp->b_sema); trace_xfs_buf_lock_done(bp, _RET_IP_); } void xfs_buf_unlock( struct xfs_buf *bp) { ASSERT(xfs_buf_islocked(bp)); up(&bp->b_sema); trace_xfs_buf_unlock(bp, _RET_IP_); } STATIC void xfs_buf_wait_unpin( struct xfs_buf *bp) { DECLARE_WAITQUEUE (wait, current); if (atomic_read(&bp->b_pin_count) == 0) return; add_wait_queue(&bp->b_waiters, &wait); for (;;) { set_current_state(TASK_UNINTERRUPTIBLE); if (atomic_read(&bp->b_pin_count) == 0) break; io_schedule(); } remove_wait_queue(&bp->b_waiters, &wait); set_current_state(TASK_RUNNING); } static void xfs_buf_ioerror_alert_ratelimited( struct xfs_buf *bp) { static unsigned long lasttime; static struct xfs_buftarg *lasttarg; if (bp->b_target != lasttarg || time_after(jiffies, (lasttime + 5*HZ))) { lasttime = jiffies; xfs_buf_ioerror_alert(bp, __this_address); } lasttarg = bp->b_target; } /* * Account for this latest trip around the retry handler, and decide if * we've failed enough times to constitute a permanent failure. */ static bool xfs_buf_ioerror_permanent( struct xfs_buf *bp, struct xfs_error_cfg *cfg) { struct xfs_mount *mp = bp->b_mount; if (cfg->max_retries != XFS_ERR_RETRY_FOREVER && ++bp->b_retries > cfg->max_retries) return true; if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER && time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time)) return true; /* At unmount we may treat errors differently */ if (xfs_is_unmounting(mp) && mp->m_fail_unmount) return true; return false; } /* * On a sync write or shutdown we just want to stale the buffer and let the * caller handle the error in bp->b_error appropriately. * * If the write was asynchronous then no one will be looking for the error. If * this is the first failure of this type, clear the error state and write the * buffer out again. This means we always retry an async write failure at least * once, but we also need to set the buffer up to behave correctly now for * repeated failures. * * If we get repeated async write failures, then we take action according to the * error configuration we have been set up to use. * * Returns true if this function took care of error handling and the caller must * not touch the buffer again. Return false if the caller should proceed with * normal I/O completion handling. */ static bool xfs_buf_ioend_handle_error( struct xfs_buf *bp) { struct xfs_mount *mp = bp->b_mount; struct xfs_error_cfg *cfg; /* * If we've already shutdown the journal because of I/O errors, there's * no point in giving this a retry. */ if (xlog_is_shutdown(mp->m_log)) goto out_stale; xfs_buf_ioerror_alert_ratelimited(bp); /* * We're not going to bother about retrying this during recovery. * One strike! */ if (bp->b_flags & _XBF_LOGRECOVERY) { xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); return false; } /* * Synchronous writes will have callers process the error. 
*/ if (!(bp->b_flags & XBF_ASYNC)) goto out_stale; trace_xfs_buf_iodone_async(bp, _RET_IP_); cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error); if (bp->b_last_error != bp->b_error || !(bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL))) { bp->b_last_error = bp->b_error; if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER && !bp->b_first_retry_time) bp->b_first_retry_time = jiffies; goto resubmit; } /* * Permanent error - we need to trigger a shutdown if we haven't already * to indicate that inconsistency will result from this action. */ if (xfs_buf_ioerror_permanent(bp, cfg)) { xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); goto out_stale; } /* Still considered a transient error. Caller will schedule retries. */ if (bp->b_flags & _XBF_INODES) xfs_buf_inode_io_fail(bp); else if (bp->b_flags & _XBF_DQUOTS) xfs_buf_dquot_io_fail(bp); else ASSERT(list_empty(&bp->b_li_list)); xfs_buf_ioerror(bp, 0); xfs_buf_relse(bp); return true; resubmit: xfs_buf_ioerror(bp, 0); bp->b_flags |= (XBF_DONE | XBF_WRITE_FAIL); xfs_buf_submit(bp); return true; out_stale: xfs_buf_stale(bp); bp->b_flags |= XBF_DONE; bp->b_flags &= ~XBF_WRITE; trace_xfs_buf_error_relse(bp, _RET_IP_); return false; } static void xfs_buf_ioend( struct xfs_buf *bp) { trace_xfs_buf_iodone(bp, _RET_IP_); /* * Pull in IO completion errors now. We are guaranteed to be running * single threaded, so we don't need the lock to read b_io_error. */ if (!bp->b_error && bp->b_io_error) xfs_buf_ioerror(bp, bp->b_io_error); if (bp->b_flags & XBF_READ) { if (!bp->b_error && bp->b_ops) bp->b_ops->verify_read(bp); if (!bp->b_error) bp->b_flags |= XBF_DONE; } else { if (!bp->b_error) { bp->b_flags &= ~XBF_WRITE_FAIL; bp->b_flags |= XBF_DONE; } if (unlikely(bp->b_error) && xfs_buf_ioend_handle_error(bp)) return; /* clear the retry state */ bp->b_last_error = 0; bp->b_retries = 0; bp->b_first_retry_time = 0; /* * Note that for things like remote attribute buffers, there may * not be a buffer log item here, so processing the buffer log * item must remain optional. */ if (bp->b_log_item) xfs_buf_item_done(bp); if (bp->b_flags & _XBF_INODES) xfs_buf_inode_iodone(bp); else if (bp->b_flags & _XBF_DQUOTS) xfs_buf_dquot_iodone(bp); } bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD | _XBF_LOGRECOVERY); if (bp->b_flags & XBF_ASYNC) xfs_buf_relse(bp); else complete(&bp->b_iowait); } static void xfs_buf_ioend_work( struct work_struct *work) { struct xfs_buf *bp = container_of(work, struct xfs_buf, b_ioend_work); xfs_buf_ioend(bp); } static void xfs_buf_ioend_async( struct xfs_buf *bp) { INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work); queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work); } void __xfs_buf_ioerror( struct xfs_buf *bp, int error, xfs_failaddr_t failaddr) { ASSERT(error <= 0 && error >= -1000); bp->b_error = error; trace_xfs_buf_ioerror(bp, error, failaddr); } void xfs_buf_ioerror_alert( struct xfs_buf *bp, xfs_failaddr_t func) { xfs_buf_alert_ratelimited(bp, "XFS: metadata IO error", "metadata I/O error in \"%pS\" at daddr 0x%llx len %d error %d", func, (uint64_t)xfs_buf_daddr(bp), bp->b_length, -bp->b_error); } /* * To simulate an I/O failure, the buffer must be locked and held with at least * three references. The LRU reference is dropped by the stale call. The buf * item reference is dropped via ioend processing. The third reference is owned * by the caller and is dropped on I/O completion if the buffer is XBF_ASYNC. 
*/ void xfs_buf_ioend_fail( struct xfs_buf *bp) { bp->b_flags &= ~XBF_DONE; xfs_buf_stale(bp); xfs_buf_ioerror(bp, -EIO); xfs_buf_ioend(bp); } int xfs_bwrite( struct xfs_buf *bp) { int error; ASSERT(xfs_buf_islocked(bp)); bp->b_flags |= XBF_WRITE; bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | XBF_DONE); error = xfs_buf_submit(bp); if (error) xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR); return error; } static void xfs_buf_bio_end_io( struct bio *bio) { struct xfs_buf *bp = (struct xfs_buf *)bio->bi_private; if (!bio->bi_status && (bp->b_flags & XBF_WRITE) && (bp->b_flags & XBF_ASYNC) && XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_IOERROR)) bio->bi_status = BLK_STS_IOERR; /* * don't overwrite existing errors - otherwise we can lose errors on * buffers that require multiple bios to complete. */ if (bio->bi_status) { int error = blk_status_to_errno(bio->bi_status); cmpxchg(&bp->b_io_error, 0, error); } if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); if (atomic_dec_and_test(&bp->b_io_remaining) == 1) xfs_buf_ioend_async(bp); bio_put(bio); } static void xfs_buf_ioapply_map( struct xfs_buf *bp, int map, int *buf_offset, int *count, blk_opf_t op) { int page_index; unsigned int total_nr_pages = bp->b_page_count; int nr_pages; struct bio *bio; sector_t sector = bp->b_maps[map].bm_bn; int size; int offset; /* skip the pages in the buffer before the start offset */ page_index = 0; offset = *buf_offset; while (offset >= PAGE_SIZE) { page_index++; offset -= PAGE_SIZE; } /* * Limit the IO size to the length of the current vector, and update the * remaining IO count for the next time around. */ size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count); *count -= size; *buf_offset += size; next_chunk: atomic_inc(&bp->b_io_remaining); nr_pages = bio_max_segs(total_nr_pages); bio = bio_alloc(bp->b_target->bt_bdev, nr_pages, op, GFP_NOIO); bio->bi_iter.bi_sector = sector; bio->bi_end_io = xfs_buf_bio_end_io; bio->bi_private = bp; for (; size && nr_pages; nr_pages--, page_index++) { int rbytes, nbytes = PAGE_SIZE - offset; if (nbytes > size) nbytes = size; rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes, offset); if (rbytes < nbytes) break; offset = 0; sector += BTOBB(nbytes); size -= nbytes; total_nr_pages--; } if (likely(bio->bi_iter.bi_size)) { if (xfs_buf_is_vmapped(bp)) { flush_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); } submit_bio(bio); if (size) goto next_chunk; } else { /* * This is guaranteed not to be the last io reference count * because the caller (xfs_buf_submit) holds a count itself. */ atomic_dec(&bp->b_io_remaining); xfs_buf_ioerror(bp, -EIO); bio_put(bio); } } STATIC void _xfs_buf_ioapply( struct xfs_buf *bp) { struct blk_plug plug; blk_opf_t op; int offset; int size; int i; /* * Make sure we capture only current IO errors rather than stale errors * left over from previous use of the buffer (e.g. failed readahead). */ bp->b_error = 0; if (bp->b_flags & XBF_WRITE) { op = REQ_OP_WRITE; /* * Run the write verifier callback function if it exists. If * this function fails it will mark the buffer with an error and * the IO should not be dispatched. 
*/ if (bp->b_ops) { bp->b_ops->verify_write(bp); if (bp->b_error) { xfs_force_shutdown(bp->b_mount, SHUTDOWN_CORRUPT_INCORE); return; } } else if (bp->b_rhash_key != XFS_BUF_DADDR_NULL) { struct xfs_mount *mp = bp->b_mount; /* * non-crc filesystems don't attach verifiers during * log recovery, so don't warn for such filesystems. */ if (xfs_has_crc(mp)) { xfs_warn(mp, "%s: no buf ops on daddr 0x%llx len %d", __func__, xfs_buf_daddr(bp), bp->b_length); xfs_hex_dump(bp->b_addr, XFS_CORRUPTION_DUMP_LEN); dump_stack(); } } } else { op = REQ_OP_READ; if (bp->b_flags & XBF_READ_AHEAD) op |= REQ_RAHEAD; } /* we only use the buffer cache for meta-data */ op |= REQ_META; /* * Walk all the vectors issuing IO on them. Set up the initial offset * into the buffer and the desired IO size before we start - * _xfs_buf_ioapply_vec() will modify them appropriately for each * subsequent call. */ offset = bp->b_offset; size = BBTOB(bp->b_length); blk_start_plug(&plug); for (i = 0; i < bp->b_map_count; i++) { xfs_buf_ioapply_map(bp, i, &offset, &size, op); if (bp->b_error) break; if (size <= 0) break; /* all done */ } blk_finish_plug(&plug); } /* * Wait for I/O completion of a sync buffer and return the I/O error code. */ static int xfs_buf_iowait( struct xfs_buf *bp) { ASSERT(!(bp->b_flags & XBF_ASYNC)); trace_xfs_buf_iowait(bp, _RET_IP_); wait_for_completion(&bp->b_iowait); trace_xfs_buf_iowait_done(bp, _RET_IP_); return bp->b_error; } /* * Buffer I/O submission path, read or write. Asynchronous submission transfers * the buffer lock ownership and the current reference to the IO. It is not * safe to reference the buffer after a call to this function unless the caller * holds an additional reference itself. */ static int __xfs_buf_submit( struct xfs_buf *bp, bool wait) { int error = 0; trace_xfs_buf_submit(bp, _RET_IP_); ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); /* * On log shutdown we stale and complete the buffer immediately. We can * be called to read the superblock before the log has been set up, so * be careful checking the log state. * * Checking the mount shutdown state here can result in the log tail * moving inappropriately on disk as the log may not yet be shut down. * i.e. failing this buffer on mount shutdown can remove it from the AIL * and move the tail of the log forwards without having written this * buffer to disk. This corrupts the log tail state in memory, and * because the log may not be shut down yet, it can then be propagated * to disk before the log is shutdown. Hence we check log shutdown * state here rather than mount state to avoid corrupting the log tail * on shutdown. */ if (bp->b_mount->m_log && xlog_is_shutdown(bp->b_mount->m_log)) { xfs_buf_ioend_fail(bp); return -EIO; } /* * Grab a reference so the buffer does not go away underneath us. For * async buffers, I/O completion drops the callers reference, which * could occur before submission returns. */ xfs_buf_hold(bp); if (bp->b_flags & XBF_WRITE) xfs_buf_wait_unpin(bp); /* clear the internal error state to avoid spurious errors */ bp->b_io_error = 0; /* * Set the count to 1 initially, this will stop an I/O completion * callout which happens before we have started all the I/O from calling * xfs_buf_ioend too early. */ atomic_set(&bp->b_io_remaining, 1); if (bp->b_flags & XBF_ASYNC) xfs_buf_ioacct_inc(bp); _xfs_buf_ioapply(bp); /* * If _xfs_buf_ioapply failed, we can get back here with only the IO * reference we took above. If we drop it to zero, run completion so * that we don't return to the caller with completion still pending. 
*/ if (atomic_dec_and_test(&bp->b_io_remaining) == 1) { if (bp->b_error || !(bp->b_flags & XBF_ASYNC)) xfs_buf_ioend(bp); else xfs_buf_ioend_async(bp); } if (wait) error = xfs_buf_iowait(bp); /* * Release the hold that keeps the buffer referenced for the entire * I/O. Note that if the buffer is async, it is not safe to reference * after this release. */ xfs_buf_rele(bp); return error; } void * xfs_buf_offset( struct xfs_buf *bp, size_t offset) { struct page *page; if (bp->b_addr) return bp->b_addr + offset; page = bp->b_pages[offset >> PAGE_SHIFT]; return page_address(page) + (offset & (PAGE_SIZE-1)); } void xfs_buf_zero( struct xfs_buf *bp, size_t boff, size_t bsize) { size_t bend; bend = boff + bsize; while (boff < bend) { struct page *page; int page_index, page_offset, csize; page_index = (boff + bp->b_offset) >> PAGE_SHIFT; page_offset = (boff + bp->b_offset) & ~PAGE_MASK; page = bp->b_pages[page_index]; csize = min_t(size_t, PAGE_SIZE - page_offset, BBTOB(bp->b_length) - boff); ASSERT((csize + page_offset) <= PAGE_SIZE); memset(page_address(page) + page_offset, 0, csize); boff += csize; } } /* * Log a message about and stale a buffer that a caller has decided is corrupt. * * This function should be called for the kinds of metadata corruption that * cannot be detect from a verifier, such as incorrect inter-block relationship * data. Do /not/ call this function from a verifier function. * * The buffer must be XBF_DONE prior to the call. Afterwards, the buffer will * be marked stale, but b_error will not be set. The caller is responsible for * releasing the buffer or fixing it. */ void __xfs_buf_mark_corrupt( struct xfs_buf *bp, xfs_failaddr_t fa) { ASSERT(bp->b_flags & XBF_DONE); xfs_buf_corruption_error(bp, fa); xfs_buf_stale(bp); } /* * Handling of buffer targets (buftargs). */ /* * Wait for any bufs with callbacks that have been submitted but have not yet * returned. These buffers will have an elevated hold count, so wait on those * while freeing all the buffers only held by the LRU. */ static enum lru_status xfs_buftarg_drain_rele( struct list_head *item, struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) { struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru); struct list_head *dispose = arg; if (atomic_read(&bp->b_hold) > 1) { /* need to wait, so skip it this pass */ trace_xfs_buf_drain_buftarg(bp, _RET_IP_); return LRU_SKIP; } if (!spin_trylock(&bp->b_lock)) return LRU_SKIP; /* * clear the LRU reference count so the buffer doesn't get * ignored in xfs_buf_rele(). */ atomic_set(&bp->b_lru_ref, 0); bp->b_state |= XFS_BSTATE_DISPOSE; list_lru_isolate_move(lru, item, dispose); spin_unlock(&bp->b_lock); return LRU_REMOVED; } /* * Wait for outstanding I/O on the buftarg to complete. */ void xfs_buftarg_wait( struct xfs_buftarg *btp) { /* * First wait on the buftarg I/O count for all in-flight buffers to be * released. This is critical as new buffers do not make the LRU until * they are released. * * Next, flush the buffer workqueue to ensure all completion processing * has finished. Just waiting on buffer locks is not sufficient for * async IO as the reference count held over IO is not released until * after the buffer lock is dropped. Hence we need to ensure here that * all reference counts have been dropped before we start walking the * LRU list. 
*/ while (percpu_counter_sum(&btp->bt_io_count)) delay(100); flush_workqueue(btp->bt_mount->m_buf_workqueue); } void xfs_buftarg_drain( struct xfs_buftarg *btp) { LIST_HEAD(dispose); int loop = 0; bool write_fail = false; xfs_buftarg_wait(btp); /* loop until there is nothing left on the lru list. */ while (list_lru_count(&btp->bt_lru)) { list_lru_walk(&btp->bt_lru, xfs_buftarg_drain_rele, &dispose, LONG_MAX); while (!list_empty(&dispose)) { struct xfs_buf *bp; bp = list_first_entry(&dispose, struct xfs_buf, b_lru); list_del_init(&bp->b_lru); if (bp->b_flags & XBF_WRITE_FAIL) { write_fail = true; xfs_buf_alert_ratelimited(bp, "XFS: Corruption Alert", "Corruption Alert: Buffer at daddr 0x%llx had permanent write failures!", (long long)xfs_buf_daddr(bp)); } xfs_buf_rele(bp); } if (loop++ != 0) delay(100); } /* * If one or more failed buffers were freed, that means dirty metadata * was thrown away. This should only ever happen after I/O completion * handling has elevated I/O error(s) to permanent failures and shuts * down the journal. */ if (write_fail) { ASSERT(xlog_is_shutdown(btp->bt_mount->m_log)); xfs_alert(btp->bt_mount, "Please run xfs_repair to determine the extent of the problem."); } } static enum lru_status xfs_buftarg_isolate( struct list_head *item, struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) { struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru); struct list_head *dispose = arg; /* * we are inverting the lru lock/bp->b_lock here, so use a trylock. * If we fail to get the lock, just skip it. */ if (!spin_trylock(&bp->b_lock)) return LRU_SKIP; /* * Decrement the b_lru_ref count unless the value is already * zero. If the value is already zero, we need to reclaim the * buffer, otherwise it gets another trip through the LRU. 
*/ if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) { spin_unlock(&bp->b_lock); return LRU_ROTATE; } bp->b_state |= XFS_BSTATE_DISPOSE; list_lru_isolate_move(lru, item, dispose); spin_unlock(&bp->b_lock); return LRU_REMOVED; } static unsigned long xfs_buftarg_shrink_scan( struct shrinker *shrink, struct shrink_control *sc) { struct xfs_buftarg *btp = shrink->private_data; LIST_HEAD(dispose); unsigned long freed; freed = list_lru_shrink_walk(&btp->bt_lru, sc, xfs_buftarg_isolate, &dispose); while (!list_empty(&dispose)) { struct xfs_buf *bp; bp = list_first_entry(&dispose, struct xfs_buf, b_lru); list_del_init(&bp->b_lru); xfs_buf_rele(bp); } return freed; } static unsigned long xfs_buftarg_shrink_count( struct shrinker *shrink, struct shrink_control *sc) { struct xfs_buftarg *btp = shrink->private_data; return list_lru_shrink_count(&btp->bt_lru, sc); } void xfs_free_buftarg( struct xfs_buftarg *btp) { shrinker_free(btp->bt_shrinker); ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0); percpu_counter_destroy(&btp->bt_io_count); list_lru_destroy(&btp->bt_lru); fs_put_dax(btp->bt_daxdev, btp->bt_mount); /* the main block device is closed by kill_block_super */ if (btp->bt_bdev != btp->bt_mount->m_super->s_bdev) bdev_release(btp->bt_bdev_handle); kmem_free(btp); } int xfs_setsize_buftarg( xfs_buftarg_t *btp, unsigned int sectorsize) { /* Set up metadata sector size info */ btp->bt_meta_sectorsize = sectorsize; btp->bt_meta_sectormask = sectorsize - 1; if (set_blocksize(btp->bt_bdev, sectorsize)) { xfs_warn(btp->bt_mount, "Cannot set_blocksize to %u on device %pg", sectorsize, btp->bt_bdev); return -EINVAL; } /* Set up device logical sector size mask */ btp->bt_logical_sectorsize = bdev_logical_block_size(btp->bt_bdev); btp->bt_logical_sectormask = bdev_logical_block_size(btp->bt_bdev) - 1; return 0; } /* * When allocating the initial buffer target we have not yet * read in the superblock, so don't know what sized sectors * are being used at this early stage. Play safe. */ STATIC int xfs_setsize_buftarg_early( xfs_buftarg_t *btp) { return xfs_setsize_buftarg(btp, bdev_logical_block_size(btp->bt_bdev)); } struct xfs_buftarg * xfs_alloc_buftarg( struct xfs_mount *mp, struct bdev_handle *bdev_handle) { xfs_buftarg_t *btp; const struct dax_holder_operations *ops = NULL; #if defined(CONFIG_FS_DAX) && defined(CONFIG_MEMORY_FAILURE) ops = &xfs_dax_holder_operations; #endif btp = kmem_zalloc(sizeof(*btp), KM_NOFS); btp->bt_mount = mp; btp->bt_bdev_handle = bdev_handle; btp->bt_dev = bdev_handle->bdev->bd_dev; btp->bt_bdev = bdev_handle->bdev; btp->bt_daxdev = fs_dax_get_by_bdev(btp->bt_bdev, &btp->bt_dax_part_off, mp, ops); /* * Buffer IO error rate limiting. Limit it to no more than 10 messages * per 30 seconds so as to not spam logs too much on repeated errors. 
*/ ratelimit_state_init(&btp->bt_ioerror_rl, 30 * HZ, DEFAULT_RATELIMIT_BURST); if (xfs_setsize_buftarg_early(btp)) goto error_free; if (list_lru_init(&btp->bt_lru)) goto error_free; if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL)) goto error_lru; btp->bt_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "xfs-buf:%s", mp->m_super->s_id); if (!btp->bt_shrinker) goto error_pcpu; btp->bt_shrinker->count_objects = xfs_buftarg_shrink_count; btp->bt_shrinker->scan_objects = xfs_buftarg_shrink_scan; btp->bt_shrinker->private_data = btp; shrinker_register(btp->bt_shrinker); return btp; error_pcpu: percpu_counter_destroy(&btp->bt_io_count); error_lru: list_lru_destroy(&btp->bt_lru); error_free: kmem_free(btp); return NULL; } static inline void xfs_buf_list_del( struct xfs_buf *bp) { list_del_init(&bp->b_list); wake_up_var(&bp->b_list); } /* * Cancel a delayed write list. * * Remove each buffer from the list, clear the delwri queue flag and drop the * associated buffer reference. */ void xfs_buf_delwri_cancel( struct list_head *list) { struct xfs_buf *bp; while (!list_empty(list)) { bp = list_first_entry(list, struct xfs_buf, b_list); xfs_buf_lock(bp); bp->b_flags &= ~_XBF_DELWRI_Q; xfs_buf_list_del(bp); xfs_buf_relse(bp); } } /* * Add a buffer to the delayed write list. * * This queues a buffer for writeout if it hasn't already been. Note that * neither this routine nor the buffer list submission functions perform * any internal synchronization. It is expected that the lists are thread-local * to the callers. * * Returns true if we queued up the buffer, or false if it already had * been on the buffer list. */ bool xfs_buf_delwri_queue( struct xfs_buf *bp, struct list_head *list) { ASSERT(xfs_buf_islocked(bp)); ASSERT(!(bp->b_flags & XBF_READ)); /* * If the buffer is already marked delwri it already is queued up * by someone else for imediate writeout. Just ignore it in that * case. */ if (bp->b_flags & _XBF_DELWRI_Q) { trace_xfs_buf_delwri_queued(bp, _RET_IP_); return false; } trace_xfs_buf_delwri_queue(bp, _RET_IP_); /* * If a buffer gets written out synchronously or marked stale while it * is on a delwri list we lazily remove it. To do this, the other party * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone. * It remains referenced and on the list. In a rare corner case it * might get readded to a delwri list after the synchronous writeout, in * which case we need just need to re-add the flag here. */ bp->b_flags |= _XBF_DELWRI_Q; if (list_empty(&bp->b_list)) { atomic_inc(&bp->b_hold); list_add_tail(&bp->b_list, list); } return true; } /* * Queue a buffer to this delwri list as part of a data integrity operation. * If the buffer is on any other delwri list, we'll wait for that to clear * so that the caller can submit the buffer for IO and wait for the result. * Callers must ensure the buffer is not already on the list. */ void xfs_buf_delwri_queue_here( struct xfs_buf *bp, struct list_head *buffer_list) { /* * We need this buffer to end up on the /caller's/ delwri list, not any * old list. This can happen if the buffer is marked stale (which * clears DELWRI_Q) after the AIL queues the buffer to its list but * before the AIL has a chance to submit the list. 
*/ while (!list_empty(&bp->b_list)) { xfs_buf_unlock(bp); wait_var_event(&bp->b_list, list_empty(&bp->b_list)); xfs_buf_lock(bp); } ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); xfs_buf_delwri_queue(bp, buffer_list); } /* * Compare function is more complex than it needs to be because * the return value is only 32 bits and we are doing comparisons * on 64 bit values */ static int xfs_buf_cmp( void *priv, const struct list_head *a, const struct list_head *b) { struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list); struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list); xfs_daddr_t diff; diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn; if (diff < 0) return -1; if (diff > 0) return 1; return 0; } /* * Submit buffers for write. If wait_list is specified, the buffers are * submitted using sync I/O and placed on the wait list such that the caller can * iowait each buffer. Otherwise async I/O is used and the buffers are released * at I/O completion time. In either case, buffers remain locked until I/O * completes and the buffer is released from the queue. */ static int xfs_buf_delwri_submit_buffers( struct list_head *buffer_list, struct list_head *wait_list) { struct xfs_buf *bp, *n; int pinned = 0; struct blk_plug plug; list_sort(NULL, buffer_list, xfs_buf_cmp); blk_start_plug(&plug); list_for_each_entry_safe(bp, n, buffer_list, b_list) { if (!wait_list) { if (!xfs_buf_trylock(bp)) continue; if (xfs_buf_ispinned(bp)) { xfs_buf_unlock(bp); pinned++; continue; } } else { xfs_buf_lock(bp); } /* * Someone else might have written the buffer synchronously or * marked it stale in the meantime. In that case only the * _XBF_DELWRI_Q flag got cleared, and we have to drop the * reference and remove it from the list here. */ if (!(bp->b_flags & _XBF_DELWRI_Q)) { xfs_buf_list_del(bp); xfs_buf_relse(bp); continue; } trace_xfs_buf_delwri_split(bp, _RET_IP_); /* * If we have a wait list, each buffer (and associated delwri * queue reference) transfers to it and is submitted * synchronously. Otherwise, drop the buffer from the delwri * queue and submit async. */ bp->b_flags &= ~_XBF_DELWRI_Q; bp->b_flags |= XBF_WRITE; if (wait_list) { bp->b_flags &= ~XBF_ASYNC; list_move_tail(&bp->b_list, wait_list); } else { bp->b_flags |= XBF_ASYNC; xfs_buf_list_del(bp); } __xfs_buf_submit(bp, false); } blk_finish_plug(&plug); return pinned; } /* * Write out a buffer list asynchronously. * * This will take the @buffer_list, write all non-locked and non-pinned buffers * out and not wait for I/O completion on any of the buffers. This interface * is only safely useable for callers that can track I/O completion by higher * level means, e.g. AIL pushing as the @buffer_list is consumed in this * function. * * Note: this function will skip buffers it would block on, and in doing so * leaves them on @buffer_list so they can be retried on a later pass. As such, * it is up to the caller to ensure that the buffer list is fully submitted or * cancelled appropriately when they are finished with the list. Failure to * cancel or resubmit the list until it is empty will result in leaked buffers * at unmount time. */ int xfs_buf_delwri_submit_nowait( struct list_head *buffer_list) { return xfs_buf_delwri_submit_buffers(buffer_list, NULL); } /* * Write out a buffer list synchronously. * * This will take the @buffer_list, write all buffers out and wait for I/O * completion on all of the buffers. @buffer_list is consumed by the function, * so callers must have some other way of tracking buffers if they require such * functionality. 
*/ int xfs_buf_delwri_submit( struct list_head *buffer_list) { LIST_HEAD (wait_list); int error = 0, error2; struct xfs_buf *bp; xfs_buf_delwri_submit_buffers(buffer_list, &wait_list); /* Wait for IO to complete. */ while (!list_empty(&wait_list)) { bp = list_first_entry(&wait_list, struct xfs_buf, b_list); xfs_buf_list_del(bp); /* * Wait on the locked buffer, check for errors and unlock and * release the delwri queue reference. */ error2 = xfs_buf_iowait(bp); xfs_buf_relse(bp); if (!error) error = error2; } return error; } /* * Push a single buffer on a delwri queue. * * The purpose of this function is to submit a single buffer of a delwri queue * and return with the buffer still on the original queue. The waiting delwri * buffer submission infrastructure guarantees transfer of the delwri queue * buffer reference to a temporary wait list. We reuse this infrastructure to * transfer the buffer back to the original queue. * * Note the buffer transitions from the queued state, to the submitted and wait * listed state and back to the queued state during this call. The buffer * locking and queue management logic between _delwri_pushbuf() and * _delwri_queue() guarantee that the buffer cannot be queued to another list * before returning. */ int xfs_buf_delwri_pushbuf( struct xfs_buf *bp, struct list_head *buffer_list) { LIST_HEAD (submit_list); int error; ASSERT(bp->b_flags & _XBF_DELWRI_Q); trace_xfs_buf_delwri_pushbuf(bp, _RET_IP_); /* * Isolate the buffer to a new local list so we can submit it for I/O * independently from the rest of the original list. */ xfs_buf_lock(bp); list_move(&bp->b_list, &submit_list); xfs_buf_unlock(bp); /* * Delwri submission clears the DELWRI_Q buffer flag and returns with * the buffer on the wait list with the original reference. Rather than * bounce the buffer from a local wait list back to the original list * after I/O completion, reuse the original list as the wait list. */ xfs_buf_delwri_submit_buffers(&submit_list, buffer_list); /* * The buffer is now locked, under I/O and wait listed on the original * delwri queue. Wait for I/O completion, restore the DELWRI_Q flag and * return with the buffer unlocked and on the original queue. */ error = xfs_buf_iowait(bp); bp->b_flags |= _XBF_DELWRI_Q; xfs_buf_unlock(bp); return error; } void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref) { /* * Set the lru reference count to 0 based on the error injection tag. * This allows userspace to disrupt buffer caching for debug/testing * purposes. */ if (XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_LRU_REF)) lru_ref = 0; atomic_set(&bp->b_lru_ref, lru_ref); } /* * Verify an on-disk magic value against the magic value specified in the * verifier structure. The verifier magic is in disk byte order so the caller is * expected to pass the value directly from disk. */ bool xfs_verify_magic( struct xfs_buf *bp, __be32 dmagic) { struct xfs_mount *mp = bp->b_mount; int idx; idx = xfs_has_crc(mp); if (WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx])) return false; return dmagic == bp->b_ops->magic[idx]; } /* * Verify an on-disk magic value against the magic value specified in the * verifier structure. The verifier magic is in disk byte order so the caller is * expected to pass the value directly from disk. */ bool xfs_verify_magic16( struct xfs_buf *bp, __be16 dmagic) { struct xfs_mount *mp = bp->b_mount; int idx; idx = xfs_has_crc(mp); if (WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx])) return false; return dmagic == bp->b_ops->magic16[idx]; } |
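/*
 * Illustrative sketch (not part of xfs_buf.c): one way a caller might use
 * the delwri queue API documented above. The function name and the two
 * buffer arguments are hypothetical, and both buffers are assumed to be
 * metadata buffers being queued for writeback (i.e. not XBF_READ). The
 * point is only to show the lock -> queue -> unlock -> submit pattern;
 * xfs_buf_delwri_queue() takes its own hold on each buffer, so the
 * caller's references are unaffected.
 */
static int example_flush_two_buffers(struct xfs_buf *bp1, struct xfs_buf *bp2)
{
	LIST_HEAD(buffer_list);		/* delwri lists are thread-local */

	/* Buffers must be locked while they are queued. */
	xfs_buf_lock(bp1);
	xfs_buf_delwri_queue(bp1, &buffer_list);
	xfs_buf_unlock(bp1);

	xfs_buf_lock(bp2);
	xfs_buf_delwri_queue(bp2, &buffer_list);
	xfs_buf_unlock(bp2);

	/*
	 * Synchronous submission: writes out everything on the list, waits
	 * for I/O completion and consumes the list in the process.
	 */
	return xfs_buf_delwri_submit(&buffer_list);
}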
// SPDX-License-Identifier: GPL-2.0-only /* * HID driver for ELO usb touchscreen 4000/4500 * * Copyright (c) 2013 Jiri Slaby * * Data parsing taken from elousb driver by Vojtech Pavlik. */ #include <linux/hid.h> #include <linux/input.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/workqueue.h> #include "hid-ids.h" #define ELO_PERIODIC_READ_INTERVAL HZ #define ELO_SMARTSET_CMD_TIMEOUT 2000 /* msec */ /* Elo SmartSet commands */ #define ELO_FLUSH_SMARTSET_RESPONSES 0x02 /* Flush all pending smartset responses */ #define ELO_SEND_SMARTSET_COMMAND 0x05 /* Send a smartset command */ #define ELO_GET_SMARTSET_RESPONSE 0x06 /* Get a smartset response */ #define ELO_DIAG 0x64 /* Diagnostics command */ #define ELO_SMARTSET_PACKET_SIZE 8 struct elo_priv { struct usb_device *usbdev; struct delayed_work work; unsigned char buffer[ELO_SMARTSET_PACKET_SIZE]; }; static struct workqueue_struct *wq; static bool use_fw_quirk = true; module_param(use_fw_quirk, bool, S_IRUGO); MODULE_PARM_DESC(use_fw_quirk, "Do periodic pokes for broken M firmwares (default = true)"); static int elo_input_configured(struct hid_device *hdev, struct hid_input *hidinput) { struct input_dev *input = hidinput->input; /* * ELO devices have one Button usage in GenDesk field, which makes * hid-input map it to BTN_LEFT; that confuses userspace, which then * considers the device to be a mouse/touchpad instead of touchscreen.
*/ clear_bit(BTN_LEFT, input->keybit); set_bit(BTN_TOUCH, input->keybit); set_bit(ABS_PRESSURE, input->absbit); input_set_abs_params(input, ABS_PRESSURE, 0, 256, 0, 0); return 0; } static void elo_process_data(struct input_dev *input, const u8 *data, int size) { int press; input_report_abs(input, ABS_X, (data[3] << 8) | data[2]); input_report_abs(input, ABS_Y, (data[5] << 8) | data[4]); press = 0; if (data[1] & 0x80) press = (data[7] << 8) | data[6]; input_report_abs(input, ABS_PRESSURE, press); if (data[1] & 0x03) { input_report_key(input, BTN_TOUCH, 1); input_sync(input); } if (data[1] & 0x04) input_report_key(input, BTN_TOUCH, 0); input_sync(input); } static int elo_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct hid_input *hidinput; if (!(hdev->claimed & HID_CLAIMED_INPUT) || list_empty(&hdev->inputs)) return 0; hidinput = list_first_entry(&hdev->inputs, struct hid_input, list); switch (report->id) { case 0: if (data[0] == 'T') { /* Mandatory ELO packet marker */ elo_process_data(hidinput->input, data, size); return 1; } break; default: /* unknown report */ /* Unknown report type; pass upstream */ hid_info(hdev, "unknown report type %d\n", report->id); break; } return 0; } static int elo_smartset_send_get(struct usb_device *dev, u8 command, void *data) { unsigned int pipe; u8 dir; if (command == ELO_SEND_SMARTSET_COMMAND) { pipe = usb_sndctrlpipe(dev, 0); dir = USB_DIR_OUT; } else if (command == ELO_GET_SMARTSET_RESPONSE) { pipe = usb_rcvctrlpipe(dev, 0); dir = USB_DIR_IN; } else return -EINVAL; return usb_control_msg(dev, pipe, command, dir | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, data, ELO_SMARTSET_PACKET_SIZE, ELO_SMARTSET_CMD_TIMEOUT); } static int elo_flush_smartset_responses(struct usb_device *dev) { return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ELO_FLUSH_SMARTSET_RESPONSES, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); } static void elo_work(struct work_struct *work) { struct elo_priv *priv = container_of(work, struct elo_priv, work.work); struct usb_device *dev = priv->usbdev; unsigned char *buffer = priv->buffer; int ret; ret = elo_flush_smartset_responses(dev); if (ret < 0) { dev_err(&dev->dev, "initial FLUSH_SMARTSET_RESPONSES failed, error %d\n", ret); goto fail; } /* send Diagnostics command */ *buffer = ELO_DIAG; ret = elo_smartset_send_get(dev, ELO_SEND_SMARTSET_COMMAND, buffer); if (ret < 0) { dev_err(&dev->dev, "send Diagnostics Command failed, error %d\n", ret); goto fail; } /* get the result */ ret = elo_smartset_send_get(dev, ELO_GET_SMARTSET_RESPONSE, buffer); if (ret < 0) { dev_err(&dev->dev, "get Diagnostics Command response failed, error %d\n", ret); goto fail; } /* read the ack */ if (*buffer != 'A') { ret = elo_smartset_send_get(dev, ELO_GET_SMARTSET_RESPONSE, buffer); if (ret < 0) { dev_err(&dev->dev, "get acknowledge response failed, error %d\n", ret); goto fail; } } fail: ret = elo_flush_smartset_responses(dev); if (ret < 0) dev_err(&dev->dev, "final FLUSH_SMARTSET_RESPONSES failed, error %d\n", ret); queue_delayed_work(wq, &priv->work, ELO_PERIODIC_READ_INTERVAL); } /* * Not all Elo devices need the periodic HID descriptor reads. * Only firmware version M needs this. 
*/ static bool elo_broken_firmware(struct usb_device *dev) { struct usb_device *hub = dev->parent; struct usb_device *child = NULL; u16 fw_lvl = le16_to_cpu(dev->descriptor.bcdDevice); u16 child_vid, child_pid; int i; if (!use_fw_quirk) return false; if (fw_lvl != 0x10d) return false; /* iterate sibling devices of the touch controller */ usb_hub_for_each_child(hub, i, child) { child_vid = le16_to_cpu(child->descriptor.idVendor); child_pid = le16_to_cpu(child->descriptor.idProduct); /* * If one of the devices below is present attached as a sibling of * the touch controller then this is a newer IBM 4820 monitor that * does not need the IBM-requested workaround if fw level is * 0x010d - aka 'M'. * No other HW can have this combination. */ if (child_vid==0x04b3) { switch (child_pid) { case 0x4676: /* 4820 21x Video */ case 0x4677: /* 4820 51x Video */ case 0x4678: /* 4820 2Lx Video */ case 0x4679: /* 4820 5Lx Video */ return false; } } } return true; } static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct elo_priv *priv; int ret; if (!hid_is_usb(hdev)) return -EINVAL; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; INIT_DELAYED_WORK(&priv->work, elo_work); priv->usbdev = interface_to_usbdev(to_usb_interface(hdev->dev.parent)); hid_set_drvdata(hdev, priv); ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); goto err_free; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (ret) { hid_err(hdev, "hw start failed\n"); goto err_free; } if (elo_broken_firmware(priv->usbdev)) { hid_info(hdev, "broken firmware found, installing workaround\n"); queue_delayed_work(wq, &priv->work, ELO_PERIODIC_READ_INTERVAL); } return 0; err_free: kfree(priv); return ret; } static void elo_remove(struct hid_device *hdev) { struct elo_priv *priv = hid_get_drvdata(hdev); hid_hw_stop(hdev); cancel_delayed_work_sync(&priv->work); kfree(priv); } static const struct hid_device_id elo_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009), }, { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030), }, { } }; MODULE_DEVICE_TABLE(hid, elo_devices); static struct hid_driver elo_driver = { .name = "elo", .id_table = elo_devices, .probe = elo_probe, .remove = elo_remove, .raw_event = elo_raw_event, .input_configured = elo_input_configured, }; static int __init elo_driver_init(void) { int ret; wq = create_singlethread_workqueue("elousb"); if (!wq) return -ENOMEM; ret = hid_register_driver(&elo_driver); if (ret) destroy_workqueue(wq); return ret; } module_init(elo_driver_init); static void __exit elo_driver_exit(void) { hid_unregister_driver(&elo_driver); destroy_workqueue(wq); } module_exit(elo_driver_exit); MODULE_AUTHOR("Jiri Slaby <jslaby@suse.cz>"); MODULE_LICENSE("GPL"); |
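/*
 * Illustrative sketch (not part of hid-elo.c): the byte layout of the raw
 * touch packet that elo_process_data() above decodes. The struct and field
 * names are made up for documentation purposes only; the driver itself
 * reads the bytes directly out of the report buffer.
 */
struct elo_touch_report_example {
	u8	marker;		/* data[0]: 'T' marks a valid touch packet */
	u8	status;		/* data[1]: bits 0-1 touch, bit 2 untouch, bit 7 pressure valid */
	__le16	x;		/* data[2..3]: absolute X, little-endian */
	__le16	y;		/* data[4..5]: absolute Y, little-endian */
	__le16	pressure;	/* data[6..7]: valid only when status bit 7 is set */
} __packed;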
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2016 Thomas Graf <tgraf@tgraf.ch> */ #include <linux/filter.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/skbuff.h> #include <linux/types.h> #include <linux/bpf.h> #include <net/lwtunnel.h> #include <net/gre.h> #include <net/ip6_route.h> #include <net/ipv6_stubs.h> struct bpf_lwt_prog { struct bpf_prog *prog; char *name; }; struct bpf_lwt { struct bpf_lwt_prog in; struct bpf_lwt_prog out; struct bpf_lwt_prog xmit; int family; }; #define MAX_PROG_NAME 256 static inline struct bpf_lwt *bpf_lwt_lwtunnel(struct lwtunnel_state *lwt) { return (struct bpf_lwt *)lwt->data; } #define NO_REDIRECT false #define CAN_REDIRECT true static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt, struct dst_entry *dst, bool can_redirect) { int ret; /* Migration disable and BH disable are needed to protect per-cpu * redirect_info between BPF prog and skb_do_redirect().
*/ migrate_disable(); local_bh_disable(); bpf_compute_data_pointers(skb); ret = bpf_prog_run_save_cb(lwt->prog, skb); switch (ret) { case BPF_OK: case BPF_LWT_REROUTE: break; case BPF_REDIRECT: if (unlikely(!can_redirect)) { pr_warn_once("Illegal redirect return code in prog %s\n", lwt->name ? : "<unknown>"); ret = BPF_OK; } else { skb_reset_mac_header(skb); skb_do_redirect(skb); ret = BPF_REDIRECT; } break; case BPF_DROP: kfree_skb(skb); ret = -EPERM; break; default: pr_warn_once("bpf-lwt: Illegal return value %u, expect packet loss\n", ret); kfree_skb(skb); ret = -EINVAL; break; } local_bh_enable(); migrate_enable(); return ret; } static int bpf_lwt_input_reroute(struct sk_buff *skb) { int err = -EINVAL; if (skb->protocol == htons(ETH_P_IP)) { struct net_device *dev = skb_dst(skb)->dev; struct iphdr *iph = ip_hdr(skb); dev_hold(dev); skb_dst_drop(skb); err = ip_route_input_noref(skb, iph->daddr, iph->saddr, iph->tos, dev); dev_put(dev); } else if (skb->protocol == htons(ETH_P_IPV6)) { skb_dst_drop(skb); err = ipv6_stub->ipv6_route_input(skb); } else { err = -EAFNOSUPPORT; } if (err) goto err; return dst_input(skb); err: kfree_skb(skb); return err; } static int bpf_input(struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); struct bpf_lwt *bpf; int ret; bpf = bpf_lwt_lwtunnel(dst->lwtstate); if (bpf->in.prog) { ret = run_lwt_bpf(skb, &bpf->in, dst, NO_REDIRECT); if (ret < 0) return ret; if (ret == BPF_LWT_REROUTE) return bpf_lwt_input_reroute(skb); } if (unlikely(!dst->lwtstate->orig_input)) { kfree_skb(skb); return -EINVAL; } return dst->lwtstate->orig_input(skb); } static int bpf_output(struct net *net, struct sock *sk, struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); struct bpf_lwt *bpf; int ret; bpf = bpf_lwt_lwtunnel(dst->lwtstate); if (bpf->out.prog) { ret = run_lwt_bpf(skb, &bpf->out, dst, NO_REDIRECT); if (ret < 0) return ret; } if (unlikely(!dst->lwtstate->orig_output)) { pr_warn_once("orig_output not set on dst for prog %s\n", bpf->out.name); kfree_skb(skb); return -EINVAL; } return dst->lwtstate->orig_output(net, sk, skb); } static int xmit_check_hhlen(struct sk_buff *skb, int hh_len) { if (skb_headroom(skb) < hh_len) { int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb)); if (pskb_expand_head(skb, nhead, 0, GFP_ATOMIC)) return -ENOMEM; } return 0; } static int bpf_lwt_xmit_reroute(struct sk_buff *skb) { struct net_device *l3mdev = l3mdev_master_dev_rcu(skb_dst(skb)->dev); int oif = l3mdev ? 
l3mdev->ifindex : 0; struct dst_entry *dst = NULL; int err = -EAFNOSUPPORT; struct sock *sk; struct net *net; bool ipv4; if (skb->protocol == htons(ETH_P_IP)) ipv4 = true; else if (skb->protocol == htons(ETH_P_IPV6)) ipv4 = false; else goto err; sk = sk_to_full_sk(skb->sk); if (sk) { if (sk->sk_bound_dev_if) oif = sk->sk_bound_dev_if; net = sock_net(sk); } else { net = dev_net(skb_dst(skb)->dev); } if (ipv4) { struct iphdr *iph = ip_hdr(skb); struct flowi4 fl4 = {}; struct rtable *rt; fl4.flowi4_oif = oif; fl4.flowi4_mark = skb->mark; fl4.flowi4_uid = sock_net_uid(net, sk); fl4.flowi4_tos = RT_TOS(iph->tos); fl4.flowi4_flags = FLOWI_FLAG_ANYSRC; fl4.flowi4_proto = iph->protocol; fl4.daddr = iph->daddr; fl4.saddr = iph->saddr; rt = ip_route_output_key(net, &fl4); if (IS_ERR(rt)) { err = PTR_ERR(rt); goto err; } dst = &rt->dst; } else { struct ipv6hdr *iph6 = ipv6_hdr(skb); struct flowi6 fl6 = {}; fl6.flowi6_oif = oif; fl6.flowi6_mark = skb->mark; fl6.flowi6_uid = sock_net_uid(net, sk); fl6.flowlabel = ip6_flowinfo(iph6); fl6.flowi6_proto = iph6->nexthdr; fl6.daddr = iph6->daddr; fl6.saddr = iph6->saddr; dst = ipv6_stub->ipv6_dst_lookup_flow(net, skb->sk, &fl6, NULL); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto err; } } if (unlikely(dst->error)) { err = dst->error; dst_release(dst); goto err; } /* Although skb header was reserved in bpf_lwt_push_ip_encap(), it * was done for the previous dst, so we are doing it here again, in * case the new dst needs much more space. The call below is a noop * if there is enough header space in skb. */ err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev)); if (unlikely(err)) goto err; skb_dst_drop(skb); skb_dst_set(skb, dst); err = dst_output(dev_net(skb_dst(skb)->dev), skb->sk, skb); if (unlikely(err)) return net_xmit_errno(err); /* ip[6]_finish_output2 understand LWTUNNEL_XMIT_DONE */ return LWTUNNEL_XMIT_DONE; err: kfree_skb(skb); return err; } static int bpf_xmit(struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); struct bpf_lwt *bpf; bpf = bpf_lwt_lwtunnel(dst->lwtstate); if (bpf->xmit.prog) { int hh_len = dst->dev->hard_header_len; __be16 proto = skb->protocol; int ret; ret = run_lwt_bpf(skb, &bpf->xmit, dst, CAN_REDIRECT); switch (ret) { case BPF_OK: /* If the header changed, e.g. via bpf_lwt_push_encap, * BPF_LWT_REROUTE below should have been used if the * protocol was also changed. */ if (skb->protocol != proto) { kfree_skb(skb); return -EINVAL; } /* If the header was expanded, headroom might be too * small for L2 header to come, expand as needed. 
*/ ret = xmit_check_hhlen(skb, hh_len); if (unlikely(ret)) return ret; return LWTUNNEL_XMIT_CONTINUE; case BPF_REDIRECT: return LWTUNNEL_XMIT_DONE; case BPF_LWT_REROUTE: return bpf_lwt_xmit_reroute(skb); default: return ret; } } return LWTUNNEL_XMIT_CONTINUE; } static void bpf_lwt_prog_destroy(struct bpf_lwt_prog *prog) { if (prog->prog) bpf_prog_put(prog->prog); kfree(prog->name); } static void bpf_destroy_state(struct lwtunnel_state *lwt) { struct bpf_lwt *bpf = bpf_lwt_lwtunnel(lwt); bpf_lwt_prog_destroy(&bpf->in); bpf_lwt_prog_destroy(&bpf->out); bpf_lwt_prog_destroy(&bpf->xmit); } static const struct nla_policy bpf_prog_policy[LWT_BPF_PROG_MAX + 1] = { [LWT_BPF_PROG_FD] = { .type = NLA_U32, }, [LWT_BPF_PROG_NAME] = { .type = NLA_NUL_STRING, .len = MAX_PROG_NAME }, }; static int bpf_parse_prog(struct nlattr *attr, struct bpf_lwt_prog *prog, enum bpf_prog_type type) { struct nlattr *tb[LWT_BPF_PROG_MAX + 1]; struct bpf_prog *p; int ret; u32 fd; ret = nla_parse_nested_deprecated(tb, LWT_BPF_PROG_MAX, attr, bpf_prog_policy, NULL); if (ret < 0) return ret; if (!tb[LWT_BPF_PROG_FD] || !tb[LWT_BPF_PROG_NAME]) return -EINVAL; prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_ATOMIC); if (!prog->name) return -ENOMEM; fd = nla_get_u32(tb[LWT_BPF_PROG_FD]); p = bpf_prog_get_type(fd, type); if (IS_ERR(p)) return PTR_ERR(p); prog->prog = p; return 0; } static const struct nla_policy bpf_nl_policy[LWT_BPF_MAX + 1] = { [LWT_BPF_IN] = { .type = NLA_NESTED, }, [LWT_BPF_OUT] = { .type = NLA_NESTED, }, [LWT_BPF_XMIT] = { .type = NLA_NESTED, }, [LWT_BPF_XMIT_HEADROOM] = { .type = NLA_U32 }, }; static int bpf_build_state(struct net *net, struct nlattr *nla, unsigned int family, const void *cfg, struct lwtunnel_state **ts, struct netlink_ext_ack *extack) { struct nlattr *tb[LWT_BPF_MAX + 1]; struct lwtunnel_state *newts; struct bpf_lwt *bpf; int ret; if (family != AF_INET && family != AF_INET6) return -EAFNOSUPPORT; ret = nla_parse_nested_deprecated(tb, LWT_BPF_MAX, nla, bpf_nl_policy, extack); if (ret < 0) return ret; if (!tb[LWT_BPF_IN] && !tb[LWT_BPF_OUT] && !tb[LWT_BPF_XMIT]) return -EINVAL; newts = lwtunnel_state_alloc(sizeof(*bpf)); if (!newts) return -ENOMEM; newts->type = LWTUNNEL_ENCAP_BPF; bpf = bpf_lwt_lwtunnel(newts); if (tb[LWT_BPF_IN]) { newts->flags |= LWTUNNEL_STATE_INPUT_REDIRECT; ret = bpf_parse_prog(tb[LWT_BPF_IN], &bpf->in, BPF_PROG_TYPE_LWT_IN); if (ret < 0) goto errout; } if (tb[LWT_BPF_OUT]) { newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT; ret = bpf_parse_prog(tb[LWT_BPF_OUT], &bpf->out, BPF_PROG_TYPE_LWT_OUT); if (ret < 0) goto errout; } if (tb[LWT_BPF_XMIT]) { newts->flags |= LWTUNNEL_STATE_XMIT_REDIRECT; ret = bpf_parse_prog(tb[LWT_BPF_XMIT], &bpf->xmit, BPF_PROG_TYPE_LWT_XMIT); if (ret < 0) goto errout; } if (tb[LWT_BPF_XMIT_HEADROOM]) { u32 headroom = nla_get_u32(tb[LWT_BPF_XMIT_HEADROOM]); if (headroom > LWT_BPF_MAX_HEADROOM) { ret = -ERANGE; goto errout; } newts->headroom = headroom; } bpf->family = family; *ts = newts; return 0; errout: bpf_destroy_state(newts); kfree(newts); return ret; } static int bpf_fill_lwt_prog(struct sk_buff *skb, int attr, struct bpf_lwt_prog *prog) { struct nlattr *nest; if (!prog->prog) return 0; nest = nla_nest_start_noflag(skb, attr); if (!nest) return -EMSGSIZE; if (prog->name && nla_put_string(skb, LWT_BPF_PROG_NAME, prog->name)) return -EMSGSIZE; return nla_nest_end(skb, nest); } static int bpf_fill_encap_info(struct sk_buff *skb, struct lwtunnel_state *lwt) { struct bpf_lwt *bpf = bpf_lwt_lwtunnel(lwt); if (bpf_fill_lwt_prog(skb, 
LWT_BPF_IN, &bpf->in) < 0 || bpf_fill_lwt_prog(skb, LWT_BPF_OUT, &bpf->out) < 0 || bpf_fill_lwt_prog(skb, LWT_BPF_XMIT, &bpf->xmit) < 0) return -EMSGSIZE; return 0; } static int bpf_encap_nlsize(struct lwtunnel_state *lwtstate) { int nest_len = nla_total_size(sizeof(struct nlattr)) + nla_total_size(MAX_PROG_NAME) + /* LWT_BPF_PROG_NAME */ 0; return nest_len + /* LWT_BPF_IN */ nest_len + /* LWT_BPF_OUT */ nest_len + /* LWT_BPF_XMIT */ 0; } static int bpf_lwt_prog_cmp(struct bpf_lwt_prog *a, struct bpf_lwt_prog *b) { /* FIXME: * The LWT state is currently rebuilt for delete requests which * results in a new bpf_prog instance. Comparing names for now. */ if (!a->name && !b->name) return 0; if (!a->name || !b->name) return 1; return strcmp(a->name, b->name); } static int bpf_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b) { struct bpf_lwt *a_bpf = bpf_lwt_lwtunnel(a); struct bpf_lwt *b_bpf = bpf_lwt_lwtunnel(b); return bpf_lwt_prog_cmp(&a_bpf->in, &b_bpf->in) || bpf_lwt_prog_cmp(&a_bpf->out, &b_bpf->out) || bpf_lwt_prog_cmp(&a_bpf->xmit, &b_bpf->xmit); } static const struct lwtunnel_encap_ops bpf_encap_ops = { .build_state = bpf_build_state, .destroy_state = bpf_destroy_state, .input = bpf_input, .output = bpf_output, .xmit = bpf_xmit, .fill_encap = bpf_fill_encap_info, .get_encap_size = bpf_encap_nlsize, .cmp_encap = bpf_encap_cmp, .owner = THIS_MODULE, }; static int handle_gso_type(struct sk_buff *skb, unsigned int gso_type, int encap_len) { struct skb_shared_info *shinfo = skb_shinfo(skb); gso_type |= SKB_GSO_DODGY; shinfo->gso_type |= gso_type; skb_decrease_gso_size(shinfo, encap_len); shinfo->gso_segs = 0; return 0; } static int handle_gso_encap(struct sk_buff *skb, bool ipv4, int encap_len) { int next_hdr_offset; void *next_hdr; __u8 protocol; /* SCTP and UDP_L4 gso need more nuanced handling than what * handle_gso_type() does above: skb_decrease_gso_size() is not enough. * So at the moment only TCP GSO packets are let through. 
*/ if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) return -ENOTSUPP; if (ipv4) { protocol = ip_hdr(skb)->protocol; next_hdr_offset = sizeof(struct iphdr); next_hdr = skb_network_header(skb) + next_hdr_offset; } else { protocol = ipv6_hdr(skb)->nexthdr; next_hdr_offset = sizeof(struct ipv6hdr); next_hdr = skb_network_header(skb) + next_hdr_offset; } switch (protocol) { case IPPROTO_GRE: next_hdr_offset += sizeof(struct gre_base_hdr); if (next_hdr_offset > encap_len) return -EINVAL; if (((struct gre_base_hdr *)next_hdr)->flags & GRE_CSUM) return handle_gso_type(skb, SKB_GSO_GRE_CSUM, encap_len); return handle_gso_type(skb, SKB_GSO_GRE, encap_len); case IPPROTO_UDP: next_hdr_offset += sizeof(struct udphdr); if (next_hdr_offset > encap_len) return -EINVAL; if (((struct udphdr *)next_hdr)->check) return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL_CSUM, encap_len); return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL, encap_len); case IPPROTO_IP: case IPPROTO_IPV6: if (ipv4) return handle_gso_type(skb, SKB_GSO_IPXIP4, encap_len); else return handle_gso_type(skb, SKB_GSO_IPXIP6, encap_len); default: return -EPROTONOSUPPORT; } } int bpf_lwt_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len, bool ingress) { struct iphdr *iph; bool ipv4; int err; if (unlikely(len < sizeof(struct iphdr) || len > LWT_BPF_MAX_HEADROOM)) return -EINVAL; /* validate protocol and length */ iph = (struct iphdr *)hdr; if (iph->version == 4) { ipv4 = true; if (unlikely(len < iph->ihl * 4)) return -EINVAL; } else if (iph->version == 6) { ipv4 = false; if (unlikely(len < sizeof(struct ipv6hdr))) return -EINVAL; } else { return -EINVAL; } if (ingress) err = skb_cow_head(skb, len + skb->mac_len); else err = skb_cow_head(skb, len + LL_RESERVED_SPACE(skb_dst(skb)->dev)); if (unlikely(err)) return err; /* push the encap headers and fix pointers */ skb_reset_inner_headers(skb); skb_reset_inner_mac_header(skb); /* mac header is not yet set */ skb_set_inner_protocol(skb, skb->protocol); skb->encapsulation = 1; skb_push(skb, len); if (ingress) skb_postpush_rcsum(skb, iph, len); skb_reset_network_header(skb); memcpy(skb_network_header(skb), hdr, len); bpf_compute_data_pointers(skb); skb_clear_hash(skb); if (ipv4) { skb->protocol = htons(ETH_P_IP); iph = ip_hdr(skb); if (!iph->check) iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); } else { skb->protocol = htons(ETH_P_IPV6); } if (skb_is_gso(skb)) return handle_gso_encap(skb, ipv4, len); return 0; } static int __init bpf_lwt_init(void) { return lwtunnel_encap_add_ops(&bpf_encap_ops, LWTUNNEL_ENCAP_BPF); } subsys_initcall(bpf_lwt_init) |
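/*
 * Illustrative sketch, not part of the original file: a minimal
 * BPF_PROG_TYPE_LWT_XMIT program for the xmit hook above.  It pushes an
 * outer IPv4 header with the bpf_lwt_push_encap() helper and returns
 * BPF_LWT_REROUTE, the case bpf_lwt_xmit_reroute() handles by re-doing the
 * route lookup for the new outer header.  The addresses and the program
 * name are placeholders, not taken from the source.
 */
#include <linux/bpf.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("lwt_xmit")
int example_encap_ipip(struct __sk_buff *skb)
{
	struct iphdr outer = {
		.version  = 4,
		.ihl	  = 5,
		.ttl	  = 64,
		.protocol = IPPROTO_IPIP,
		.saddr	  = bpf_htonl(0x0a000001),	/* 10.0.0.1, placeholder */
		.daddr	  = bpf_htonl(0x0a000002),	/* 10.0.0.2, placeholder */
	};

	outer.tot_len = bpf_htons(skb->len + sizeof(outer));
	/* outer.check stays 0, so bpf_lwt_push_ip_encap() computes it */

	/* Prepend the outer header; bpf_lwt_push_ip_encap() above reserves
	 * the headroom and fixes up GSO state for TCP packets.
	 */
	if (bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_IP, &outer, sizeof(outer)))
		return BPF_DROP;

	/* The destination changed, so ask the stack to re-route the packet
	 * instead of continuing with the old dst.
	 */
	return BPF_LWT_REROUTE;
}

char _license[] SEC("license") = "GPL";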
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#define NO_INTERLEAVE_INDEX (-1UL)	/* use task il_prev for interleaving */

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_lock.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode;	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	nodemask_t nodes;	/* interleave/bind/prefer */
	int home_node;		/* Home node to use for MPOL_BIND and MPOL_PREFERRED_MANY */

	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
*/ static inline int mpol_needs_cond_ref(struct mempolicy *pol) { return (pol && (pol->flags & MPOL_F_SHARED)); } static inline void mpol_cond_put(struct mempolicy *pol) { if (mpol_needs_cond_ref(pol)) __mpol_put(pol); } extern struct mempolicy *__mpol_dup(struct mempolicy *pol); static inline struct mempolicy *mpol_dup(struct mempolicy *pol) { if (pol) pol = __mpol_dup(pol); return pol; } static inline void mpol_get(struct mempolicy *pol) { if (pol) atomic_inc(&pol->refcnt); } extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b); static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b) { if (a == b) return true; return __mpol_equal(a, b); } /* * Tree of shared policies for a shared memory region. */ struct shared_policy { struct rb_root root; rwlock_t lock; }; struct sp_node { struct rb_node nd; pgoff_t start, end; struct mempolicy *policy; }; int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst); void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol); int mpol_set_shared_policy(struct shared_policy *sp, struct vm_area_struct *vma, struct mempolicy *mpol); void mpol_free_shared_policy(struct shared_policy *sp); struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp, pgoff_t idx); struct mempolicy *get_task_policy(struct task_struct *p); struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, unsigned long addr, pgoff_t *ilx); struct mempolicy *get_vma_policy(struct vm_area_struct *vma, unsigned long addr, int order, pgoff_t *ilx); bool vma_policy_mof(struct vm_area_struct *vma); extern void numa_default_policy(void); extern void numa_policy_init(void); extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new); extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new); extern int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol, nodemask_t **nodemask); extern bool init_nodemask_of_mempolicy(nodemask_t *mask); extern bool mempolicy_in_oom_domain(struct task_struct *tsk, const nodemask_t *mask); extern unsigned int mempolicy_slab_node(void); extern enum zone_type policy_zone; static inline void check_highest_zone(enum zone_type k) { if (k > policy_zone && k != ZONE_MOVABLE) policy_zone = k; } int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, const nodemask_t *to, int flags); #ifdef CONFIG_TMPFS extern int mpol_parse_str(char *str, struct mempolicy **mpol); #endif extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol); /* Check if a vma is migratable */ extern bool vma_migratable(struct vm_area_struct *vma); int mpol_misplaced(struct folio *, struct vm_area_struct *, unsigned long); extern void mpol_put_task_policy(struct task_struct *); static inline bool mpol_is_preferred_many(struct mempolicy *pol) { return (pol->mode == MPOL_PREFERRED_MANY); } extern bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone); #else struct mempolicy {}; static inline struct mempolicy *get_task_policy(struct task_struct *p) { return NULL; } static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b) { return true; } static inline void mpol_put(struct mempolicy *pol) { } static inline void mpol_cond_put(struct mempolicy *pol) { } static inline void mpol_get(struct mempolicy *pol) { } struct shared_policy {}; static inline void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) { } static inline void mpol_free_shared_policy(struct shared_policy *sp) { } static 
inline struct mempolicy * mpol_shared_policy_lookup(struct shared_policy *sp, pgoff_t idx) { return NULL; } static inline struct mempolicy *get_vma_policy(struct vm_area_struct *vma, unsigned long addr, int order, pgoff_t *ilx) { *ilx = 0; return NULL; } static inline int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst) { return 0; } static inline void numa_policy_init(void) { } static inline void numa_default_policy(void) { } static inline void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new) { } static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) { } static inline int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol, nodemask_t **nodemask) { *mpol = NULL; *nodemask = NULL; return 0; } static inline bool init_nodemask_of_mempolicy(nodemask_t *m) { return false; } static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, const nodemask_t *to, int flags) { return 0; } static inline void check_highest_zone(int k) { } #ifdef CONFIG_TMPFS static inline int mpol_parse_str(char *str, struct mempolicy **mpol) { return 1; /* error */ } #endif static inline int mpol_misplaced(struct folio *folio, struct vm_area_struct *vma, unsigned long address) { return -1; /* no node preference */ } static inline void mpol_put_task_policy(struct task_struct *task) { } static inline bool mpol_is_preferred_many(struct mempolicy *pol) { return false; } #endif /* CONFIG_NUMA */ #endif |
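/*
 * Illustrative sketch, not part of the original header, assuming a
 * CONFIG_NUMA build: the conditional reference pattern described above.
 * Only policies marked MPOL_F_SHARED take a reference in the lookup, so
 * callers pair get_vma_policy() with mpol_cond_put() rather than an
 * unconditional mpol_put().  The helper below is hypothetical and only
 * shows the calling convention.
 */
#include <linux/mempolicy.h>
#include <linux/numa.h>

static inline int example_preferred_node(struct vm_area_struct *vma,
					 unsigned long addr)
{
	struct mempolicy *pol;
	pgoff_t ilx;
	int nid = NUMA_NO_NODE;

	pol = get_vma_policy(vma, addr, 0, &ilx);	/* may take a reference */
	if (pol && mpol_is_preferred_many(pol))
		nid = first_node(pol->nodes);		/* pick one preferred node */
	mpol_cond_put(pol);	/* drops the ref only for MPOL_F_SHARED policies */
	return nid;
}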
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_INCLUDE_PATH ../../drivers/dma-buf
#define TRACE_SYSTEM sync_trace

#if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SYNC_H

#include "sync_debug.h"
#include <linux/tracepoint.h>

TRACE_EVENT(sync_timeline,
	TP_PROTO(struct sync_timeline *timeline),

	TP_ARGS(timeline),

	TP_STRUCT__entry(
			__string(name, timeline->name)
			__field(u32, value)
	),

	TP_fast_assign(
			__assign_str(name, timeline->name);
			__entry->value = timeline->value;
	),

	TP_printk("name=%s value=%d", __get_str(name), __entry->value)
);

#endif /* if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ) */

/* This part must be outside protection */
#include <trace/define_trace.h>
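/*
 * Illustrative sketch, not part of the original header: TRACE_EVENT()
 * above generates a trace_sync_timeline() inline that a timeline driver
 * calls after updating the timeline.  The tracepoints are instantiated
 * once by defining CREATE_TRACE_POINTS before including this header; the
 * increment helper below is hypothetical.
 */
#define CREATE_TRACE_POINTS
#include "sync_trace.h"

static void example_timeline_signal(struct sync_timeline *obj, unsigned int inc)
{
	obj->value += inc;		/* advance the timeline counter */
	trace_sync_timeline(obj);	/* emits "name=... value=..." */
}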
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Squashfs - a compressed read only filesystem for Linux
 *
 * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
 * Phillip Lougher <phillip@squashfs.org.uk>
 *
 * decompressor.c
 */

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "decompressor.h"
#include "squashfs.h"
#include "page_actor.h"

/*
 * This file (and decompressor.h) implements a decompressor framework for
 * Squashfs, allowing multiple decompressors to be easily supported
 */

static const struct squashfs_decompressor squashfs_lzma_unsupported_comp_ops = {
	NULL, NULL, NULL, NULL, LZMA_COMPRESSION, "lzma", 0
};

#ifndef CONFIG_SQUASHFS_LZ4
static const struct squashfs_decompressor squashfs_lz4_comp_ops = {
	NULL, NULL, NULL, NULL, LZ4_COMPRESSION, "lz4", 0
};
#endif

#ifndef CONFIG_SQUASHFS_LZO
static const struct squashfs_decompressor squashfs_lzo_comp_ops = {
	NULL, NULL, NULL, NULL, LZO_COMPRESSION, "lzo", 0
};
#endif

#ifndef CONFIG_SQUASHFS_XZ
static const struct squashfs_decompressor squashfs_xz_comp_ops = {
	NULL, NULL, NULL, NULL, XZ_COMPRESSION, "xz", 0
};
#endif

#ifndef CONFIG_SQUASHFS_ZLIB
static const struct squashfs_decompressor squashfs_zlib_comp_ops = {
	NULL, NULL, NULL, NULL, ZLIB_COMPRESSION, "zlib", 0
};
#endif

#ifndef CONFIG_SQUASHFS_ZSTD
static const struct squashfs_decompressor squashfs_zstd_comp_ops = {
	NULL, NULL, NULL, NULL, ZSTD_COMPRESSION, "zstd", 0
};
#endif

static const struct squashfs_decompressor squashfs_unknown_comp_ops = {
	NULL, NULL, NULL, NULL, 0, "unknown", 0
};

static const struct squashfs_decompressor *decompressor[] = {
	&squashfs_zlib_comp_ops,
	&squashfs_lz4_comp_ops,
	&squashfs_lzo_comp_ops,
	&squashfs_xz_comp_ops,
	&squashfs_lzma_unsupported_comp_ops,
	&squashfs_zstd_comp_ops,
	&squashfs_unknown_comp_ops
};


const struct squashfs_decompressor *squashfs_lookup_decompressor(int id)
{
	int i;

	for (i = 0; decompressor[i]->id; i++)
		if (id == decompressor[i]->id)
			break;

	return decompressor[i];
}


static void *get_comp_opts(struct super_block *sb, unsigned short flags)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	void *buffer = NULL, *comp_opts;
	struct squashfs_page_actor *actor = NULL;
	int length = 0;

	/*
	 * Read decompressor specific options from file system if present
	 */
	if (SQUASHFS_COMP_OPTS(flags)) {
		buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (buffer == NULL) {
			comp_opts = ERR_PTR(-ENOMEM);
			goto out;
		}

		actor = squashfs_page_actor_init(&buffer, 1, 0);
		if (actor == NULL) {
			comp_opts = ERR_PTR(-ENOMEM);
			goto out;
		}

		length = squashfs_read_data(sb,
			sizeof(struct squashfs_super_block), 0, NULL, actor);

		if (length < 0) {
			comp_opts = ERR_PTR(length);
			goto out;
		}
	}

	comp_opts = squashfs_comp_opts(msblk, buffer, length);

out:
	kfree(actor);
	kfree(buffer);
	return comp_opts;
}


void *squashfs_decompressor_setup(struct super_block *sb, unsigned short flags)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	void *stream, *comp_opts = get_comp_opts(sb, flags);

	if (IS_ERR(comp_opts))
		return comp_opts;

	stream
		= msblk->thread_ops->create(msblk, comp_opts);
	if (IS_ERR(stream))
		kfree(comp_opts);

	return stream;
}
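/*
 * Illustrative sketch, not part of the original file: behaviour of the
 * sentinel-terminated decompressor[] table above.  A known id returns its
 * entry; any other id falls through to the final "unknown" entry, whose
 * id of 0 also terminates the lookup loop.  The helper below is
 * hypothetical and only uses the ->id and ->name members shown above.
 */
static bool example_compression_known(int id)
{
	const struct squashfs_decompressor *ops = squashfs_lookup_decompressor(id);

	if (ops->id == 0) {	/* landed on the "unknown" sentinel */
		pr_err("squashfs: unknown compression id %d\n", id);
		return false;
	}
	pr_info("squashfs: image uses %s compression\n", ops->name);
	return true;
}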
/*
 * linux/fs/nls/mac-centeuro.c
 *
 * Charset maccenteuro translation tables.
 * Generated automatically from the Unicode and charset
 * tables from the Unicode Organization (www.unicode.org).
 * The Unicode to charset table has only exact mappings.
 */

/*
 * COPYRIGHT AND PERMISSION NOTICE
 *
 * Copyright 1991-2012 Unicode, Inc.  All rights reserved.  Distributed under
 * the Terms of Use in http://www.unicode.org/copyright.html.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of the Unicode data files and any associated documentation (the "Data
 * Files") or Unicode software and any associated documentation (the
 * "Software") to deal in the Data Files or Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, and/or sell copies of the Data Files or Software, and
 * to permit persons to whom the Data Files or Software are furnished to do
 * so, provided that (a) the above copyright notice(s) and this permission
 * notice appear with all copies of the Data Files or Software, (b) both the
 * above copyright notice(s) and this permission notice appear in associated
 * documentation, and (c) there is clear notice in each modified Data File or
 * in the Software as well as in the documentation associated with the Data
 * File(s) or Software that the data or software has been modified.
* * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF * THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THE DATA FILES OR SOFTWARE. * * Except as contained in this notice, the name of a copyright holder shall * not be used in advertising or otherwise to promote the sale, use or other * dealings in these Data Files or Software without prior written * authorization of the copyright holder. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00 */ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10 */ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20 */ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30 */ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40 */ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50 */ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60 */ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70 */ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80 */ 0x00c4, 0x0100, 0x0101, 0x00c9, 0x0104, 0x00d6, 0x00dc, 0x00e1, 0x0105, 0x010c, 0x00e4, 0x010d, 0x0106, 0x0107, 0x00e9, 0x0179, /* 0x90 */ 0x017a, 0x010e, 0x00ed, 0x010f, 0x0112, 0x0113, 0x0116, 0x00f3, 0x0117, 0x00f4, 0x00f6, 0x00f5, 0x00fa, 0x011a, 0x011b, 0x00fc, /* 0xa0 */ 0x2020, 0x00b0, 0x0118, 0x00a3, 0x00a7, 0x2022, 0x00b6, 0x00df, 0x00ae, 0x00a9, 0x2122, 0x0119, 0x00a8, 0x2260, 0x0123, 0x012e, /* 0xb0 */ 0x012f, 0x012a, 0x2264, 0x2265, 0x012b, 0x0136, 0x2202, 0x2211, 0x0142, 0x013b, 0x013c, 0x013d, 0x013e, 0x0139, 0x013a, 0x0145, /* 0xc0 */ 0x0146, 0x0143, 0x00ac, 0x221a, 0x0144, 0x0147, 0x2206, 0x00ab, 0x00bb, 0x2026, 0x00a0, 0x0148, 0x0150, 0x00d5, 0x0151, 0x014c, /* 0xd0 */ 0x2013, 0x2014, 0x201c, 0x201d, 0x2018, 0x2019, 0x00f7, 0x25ca, 0x014d, 0x0154, 0x0155, 0x0158, 0x2039, 0x203a, 0x0159, 0x0156, /* 0xe0 */ 0x0157, 0x0160, 0x201a, 0x201e, 0x0161, 0x015a, 0x015b, 0x00c1, 0x0164, 0x0165, 0x00cd, 0x017d, 0x017e, 0x016a, 0x00d3, 0x00d4, /* 0xf0 */ 0x016b, 0x016e, 0x00da, 0x016f, 0x0170, 0x0171, 0x0172, 0x0173, 0x00dd, 0x00fd, 0x0137, 0x017b, 0x0141, 0x017c, 0x0122, 0x02c7, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 
0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xca, 0x00, 0x00, 0xa3, 0x00, 0x00, 0x00, 0xa4, /* 0xa0-0xa7 */ 0xac, 0xa9, 0x00, 0xc7, 0xc2, 0x00, 0xa8, 0x00, /* 0xa8-0xaf */ 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa6, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0xc8, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0xe7, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x83, 0x00, 0x00, 0x00, 0xea, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0xf2, 0x00, 0x86, 0xf8, 0x00, 0xa7, /* 0xd8-0xdf */ 0x00, 0x87, 0x00, 0x00, 0x8a, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x8e, 0x00, 0x00, 0x00, 0x92, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */ 0x00, 0x00, 0x9c, 0x00, 0x9f, 0xf9, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page01[256] = { 0x81, 0x82, 0x00, 0x00, 0x84, 0x88, 0x8c, 0x8d, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x89, 0x8b, 0x91, 0x93, /* 0x08-0x0f */ 0x00, 0x00, 0x94, 0x95, 0x00, 0x00, 0x96, 0x98, /* 0x10-0x17 */ 0xa2, 0xab, 0x9d, 0x9e, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0xfe, 0xae, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0xb1, 0xb4, 0x00, 0x00, 0xaf, 0xb0, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb5, 0xfa, /* 0x30-0x37 */ 0x00, 0xbd, 0xbe, 0xb9, 0xba, 0xbb, 0xbc, 0x00, /* 0x38-0x3f */ 0x00, 0xfc, 0xb8, 0xc1, 0xc4, 0xbf, 0xc0, 0xc5, /* 0x40-0x47 */ 0xcb, 0x00, 0x00, 0x00, 0xcf, 0xd8, 0x00, 0x00, /* 0x48-0x4f */ 0xcc, 0xce, 0x00, 0x00, 0xd9, 0xda, 0xdf, 0xe0, /* 0x50-0x57 */ 0xdb, 0xde, 0xe5, 0xe6, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0xe1, 0xe4, 0x00, 0x00, 0xe8, 0xe9, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0xed, 0xf0, 0x00, 0x00, 0xf1, 0xf3, /* 0x68-0x6f */ 0xf4, 0xf5, 0xf6, 0xf7, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x8f, 0x90, 0xfb, 0xfd, 0xeb, 0xec, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page02[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page20[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */ 0xa0, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page21[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page22[256] = { 0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page25[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char *const page_uni2charset[256] = { page00, page01, page02, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page20, page21, page22, NULL, NULL, page25, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, /* 0xd0-0xd7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */ }; static int uni2char(wchar_t uni, unsigned char *out, int boundlen) { const unsigned char *uni2charset; unsigned char cl = uni & 0x00ff; unsigned char ch = (uni & 0xff00) >> 8; if (boundlen <= 0) return -ENAMETOOLONG; uni2charset = page_uni2charset[ch]; if (uni2charset && uni2charset[cl]) out[0] = uni2charset[cl]; else return -EINVAL; return 1; } static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni) { *uni = charset2uni[*rawstring]; if (*uni == 0x0000) return -EINVAL; return 1; } static struct nls_table table = { .charset = "maccenteuro", .uni2char = uni2char, .char2uni = char2uni, .charset2lower = charset2lower, .charset2upper = charset2upper, }; static int __init init_nls_maccenteuro(void) { return register_nls(&table); } static void __exit exit_nls_maccenteuro(void) { unregister_nls(&table); } module_init(init_nls_maccenteuro) module_exit(exit_nls_maccenteuro) MODULE_LICENSE("Dual BSD/GPL"); |
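/*
 * Illustrative sketch, not part of the original file: how a filesystem
 * uses the uni2char()/char2uni() pair registered above.  According to the
 * page01[] and charset2uni[] tables, U+0104 round-trips through MacCE
 * byte 0x84.  The nls table would normally be obtained via
 * load_nls("maccenteuro"); the helper below is hypothetical.
 */
static void example_roundtrip(struct nls_table *nls)
{
	unsigned char buf[NLS_MAX_CHARSET_SIZE];
	wchar_t uni = 0x0104;
	int len;

	len = nls->uni2char(uni, buf, sizeof(buf));	/* -> 1 byte: 0x84 */
	if (len > 0)
		nls->char2uni(buf, len, &uni);		/* -> back to 0x0104 */
}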
// SPDX-License-Identifier: GPL-2.0
/* Shared Memory Communications Direct over ISM devices (SMC-D)
 *
 * Functions for ISM device.
 *
 * Copyright IBM Corp. 2018
 */

#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/page.h>

#include "smc.h"
#include "smc_core.h"
#include "smc_ism.h"
#include "smc_pnet.h"
#include "smc_netlink.h"
#include "linux/ism.h"

struct smcd_dev_list smcd_dev_list = {
	.list = LIST_HEAD_INIT(smcd_dev_list.list),
	.mutex = __MUTEX_INITIALIZER(smcd_dev_list.mutex)
};

static bool smc_ism_v2_capable;
static u8 smc_ism_v2_system_eid[SMC_MAX_EID_LEN];

#if IS_ENABLED(CONFIG_ISM)
static void smcd_register_dev(struct ism_dev *ism);
static void smcd_unregister_dev(struct ism_dev *ism);
static void smcd_handle_event(struct ism_dev *ism, struct ism_event *event);
static void smcd_handle_irq(struct ism_dev *ism, unsigned int dmbno,
			    u16 dmbemask);

static struct ism_client smc_ism_client = {
	.name = "SMC-D",
	.add = smcd_register_dev,
	.remove = smcd_unregister_dev,
	.handle_event = smcd_handle_event,
	.handle_irq = smcd_handle_irq,
};
#endif

/* Test if an ISM communication is possible - same CPC */
int smc_ism_cantalk(u64 peer_gid, unsigned short vlan_id, struct smcd_dev *smcd)
{
	return smcd->ops->query_remote_gid(smcd, peer_gid, vlan_id ?
1 : 0, vlan_id); } void smc_ism_get_system_eid(u8 **eid) { if (!smc_ism_v2_capable) *eid = NULL; else *eid = smc_ism_v2_system_eid; } u16 smc_ism_get_chid(struct smcd_dev *smcd) { return smcd->ops->get_chid(smcd); } /* HW supports ISM V2 and thus System EID is defined */ bool smc_ism_is_v2_capable(void) { return smc_ism_v2_capable; } /* Set a connection using this DMBE. */ void smc_ism_set_conn(struct smc_connection *conn) { unsigned long flags; spin_lock_irqsave(&conn->lgr->smcd->lock, flags); conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = conn; spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags); } /* Unset a connection using this DMBE. */ void smc_ism_unset_conn(struct smc_connection *conn) { unsigned long flags; if (!conn->rmb_desc) return; spin_lock_irqsave(&conn->lgr->smcd->lock, flags); conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = NULL; spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags); } /* Register a VLAN identifier with the ISM device. Use a reference count * and add a VLAN identifier only when the first DMB using this VLAN is * registered. */ int smc_ism_get_vlan(struct smcd_dev *smcd, unsigned short vlanid) { struct smc_ism_vlanid *new_vlan, *vlan; unsigned long flags; int rc = 0; if (!vlanid) /* No valid vlan id */ return -EINVAL; /* create new vlan entry, in case we need it */ new_vlan = kzalloc(sizeof(*new_vlan), GFP_KERNEL); if (!new_vlan) return -ENOMEM; new_vlan->vlanid = vlanid; refcount_set(&new_vlan->refcnt, 1); /* if there is an existing entry, increase count and return */ spin_lock_irqsave(&smcd->lock, flags); list_for_each_entry(vlan, &smcd->vlan, list) { if (vlan->vlanid == vlanid) { refcount_inc(&vlan->refcnt); kfree(new_vlan); goto out; } } /* no existing entry found. * add new entry to device; might fail, e.g., if HW limit reached */ if (smcd->ops->add_vlan_id(smcd, vlanid)) { kfree(new_vlan); rc = -EIO; goto out; } list_add_tail(&new_vlan->list, &smcd->vlan); out: spin_unlock_irqrestore(&smcd->lock, flags); return rc; } /* Unregister a VLAN identifier with the ISM device. Use a reference count * and remove a VLAN identifier only when the last DMB using this VLAN is * unregistered. 
*/ int smc_ism_put_vlan(struct smcd_dev *smcd, unsigned short vlanid) { struct smc_ism_vlanid *vlan; unsigned long flags; bool found = false; int rc = 0; if (!vlanid) /* No valid vlan id */ return -EINVAL; spin_lock_irqsave(&smcd->lock, flags); list_for_each_entry(vlan, &smcd->vlan, list) { if (vlan->vlanid == vlanid) { if (!refcount_dec_and_test(&vlan->refcnt)) goto out; found = true; break; } } if (!found) { rc = -ENOENT; goto out; /* VLAN id not in table */ } /* Found and the last reference just gone */ if (smcd->ops->del_vlan_id(smcd, vlanid)) rc = -EIO; list_del(&vlan->list); kfree(vlan); out: spin_unlock_irqrestore(&smcd->lock, flags); return rc; } int smc_ism_unregister_dmb(struct smcd_dev *smcd, struct smc_buf_desc *dmb_desc) { struct smcd_dmb dmb; int rc = 0; if (!dmb_desc->dma_addr) return rc; memset(&dmb, 0, sizeof(dmb)); dmb.dmb_tok = dmb_desc->token; dmb.sba_idx = dmb_desc->sba_idx; dmb.cpu_addr = dmb_desc->cpu_addr; dmb.dma_addr = dmb_desc->dma_addr; dmb.dmb_len = dmb_desc->len; rc = smcd->ops->unregister_dmb(smcd, &dmb); if (!rc || rc == ISM_ERROR) { dmb_desc->cpu_addr = NULL; dmb_desc->dma_addr = 0; } return rc; } int smc_ism_register_dmb(struct smc_link_group *lgr, int dmb_len, struct smc_buf_desc *dmb_desc) { #if IS_ENABLED(CONFIG_ISM) struct smcd_dmb dmb; int rc; memset(&dmb, 0, sizeof(dmb)); dmb.dmb_len = dmb_len; dmb.sba_idx = dmb_desc->sba_idx; dmb.vlan_id = lgr->vlan_id; dmb.rgid = lgr->peer_gid; rc = lgr->smcd->ops->register_dmb(lgr->smcd, &dmb, &smc_ism_client); if (!rc) { dmb_desc->sba_idx = dmb.sba_idx; dmb_desc->token = dmb.dmb_tok; dmb_desc->cpu_addr = dmb.cpu_addr; dmb_desc->dma_addr = dmb.dma_addr; dmb_desc->len = dmb.dmb_len; } return rc; #else return 0; #endif } static int smc_nl_handle_smcd_dev(struct smcd_dev *smcd, struct sk_buff *skb, struct netlink_callback *cb) { char smc_pnet[SMC_MAX_PNETID_LEN + 1]; struct smc_pci_dev smc_pci_dev; struct nlattr *port_attrs; struct nlattr *attrs; struct ism_dev *ism; int use_cnt = 0; void *nlh; ism = smcd->priv; nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &smc_gen_nl_family, NLM_F_MULTI, SMC_NETLINK_GET_DEV_SMCD); if (!nlh) goto errmsg; attrs = nla_nest_start(skb, SMC_GEN_DEV_SMCD); if (!attrs) goto errout; use_cnt = atomic_read(&smcd->lgr_cnt); if (nla_put_u32(skb, SMC_NLA_DEV_USE_CNT, use_cnt)) goto errattr; if (nla_put_u8(skb, SMC_NLA_DEV_IS_CRIT, use_cnt > 0)) goto errattr; memset(&smc_pci_dev, 0, sizeof(smc_pci_dev)); smc_set_pci_values(to_pci_dev(ism->dev.parent), &smc_pci_dev); if (nla_put_u32(skb, SMC_NLA_DEV_PCI_FID, smc_pci_dev.pci_fid)) goto errattr; if (nla_put_u16(skb, SMC_NLA_DEV_PCI_CHID, smc_pci_dev.pci_pchid)) goto errattr; if (nla_put_u16(skb, SMC_NLA_DEV_PCI_VENDOR, smc_pci_dev.pci_vendor)) goto errattr; if (nla_put_u16(skb, SMC_NLA_DEV_PCI_DEVICE, smc_pci_dev.pci_device)) goto errattr; if (nla_put_string(skb, SMC_NLA_DEV_PCI_ID, smc_pci_dev.pci_id)) goto errattr; port_attrs = nla_nest_start(skb, SMC_NLA_DEV_PORT); if (!port_attrs) goto errattr; if (nla_put_u8(skb, SMC_NLA_DEV_PORT_PNET_USR, smcd->pnetid_by_user)) goto errportattr; memcpy(smc_pnet, smcd->pnetid, SMC_MAX_PNETID_LEN); smc_pnet[SMC_MAX_PNETID_LEN] = 0; if (nla_put_string(skb, SMC_NLA_DEV_PORT_PNETID, smc_pnet)) goto errportattr; nla_nest_end(skb, port_attrs); nla_nest_end(skb, attrs); genlmsg_end(skb, nlh); return 0; errportattr: nla_nest_cancel(skb, port_attrs); errattr: nla_nest_cancel(skb, attrs); errout: nlmsg_cancel(skb, nlh); errmsg: return -EMSGSIZE; } static void smc_nl_prep_smcd_dev(struct 
smcd_dev_list *dev_list, struct sk_buff *skb, struct netlink_callback *cb) { struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb); int snum = cb_ctx->pos[0]; struct smcd_dev *smcd; int num = 0; mutex_lock(&dev_list->mutex); list_for_each_entry(smcd, &dev_list->list, list) { if (num < snum) goto next; if (smc_nl_handle_smcd_dev(smcd, skb, cb)) goto errout; next: num++; } errout: mutex_unlock(&dev_list->mutex); cb_ctx->pos[0] = num; } int smcd_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb) { smc_nl_prep_smcd_dev(&smcd_dev_list, skb, cb); return skb->len; } #if IS_ENABLED(CONFIG_ISM) struct smc_ism_event_work { struct work_struct work; struct smcd_dev *smcd; struct ism_event event; }; #define ISM_EVENT_REQUEST 0x0001 #define ISM_EVENT_RESPONSE 0x0002 #define ISM_EVENT_REQUEST_IR 0x00000001 #define ISM_EVENT_CODE_SHUTDOWN 0x80 #define ISM_EVENT_CODE_TESTLINK 0x83 union smcd_sw_event_info { u64 info; struct { u8 uid[SMC_LGR_ID_SIZE]; unsigned short vlan_id; u16 code; }; }; static void smcd_handle_sw_event(struct smc_ism_event_work *wrk) { union smcd_sw_event_info ev_info; ev_info.info = wrk->event.info; switch (wrk->event.code) { case ISM_EVENT_CODE_SHUTDOWN: /* Peer shut down DMBs */ smc_smcd_terminate(wrk->smcd, wrk->event.tok, ev_info.vlan_id); break; case ISM_EVENT_CODE_TESTLINK: /* Activity timer */ if (ev_info.code == ISM_EVENT_REQUEST) { ev_info.code = ISM_EVENT_RESPONSE; wrk->smcd->ops->signal_event(wrk->smcd, wrk->event.tok, ISM_EVENT_REQUEST_IR, ISM_EVENT_CODE_TESTLINK, ev_info.info); } break; } } /* worker for SMC-D events */ static void smc_ism_event_work(struct work_struct *work) { struct smc_ism_event_work *wrk = container_of(work, struct smc_ism_event_work, work); switch (wrk->event.type) { case ISM_EVENT_GID: /* GID event, token is peer GID */ smc_smcd_terminate(wrk->smcd, wrk->event.tok, VLAN_VID_MASK); break; case ISM_EVENT_DMB: break; case ISM_EVENT_SWR: /* Software defined event */ smcd_handle_sw_event(wrk); break; } kfree(wrk); } static struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name, const struct smcd_ops *ops, int max_dmbs) { struct smcd_dev *smcd; smcd = devm_kzalloc(parent, sizeof(*smcd), GFP_KERNEL); if (!smcd) return NULL; smcd->conn = devm_kcalloc(parent, max_dmbs, sizeof(struct smc_connection *), GFP_KERNEL); if (!smcd->conn) return NULL; smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s)", WQ_MEM_RECLAIM, name); if (!smcd->event_wq) return NULL; smcd->ops = ops; spin_lock_init(&smcd->lock); spin_lock_init(&smcd->lgr_lock); INIT_LIST_HEAD(&smcd->vlan); INIT_LIST_HEAD(&smcd->lgr_list); init_waitqueue_head(&smcd->lgrs_deleted); return smcd; } static void smcd_register_dev(struct ism_dev *ism) { const struct smcd_ops *ops = ism_get_smcd_ops(); struct smcd_dev *smcd; if (!ops) return; smcd = smcd_alloc_dev(&ism->pdev->dev, dev_name(&ism->pdev->dev), ops, ISM_NR_DMBS); if (!smcd) return; smcd->priv = ism; ism_set_priv(ism, &smc_ism_client, smcd); if (smc_pnetid_by_dev_port(&ism->pdev->dev, 0, smcd->pnetid)) smc_pnetid_by_table_smcd(smcd); mutex_lock(&smcd_dev_list.mutex); if (list_empty(&smcd_dev_list.list)) { u8 *system_eid = NULL; system_eid = smcd->ops->get_system_eid(); if (smcd->ops->supports_v2()) { smc_ism_v2_capable = true; memcpy(smc_ism_v2_system_eid, system_eid, SMC_MAX_EID_LEN); } } /* sort list: devices without pnetid before devices with pnetid */ if (smcd->pnetid[0]) list_add_tail(&smcd->list, &smcd_dev_list.list); else list_add(&smcd->list, &smcd_dev_list.list); mutex_unlock(&smcd_dev_list.mutex); 
pr_warn_ratelimited("smc: adding smcd device %s with pnetid %.16s%s\n", dev_name(&ism->dev), smcd->pnetid, smcd->pnetid_by_user ? " (user defined)" : ""); return; } static void smcd_unregister_dev(struct ism_dev *ism) { struct smcd_dev *smcd = ism_get_priv(ism, &smc_ism_client); pr_warn_ratelimited("smc: removing smcd device %s\n", dev_name(&ism->dev)); smcd->going_away = 1; smc_smcd_terminate_all(smcd); mutex_lock(&smcd_dev_list.mutex); list_del_init(&smcd->list); mutex_unlock(&smcd_dev_list.mutex); destroy_workqueue(smcd->event_wq); } /* SMCD Device event handler. Called from ISM device interrupt handler. * Parameters are ism device pointer, * - event->type (0 --> DMB, 1 --> GID), * - event->code (event code), * - event->tok (either DMB token when event type 0, or GID when event type 1) * - event->time (time of day) * - event->info (debug info). * * Context: * - Function called in IRQ context from ISM device driver event handler. */ static void smcd_handle_event(struct ism_dev *ism, struct ism_event *event) { struct smcd_dev *smcd = ism_get_priv(ism, &smc_ism_client); struct smc_ism_event_work *wrk; if (smcd->going_away) return; /* copy event to event work queue, and let it be handled there */ wrk = kmalloc(sizeof(*wrk), GFP_ATOMIC); if (!wrk) return; INIT_WORK(&wrk->work, smc_ism_event_work); wrk->smcd = smcd; wrk->event = *event; queue_work(smcd->event_wq, &wrk->work); } /* SMCD Device interrupt handler. Called from ISM device interrupt handler. * Parameters are the ism device pointer, DMB number, and the DMBE bitmask. * Find the connection and schedule the tasklet for this connection. * * Context: * - Function called in IRQ context from ISM device driver IRQ handler. */ static void smcd_handle_irq(struct ism_dev *ism, unsigned int dmbno, u16 dmbemask) { struct smcd_dev *smcd = ism_get_priv(ism, &smc_ism_client); struct smc_connection *conn = NULL; unsigned long flags; spin_lock_irqsave(&smcd->lock, flags); conn = smcd->conn[dmbno]; if (conn && !conn->killed) tasklet_schedule(&conn->rx_tsklet); spin_unlock_irqrestore(&smcd->lock, flags); } #endif int smc_ism_signal_shutdown(struct smc_link_group *lgr) { int rc = 0; #if IS_ENABLED(CONFIG_ISM) union smcd_sw_event_info ev_info; if (lgr->peer_shutdown) return 0; memcpy(ev_info.uid, lgr->id, SMC_LGR_ID_SIZE); ev_info.vlan_id = lgr->vlan_id; ev_info.code = ISM_EVENT_REQUEST; rc = lgr->smcd->ops->signal_event(lgr->smcd, lgr->peer_gid, ISM_EVENT_REQUEST_IR, ISM_EVENT_CODE_SHUTDOWN, ev_info.info); #endif return rc; } int smc_ism_init(void) { int rc = 0; #if IS_ENABLED(CONFIG_ISM) smc_ism_v2_capable = false; memset(smc_ism_v2_system_eid, 0, SMC_MAX_EID_LEN); rc = ism_register_client(&smc_ism_client); #endif return rc; } void smc_ism_exit(void) { #if IS_ENABLED(CONFIG_ISM) ism_unregister_client(&smc_ism_client); #endif } |
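/*
 * Illustrative sketch only (not part of smc_ism.c as shown here): the ISM
 * bus driver calls back into the handlers above through a struct ism_client
 * that smc_ism_init() registers.  Assuming the wiring earlier in this file
 * follows the usual pattern, it looks roughly like this:
 *
 *	static struct ism_client smc_ism_client = {
 *		.name		= "SMC-D",
 *		.add		= smcd_register_dev,
 *		.remove		= smcd_unregister_dev,
 *		.handle_event	= smcd_handle_event,
 *		.handle_irq	= smcd_handle_irq,
 *	};
 *
 * .handle_event and .handle_irq run in IRQ context, which is why they only
 * copy the event / look up the connection and defer the real work to
 * smcd->event_wq or the connection's RX tasklet.
 */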
/* * DRBG based on NIST SP800-90A * * Copyright Stephan Mueller <smueller@chronox.de>, 2014 * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, and the entire permission notice in its entirety, * including the disclaimer of warranties. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote * products derived from this software without specific prior * written permission. * * ALTERNATIVELY, this product may be distributed under the terms of * the GNU General Public License, in which case the provisions of the GPL are * required INSTEAD OF the above restrictions. (This clause is * necessary due to a potential bad interaction between the GPL and * the restrictions contained in a BSD-style copyright.) * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ #ifndef _DRBG_H #define _DRBG_H #include <linux/random.h> #include <linux/scatterlist.h> #include <crypto/hash.h> #include <crypto/skcipher.h> #include <linux/module.h> #include <linux/crypto.h> #include <linux/slab.h> #include <crypto/internal/rng.h> #include <crypto/rng.h> #include <linux/fips.h> #include <linux/mutex.h> #include <linux/list.h> #include <linux/workqueue.h> /* * Concatenation Helper and string operation helper * * SP800-90A requires the concatenation of different data. To avoid copying * buffers around or allocate additional memory, the following data structure * is used to point to the original memory with its size.
In addition, it * is used to build a linked list. The linked list defines the concatenation * of individual buffers. The order of memory block referenced in that * linked list determines the order of concatenation. */ struct drbg_string { const unsigned char *buf; size_t len; struct list_head list; }; static inline void drbg_string_fill(struct drbg_string *string, const unsigned char *buf, size_t len) { string->buf = buf; string->len = len; INIT_LIST_HEAD(&string->list); } struct drbg_state; typedef uint32_t drbg_flag_t; struct drbg_core { drbg_flag_t flags; /* flags for the cipher */ __u8 statelen; /* maximum state length */ __u8 blocklen_bytes; /* block size of output in bytes */ char cra_name[CRYPTO_MAX_ALG_NAME]; /* mapping to kernel crypto API */ /* kernel crypto API backend cipher name */ char backend_cra_name[CRYPTO_MAX_ALG_NAME]; }; struct drbg_state_ops { int (*update)(struct drbg_state *drbg, struct list_head *seed, int reseed); int (*generate)(struct drbg_state *drbg, unsigned char *buf, unsigned int buflen, struct list_head *addtl); int (*crypto_init)(struct drbg_state *drbg); int (*crypto_fini)(struct drbg_state *drbg); }; struct drbg_test_data { struct drbg_string *testentropy; /* TEST PARAMETER: test entropy */ }; enum drbg_seed_state { DRBG_SEED_STATE_UNSEEDED, DRBG_SEED_STATE_PARTIAL, /* Seeded with !rng_is_initialized() */ DRBG_SEED_STATE_FULL, }; struct drbg_state { struct mutex drbg_mutex; /* lock around DRBG */ unsigned char *V; /* internal state 10.1.1.1 1a) */ unsigned char *Vbuf; /* hash: static value 10.1.1.1 1b) hmac / ctr: key */ unsigned char *C; unsigned char *Cbuf; /* Number of RNG requests since last reseed -- 10.1.1.1 1c) */ size_t reseed_ctr; size_t reseed_threshold; /* some memory the DRBG can use for its operation */ unsigned char *scratchpad; unsigned char *scratchpadbuf; void *priv_data; /* Cipher handle */ struct crypto_skcipher *ctr_handle; /* CTR mode cipher handle */ struct skcipher_request *ctr_req; /* CTR mode request handle */ __u8 *outscratchpadbuf; /* CTR mode output scratchpad */ __u8 *outscratchpad; /* CTR mode aligned outbuf */ struct crypto_wait ctr_wait; /* CTR mode async wait obj */ struct scatterlist sg_in, sg_out; /* CTR mode SGLs */ enum drbg_seed_state seeded; /* DRBG fully seeded? */ unsigned long last_seed_time; bool pr; /* Prediction resistance enabled? */ bool fips_primed; /* Continuous test primed? */ unsigned char *prev; /* FIPS 140-2 continuous test value */ struct crypto_rng *jent; const struct drbg_state_ops *d_ops; const struct drbg_core *core; struct drbg_string test_data; }; static inline __u8 drbg_statelen(struct drbg_state *drbg) { if (drbg && drbg->core) return drbg->core->statelen; return 0; } static inline __u8 drbg_blocklen(struct drbg_state *drbg) { if (drbg && drbg->core) return drbg->core->blocklen_bytes; return 0; } static inline __u8 drbg_keylen(struct drbg_state *drbg) { if (drbg && drbg->core) return (drbg->core->statelen - drbg->core->blocklen_bytes); return 0; } static inline size_t drbg_max_request_bytes(struct drbg_state *drbg) { /* SP800-90A requires the limit 2**19 bits, but we return bytes */ return (1 << 16); } static inline size_t drbg_max_addtl(struct drbg_state *drbg) { /* SP800-90A requires 2**35 bytes additional info str / pers str */ #if (__BITS_PER_LONG == 32) /* * SP800-90A allows smaller maximum numbers to be returned -- we * return SIZE_MAX - 1 to allow the verification of the enforcement * of this value in drbg_healthcheck_sanity. 
*/ return (SIZE_MAX - 1); #else return (1UL<<35); #endif } static inline size_t drbg_max_requests(struct drbg_state *drbg) { /* SP800-90A requires 2**48 maximum requests before reseeding */ return (1<<20); } /* * This is a wrapper to the kernel crypto API function of * crypto_rng_generate() to allow the caller to provide additional data. * * @drng DRBG handle -- see crypto_rng_get_bytes * @outbuf output buffer -- see crypto_rng_get_bytes * @outlen length of output buffer -- see crypto_rng_get_bytes * @addtl_input additional information string input buffer * @addtllen length of additional information string buffer * * return * see crypto_rng_get_bytes */ static inline int crypto_drbg_get_bytes_addtl(struct crypto_rng *drng, unsigned char *outbuf, unsigned int outlen, struct drbg_string *addtl) { return crypto_rng_generate(drng, addtl->buf, addtl->len, outbuf, outlen); } /* * TEST code * * This is a wrapper to the kernel crypto API function of * crypto_rng_generate() to allow the caller to provide additional data and * allow furnishing of test_data * * @drng DRBG handle -- see crypto_rng_get_bytes * @outbuf output buffer -- see crypto_rng_get_bytes * @outlen length of output buffer -- see crypto_rng_get_bytes * @addtl_input additional information string input buffer * @addtllen length of additional information string buffer * @test_data filled test data * * return * see crypto_rng_get_bytes */ static inline int crypto_drbg_get_bytes_addtl_test(struct crypto_rng *drng, unsigned char *outbuf, unsigned int outlen, struct drbg_string *addtl, struct drbg_test_data *test_data) { crypto_rng_set_entropy(drng, test_data->testentropy->buf, test_data->testentropy->len); return crypto_rng_generate(drng, addtl->buf, addtl->len, outbuf, outlen); } /* * TEST code * * This is a wrapper to the kernel crypto API function of * crypto_rng_reset() to allow the caller to provide test_data * * @drng DRBG handle -- see crypto_rng_reset * @pers personalization string input buffer * @perslen length of additional information string buffer * @test_data filled test data * * return * see crypto_rng_reset */ static inline int crypto_drbg_reset_test(struct crypto_rng *drng, struct drbg_string *pers, struct drbg_test_data *test_data) { crypto_rng_set_entropy(drng, test_data->testentropy->buf, test_data->testentropy->len); return crypto_rng_reset(drng, pers->buf, pers->len); } /* DRBG type flags */ #define DRBG_CTR ((drbg_flag_t)1<<0) #define DRBG_HMAC ((drbg_flag_t)1<<1) #define DRBG_HASH ((drbg_flag_t)1<<2) #define DRBG_TYPE_MASK (DRBG_CTR | DRBG_HMAC | DRBG_HASH) /* DRBG strength flags */ #define DRBG_STRENGTH128 ((drbg_flag_t)1<<3) #define DRBG_STRENGTH192 ((drbg_flag_t)1<<4) #define DRBG_STRENGTH256 ((drbg_flag_t)1<<5) #define DRBG_STRENGTH_MASK (DRBG_STRENGTH128 | DRBG_STRENGTH192 | \ DRBG_STRENGTH256) enum drbg_prefixes { DRBG_PREFIX0 = 0x00, DRBG_PREFIX1, DRBG_PREFIX2, DRBG_PREFIX3 }; #endif /* _DRBG_H */ |
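/*
 * Illustrative sketch only (not part of this header): how the drbg_string
 * helpers above are meant to be used.  Concatenation per SP800-90A is
 * expressed by linking drbg_string entries that merely point at existing
 * buffers -- list order is concatenation order, nothing is copied.  The
 * buffer names below are invented for the example.
 *
 *	LIST_HEAD(seedlist);
 *	struct drbg_string entropy, nonce;
 *
 *	drbg_string_fill(&entropy, entropy_buf, entropy_len);
 *	drbg_string_fill(&nonce, nonce_buf, nonce_len);
 *	list_add_tail(&entropy.list, &seedlist);
 *	list_add_tail(&nonce.list, &seedlist);
 *	// d_ops->update() now sees "entropy || nonce" as the seed material
 *
 * Additional input for a single generate request goes through the
 * crypto_drbg_get_bytes_addtl() wrapper defined above:
 *
 *	struct drbg_string addtl;
 *
 *	drbg_string_fill(&addtl, addtl_buf, addtl_len);
 *	ret = crypto_drbg_get_bytes_addtl(drng, outbuf, outlen, &addtl);
 */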
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/hfs/brec.c * * Copyright (C) 2001 * Brad Boyer (flar@allandria.com) * (C) 2003 Ardis Technologies <roman@ardistech.com> * * Handle individual btree records */ #include "btree.h" static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd); static int hfs_brec_update_parent(struct hfs_find_data *fd); static int hfs_btree_inc_height(struct hfs_btree *tree); /* Get the length and offset of the given record in the given node */ u16 hfs_brec_lenoff(struct hfs_bnode *node, u16 rec, u16 *off) { __be16 retval[2]; u16 dataoff; dataoff = node->tree->node_size - (rec + 2) * 2; hfs_bnode_read(node, retval, dataoff, 4); *off = be16_to_cpu(retval[1]); return be16_to_cpu(retval[0]) - *off; } /* Get the length of the key from a keyed record */ u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec) { u16 retval, recoff; if (node->type != HFS_NODE_INDEX && node->type != HFS_NODE_LEAF) return 0; if ((node->type == HFS_NODE_INDEX) && !(node->tree->attributes & HFS_TREE_VARIDXKEYS)) { if (node->tree->attributes & HFS_TREE_BIGKEYS) retval = node->tree->max_key_len + 2; else retval = node->tree->max_key_len + 1; } else { recoff = hfs_bnode_read_u16(node, node->tree->node_size - (rec + 1) * 2); if (!recoff) return 0; if (node->tree->attributes & HFS_TREE_BIGKEYS) { retval = hfs_bnode_read_u16(node, recoff) + 2; if (retval > node->tree->max_key_len + 2) { pr_err("keylen %d too
large\n", retval); retval = 0; } } else { retval = (hfs_bnode_read_u8(node, recoff) | 1) + 1; if (retval > node->tree->max_key_len + 1) { pr_err("keylen %d too large\n", retval); retval = 0; } } } return retval; } int hfs_brec_insert(struct hfs_find_data *fd, void *entry, int entry_len) { struct hfs_btree *tree; struct hfs_bnode *node, *new_node; int size, key_len, rec; int data_off, end_off; int idx_rec_off, data_rec_off, end_rec_off; __be32 cnid; tree = fd->tree; if (!fd->bnode) { if (!tree->root) hfs_btree_inc_height(tree); node = hfs_bnode_find(tree, tree->leaf_head); if (IS_ERR(node)) return PTR_ERR(node); fd->bnode = node; fd->record = -1; } new_node = NULL; key_len = (fd->search_key->key_len | 1) + 1; again: /* new record idx and complete record size */ rec = fd->record + 1; size = key_len + entry_len; node = fd->bnode; hfs_bnode_dump(node); /* get last offset */ end_rec_off = tree->node_size - (node->num_recs + 1) * 2; end_off = hfs_bnode_read_u16(node, end_rec_off); end_rec_off -= 2; hfs_dbg(BNODE_MOD, "insert_rec: %d, %d, %d, %d\n", rec, size, end_off, end_rec_off); if (size > end_rec_off - end_off) { if (new_node) panic("not enough room!\n"); new_node = hfs_bnode_split(fd); if (IS_ERR(new_node)) return PTR_ERR(new_node); goto again; } if (node->type == HFS_NODE_LEAF) { tree->leaf_count++; mark_inode_dirty(tree->inode); } node->num_recs++; /* write new last offset */ hfs_bnode_write_u16(node, offsetof(struct hfs_bnode_desc, num_recs), node->num_recs); hfs_bnode_write_u16(node, end_rec_off, end_off + size); data_off = end_off; data_rec_off = end_rec_off + 2; idx_rec_off = tree->node_size - (rec + 1) * 2; if (idx_rec_off == data_rec_off) goto skip; /* move all following entries */ do { data_off = hfs_bnode_read_u16(node, data_rec_off + 2); hfs_bnode_write_u16(node, data_rec_off, data_off + size); data_rec_off += 2; } while (data_rec_off < idx_rec_off); /* move data away */ hfs_bnode_move(node, data_off + size, data_off, end_off - data_off); skip: hfs_bnode_write(node, fd->search_key, data_off, key_len); hfs_bnode_write(node, entry, data_off + key_len, entry_len); hfs_bnode_dump(node); /* * update parent key if we inserted a key * at the start of the node and it is not the new node */ if (!rec && new_node != node) { hfs_bnode_read_key(node, fd->search_key, data_off + size); hfs_brec_update_parent(fd); } if (new_node) { hfs_bnode_put(fd->bnode); if (!new_node->parent) { hfs_btree_inc_height(tree); new_node->parent = tree->root; } fd->bnode = hfs_bnode_find(tree, new_node->parent); /* create index data entry */ cnid = cpu_to_be32(new_node->this); entry = &cnid; entry_len = sizeof(cnid); /* get index key */ hfs_bnode_read_key(new_node, fd->search_key, 14); __hfs_brec_find(fd->bnode, fd); hfs_bnode_put(new_node); new_node = NULL; if (tree->attributes & HFS_TREE_VARIDXKEYS) key_len = fd->search_key->key_len + 1; else { fd->search_key->key_len = tree->max_key_len; key_len = tree->max_key_len + 1; } goto again; } return 0; } int hfs_brec_remove(struct hfs_find_data *fd) { struct hfs_btree *tree; struct hfs_bnode *node, *parent; int end_off, rec_off, data_off, size; tree = fd->tree; node = fd->bnode; again: rec_off = tree->node_size - (fd->record + 2) * 2; end_off = tree->node_size - (node->num_recs + 1) * 2; if (node->type == HFS_NODE_LEAF) { tree->leaf_count--; mark_inode_dirty(tree->inode); } hfs_bnode_dump(node); hfs_dbg(BNODE_MOD, "remove_rec: %d, %d\n", fd->record, fd->keylength + fd->entrylength); if (!--node->num_recs) { hfs_bnode_unlink(node); if (!node->parent) return 0; parent = 
hfs_bnode_find(tree, node->parent); if (IS_ERR(parent)) return PTR_ERR(parent); hfs_bnode_put(node); node = fd->bnode = parent; __hfs_brec_find(node, fd); goto again; } hfs_bnode_write_u16(node, offsetof(struct hfs_bnode_desc, num_recs), node->num_recs); if (rec_off == end_off) goto skip; size = fd->keylength + fd->entrylength; do { data_off = hfs_bnode_read_u16(node, rec_off); hfs_bnode_write_u16(node, rec_off + 2, data_off - size); rec_off -= 2; } while (rec_off >= end_off); /* fill hole */ hfs_bnode_move(node, fd->keyoffset, fd->keyoffset + size, data_off - fd->keyoffset - size); skip: hfs_bnode_dump(node); if (!fd->record) hfs_brec_update_parent(fd); return 0; } static struct hfs_bnode *hfs_bnode_split(struct hfs_find_data *fd) { struct hfs_btree *tree; struct hfs_bnode *node, *new_node, *next_node; struct hfs_bnode_desc node_desc; int num_recs, new_rec_off, new_off, old_rec_off; int data_start, data_end, size; tree = fd->tree; node = fd->bnode; new_node = hfs_bmap_alloc(tree); if (IS_ERR(new_node)) return new_node; hfs_bnode_get(node); hfs_dbg(BNODE_MOD, "split_nodes: %d - %d - %d\n", node->this, new_node->this, node->next); new_node->next = node->next; new_node->prev = node->this; new_node->parent = node->parent; new_node->type = node->type; new_node->height = node->height; if (node->next) next_node = hfs_bnode_find(tree, node->next); else next_node = NULL; if (IS_ERR(next_node)) { hfs_bnode_put(node); hfs_bnode_put(new_node); return next_node; } size = tree->node_size / 2 - node->num_recs * 2 - 14; old_rec_off = tree->node_size - 4; num_recs = 1; for (;;) { data_start = hfs_bnode_read_u16(node, old_rec_off); if (data_start > size) break; old_rec_off -= 2; if (++num_recs < node->num_recs) continue; /* panic? */ hfs_bnode_put(node); hfs_bnode_put(new_node); if (next_node) hfs_bnode_put(next_node); return ERR_PTR(-ENOSPC); } if (fd->record + 1 < num_recs) { /* new record is in the lower half, * so leave some more space there */ old_rec_off += 2; num_recs--; data_start = hfs_bnode_read_u16(node, old_rec_off); } else { hfs_bnode_put(node); hfs_bnode_get(new_node); fd->bnode = new_node; fd->record -= num_recs; fd->keyoffset -= data_start - 14; fd->entryoffset -= data_start - 14; } new_node->num_recs = node->num_recs - num_recs; node->num_recs = num_recs; new_rec_off = tree->node_size - 2; new_off = 14; size = data_start - new_off; num_recs = new_node->num_recs; data_end = data_start; while (num_recs) { hfs_bnode_write_u16(new_node, new_rec_off, new_off); old_rec_off -= 2; new_rec_off -= 2; data_end = hfs_bnode_read_u16(node, old_rec_off); new_off = data_end - size; num_recs--; } hfs_bnode_write_u16(new_node, new_rec_off, new_off); hfs_bnode_copy(new_node, 14, node, data_start, data_end - data_start); /* update new bnode header */ node_desc.next = cpu_to_be32(new_node->next); node_desc.prev = cpu_to_be32(new_node->prev); node_desc.type = new_node->type; node_desc.height = new_node->height; node_desc.num_recs = cpu_to_be16(new_node->num_recs); node_desc.reserved = 0; hfs_bnode_write(new_node, &node_desc, 0, sizeof(node_desc)); /* update previous bnode header */ node->next = new_node->this; hfs_bnode_read(node, &node_desc, 0, sizeof(node_desc)); node_desc.next = cpu_to_be32(node->next); node_desc.num_recs = cpu_to_be16(node->num_recs); hfs_bnode_write(node, &node_desc, 0, sizeof(node_desc)); /* update next bnode header */ if (next_node) { next_node->prev = new_node->this; hfs_bnode_read(next_node, &node_desc, 0, sizeof(node_desc)); node_desc.prev = cpu_to_be32(next_node->prev); 
hfs_bnode_write(next_node, &node_desc, 0, sizeof(node_desc)); hfs_bnode_put(next_node); } else if (node->this == tree->leaf_tail) { /* if there is no next node, this might be the new tail */ tree->leaf_tail = new_node->this; mark_inode_dirty(tree->inode); } hfs_bnode_dump(node); hfs_bnode_dump(new_node); hfs_bnode_put(node); return new_node; } static int hfs_brec_update_parent(struct hfs_find_data *fd) { struct hfs_btree *tree; struct hfs_bnode *node, *new_node, *parent; int newkeylen, diff; int rec, rec_off, end_rec_off; int start_off, end_off; tree = fd->tree; node = fd->bnode; new_node = NULL; if (!node->parent) return 0; again: parent = hfs_bnode_find(tree, node->parent); if (IS_ERR(parent)) return PTR_ERR(parent); __hfs_brec_find(parent, fd); if (fd->record < 0) return -ENOENT; hfs_bnode_dump(parent); rec = fd->record; /* size difference between old and new key */ if (tree->attributes & HFS_TREE_VARIDXKEYS) newkeylen = (hfs_bnode_read_u8(node, 14) | 1) + 1; else fd->keylength = newkeylen = tree->max_key_len + 1; hfs_dbg(BNODE_MOD, "update_rec: %d, %d, %d\n", rec, fd->keylength, newkeylen); rec_off = tree->node_size - (rec + 2) * 2; end_rec_off = tree->node_size - (parent->num_recs + 1) * 2; diff = newkeylen - fd->keylength; if (!diff) goto skip; if (diff > 0) { end_off = hfs_bnode_read_u16(parent, end_rec_off); if (end_rec_off - end_off < diff) { printk(KERN_DEBUG "splitting index node...\n"); fd->bnode = parent; new_node = hfs_bnode_split(fd); if (IS_ERR(new_node)) return PTR_ERR(new_node); parent = fd->bnode; rec = fd->record; rec_off = tree->node_size - (rec + 2) * 2; end_rec_off = tree->node_size - (parent->num_recs + 1) * 2; } } end_off = start_off = hfs_bnode_read_u16(parent, rec_off); hfs_bnode_write_u16(parent, rec_off, start_off + diff); start_off -= 4; /* move previous cnid too */ while (rec_off > end_rec_off) { rec_off -= 2; end_off = hfs_bnode_read_u16(parent, rec_off); hfs_bnode_write_u16(parent, rec_off, end_off + diff); } hfs_bnode_move(parent, start_off + diff, start_off, end_off - start_off); skip: hfs_bnode_copy(parent, fd->keyoffset, node, 14, newkeylen); if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) hfs_bnode_write_u8(parent, fd->keyoffset, newkeylen - 1); hfs_bnode_dump(parent); hfs_bnode_put(node); node = parent; if (new_node) { __be32 cnid; if (!new_node->parent) { hfs_btree_inc_height(tree); new_node->parent = tree->root; } fd->bnode = hfs_bnode_find(tree, new_node->parent); /* create index key and entry */ hfs_bnode_read_key(new_node, fd->search_key, 14); cnid = cpu_to_be32(new_node->this); __hfs_brec_find(fd->bnode, fd); hfs_brec_insert(fd, &cnid, sizeof(cnid)); hfs_bnode_put(fd->bnode); hfs_bnode_put(new_node); if (!rec) { if (new_node == node) goto out; /* restore search_key */ hfs_bnode_read_key(node, fd->search_key, 14); } new_node = NULL; } if (!rec && node->parent) goto again; out: fd->bnode = node; return 0; } static int hfs_btree_inc_height(struct hfs_btree *tree) { struct hfs_bnode *node, *new_node; struct hfs_bnode_desc node_desc; int key_size, rec; __be32 cnid; node = NULL; if (tree->root) { node = hfs_bnode_find(tree, tree->root); if (IS_ERR(node)) return PTR_ERR(node); } new_node = hfs_bmap_alloc(tree); if (IS_ERR(new_node)) { hfs_bnode_put(node); return PTR_ERR(new_node); } tree->root = new_node->this; if (!tree->depth) { tree->leaf_head = tree->leaf_tail = new_node->this; new_node->type = HFS_NODE_LEAF; new_node->num_recs = 0; } else { new_node->type = HFS_NODE_INDEX; new_node->num_recs = 1; } new_node->parent = 0; new_node->next = 0; 
new_node->prev = 0; new_node->height = ++tree->depth; node_desc.next = cpu_to_be32(new_node->next); node_desc.prev = cpu_to_be32(new_node->prev); node_desc.type = new_node->type; node_desc.height = new_node->height; node_desc.num_recs = cpu_to_be16(new_node->num_recs); node_desc.reserved = 0; hfs_bnode_write(new_node, &node_desc, 0, sizeof(node_desc)); rec = tree->node_size - 2; hfs_bnode_write_u16(new_node, rec, 14); if (node) { /* insert old root idx into new root */ node->parent = tree->root; if (node->type == HFS_NODE_LEAF || tree->attributes & HFS_TREE_VARIDXKEYS) key_size = hfs_bnode_read_u8(node, 14) + 1; else key_size = tree->max_key_len + 1; hfs_bnode_copy(new_node, 14, node, 14, key_size); if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) { key_size = tree->max_key_len + 1; hfs_bnode_write_u8(new_node, 14, tree->max_key_len); } key_size = (key_size + 1) & -2; cnid = cpu_to_be32(node->this); hfs_bnode_write(new_node, &cnid, 14 + key_size, 4); rec -= 2; hfs_bnode_write_u16(new_node, rec, 14 + key_size + 4); hfs_bnode_put(node); } hfs_bnode_put(new_node); mark_inode_dirty(tree->inode); return 0; } |
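/*
 * Illustrative summary (not part of brec.c): the offset arithmetic used
 * throughout this file assumes the classic HFS btree node layout:
 *
 *	offset 0                   struct hfs_bnode_desc (14 bytes)
 *	offset 14                  record 0, record 1, ... packed upwards
 *	...                        free space
 *	node_size - 2 * (k + 1)    16-bit big-endian offset of record k
 *	...
 *	node_size - 2              offset of record 0 (always 14)
 *
 * There are num_recs + 1 offsets; the extra one marks the start of free
 * space.  Hence the offset of record 'rec' is read at
 * node_size - (rec + 1) * 2, and hfs_brec_lenoff() reads 4 bytes at
 * node_size - (rec + 2) * 2 to get both that offset and the next one, whose
 * difference is the record length.
 */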
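/*
 * Context for the nfsd export code that follows (summary, not part of the
 * original file): it defines two sunrpc caches -- "nfsd.fh", mapping a
 * client plus filehandle fragment to an export key, and "nfsd.export",
 * mapping client plus path to the export options.  Both are filled from
 * user space (typically mountd) through the sunrpc cache channel files
 * under /proc/net/rpc/<cache-name>/; the *_request() callbacks format the
 * upcall line and the *_parse() callbacks parse the reply written back.
 */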
// SPDX-License-Identifier: GPL-2.0 /* * NFS exporting and validation. * * We maintain a list of clients, each of which has a list of * exports. To export an fs to a given client, you first have * to create the client entry with NFSCTL_ADDCLIENT, which * creates a client control block and adds it to the hash * table. Then, you call NFSCTL_EXPORT for each fs. * * * Copyright (C) 1995, 1996 Olaf Kirch, <okir@monad.swb.de> */ #include <linux/slab.h> #include <linux/namei.h> #include <linux/module.h> #include <linux/exportfs.h> #include <linux/sunrpc/svc_xprt.h> #include "nfsd.h" #include "nfsfh.h" #include "netns.h" #include "pnfs.h" #include "filecache.h" #include "trace.h" #define NFSDDBG_FACILITY NFSDDBG_EXPORT /* * We have two caches. * One maps client+vfsmnt+dentry to export options - the export map * The other maps client+filehandle-fragment to export options.
- the expkey map * * The export options are actually stored in the first map, and the * second map contains a reference to the entry in the first map. */ #define EXPKEY_HASHBITS 8 #define EXPKEY_HASHMAX (1 << EXPKEY_HASHBITS) #define EXPKEY_HASHMASK (EXPKEY_HASHMAX -1) static void expkey_put(struct kref *ref) { struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref); if (test_bit(CACHE_VALID, &key->h.flags) && !test_bit(CACHE_NEGATIVE, &key->h.flags)) path_put(&key->ek_path); auth_domain_put(key->ek_client); kfree_rcu(key, ek_rcu); } static int expkey_upcall(struct cache_detail *cd, struct cache_head *h) { return sunrpc_cache_pipe_upcall(cd, h); } static void expkey_request(struct cache_detail *cd, struct cache_head *h, char **bpp, int *blen) { /* client fsidtype \xfsid */ struct svc_expkey *ek = container_of(h, struct svc_expkey, h); char type[5]; qword_add(bpp, blen, ek->ek_client->name); snprintf(type, 5, "%d", ek->ek_fsidtype); qword_add(bpp, blen, type); qword_addhex(bpp, blen, (char*)ek->ek_fsid, key_len(ek->ek_fsidtype)); (*bpp)[-1] = '\n'; } static struct svc_expkey *svc_expkey_update(struct cache_detail *cd, struct svc_expkey *new, struct svc_expkey *old); static struct svc_expkey *svc_expkey_lookup(struct cache_detail *cd, struct svc_expkey *); static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen) { /* client fsidtype fsid expiry [path] */ char *buf; int len; struct auth_domain *dom = NULL; int err; int fsidtype; char *ep; struct svc_expkey key; struct svc_expkey *ek = NULL; if (mesg[mlen - 1] != '\n') return -EINVAL; mesg[mlen-1] = 0; buf = kmalloc(PAGE_SIZE, GFP_KERNEL); err = -ENOMEM; if (!buf) goto out; err = -EINVAL; if (qword_get(&mesg, buf, PAGE_SIZE) <= 0) goto out; err = -ENOENT; dom = auth_domain_find(buf); if (!dom) goto out; dprintk("found domain %s\n", buf); err = -EINVAL; if (qword_get(&mesg, buf, PAGE_SIZE) <= 0) goto out; fsidtype = simple_strtoul(buf, &ep, 10); if (*ep) goto out; dprintk("found fsidtype %d\n", fsidtype); if (key_len(fsidtype)==0) /* invalid type */ goto out; if ((len=qword_get(&mesg, buf, PAGE_SIZE)) <= 0) goto out; dprintk("found fsid length %d\n", len); if (len != key_len(fsidtype)) goto out; /* OK, we seem to have a valid key */ key.h.flags = 0; err = get_expiry(&mesg, &key.h.expiry_time); if (err) goto out; key.ek_client = dom; key.ek_fsidtype = fsidtype; memcpy(key.ek_fsid, buf, len); ek = svc_expkey_lookup(cd, &key); err = -ENOMEM; if (!ek) goto out; /* now we want a pathname, or empty meaning NEGATIVE */ err = -EINVAL; len = qword_get(&mesg, buf, PAGE_SIZE); if (len < 0) goto out; dprintk("Path seems to be <%s>\n", buf); err = 0; if (len == 0) { set_bit(CACHE_NEGATIVE, &key.h.flags); ek = svc_expkey_update(cd, &key, ek); if (ek) trace_nfsd_expkey_update(ek, NULL); else err = -ENOMEM; } else { err = kern_path(buf, 0, &key.ek_path); if (err) goto out; dprintk("Found the path %s\n", buf); ek = svc_expkey_update(cd, &key, ek); if (ek) trace_nfsd_expkey_update(ek, buf); else err = -ENOMEM; path_put(&key.ek_path); } cache_flush(); out: if (ek) cache_put(&ek->h, cd); if (dom) auth_domain_put(dom); kfree(buf); return err; } static int expkey_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h) { struct svc_expkey *ek ; int i; if (h ==NULL) { seq_puts(m, "#domain fsidtype fsid [path]\n"); return 0; } ek = container_of(h, struct svc_expkey, h); seq_printf(m, "%s %d 0x", ek->ek_client->name, ek->ek_fsidtype); for (i=0; i < key_len(ek->ek_fsidtype)/4; i++) seq_printf(m, "%08x", ek->ek_fsid[i]); if 
(test_bit(CACHE_VALID, &h->flags) && !test_bit(CACHE_NEGATIVE, &h->flags)) { seq_printf(m, " "); seq_path(m, &ek->ek_path, "\\ \t\n"); } seq_printf(m, "\n"); return 0; } static inline int expkey_match (struct cache_head *a, struct cache_head *b) { struct svc_expkey *orig = container_of(a, struct svc_expkey, h); struct svc_expkey *new = container_of(b, struct svc_expkey, h); if (orig->ek_fsidtype != new->ek_fsidtype || orig->ek_client != new->ek_client || memcmp(orig->ek_fsid, new->ek_fsid, key_len(orig->ek_fsidtype)) != 0) return 0; return 1; } static inline void expkey_init(struct cache_head *cnew, struct cache_head *citem) { struct svc_expkey *new = container_of(cnew, struct svc_expkey, h); struct svc_expkey *item = container_of(citem, struct svc_expkey, h); kref_get(&item->ek_client->ref); new->ek_client = item->ek_client; new->ek_fsidtype = item->ek_fsidtype; memcpy(new->ek_fsid, item->ek_fsid, sizeof(new->ek_fsid)); } static inline void expkey_update(struct cache_head *cnew, struct cache_head *citem) { struct svc_expkey *new = container_of(cnew, struct svc_expkey, h); struct svc_expkey *item = container_of(citem, struct svc_expkey, h); new->ek_path = item->ek_path; path_get(&item->ek_path); } static struct cache_head *expkey_alloc(void) { struct svc_expkey *i = kmalloc(sizeof(*i), GFP_KERNEL); if (i) return &i->h; else return NULL; } static void expkey_flush(void) { /* * Take the nfsd_mutex here to ensure that the file cache is not * destroyed while we're in the middle of flushing. */ mutex_lock(&nfsd_mutex); nfsd_file_cache_purge(current->nsproxy->net_ns); mutex_unlock(&nfsd_mutex); } static const struct cache_detail svc_expkey_cache_template = { .owner = THIS_MODULE, .hash_size = EXPKEY_HASHMAX, .name = "nfsd.fh", .cache_put = expkey_put, .cache_upcall = expkey_upcall, .cache_request = expkey_request, .cache_parse = expkey_parse, .cache_show = expkey_show, .match = expkey_match, .init = expkey_init, .update = expkey_update, .alloc = expkey_alloc, .flush = expkey_flush, }; static int svc_expkey_hash(struct svc_expkey *item) { int hash = item->ek_fsidtype; char * cp = (char*)item->ek_fsid; int len = key_len(item->ek_fsidtype); hash ^= hash_mem(cp, len, EXPKEY_HASHBITS); hash ^= hash_ptr(item->ek_client, EXPKEY_HASHBITS); hash &= EXPKEY_HASHMASK; return hash; } static struct svc_expkey * svc_expkey_lookup(struct cache_detail *cd, struct svc_expkey *item) { struct cache_head *ch; int hash = svc_expkey_hash(item); ch = sunrpc_cache_lookup_rcu(cd, &item->h, hash); if (ch) return container_of(ch, struct svc_expkey, h); else return NULL; } static struct svc_expkey * svc_expkey_update(struct cache_detail *cd, struct svc_expkey *new, struct svc_expkey *old) { struct cache_head *ch; int hash = svc_expkey_hash(new); ch = sunrpc_cache_update(cd, &new->h, &old->h, hash); if (ch) return container_of(ch, struct svc_expkey, h); else return NULL; } #define EXPORT_HASHBITS 8 #define EXPORT_HASHMAX (1<< EXPORT_HASHBITS) static void nfsd4_fslocs_free(struct nfsd4_fs_locations *fsloc) { struct nfsd4_fs_location *locations = fsloc->locations; int i; if (!locations) return; for (i = 0; i < fsloc->locations_count; i++) { kfree(locations[i].path); kfree(locations[i].hosts); } kfree(locations); fsloc->locations = NULL; } static int export_stats_init(struct export_stats *stats) { stats->start_time = ktime_get_seconds(); return nfsd_percpu_counters_init(stats->counter, EXP_STATS_COUNTERS_NUM); } static void export_stats_reset(struct export_stats *stats) { if (stats) nfsd_percpu_counters_reset(stats->counter, 
EXP_STATS_COUNTERS_NUM); } static void export_stats_destroy(struct export_stats *stats) { if (stats) nfsd_percpu_counters_destroy(stats->counter, EXP_STATS_COUNTERS_NUM); } static void svc_export_put(struct kref *ref) { struct svc_export *exp = container_of(ref, struct svc_export, h.ref); path_put(&exp->ex_path); auth_domain_put(exp->ex_client); nfsd4_fslocs_free(&exp->ex_fslocs); export_stats_destroy(exp->ex_stats); kfree(exp->ex_stats); kfree(exp->ex_uuid); kfree_rcu(exp, ex_rcu); } static int svc_export_upcall(struct cache_detail *cd, struct cache_head *h) { return sunrpc_cache_pipe_upcall(cd, h); } static void svc_export_request(struct cache_detail *cd, struct cache_head *h, char **bpp, int *blen) { /* client path */ struct svc_export *exp = container_of(h, struct svc_export, h); char *pth; qword_add(bpp, blen, exp->ex_client->name); pth = d_path(&exp->ex_path, *bpp, *blen); if (IS_ERR(pth)) { /* is this correct? */ (*bpp)[0] = '\n'; return; } qword_add(bpp, blen, pth); (*bpp)[-1] = '\n'; } static struct svc_export *svc_export_update(struct svc_export *new, struct svc_export *old); static struct svc_export *svc_export_lookup(struct svc_export *); static int check_export(struct path *path, int *flags, unsigned char *uuid) { struct inode *inode = d_inode(path->dentry); /* * We currently export only dirs, regular files, and (for v4 * pseudoroot) symlinks. */ if (!S_ISDIR(inode->i_mode) && !S_ISLNK(inode->i_mode) && !S_ISREG(inode->i_mode)) return -ENOTDIR; /* * Mountd should never pass down a writeable V4ROOT export, but, * just to make sure: */ if (*flags & NFSEXP_V4ROOT) *flags |= NFSEXP_READONLY; /* There are two requirements on a filesystem to be exportable. * 1: We must be able to identify the filesystem from a number. * either a device number (so FS_REQUIRES_DEV needed) * or an FSID number (so NFSEXP_FSID or ->uuid is needed). * 2: We must be able to find an inode from a filehandle. * This means that s_export_op must be set. * 3: We must not currently be on an idmapped mount. 
*/ if (!(inode->i_sb->s_type->fs_flags & FS_REQUIRES_DEV) && !(*flags & NFSEXP_FSID) && uuid == NULL) { dprintk("exp_export: export of non-dev fs without fsid\n"); return -EINVAL; } if (!exportfs_can_decode_fh(inode->i_sb->s_export_op)) { dprintk("exp_export: export of invalid fs type.\n"); return -EINVAL; } if (is_idmapped_mnt(path->mnt)) { dprintk("exp_export: export of idmapped mounts not yet supported.\n"); return -EINVAL; } if (inode->i_sb->s_export_op->flags & EXPORT_OP_NOSUBTREECHK && !(*flags & NFSEXP_NOSUBTREECHECK)) { dprintk("%s: %s does not support subtree checking!\n", __func__, inode->i_sb->s_type->name); return -EINVAL; } return 0; } #ifdef CONFIG_NFSD_V4 static int fsloc_parse(char **mesg, char *buf, struct nfsd4_fs_locations *fsloc) { int len; int migrated, i, err; /* more than one fsloc */ if (fsloc->locations) return -EINVAL; /* listsize */ err = get_uint(mesg, &fsloc->locations_count); if (err) return err; if (fsloc->locations_count > MAX_FS_LOCATIONS) return -EINVAL; if (fsloc->locations_count == 0) return 0; fsloc->locations = kcalloc(fsloc->locations_count, sizeof(struct nfsd4_fs_location), GFP_KERNEL); if (!fsloc->locations) return -ENOMEM; for (i=0; i < fsloc->locations_count; i++) { /* colon separated host list */ err = -EINVAL; len = qword_get(mesg, buf, PAGE_SIZE); if (len <= 0) goto out_free_all; err = -ENOMEM; fsloc->locations[i].hosts = kstrdup(buf, GFP_KERNEL); if (!fsloc->locations[i].hosts) goto out_free_all; err = -EINVAL; /* slash separated path component list */ len = qword_get(mesg, buf, PAGE_SIZE); if (len <= 0) goto out_free_all; err = -ENOMEM; fsloc->locations[i].path = kstrdup(buf, GFP_KERNEL); if (!fsloc->locations[i].path) goto out_free_all; } /* migrated */ err = get_int(mesg, &migrated); if (err) goto out_free_all; err = -EINVAL; if (migrated < 0 || migrated > 1) goto out_free_all; fsloc->migrated = migrated; return 0; out_free_all: nfsd4_fslocs_free(fsloc); return err; } static int secinfo_parse(char **mesg, char *buf, struct svc_export *exp) { struct exp_flavor_info *f; u32 listsize; int err; /* more than one secinfo */ if (exp->ex_nflavors) return -EINVAL; err = get_uint(mesg, &listsize); if (err) return err; if (listsize > MAX_SECINFO_LIST) return -EINVAL; for (f = exp->ex_flavors; f < exp->ex_flavors + listsize; f++) { err = get_uint(mesg, &f->pseudoflavor); if (err) return err; /* * XXX: It would be nice to also check whether this * pseudoflavor is supported, so we can discover the * problem at export time instead of when a client fails * to authenticate. 
*/ err = get_uint(mesg, &f->flags); if (err) return err; /* Only some flags are allowed to differ between flavors: */ if (~NFSEXP_SECINFO_FLAGS & (f->flags ^ exp->ex_flags)) return -EINVAL; } exp->ex_nflavors = listsize; return 0; } #else /* CONFIG_NFSD_V4 */ static inline int fsloc_parse(char **mesg, char *buf, struct nfsd4_fs_locations *fsloc){return 0;} static inline int secinfo_parse(char **mesg, char *buf, struct svc_export *exp) { return 0; } #endif static int xprtsec_parse(char **mesg, char *buf, struct svc_export *exp) { unsigned int i, mode, listsize; int err; err = get_uint(mesg, &listsize); if (err) return err; if (listsize > NFSEXP_XPRTSEC_NUM) return -EINVAL; exp->ex_xprtsec_modes = 0; for (i = 0; i < listsize; i++) { err = get_uint(mesg, &mode); if (err) return err; if (mode > NFSEXP_XPRTSEC_MTLS) return -EINVAL; exp->ex_xprtsec_modes |= mode; } return 0; } static inline int nfsd_uuid_parse(char **mesg, char *buf, unsigned char **puuid) { int len; /* more than one uuid */ if (*puuid) return -EINVAL; /* expect a 16 byte uuid encoded as \xXXXX... */ len = qword_get(mesg, buf, PAGE_SIZE); if (len != EX_UUID_LEN) return -EINVAL; *puuid = kmemdup(buf, EX_UUID_LEN, GFP_KERNEL); if (*puuid == NULL) return -ENOMEM; return 0; } static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen) { /* client path expiry [flags anonuid anongid fsid] */ char *buf; int err; struct auth_domain *dom = NULL; struct svc_export exp = {}, *expp; int an_int; if (mesg[mlen-1] != '\n') return -EINVAL; mesg[mlen-1] = 0; buf = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; /* client */ err = -EINVAL; if (qword_get(&mesg, buf, PAGE_SIZE) <= 0) goto out; err = -ENOENT; dom = auth_domain_find(buf); if (!dom) goto out; /* path */ err = -EINVAL; if (qword_get(&mesg, buf, PAGE_SIZE) <= 0) goto out1; err = kern_path(buf, 0, &exp.ex_path); if (err) goto out1; exp.ex_client = dom; exp.cd = cd; exp.ex_devid_map = NULL; exp.ex_xprtsec_modes = NFSEXP_XPRTSEC_ALL; /* expiry */ err = get_expiry(&mesg, &exp.h.expiry_time); if (err) goto out3; /* flags */ err = get_int(&mesg, &an_int); if (err == -ENOENT) { err = 0; set_bit(CACHE_NEGATIVE, &exp.h.flags); } else { if (err || an_int < 0) goto out3; exp.ex_flags= an_int; /* anon uid */ err = get_int(&mesg, &an_int); if (err) goto out3; exp.ex_anon_uid= make_kuid(current_user_ns(), an_int); /* anon gid */ err = get_int(&mesg, &an_int); if (err) goto out3; exp.ex_anon_gid= make_kgid(current_user_ns(), an_int); /* fsid */ err = get_int(&mesg, &an_int); if (err) goto out3; exp.ex_fsid = an_int; while (qword_get(&mesg, buf, PAGE_SIZE) > 0) { if (strcmp(buf, "fsloc") == 0) err = fsloc_parse(&mesg, buf, &exp.ex_fslocs); else if (strcmp(buf, "uuid") == 0) err = nfsd_uuid_parse(&mesg, buf, &exp.ex_uuid); else if (strcmp(buf, "secinfo") == 0) err = secinfo_parse(&mesg, buf, &exp); else if (strcmp(buf, "xprtsec") == 0) err = xprtsec_parse(&mesg, buf, &exp); else /* quietly ignore unknown words and anything * following. Newer user-space can try to set * new values, then see what the result was. */ break; if (err) goto out4; } err = check_export(&exp.ex_path, &exp.ex_flags, exp.ex_uuid); if (err) goto out4; /* * No point caching this if it would immediately expire. * Also, this protects exportfs's dummy export from the * anon_uid/anon_gid checks: */ if (exp.h.expiry_time < seconds_since_boot()) goto out4; /* * For some reason exportfs has been passing down an * invalid (-1) uid & gid on the "dummy" export which it * uses to test export support. 
To make sure exportfs * sees errors from check_export we therefore need to * delay these checks till after check_export: */ err = -EINVAL; if (!uid_valid(exp.ex_anon_uid)) goto out4; if (!gid_valid(exp.ex_anon_gid)) goto out4; err = 0; nfsd4_setup_layout_type(&exp); } expp = svc_export_lookup(&exp); if (!expp) { err = -ENOMEM; goto out4; } expp = svc_export_update(&exp, expp); if (expp) { trace_nfsd_export_update(expp); cache_flush(); exp_put(expp); } else err = -ENOMEM; out4: nfsd4_fslocs_free(&exp.ex_fslocs); kfree(exp.ex_uuid); out3: path_put(&exp.ex_path); out1: auth_domain_put(dom); out: kfree(buf); return err; } static void exp_flags(struct seq_file *m, int flag, int fsid, kuid_t anonu, kgid_t anong, struct nfsd4_fs_locations *fslocs); static void show_secinfo(struct seq_file *m, struct svc_export *exp); static int is_export_stats_file(struct seq_file *m) { /* * The export_stats file uses the same ops as the exports file. * We use the file's name to determine the reported info per export. * There is no rename in nsfdfs, so d_name.name is stable. */ return !strcmp(m->file->f_path.dentry->d_name.name, "export_stats"); } static int svc_export_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h) { struct svc_export *exp; bool export_stats = is_export_stats_file(m); if (h == NULL) { if (export_stats) seq_puts(m, "#path domain start-time\n#\tstats\n"); else seq_puts(m, "#path domain(flags)\n"); return 0; } exp = container_of(h, struct svc_export, h); seq_path(m, &exp->ex_path, " \t\n\\"); seq_putc(m, '\t'); seq_escape(m, exp->ex_client->name, " \t\n\\"); if (export_stats) { struct percpu_counter *counter = exp->ex_stats->counter; seq_printf(m, "\t%lld\n", exp->ex_stats->start_time); seq_printf(m, "\tfh_stale: %lld\n", percpu_counter_sum_positive(&counter[EXP_STATS_FH_STALE])); seq_printf(m, "\tio_read: %lld\n", percpu_counter_sum_positive(&counter[EXP_STATS_IO_READ])); seq_printf(m, "\tio_write: %lld\n", percpu_counter_sum_positive(&counter[EXP_STATS_IO_WRITE])); seq_putc(m, '\n'); return 0; } seq_putc(m, '('); if (test_bit(CACHE_VALID, &h->flags) && !test_bit(CACHE_NEGATIVE, &h->flags)) { exp_flags(m, exp->ex_flags, exp->ex_fsid, exp->ex_anon_uid, exp->ex_anon_gid, &exp->ex_fslocs); if (exp->ex_uuid) { int i; seq_puts(m, ",uuid="); for (i = 0; i < EX_UUID_LEN; i++) { if ((i&3) == 0 && i) seq_putc(m, ':'); seq_printf(m, "%02x", exp->ex_uuid[i]); } } show_secinfo(m, exp); } seq_puts(m, ")\n"); return 0; } static int svc_export_match(struct cache_head *a, struct cache_head *b) { struct svc_export *orig = container_of(a, struct svc_export, h); struct svc_export *new = container_of(b, struct svc_export, h); return orig->ex_client == new->ex_client && path_equal(&orig->ex_path, &new->ex_path); } static void svc_export_init(struct cache_head *cnew, struct cache_head *citem) { struct svc_export *new = container_of(cnew, struct svc_export, h); struct svc_export *item = container_of(citem, struct svc_export, h); kref_get(&item->ex_client->ref); new->ex_client = item->ex_client; new->ex_path = item->ex_path; path_get(&item->ex_path); new->ex_fslocs.locations = NULL; new->ex_fslocs.locations_count = 0; new->ex_fslocs.migrated = 0; new->ex_layout_types = 0; new->ex_uuid = NULL; new->cd = item->cd; export_stats_reset(new->ex_stats); } static void export_update(struct cache_head *cnew, struct cache_head *citem) { struct svc_export *new = container_of(cnew, struct svc_export, h); struct svc_export *item = container_of(citem, struct svc_export, h); int i; new->ex_flags = item->ex_flags; 
new->ex_anon_uid = item->ex_anon_uid; new->ex_anon_gid = item->ex_anon_gid; new->ex_fsid = item->ex_fsid; new->ex_devid_map = item->ex_devid_map; item->ex_devid_map = NULL; new->ex_uuid = item->ex_uuid; item->ex_uuid = NULL; new->ex_fslocs.locations = item->ex_fslocs.locations; item->ex_fslocs.locations = NULL; new->ex_fslocs.locations_count = item->ex_fslocs.locations_count; item->ex_fslocs.locations_count = 0; new->ex_fslocs.migrated = item->ex_fslocs.migrated; item->ex_fslocs.migrated = 0; new->ex_layout_types = item->ex_layout_types; new->ex_nflavors = item->ex_nflavors; for (i = 0; i < MAX_SECINFO_LIST; i++) { new->ex_flavors[i] = item->ex_flavors[i]; } new->ex_xprtsec_modes = item->ex_xprtsec_modes; } static struct cache_head *svc_export_alloc(void) { struct svc_export *i = kmalloc(sizeof(*i), GFP_KERNEL); if (!i) return NULL; i->ex_stats = kmalloc(sizeof(*(i->ex_stats)), GFP_KERNEL); if (!i->ex_stats) { kfree(i); return NULL; } if (export_stats_init(i->ex_stats)) { kfree(i->ex_stats); kfree(i); return NULL; } return &i->h; } static const struct cache_detail svc_export_cache_template = { .owner = THIS_MODULE, .hash_size = EXPORT_HASHMAX, .name = "nfsd.export", .cache_put = svc_export_put, .cache_upcall = svc_export_upcall, .cache_request = svc_export_request, .cache_parse = svc_export_parse, .cache_show = svc_export_show, .match = svc_export_match, .init = svc_export_init, .update = export_update, .alloc = svc_export_alloc, }; static int svc_export_hash(struct svc_export *exp) { int hash; hash = hash_ptr(exp->ex_client, EXPORT_HASHBITS); hash ^= hash_ptr(exp->ex_path.dentry, EXPORT_HASHBITS); hash ^= hash_ptr(exp->ex_path.mnt, EXPORT_HASHBITS); return hash; } static struct svc_export * svc_export_lookup(struct svc_export *exp) { struct cache_head *ch; int hash = svc_export_hash(exp); ch = sunrpc_cache_lookup_rcu(exp->cd, &exp->h, hash); if (ch) return container_of(ch, struct svc_export, h); else return NULL; } static struct svc_export * svc_export_update(struct svc_export *new, struct svc_export *old) { struct cache_head *ch; int hash = svc_export_hash(old); ch = sunrpc_cache_update(old->cd, &new->h, &old->h, hash); if (ch) return container_of(ch, struct svc_export, h); else return NULL; } static struct svc_expkey * exp_find_key(struct cache_detail *cd, struct auth_domain *clp, int fsid_type, u32 *fsidv, struct cache_req *reqp) { struct svc_expkey key, *ek; int err; if (!clp) return ERR_PTR(-ENOENT); key.ek_client = clp; key.ek_fsidtype = fsid_type; memcpy(key.ek_fsid, fsidv, key_len(fsid_type)); ek = svc_expkey_lookup(cd, &key); if (ek == NULL) return ERR_PTR(-ENOMEM); err = cache_check(cd, &ek->h, reqp); if (err) { trace_nfsd_exp_find_key(&key, err); return ERR_PTR(err); } return ek; } static struct svc_export * exp_get_by_name(struct cache_detail *cd, struct auth_domain *clp, const struct path *path, struct cache_req *reqp) { struct svc_export *exp, key; int err; if (!clp) return ERR_PTR(-ENOENT); key.ex_client = clp; key.ex_path = *path; key.cd = cd; exp = svc_export_lookup(&key); if (exp == NULL) return ERR_PTR(-ENOMEM); err = cache_check(cd, &exp->h, reqp); if (err) { trace_nfsd_exp_get_by_name(&key, err); return ERR_PTR(err); } return exp; } /* * Find the export entry for a given dentry. 
*/ static struct svc_export * exp_parent(struct cache_detail *cd, struct auth_domain *clp, struct path *path) { struct dentry *saved = dget(path->dentry); struct svc_export *exp = exp_get_by_name(cd, clp, path, NULL); while (PTR_ERR(exp) == -ENOENT && !IS_ROOT(path->dentry)) { struct dentry *parent = dget_parent(path->dentry); dput(path->dentry); path->dentry = parent; exp = exp_get_by_name(cd, clp, path, NULL); } dput(path->dentry); path->dentry = saved; return exp; } /* * Obtain the root fh on behalf of a client. * This could be done in user space, but I feel that it adds some safety * since its harder to fool a kernel module than a user space program. */ int exp_rootfh(struct net *net, struct auth_domain *clp, char *name, struct knfsd_fh *f, int maxsize) { struct svc_export *exp; struct path path; struct inode *inode; struct svc_fh fh; int err; struct nfsd_net *nn = net_generic(net, nfsd_net_id); struct cache_detail *cd = nn->svc_export_cache; err = -EPERM; /* NB: we probably ought to check that it's NUL-terminated */ if (kern_path(name, 0, &path)) { printk("nfsd: exp_rootfh path not found %s", name); return err; } inode = d_inode(path.dentry); dprintk("nfsd: exp_rootfh(%s [%p] %s:%s/%ld)\n", name, path.dentry, clp->name, inode->i_sb->s_id, inode->i_ino); exp = exp_parent(cd, clp, &path); if (IS_ERR(exp)) { err = PTR_ERR(exp); goto out; } /* * fh must be initialized before calling fh_compose */ fh_init(&fh, maxsize); if (fh_compose(&fh, exp, path.dentry, NULL)) err = -EINVAL; else err = 0; memcpy(f, &fh.fh_handle, sizeof(struct knfsd_fh)); fh_put(&fh); exp_put(exp); out: path_put(&path); return err; } static struct svc_export *exp_find(struct cache_detail *cd, struct auth_domain *clp, int fsid_type, u32 *fsidv, struct cache_req *reqp) { struct svc_export *exp; struct nfsd_net *nn = net_generic(cd->net, nfsd_net_id); struct svc_expkey *ek = exp_find_key(nn->svc_expkey_cache, clp, fsid_type, fsidv, reqp); if (IS_ERR(ek)) return ERR_CAST(ek); exp = exp_get_by_name(cd, clp, &ek->ek_path, reqp); cache_put(&ek->h, nn->svc_expkey_cache); if (IS_ERR(exp)) return ERR_CAST(exp); return exp; } __be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp) { struct exp_flavor_info *f, *end = exp->ex_flavors + exp->ex_nflavors; struct svc_xprt *xprt = rqstp->rq_xprt; if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_NONE) { if (!test_bit(XPT_TLS_SESSION, &xprt->xpt_flags)) goto ok; } if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_TLS) { if (test_bit(XPT_TLS_SESSION, &xprt->xpt_flags) && !test_bit(XPT_PEER_AUTH, &xprt->xpt_flags)) goto ok; } if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_MTLS) { if (test_bit(XPT_TLS_SESSION, &xprt->xpt_flags) && test_bit(XPT_PEER_AUTH, &xprt->xpt_flags)) goto ok; } goto denied; ok: /* legacy gss-only clients are always OK: */ if (exp->ex_client == rqstp->rq_gssclient) return 0; /* ip-address based client; check sec= export option: */ for (f = exp->ex_flavors; f < end; f++) { if (f->pseudoflavor == rqstp->rq_cred.cr_flavor) return 0; } /* defaults in absence of sec= options: */ if (exp->ex_nflavors == 0) { if (rqstp->rq_cred.cr_flavor == RPC_AUTH_NULL || rqstp->rq_cred.cr_flavor == RPC_AUTH_UNIX) return 0; } /* If the compound op contains a spo_must_allowed op, * it will be sent with integrity/protection which * will have to be expressly allowed on mounts that * don't support it */ if (nfsd4_spo_must_allow(rqstp)) return 0; denied: return rqstp->rq_vers < 4 ? 
nfserr_acces : nfserr_wrongsec; } /* * Uses rq_client and rq_gssclient to find an export; uses rq_client (an * auth_unix client) if it's available and has secinfo information; * otherwise, will try to use rq_gssclient. * * Called from functions that handle requests; functions that do work on * behalf of mountd are passed a single client name to use, and should * use exp_get_by_name() or exp_find(). */ struct svc_export * rqst_exp_get_by_name(struct svc_rqst *rqstp, struct path *path) { struct svc_export *gssexp, *exp = ERR_PTR(-ENOENT); struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); struct cache_detail *cd = nn->svc_export_cache; if (rqstp->rq_client == NULL) goto gss; /* First try the auth_unix client: */ exp = exp_get_by_name(cd, rqstp->rq_client, path, &rqstp->rq_chandle); if (PTR_ERR(exp) == -ENOENT) goto gss; if (IS_ERR(exp)) return exp; /* If it has secinfo, assume there are no gss/... clients */ if (exp->ex_nflavors > 0) return exp; gss: /* Otherwise, try falling back on gss client */ if (rqstp->rq_gssclient == NULL) return exp; gssexp = exp_get_by_name(cd, rqstp->rq_gssclient, path, &rqstp->rq_chandle); if (PTR_ERR(gssexp) == -ENOENT) return exp; if (!IS_ERR(exp)) exp_put(exp); return gssexp; } struct svc_export * rqst_exp_find(struct svc_rqst *rqstp, int fsid_type, u32 *fsidv) { struct svc_export *gssexp, *exp = ERR_PTR(-ENOENT); struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); struct cache_detail *cd = nn->svc_export_cache; if (rqstp->rq_client == NULL) goto gss; /* First try the auth_unix client: */ exp = exp_find(cd, rqstp->rq_client, fsid_type, fsidv, &rqstp->rq_chandle); if (PTR_ERR(exp) == -ENOENT) goto gss; if (IS_ERR(exp)) return exp; /* If it has secinfo, assume there are no gss/... clients */ if (exp->ex_nflavors > 0) return exp; gss: /* Otherwise, try falling back on gss client */ if (rqstp->rq_gssclient == NULL) return exp; gssexp = exp_find(cd, rqstp->rq_gssclient, fsid_type, fsidv, &rqstp->rq_chandle); if (PTR_ERR(gssexp) == -ENOENT) return exp; if (!IS_ERR(exp)) exp_put(exp); return gssexp; } struct svc_export * rqst_exp_parent(struct svc_rqst *rqstp, struct path *path) { struct dentry *saved = dget(path->dentry); struct svc_export *exp = rqst_exp_get_by_name(rqstp, path); while (PTR_ERR(exp) == -ENOENT && !IS_ROOT(path->dentry)) { struct dentry *parent = dget_parent(path->dentry); dput(path->dentry); path->dentry = parent; exp = rqst_exp_get_by_name(rqstp, path); } dput(path->dentry); path->dentry = saved; return exp; } struct svc_export *rqst_find_fsidzero_export(struct svc_rqst *rqstp) { u32 fsidv[2]; mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL); return rqst_exp_find(rqstp, FSID_NUM, fsidv); } /* * Called when we need the filehandle for the root of the pseudofs, * for a given NFSv4 client. 
The root is defined to be the * export point with fsid==0 */ __be32 exp_pseudoroot(struct svc_rqst *rqstp, struct svc_fh *fhp) { struct svc_export *exp; __be32 rv; exp = rqst_find_fsidzero_export(rqstp); if (IS_ERR(exp)) return nfserrno(PTR_ERR(exp)); rv = fh_compose(fhp, exp, exp->ex_path.dentry, NULL); exp_put(exp); return rv; } static struct flags { int flag; char *name[2]; } expflags[] = { { NFSEXP_READONLY, {"ro", "rw"}}, { NFSEXP_INSECURE_PORT, {"insecure", ""}}, { NFSEXP_ROOTSQUASH, {"root_squash", "no_root_squash"}}, { NFSEXP_ALLSQUASH, {"all_squash", ""}}, { NFSEXP_ASYNC, {"async", "sync"}}, { NFSEXP_GATHERED_WRITES, {"wdelay", "no_wdelay"}}, { NFSEXP_NOREADDIRPLUS, {"nordirplus", ""}}, { NFSEXP_NOHIDE, {"nohide", ""}}, { NFSEXP_CROSSMOUNT, {"crossmnt", ""}}, { NFSEXP_NOSUBTREECHECK, {"no_subtree_check", ""}}, { NFSEXP_NOAUTHNLM, {"insecure_locks", ""}}, { NFSEXP_V4ROOT, {"v4root", ""}}, { NFSEXP_PNFS, {"pnfs", ""}}, { NFSEXP_SECURITY_LABEL, {"security_label", ""}}, { 0, {"", ""}} }; static void show_expflags(struct seq_file *m, int flags, int mask) { struct flags *flg; int state, first = 0; for (flg = expflags; flg->flag; flg++) { if (flg->flag & ~mask) continue; state = (flg->flag & flags) ? 0 : 1; if (*flg->name[state]) seq_printf(m, "%s%s", first++?",":"", flg->name[state]); } } static void show_secinfo_flags(struct seq_file *m, int flags) { seq_printf(m, ","); show_expflags(m, flags, NFSEXP_SECINFO_FLAGS); } static bool secinfo_flags_equal(int f, int g) { f &= NFSEXP_SECINFO_FLAGS; g &= NFSEXP_SECINFO_FLAGS; return f == g; } static int show_secinfo_run(struct seq_file *m, struct exp_flavor_info **fp, struct exp_flavor_info *end) { int flags; flags = (*fp)->flags; seq_printf(m, ",sec=%d", (*fp)->pseudoflavor); (*fp)++; while (*fp != end && secinfo_flags_equal(flags, (*fp)->flags)) { seq_printf(m, ":%d", (*fp)->pseudoflavor); (*fp)++; } return flags; } static void show_secinfo(struct seq_file *m, struct svc_export *exp) { struct exp_flavor_info *f; struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors; int flags; if (exp->ex_nflavors == 0) return; f = exp->ex_flavors; flags = show_secinfo_run(m, &f, end); if (!secinfo_flags_equal(flags, exp->ex_flags)) show_secinfo_flags(m, flags); while (f != end) { flags = show_secinfo_run(m, &f, end); show_secinfo_flags(m, flags); } } static void exp_flags(struct seq_file *m, int flag, int fsid, kuid_t anonu, kgid_t anong, struct nfsd4_fs_locations *fsloc) { struct user_namespace *userns = m->file->f_cred->user_ns; show_expflags(m, flag, NFSEXP_ALLFLAGS); if (flag & NFSEXP_FSID) seq_printf(m, ",fsid=%d", fsid); if (!uid_eq(anonu, make_kuid(userns, (uid_t)-2)) && !uid_eq(anonu, make_kuid(userns, 0x10000-2))) seq_printf(m, ",anonuid=%u", from_kuid_munged(userns, anonu)); if (!gid_eq(anong, make_kgid(userns, (gid_t)-2)) && !gid_eq(anong, make_kgid(userns, 0x10000-2))) seq_printf(m, ",anongid=%u", from_kgid_munged(userns, anong)); if (fsloc && fsloc->locations_count > 0) { char *loctype = (fsloc->migrated) ? 
"refer" : "replicas"; int i; seq_printf(m, ",%s=", loctype); seq_escape(m, fsloc->locations[0].path, ",;@ \t\n\\"); seq_putc(m, '@'); seq_escape(m, fsloc->locations[0].hosts, ",;@ \t\n\\"); for (i = 1; i < fsloc->locations_count; i++) { seq_putc(m, ';'); seq_escape(m, fsloc->locations[i].path, ",;@ \t\n\\"); seq_putc(m, '@'); seq_escape(m, fsloc->locations[i].hosts, ",;@ \t\n\\"); } } } static int e_show(struct seq_file *m, void *p) { struct cache_head *cp = p; struct svc_export *exp = container_of(cp, struct svc_export, h); struct cache_detail *cd = m->private; bool export_stats = is_export_stats_file(m); if (p == SEQ_START_TOKEN) { seq_puts(m, "# Version 1.1\n"); if (export_stats) seq_puts(m, "# Path Client Start-time\n#\tStats\n"); else seq_puts(m, "# Path Client(Flags) # IPs\n"); return 0; } exp_get(exp); if (cache_check(cd, &exp->h, NULL)) return 0; exp_put(exp); return svc_export_show(m, cd, cp); } const struct seq_operations nfs_exports_op = { .start = cache_seq_start_rcu, .next = cache_seq_next_rcu, .stop = cache_seq_stop_rcu, .show = e_show, }; /* * Initialize the exports module. */ int nfsd_export_init(struct net *net) { int rv; struct nfsd_net *nn = net_generic(net, nfsd_net_id); dprintk("nfsd: initializing export module (net: %x).\n", net->ns.inum); nn->svc_export_cache = cache_create_net(&svc_export_cache_template, net); if (IS_ERR(nn->svc_export_cache)) return PTR_ERR(nn->svc_export_cache); rv = cache_register_net(nn->svc_export_cache, net); if (rv) goto destroy_export_cache; nn->svc_expkey_cache = cache_create_net(&svc_expkey_cache_template, net); if (IS_ERR(nn->svc_expkey_cache)) { rv = PTR_ERR(nn->svc_expkey_cache); goto unregister_export_cache; } rv = cache_register_net(nn->svc_expkey_cache, net); if (rv) goto destroy_expkey_cache; return 0; destroy_expkey_cache: cache_destroy_net(nn->svc_expkey_cache, net); unregister_export_cache: cache_unregister_net(nn->svc_export_cache, net); destroy_export_cache: cache_destroy_net(nn->svc_export_cache, net); return rv; } /* * Flush exports table - called when last nfsd thread is killed */ void nfsd_export_flush(struct net *net) { struct nfsd_net *nn = net_generic(net, nfsd_net_id); cache_purge(nn->svc_expkey_cache); cache_purge(nn->svc_export_cache); } /* * Shutdown the exports module. */ void nfsd_export_shutdown(struct net *net) { struct nfsd_net *nn = net_generic(net, nfsd_net_id); dprintk("nfsd: shutting down export module (net: %x).\n", net->ns.inum); cache_unregister_net(nn->svc_expkey_cache, net); cache_unregister_net(nn->svc_export_cache, net); cache_destroy_net(nn->svc_expkey_cache, net); cache_destroy_net(nn->svc_export_cache, net); svcauth_unix_purge(net); dprintk("nfsd: export shutdown complete (net: %x).\n", net->ns.inum); } |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ChaCha and XChaCha stream ciphers, including ChaCha20 (RFC7539)
 *
 * Copyright (C) 2015 Martin Willi
 * Copyright (C) 2018 Google LLC
 */

#include <asm/unaligned.h>
#include <crypto/algapi.h>
#include <crypto/internal/chacha.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>

static int chacha_stream_xor(struct skcipher_request *req,
                             const struct chacha_ctx *ctx, const u8 *iv)
{
        struct skcipher_walk walk;
        u32 state[16];
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        chacha_init_generic(state, ctx->key, iv);

        while (walk.nbytes > 0) {
                unsigned int nbytes = walk.nbytes;

                if (nbytes < walk.total)
                        nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE);

                chacha_crypt_generic(state, walk.dst.virt.addr,
                                     walk.src.virt.addr, nbytes, ctx->nrounds);
                err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
        }

        return err;
}

static int crypto_chacha_crypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);

        return chacha_stream_xor(req, ctx, req->iv);
}

static int crypto_xchacha_crypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct chacha_ctx subctx;
        u32 state[16];
        u8 real_iv[16];

        /* Compute the subkey given the original key and first 128 nonce bits */
        chacha_init_generic(state, ctx->key, req->iv);
        hchacha_block_generic(state, subctx.key, ctx->nrounds);
        subctx.nrounds = ctx->nrounds;

        /* Build the real IV */
        memcpy(&real_iv[0], req->iv + 24, 8); /* stream position */
        memcpy(&real_iv[8], req->iv + 16, 8); /* remaining 64 nonce bits */

        /* Generate the stream and XOR it with the data */
        return chacha_stream_xor(req, &subctx, real_iv);
}

static struct skcipher_alg algs[] = {
        {
                .base.cra_name          = "chacha20",
                .base.cra_driver_name   = "chacha20-generic",
                .base.cra_priority      = 100,
                .base.cra_blocksize     = 1,
                .base.cra_ctxsize       = sizeof(struct chacha_ctx),
                .base.cra_module        = THIS_MODULE,

                .min_keysize            = CHACHA_KEY_SIZE,
                .max_keysize            = CHACHA_KEY_SIZE,
                .ivsize                 = CHACHA_IV_SIZE,
                .chunksize              = CHACHA_BLOCK_SIZE,
                .setkey                 = chacha20_setkey,
                .encrypt                = crypto_chacha_crypt,
                .decrypt                = crypto_chacha_crypt,
        }, {
                .base.cra_name          = "xchacha20",
                .base.cra_driver_name   = "xchacha20-generic",
                .base.cra_priority      = 100,
                .base.cra_blocksize     = 1,
                .base.cra_ctxsize       = sizeof(struct chacha_ctx),
                .base.cra_module        = THIS_MODULE,

                .min_keysize            = CHACHA_KEY_SIZE,
                .max_keysize            = CHACHA_KEY_SIZE,
                .ivsize                 = XCHACHA_IV_SIZE,
                .chunksize              = CHACHA_BLOCK_SIZE,
                .setkey                 = chacha20_setkey,
                .encrypt                = crypto_xchacha_crypt,
                .decrypt                = crypto_xchacha_crypt,
        }, {
                .base.cra_name          = "xchacha12",
                .base.cra_driver_name   = "xchacha12-generic",
                .base.cra_priority      = 100,
                .base.cra_blocksize     = 1,
                .base.cra_ctxsize       = sizeof(struct chacha_ctx),
                .base.cra_module        = THIS_MODULE,

                .min_keysize            = CHACHA_KEY_SIZE,
                .max_keysize            = CHACHA_KEY_SIZE,
                .ivsize                 = XCHACHA_IV_SIZE,
                .chunksize              = CHACHA_BLOCK_SIZE,
                .setkey                 = chacha12_setkey,
                .encrypt                = crypto_xchacha_crypt,
                .decrypt                = crypto_xchacha_crypt,
        }
};

static int __init chacha_generic_mod_init(void)
{
        return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
}

static void __exit chacha_generic_mod_fini(void)
{
        crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
}

subsys_initcall(chacha_generic_mod_init);
module_exit(chacha_generic_mod_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (generic)");
MODULE_ALIAS_CRYPTO("chacha20");
MODULE_ALIAS_CRYPTO("chacha20-generic");
MODULE_ALIAS_CRYPTO("xchacha20");
MODULE_ALIAS_CRYPTO("xchacha20-generic");
MODULE_ALIAS_CRYPTO("xchacha12");
MODULE_ALIAS_CRYPTO("xchacha12-generic");
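/*
 * Illustrative sketch: a minimal example of driving the "xchacha20" algorithm
 * registered above through the symmetric key cipher API.  The function name
 * and the all-zero key/IV/data are placeholders, and nothing calls this.
 * Per crypto_xchacha_crypt() above, the 32-byte IV is laid out as: bytes
 * 0-15 nonce fed to HChaCha, bytes 16-23 remaining nonce bits, bytes 24-31
 * initial stream position.
 */
static int __maybe_unused xchacha20_usage_sketch(void)
{
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        struct scatterlist sg;
        DECLARE_CRYPTO_WAIT(wait);
        u8 key[CHACHA_KEY_SIZE] = { 0 };        /* placeholder key */
        u8 iv[XCHACHA_IV_SIZE] = { 0 };         /* placeholder nonce/counter */
        u8 buf[64] = { 0 };                     /* encrypted in place below */
        int err;

        tfm = crypto_alloc_skcipher("xchacha20", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_skcipher_setkey(tfm, key, sizeof(key));
        if (err)
                goto out_free_tfm;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, &wait);
        sg_init_one(&sg, buf, sizeof(buf));
        skcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);

        /* Stream cipher: decryption is the same operation as encryption. */
        err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

        skcipher_request_free(req);
out_free_tfm:
        crypto_free_skcipher(tfm);
        return err;
}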
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2017 - 2018 Covalent IO, Inc.
http://covalent.io */ #include <linux/skmsg.h> #include <linux/filter.h> #include <linux/bpf.h> #include <linux/init.h> #include <linux/wait.h> #include <linux/util_macros.h> #include <net/inet_common.h> #include <net/tls.h> void tcp_eat_skb(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tcp; int copied; if (!skb || !skb->len || !sk_is_tcp(sk)) return; if (skb_bpf_strparser(skb)) return; tcp = tcp_sk(sk); copied = tcp->copied_seq + skb->len; WRITE_ONCE(tcp->copied_seq, copied); tcp_rcv_space_adjust(sk); __tcp_cleanup_rbuf(sk, skb->len); } static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock, struct sk_msg *msg, u32 apply_bytes, int flags) { bool apply = apply_bytes; struct scatterlist *sge; u32 size, copied = 0; struct sk_msg *tmp; int i, ret = 0; tmp = kzalloc(sizeof(*tmp), __GFP_NOWARN | GFP_KERNEL); if (unlikely(!tmp)) return -ENOMEM; lock_sock(sk); tmp->sg.start = msg->sg.start; i = msg->sg.start; do { sge = sk_msg_elem(msg, i); size = (apply && apply_bytes < sge->length) ? apply_bytes : sge->length; if (!sk_wmem_schedule(sk, size)) { if (!copied) ret = -ENOMEM; break; } sk_mem_charge(sk, size); sk_msg_xfer(tmp, msg, i, size); copied += size; if (sge->length) get_page(sk_msg_page(tmp, i)); sk_msg_iter_var_next(i); tmp->sg.end = i; if (apply) { apply_bytes -= size; if (!apply_bytes) { if (sge->length) sk_msg_iter_var_prev(i); break; } } } while (i != msg->sg.end); if (!ret) { msg->sg.start = i; sk_psock_queue_msg(psock, tmp); sk_psock_data_ready(sk, psock); } else { sk_msg_free(sk, tmp); kfree(tmp); } release_sock(sk); return ret; } static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes, int flags, bool uncharge) { struct msghdr msghdr = {}; bool apply = apply_bytes; struct scatterlist *sge; struct page *page; int size, ret = 0; u32 off; while (1) { struct bio_vec bvec; bool has_tx_ulp; sge = sk_msg_elem(msg, msg->sg.start); size = (apply && apply_bytes < sge->length) ? apply_bytes : sge->length; off = sge->offset; page = sg_page(sge); tcp_rate_check_app_limited(sk); retry: msghdr.msg_flags = flags | MSG_SPLICE_PAGES; has_tx_ulp = tls_sw_has_ctx_tx(sk); if (has_tx_ulp) msghdr.msg_flags |= MSG_SENDPAGE_NOPOLICY; if (size < sge->length && msg->sg.start != msg->sg.end) msghdr.msg_flags |= MSG_MORE; bvec_set_page(&bvec, page, size, off); iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, size); ret = tcp_sendmsg_locked(sk, &msghdr, size); if (ret <= 0) return ret; if (apply) apply_bytes -= ret; msg->sg.size -= ret; sge->offset += ret; sge->length -= ret; if (uncharge) sk_mem_uncharge(sk, ret); if (ret != size) { size -= ret; off += ret; goto retry; } if (!sge->length) { put_page(page); sk_msg_iter_next(msg, start); sg_init_table(sge, 1); if (msg->sg.start == msg->sg.end) break; } if (apply && !apply_bytes) break; } return 0; } static int tcp_bpf_push_locked(struct sock *sk, struct sk_msg *msg, u32 apply_bytes, int flags, bool uncharge) { int ret; lock_sock(sk); ret = tcp_bpf_push(sk, msg, apply_bytes, flags, uncharge); release_sock(sk); return ret; } int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress, struct sk_msg *msg, u32 bytes, int flags) { struct sk_psock *psock = sk_psock_get(sk); int ret; if (unlikely(!psock)) return -EPIPE; ret = ingress ? 
bpf_tcp_ingress(sk, psock, msg, bytes, flags) : tcp_bpf_push_locked(sk, msg, bytes, flags, false); sk_psock_put(sk, psock); return ret; } EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir); #ifdef CONFIG_BPF_SYSCALL static int tcp_msg_wait_data(struct sock *sk, struct sk_psock *psock, long timeo) { DEFINE_WAIT_FUNC(wait, woken_wake_function); int ret = 0; if (sk->sk_shutdown & RCV_SHUTDOWN) return 1; if (!timeo) return ret; add_wait_queue(sk_sleep(sk), &wait); sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); ret = sk_wait_event(sk, &timeo, !list_empty(&psock->ingress_msg) || !skb_queue_empty_lockless(&sk->sk_receive_queue), &wait); sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); remove_wait_queue(sk_sleep(sk), &wait); return ret; } static bool is_next_msg_fin(struct sk_psock *psock) { struct scatterlist *sge; struct sk_msg *msg_rx; int i; msg_rx = sk_psock_peek_msg(psock); i = msg_rx->sg.start; sge = sk_msg_elem(msg_rx, i); if (!sge->length) { struct sk_buff *skb = msg_rx->skb; if (skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) return true; } return false; } static int tcp_bpf_recvmsg_parser(struct sock *sk, struct msghdr *msg, size_t len, int flags, int *addr_len) { struct tcp_sock *tcp = tcp_sk(sk); int peek = flags & MSG_PEEK; u32 seq = tcp->copied_seq; struct sk_psock *psock; int copied = 0; if (unlikely(flags & MSG_ERRQUEUE)) return inet_recv_error(sk, msg, len, addr_len); if (!len) return 0; psock = sk_psock_get(sk); if (unlikely(!psock)) return tcp_recvmsg(sk, msg, len, flags, addr_len); lock_sock(sk); /* We may have received data on the sk_receive_queue pre-accept and * then we can not use read_skb in this context because we haven't * assigned a sk_socket yet so have no link to the ops. The work-around * is to check the sk_receive_queue and in these cases read skbs off * queue again. The read_skb hook is not running at this point because * of lock_sock so we avoid having multiple runners in read_skb. */ if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) { tcp_data_ready(sk); /* This handles the ENOMEM errors if we both receive data * pre accept and are already under memory pressure. At least * let user know to retry. */ if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) { copied = -EAGAIN; goto out; } } msg_bytes_ready: copied = sk_msg_recvmsg(sk, psock, msg, len, flags); /* The typical case for EFAULT is the socket was gracefully * shutdown with a FIN pkt. So check here the other case is * some error on copy_page_to_iter which would be unexpected. * On fin return correct return code to zero. 
*/ if (copied == -EFAULT) { bool is_fin = is_next_msg_fin(psock); if (is_fin) { copied = 0; seq++; goto out; } } seq += copied; if (!copied) { long timeo; int data; if (sock_flag(sk, SOCK_DONE)) goto out; if (sk->sk_err) { copied = sock_error(sk); goto out; } if (sk->sk_shutdown & RCV_SHUTDOWN) goto out; if (sk->sk_state == TCP_CLOSE) { copied = -ENOTCONN; goto out; } timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); if (!timeo) { copied = -EAGAIN; goto out; } if (signal_pending(current)) { copied = sock_intr_errno(timeo); goto out; } data = tcp_msg_wait_data(sk, psock, timeo); if (data < 0) { copied = data; goto unlock; } if (data && !sk_psock_queue_empty(psock)) goto msg_bytes_ready; copied = -EAGAIN; } out: if (!peek) WRITE_ONCE(tcp->copied_seq, seq); tcp_rcv_space_adjust(sk); if (copied > 0) __tcp_cleanup_rbuf(sk, copied); unlock: release_sock(sk); sk_psock_put(sk, psock); return copied; } static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, int *addr_len) { struct sk_psock *psock; int copied, ret; if (unlikely(flags & MSG_ERRQUEUE)) return inet_recv_error(sk, msg, len, addr_len); if (!len) return 0; psock = sk_psock_get(sk); if (unlikely(!psock)) return tcp_recvmsg(sk, msg, len, flags, addr_len); if (!skb_queue_empty(&sk->sk_receive_queue) && sk_psock_queue_empty(psock)) { sk_psock_put(sk, psock); return tcp_recvmsg(sk, msg, len, flags, addr_len); } lock_sock(sk); msg_bytes_ready: copied = sk_msg_recvmsg(sk, psock, msg, len, flags); if (!copied) { long timeo; int data; timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); data = tcp_msg_wait_data(sk, psock, timeo); if (data < 0) { ret = data; goto unlock; } if (data) { if (!sk_psock_queue_empty(psock)) goto msg_bytes_ready; release_sock(sk); sk_psock_put(sk, psock); return tcp_recvmsg(sk, msg, len, flags, addr_len); } copied = -EAGAIN; } ret = copied; unlock: release_sock(sk); sk_psock_put(sk, psock); return ret; } static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock, struct sk_msg *msg, int *copied, int flags) { bool cork = false, enospc = sk_msg_full(msg), redir_ingress; struct sock *sk_redir; u32 tosend, origsize, sent, delta = 0; u32 eval; int ret; more_data: if (psock->eval == __SK_NONE) { /* Track delta in msg size to add/subtract it on SK_DROP from * returned to user copied size. This ensures user doesn't * get a positive return code with msg_cut_data and SK_DROP * verdict. */ delta = msg->sg.size; psock->eval = sk_psock_msg_verdict(sk, psock, msg); delta -= msg->sg.size; } if (msg->cork_bytes && msg->cork_bytes > msg->sg.size && !enospc) { psock->cork_bytes = msg->cork_bytes - msg->sg.size; if (!psock->cork) { psock->cork = kzalloc(sizeof(*psock->cork), GFP_ATOMIC | __GFP_NOWARN); if (!psock->cork) return -ENOMEM; } memcpy(psock->cork, msg, sizeof(*msg)); return 0; } tosend = msg->sg.size; if (psock->apply_bytes && psock->apply_bytes < tosend) tosend = psock->apply_bytes; eval = __SK_NONE; switch (psock->eval) { case __SK_PASS: ret = tcp_bpf_push(sk, msg, tosend, flags, true); if (unlikely(ret)) { *copied -= sk_msg_free(sk, msg); break; } sk_msg_apply_bytes(psock, tosend); break; case __SK_REDIRECT: redir_ingress = psock->redir_ingress; sk_redir = psock->sk_redir; sk_msg_apply_bytes(psock, tosend); if (!psock->apply_bytes) { /* Clean up before releasing the sock lock. 
*/ eval = psock->eval; psock->eval = __SK_NONE; psock->sk_redir = NULL; } if (psock->cork) { cork = true; psock->cork = NULL; } sk_msg_return(sk, msg, tosend); release_sock(sk); origsize = msg->sg.size; ret = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress, msg, tosend, flags); sent = origsize - msg->sg.size; if (eval == __SK_REDIRECT) sock_put(sk_redir); lock_sock(sk); if (unlikely(ret < 0)) { int free = sk_msg_free_nocharge(sk, msg); if (!cork) *copied -= free; } if (cork) { sk_msg_free(sk, msg); kfree(msg); msg = NULL; ret = 0; } break; case __SK_DROP: default: sk_msg_free_partial(sk, msg, tosend); sk_msg_apply_bytes(psock, tosend); *copied -= (tosend + delta); return -EACCES; } if (likely(!ret)) { if (!psock->apply_bytes) { psock->eval = __SK_NONE; if (psock->sk_redir) { sock_put(psock->sk_redir); psock->sk_redir = NULL; } } if (msg && msg->sg.data[msg->sg.start].page_link && msg->sg.data[msg->sg.start].length) { if (eval == __SK_REDIRECT) sk_mem_charge(sk, tosend - sent); goto more_data; } } return ret; } static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) { struct sk_msg tmp, *msg_tx = NULL; int copied = 0, err = 0; struct sk_psock *psock; long timeo; int flags; /* Don't let internal flags through */ flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED); flags |= MSG_NO_SHARED_FRAGS; psock = sk_psock_get(sk); if (unlikely(!psock)) return tcp_sendmsg(sk, msg, size); lock_sock(sk); timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); while (msg_data_left(msg)) { bool enospc = false; u32 copy, osize; if (sk->sk_err) { err = -sk->sk_err; goto out_err; } copy = msg_data_left(msg); if (!sk_stream_memory_free(sk)) goto wait_for_sndbuf; if (psock->cork) { msg_tx = psock->cork; } else { msg_tx = &tmp; sk_msg_init(msg_tx); } osize = msg_tx->sg.size; err = sk_msg_alloc(sk, msg_tx, msg_tx->sg.size + copy, msg_tx->sg.end - 1); if (err) { if (err != -ENOSPC) goto wait_for_memory; enospc = true; copy = msg_tx->sg.size - osize; } err = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_tx, copy); if (err < 0) { sk_msg_trim(sk, msg_tx, osize); goto out_err; } copied += copy; if (psock->cork_bytes) { if (size > psock->cork_bytes) psock->cork_bytes = 0; else psock->cork_bytes -= size; if (psock->cork_bytes && !enospc) goto out_err; /* All cork bytes are accounted, rerun the prog. */ psock->eval = __SK_NONE; psock->cork_bytes = 0; } err = tcp_bpf_send_verdict(sk, psock, msg_tx, &copied, flags); if (unlikely(err < 0)) goto out_err; continue; wait_for_sndbuf: set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); wait_for_memory: err = sk_stream_wait_memory(sk, &timeo); if (err) { if (msg_tx && msg_tx != psock->cork) sk_msg_free(sk, msg_tx); goto out_err; } } out_err: if (err < 0) err = sk_stream_error(sk, msg->msg_flags, err); release_sock(sk); sk_psock_put(sk, psock); return copied ? 
copied : err; } enum { TCP_BPF_IPV4, TCP_BPF_IPV6, TCP_BPF_NUM_PROTS, }; enum { TCP_BPF_BASE, TCP_BPF_TX, TCP_BPF_RX, TCP_BPF_TXRX, TCP_BPF_NUM_CFGS, }; static struct proto *tcpv6_prot_saved __read_mostly; static DEFINE_SPINLOCK(tcpv6_prot_lock); static struct proto tcp_bpf_prots[TCP_BPF_NUM_PROTS][TCP_BPF_NUM_CFGS]; static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS], struct proto *base) { prot[TCP_BPF_BASE] = *base; prot[TCP_BPF_BASE].destroy = sock_map_destroy; prot[TCP_BPF_BASE].close = sock_map_close; prot[TCP_BPF_BASE].recvmsg = tcp_bpf_recvmsg; prot[TCP_BPF_BASE].sock_is_readable = sk_msg_is_readable; prot[TCP_BPF_TX] = prot[TCP_BPF_BASE]; prot[TCP_BPF_TX].sendmsg = tcp_bpf_sendmsg; prot[TCP_BPF_RX] = prot[TCP_BPF_BASE]; prot[TCP_BPF_RX].recvmsg = tcp_bpf_recvmsg_parser; prot[TCP_BPF_TXRX] = prot[TCP_BPF_TX]; prot[TCP_BPF_TXRX].recvmsg = tcp_bpf_recvmsg_parser; } static void tcp_bpf_check_v6_needs_rebuild(struct proto *ops) { if (unlikely(ops != smp_load_acquire(&tcpv6_prot_saved))) { spin_lock_bh(&tcpv6_prot_lock); if (likely(ops != tcpv6_prot_saved)) { tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV6], ops); smp_store_release(&tcpv6_prot_saved, ops); } spin_unlock_bh(&tcpv6_prot_lock); } } static int __init tcp_bpf_v4_build_proto(void) { tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV4], &tcp_prot); return 0; } late_initcall(tcp_bpf_v4_build_proto); static int tcp_bpf_assert_proto_ops(struct proto *ops) { /* In order to avoid retpoline, we make assumptions when we call * into ops if e.g. a psock is not present. Make sure they are * indeed valid assumptions. */ return ops->recvmsg == tcp_recvmsg && ops->sendmsg == tcp_sendmsg ? 0 : -ENOTSUPP; } int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore) { int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4; int config = psock->progs.msg_parser ? TCP_BPF_TX : TCP_BPF_BASE; if (psock->progs.stream_verdict || psock->progs.skb_verdict) { config = (config == TCP_BPF_TX) ? TCP_BPF_TXRX : TCP_BPF_RX; } if (restore) { if (inet_csk_has_ulp(sk)) { /* TLS does not have an unhash proto in SW cases, * but we need to ensure we stop using the sock_map * unhash routine because the associated psock is being * removed. So use the original unhash handler. */ WRITE_ONCE(sk->sk_prot->unhash, psock->saved_unhash); tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space); } else { sk->sk_write_space = psock->saved_write_space; /* Pairs with lockless read in sk_clone_lock() */ sock_replace_proto(sk, psock->sk_proto); } return 0; } if (sk->sk_family == AF_INET6) { if (tcp_bpf_assert_proto_ops(psock->sk_proto)) return -EINVAL; tcp_bpf_check_v6_needs_rebuild(psock->sk_proto); } /* Pairs with lockless read in sk_clone_lock() */ sock_replace_proto(sk, &tcp_bpf_prots[family][config]); return 0; } EXPORT_SYMBOL_GPL(tcp_bpf_update_proto); /* If a child got cloned from a listening socket that had tcp_bpf * protocol callbacks installed, we need to restore the callbacks to * the default ones because the child does not inherit the psock state * that tcp_bpf callbacks expect. */ void tcp_bpf_clone(const struct sock *sk, struct sock *newsk) { struct proto *prot = newsk->sk_prot; if (is_insidevar(prot, tcp_bpf_prots)) newsk->sk_prot = sk->sk_prot_creator; } #endif /* CONFIG_BPF_SYSCALL */ |
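/*
 * Summary of the proto-override table above: tcp_bpf_update_proto() picks one
 * of the variants built by tcp_bpf_rebuild_protos() based on which BPF
 * programs are attached to the psock:
 *
 *   attached programs                   config         overridden callbacks
 *   ----------------------------------  -------------  ------------------------------
 *   none                                TCP_BPF_BASE   close/destroy/recvmsg/readable
 *   msg_parser only                     TCP_BPF_TX     BASE + sendmsg
 *   stream_verdict or skb_verdict only  TCP_BPF_RX     BASE + recvmsg_parser
 *   msg_parser + a verdict program      TCP_BPF_TXRX   TX + recvmsg_parser
 *
 * The IPv4 table is built once at late_initcall time; the IPv6 table is
 * rebuilt lazily in tcp_bpf_check_v6_needs_rebuild() whenever the saved
 * tcpv6 proto ops change, e.g. because IPv6 may be built as a module.
 */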
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_fib.h>

static void nft_fib_inet_eval(const struct nft_expr *expr,
                              struct nft_regs *regs,
                              const struct nft_pktinfo *pkt)
{
        const struct nft_fib *priv = nft_expr_priv(expr);

        switch (nft_pf(pkt)) {
        case NFPROTO_IPV4:
                switch (priv->result) {
                case NFT_FIB_RESULT_OIF:
                case NFT_FIB_RESULT_OIFNAME:
                        return nft_fib4_eval(expr, regs, pkt);
                case NFT_FIB_RESULT_ADDRTYPE:
                        return nft_fib4_eval_type(expr, regs, pkt);
                }
                break;
        case NFPROTO_IPV6:
                switch (priv->result) {
                case NFT_FIB_RESULT_OIF:
                case NFT_FIB_RESULT_OIFNAME:
                        return nft_fib6_eval(expr, regs, pkt);
                case NFT_FIB_RESULT_ADDRTYPE:
                        return nft_fib6_eval_type(expr, regs, pkt);
                }
                break;
        }

        regs->verdict.code = NF_DROP;
}

static struct nft_expr_type nft_fib_inet_type;
static const struct nft_expr_ops nft_fib_inet_ops = {
        .type           = &nft_fib_inet_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_fib)),
        .eval           = nft_fib_inet_eval,
        .init           = nft_fib_init,
        .dump           = nft_fib_dump,
        .validate       = nft_fib_validate,
        .reduce         = nft_fib_reduce,
};

static struct nft_expr_type nft_fib_inet_type __read_mostly = {
        .family         = NFPROTO_INET,
        .name           = "fib",
        .ops            = &nft_fib_inet_ops,
        .policy         = nft_fib_policy,
        .maxattr        = NFTA_FIB_MAX,
        .owner          = THIS_MODULE,
};

static int __init nft_fib_inet_module_init(void)
{
        return nft_register_expr(&nft_fib_inet_type);
}

static void __exit nft_fib_inet_module_exit(void)
{
        nft_unregister_expr(&nft_fib_inet_type);
}

module_init(nft_fib_inet_module_init);
module_exit(nft_fib_inet_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
MODULE_ALIAS_NFT_AF_EXPR(1, "fib");
MODULE_DESCRIPTION("nftables fib inet support");
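/*
 * Usage note: this module only dispatches to the per-family fib expression
 * implementations, so the result types it handles (oif, oifname, type) are
 * those of nft_fib4/nft_fib6.  A typical rule that exercises it is a
 * reverse-path filter on an inet-family table, e.g. something like:
 *
 *   nft add rule inet filter prerouting fib saddr . iif oif missing drop
 *
 * If the packet is neither IPv4 nor IPv6, or the result type is unhandled,
 * nft_fib_inet_eval() falls through to an NF_DROP verdict.
 */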
// SPDX-License-Identifier: GPL-2.0
/*
 * n_gsm.c GSM 0710 tty multiplexor
 * Copyright (c) 2009/10 Intel Corporation
 * Copyright (c) 2022/23 Siemens Mobility GmbH
 *
 * * THIS IS A DEVELOPMENT SNAPSHOT IT IS NOT A FINAL RELEASE *
 *
 * Outgoing path:
 * tty -> DLCI fifo -> scheduler -> GSM MUX data queue    ---o-> ldisc
 * control message               -> GSM MUX control queue --´
 *
 * Incoming path:
 * ldisc -> gsm_queue() -o--> tty
 *                       `-> gsm_control_response()
 *
 * TO DO:
 *	Mostly done: ioctls for setting modes/timing
 *	Partly done: hooks so you can pull off frames to non tty devs
 *	Restart DLCI 0 when it closes ?
 *	Improve the tx engine
 *	Resolve tx side locking by adding a queue_head and routing
 *	all control traffic via it
 *	General tidy/document
 *	Review the locking/move to refcounts more (mux now moved to an
 *	alloc/free model ready)
 *	Use newest tty open/close port helpers and install hooks
 *	What to do about power functions ?
 *	Termios setting and negotiation
 *	Do we need a 'which mux are you' ioctl to correlate mux and tty sets
 *
 */

#include <linux/types.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/fcntl.h>
#include <linux/sched/signal.h>
#include <linux/interrupt.h>
#include <linux/tty.h>
#include <linux/bitfield.h>
#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/math.h>
#include <linux/nospec.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/bitops.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/tty_flip.h>
#include <linux/tty_driver.h>
#include <linux/serial.h>
#include <linux/kfifo.h>
#include <linux/skbuff.h>
#include <net/arp.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/gsmmux.h>
#include "tty.h"

static int debug;
module_param(debug, int, 0600);

/* Module debug bits */
#define DBG_DUMP	BIT(0) /* Data transmission dump. */
#define DBG_CD_ON	BIT(1) /* Always assume CD line on. */
#define DBG_DATA	BIT(2) /* Data transmission details. */
#define DBG_ERRORS	BIT(3) /* Details for fail conditions. */
#define DBG_TTY		BIT(4) /* Transmission statistics for DLCI TTYs. */
#define DBG_PAYLOAD	BIT(5) /* Limits DBG_DUMP to payload frames. */

/* Defaults: these are from the specification */

#define T1	10		/* 100mS */
#define T2	34		/* 333mS */
#define T3	10		/* 10s */
#define N2	3		/* Retry 3 times */
#define K	2		/* outstanding I frames */

#define MAX_T3 255		/* In seconds. */
#define MAX_WINDOW_SIZE 7	/* Limit of K in error recovery mode. */

/* Use long timers for testing at low speed with debug on */
#ifdef DEBUG_TIMING
#define T1	100
#define T2	200
#endif

/*
 * Semi-arbitrary buffer size limits.
0710 is normally run with 32-64 byte * limits so this is plenty */ #define MAX_MRU 1500 #define MAX_MTU 1500 #define MIN_MTU (PROT_OVERHEAD + 1) /* SOF, ADDR, CTRL, LEN1, LEN2, ..., FCS, EOF */ #define PROT_OVERHEAD 7 #define GSM_NET_TX_TIMEOUT (HZ*10) /* * struct gsm_mux_net - network interface * * Created when net interface is initialized. */ struct gsm_mux_net { struct kref ref; struct gsm_dlci *dlci; }; /* * Each block of data we have queued to go out is in the form of * a gsm_msg which holds everything we need in a link layer independent * format */ struct gsm_msg { struct list_head list; u8 addr; /* DLCI address + flags */ u8 ctrl; /* Control byte + flags */ unsigned int len; /* Length of data block (can be zero) */ unsigned char *data; /* Points into buffer but not at the start */ unsigned char buffer[]; }; enum gsm_dlci_state { DLCI_CLOSED, DLCI_WAITING_CONFIG, /* Waiting for DLCI configuration from user */ DLCI_CONFIGURE, /* Sending PN (for adaption > 1) */ DLCI_OPENING, /* Sending SABM not seen UA */ DLCI_OPEN, /* SABM/UA complete */ DLCI_CLOSING, /* Sending DISC not seen UA/DM */ }; enum gsm_dlci_mode { DLCI_MODE_ABM, /* Normal Asynchronous Balanced Mode */ DLCI_MODE_ADM, /* Asynchronous Disconnected Mode */ }; /* * Each active data link has a gsm_dlci structure associated which ties * the link layer to an optional tty (if the tty side is open). To avoid * complexity right now these are only ever freed up when the mux is * shut down. * * At the moment we don't free DLCI objects until the mux is torn down * this avoid object life time issues but might be worth review later. */ struct gsm_dlci { struct gsm_mux *gsm; int addr; enum gsm_dlci_state state; struct mutex mutex; /* Link layer */ enum gsm_dlci_mode mode; spinlock_t lock; /* Protects the internal state */ struct timer_list t1; /* Retransmit timer for SABM and UA */ int retries; /* Uplink tty if active */ struct tty_port port; /* The tty bound to this DLCI if there is one */ #define TX_SIZE 4096 /* Must be power of 2. */ struct kfifo fifo; /* Queue fifo for the DLCI */ int adaption; /* Adaption layer in use */ int prev_adaption; u32 modem_rx; /* Our incoming virtual modem lines */ u32 modem_tx; /* Our outgoing modem lines */ unsigned int mtu; bool dead; /* Refuse re-open */ /* Configuration */ u8 prio; /* Priority */ u8 ftype; /* Frame type */ u8 k; /* Window size */ /* Flow control */ bool throttled; /* Private copy of throttle state */ bool constipated; /* Throttle status for outgoing */ /* Packetised I/O */ struct sk_buff *skb; /* Frame being sent */ struct sk_buff_head skb_list; /* Queued frames */ /* Data handling callback */ void (*data)(struct gsm_dlci *dlci, const u8 *data, int len); void (*prev_data)(struct gsm_dlci *dlci, const u8 *data, int len); struct net_device *net; /* network interface, if created */ }; /* * Parameter bits used for parameter negotiation according to 3GPP 27.010 * chapter 5.4.6.3.1. 
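 *
 * Rough layout of the eight value octets, as implied by the structure and
 * the field masks defined below (this is only a reading aid, the spec
 * remains authoritative):
 *
 *	octet 1 (d_bits)    DLCI address in bits 0-5
 *	octet 2 (i_cl_bits) frame type in bits 0-3, convergence layer
 *	                    (adaption) in bits 4-7
 *	octet 3 (p_bits)    priority in bits 0-5
 *	octet 4 (t_bits)    T1 timer value
 *	octets 5-6 (n_bits) maximum frame size N1, little endian
 *	octet 7 (na_bits)   retry count N2
 *	octet 8 (k_bits)    window size k in bits 0-2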
*/ struct gsm_dlci_param_bits { u8 d_bits; u8 i_cl_bits; u8 p_bits; u8 t_bits; __le16 n_bits; u8 na_bits; u8 k_bits; }; static_assert(sizeof(struct gsm_dlci_param_bits) == 8); #define PN_D_FIELD_DLCI GENMASK(5, 0) #define PN_I_CL_FIELD_FTYPE GENMASK(3, 0) #define PN_I_CL_FIELD_ADAPTION GENMASK(7, 4) #define PN_P_FIELD_PRIO GENMASK(5, 0) #define PN_T_FIELD_T1 GENMASK(7, 0) #define PN_N_FIELD_N1 GENMASK(15, 0) #define PN_NA_FIELD_N2 GENMASK(7, 0) #define PN_K_FIELD_K GENMASK(2, 0) /* Total number of supported devices */ #define GSM_TTY_MINORS 256 /* DLCI 0, 62/63 are special or reserved see gsmtty_open */ #define NUM_DLCI 64 /* * DLCI 0 is used to pass control blocks out of band of the data * flow (and with a higher link priority). One command can be outstanding * at a time and we use this structure to manage them. They are created * and destroyed by the user context, and updated by the receive paths * and timers */ struct gsm_control { u8 cmd; /* Command we are issuing */ u8 *data; /* Data for the command in case we retransmit */ int len; /* Length of block for retransmission */ int done; /* Done flag */ int error; /* Error if any */ }; enum gsm_encoding { GSM_BASIC_OPT, GSM_ADV_OPT, }; enum gsm_mux_state { GSM_SEARCH, GSM_START, GSM_ADDRESS, GSM_CONTROL, GSM_LEN, GSM_DATA, GSM_FCS, GSM_OVERRUN, GSM_LEN0, GSM_LEN1, GSM_SSOF, }; /* * Each GSM mux we have is represented by this structure. If we are * operating as an ldisc then we use this structure as our ldisc * state. We need to sort out lifetimes and locking with respect * to the gsm mux array. For now we don't free DLCI objects that * have been instantiated until the mux itself is terminated. * * To consider further: tty open versus mux shutdown. */ struct gsm_mux { struct tty_struct *tty; /* The tty our ldisc is bound to */ spinlock_t lock; struct mutex mutex; unsigned int num; struct kref ref; /* Events on the GSM channel */ wait_queue_head_t event; /* ldisc send work */ struct work_struct tx_work; /* Bits for GSM mode decoding */ /* Framing Layer */ unsigned char *buf; enum gsm_mux_state state; unsigned int len; unsigned int address; unsigned int count; bool escape; enum gsm_encoding encoding; u8 control; u8 fcs; u8 *txframe; /* TX framing buffer */ /* Method for the receiver side */ void (*receive)(struct gsm_mux *gsm, u8 ch); /* Link Layer */ unsigned int mru; unsigned int mtu; int initiator; /* Did we initiate connection */ bool dead; /* Has the mux been shut down */ struct gsm_dlci *dlci[NUM_DLCI]; int old_c_iflag; /* termios c_iflag value before attach */ bool constipated; /* Asked by remote to shut up */ bool has_devices; /* Devices were registered */ spinlock_t tx_lock; unsigned int tx_bytes; /* TX data outstanding */ #define TX_THRESH_HI 8192 #define TX_THRESH_LO 2048 struct list_head tx_ctrl_list; /* Pending control packets */ struct list_head tx_data_list; /* Pending data packets */ /* Control messages */ struct timer_list kick_timer; /* Kick TX queuing on timeout */ struct timer_list t2_timer; /* Retransmit timer for commands */ int cretries; /* Command retry counter */ struct gsm_control *pending_cmd;/* Our current pending command */ spinlock_t control_lock; /* Protects the pending command */ /* Keep-alive */ struct timer_list ka_timer; /* Keep-alive response timer */ u8 ka_num; /* Keep-alive match pattern */ signed int ka_retries; /* Keep-alive retry counter, -1 if not yet initialized */ /* Configuration */ int adaption; /* 1 or 2 supported */ u8 ftype; /* UI or UIH */ int t1, t2; /* Timers in 1/100th of a sec */ unsigned 
int t3; /* Power wake-up timer in seconds. */ int n2; /* Retry count */ u8 k; /* Window size */ bool wait_config; /* Wait for configuration by ioctl before DLCI open */ u32 keep_alive; /* Control channel keep-alive in 10ms */ /* Statistics (not currently exposed) */ unsigned long bad_fcs; unsigned long malformed; unsigned long io_error; unsigned long open_error; unsigned long bad_size; unsigned long unsupported; }; /* * Mux objects - needed so that we can translate a tty index into the * relevant mux and DLCI. */ #define MAX_MUX 4 /* 256 minors */ static struct gsm_mux *gsm_mux[MAX_MUX]; /* GSM muxes */ static DEFINE_SPINLOCK(gsm_mux_lock); static struct tty_driver *gsm_tty_driver; /* * This section of the driver logic implements the GSM encodings * both the basic and the 'advanced'. Reliable transport is not * supported. */ #define CR 0x02 #define EA 0x01 #define PF 0x10 /* I is special: the rest are ..*/ #define RR 0x01 #define UI 0x03 #define RNR 0x05 #define REJ 0x09 #define DM 0x0F #define SABM 0x2F #define DISC 0x43 #define UA 0x63 #define UIH 0xEF /* Channel commands */ #define CMD_NSC 0x09 #define CMD_TEST 0x11 #define CMD_PSC 0x21 #define CMD_RLS 0x29 #define CMD_FCOFF 0x31 #define CMD_PN 0x41 #define CMD_RPN 0x49 #define CMD_FCON 0x51 #define CMD_CLD 0x61 #define CMD_SNC 0x69 #define CMD_MSC 0x71 /* Virtual modem bits */ #define MDM_FC 0x01 #define MDM_RTC 0x02 #define MDM_RTR 0x04 #define MDM_IC 0x20 #define MDM_DV 0x40 #define GSM0_SOF 0xF9 #define GSM1_SOF 0x7E #define GSM1_ESCAPE 0x7D #define GSM1_ESCAPE_BITS 0x20 #define XON 0x11 #define XOFF 0x13 #define ISO_IEC_646_MASK 0x7F static const struct tty_port_operations gsm_port_ops; /* * CRC table for GSM 0710 */ static const u8 gsm_fcs8[256] = { 0x00, 0x91, 0xE3, 0x72, 0x07, 0x96, 0xE4, 0x75, 0x0E, 0x9F, 0xED, 0x7C, 0x09, 0x98, 0xEA, 0x7B, 0x1C, 0x8D, 0xFF, 0x6E, 0x1B, 0x8A, 0xF8, 0x69, 0x12, 0x83, 0xF1, 0x60, 0x15, 0x84, 0xF6, 0x67, 0x38, 0xA9, 0xDB, 0x4A, 0x3F, 0xAE, 0xDC, 0x4D, 0x36, 0xA7, 0xD5, 0x44, 0x31, 0xA0, 0xD2, 0x43, 0x24, 0xB5, 0xC7, 0x56, 0x23, 0xB2, 0xC0, 0x51, 0x2A, 0xBB, 0xC9, 0x58, 0x2D, 0xBC, 0xCE, 0x5F, 0x70, 0xE1, 0x93, 0x02, 0x77, 0xE6, 0x94, 0x05, 0x7E, 0xEF, 0x9D, 0x0C, 0x79, 0xE8, 0x9A, 0x0B, 0x6C, 0xFD, 0x8F, 0x1E, 0x6B, 0xFA, 0x88, 0x19, 0x62, 0xF3, 0x81, 0x10, 0x65, 0xF4, 0x86, 0x17, 0x48, 0xD9, 0xAB, 0x3A, 0x4F, 0xDE, 0xAC, 0x3D, 0x46, 0xD7, 0xA5, 0x34, 0x41, 0xD0, 0xA2, 0x33, 0x54, 0xC5, 0xB7, 0x26, 0x53, 0xC2, 0xB0, 0x21, 0x5A, 0xCB, 0xB9, 0x28, 0x5D, 0xCC, 0xBE, 0x2F, 0xE0, 0x71, 0x03, 0x92, 0xE7, 0x76, 0x04, 0x95, 0xEE, 0x7F, 0x0D, 0x9C, 0xE9, 0x78, 0x0A, 0x9B, 0xFC, 0x6D, 0x1F, 0x8E, 0xFB, 0x6A, 0x18, 0x89, 0xF2, 0x63, 0x11, 0x80, 0xF5, 0x64, 0x16, 0x87, 0xD8, 0x49, 0x3B, 0xAA, 0xDF, 0x4E, 0x3C, 0xAD, 0xD6, 0x47, 0x35, 0xA4, 0xD1, 0x40, 0x32, 0xA3, 0xC4, 0x55, 0x27, 0xB6, 0xC3, 0x52, 0x20, 0xB1, 0xCA, 0x5B, 0x29, 0xB8, 0xCD, 0x5C, 0x2E, 0xBF, 0x90, 0x01, 0x73, 0xE2, 0x97, 0x06, 0x74, 0xE5, 0x9E, 0x0F, 0x7D, 0xEC, 0x99, 0x08, 0x7A, 0xEB, 0x8C, 0x1D, 0x6F, 0xFE, 0x8B, 0x1A, 0x68, 0xF9, 0x82, 0x13, 0x61, 0xF0, 0x85, 0x14, 0x66, 0xF7, 0xA8, 0x39, 0x4B, 0xDA, 0xAF, 0x3E, 0x4C, 0xDD, 0xA6, 0x37, 0x45, 0xD4, 0xA1, 0x30, 0x42, 0xD3, 0xB4, 0x25, 0x57, 0xC6, 0xB3, 0x22, 0x50, 0xC1, 0xBA, 0x2B, 0x59, 0xC8, 0xBD, 0x2C, 0x5E, 0xCF }; #define INIT_FCS 0xFF #define GOOD_FCS 0xCF static void gsm_dlci_close(struct gsm_dlci *dlci); static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len); static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk); static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, 
u8 addr, int len, u8 ctrl); static int gsm_send_packet(struct gsm_mux *gsm, struct gsm_msg *msg); static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr); static void gsmld_write_trigger(struct gsm_mux *gsm); static void gsmld_write_task(struct work_struct *work); /** * gsm_fcs_add - update FCS * @fcs: Current FCS * @c: Next data * * Update the FCS to include c. Uses the algorithm in the specification * notes. */ static inline u8 gsm_fcs_add(u8 fcs, u8 c) { return gsm_fcs8[fcs ^ c]; } /** * gsm_fcs_add_block - update FCS for a block * @fcs: Current FCS * @c: buffer of data * @len: length of buffer * * Update the FCS to include c. Uses the algorithm in the specification * notes. */ static inline u8 gsm_fcs_add_block(u8 fcs, u8 *c, int len) { while (len--) fcs = gsm_fcs8[fcs ^ *c++]; return fcs; } /** * gsm_read_ea - read a byte into an EA * @val: variable holding value * @c: byte going into the EA * * Processes one byte of an EA. Updates the passed variable * and returns 1 if the EA is now completely read */ static int gsm_read_ea(unsigned int *val, u8 c) { /* Add the next 7 bits into the value */ *val <<= 7; *val |= c >> 1; /* Was this the last byte of the EA 1 = yes*/ return c & EA; } /** * gsm_read_ea_val - read a value until EA * @val: variable holding value * @data: buffer of data * @dlen: length of data * * Processes an EA value. Updates the passed variable and * returns the processed data length. */ static unsigned int gsm_read_ea_val(unsigned int *val, const u8 *data, int dlen) { unsigned int len = 0; for (; dlen > 0; dlen--) { len++; if (gsm_read_ea(val, *data++)) break; } return len; } /** * gsm_encode_modem - encode modem data bits * @dlci: DLCI to encode from * * Returns the correct GSM encoded modem status bits (6 bit field) for * the current status of the DLCI and attached tty object */ static u8 gsm_encode_modem(const struct gsm_dlci *dlci) { u8 modembits = 0; /* FC is true flow control not modem bits */ if (dlci->throttled) modembits |= MDM_FC; if (dlci->modem_tx & TIOCM_DTR) modembits |= MDM_RTC; if (dlci->modem_tx & TIOCM_RTS) modembits |= MDM_RTR; if (dlci->modem_tx & TIOCM_RI) modembits |= MDM_IC; if (dlci->modem_tx & TIOCM_CD || dlci->gsm->initiator) modembits |= MDM_DV; /* special mappings for passive side to operate as UE */ if (dlci->modem_tx & TIOCM_OUT1) modembits |= MDM_IC; if (dlci->modem_tx & TIOCM_OUT2) modembits |= MDM_DV; return modembits; } static void gsm_hex_dump_bytes(const char *fname, const u8 *data, unsigned long len) { char *prefix; if (!fname) { print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, data, len, true); return; } prefix = kasprintf(GFP_ATOMIC, "%s: ", fname); if (!prefix) return; print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 16, 1, data, len, true); kfree(prefix); } /** * gsm_encode_params - encode DLCI parameters * @dlci: DLCI to encode from * @params: buffer to fill with the encoded parameters * * Encodes the parameters according to GSM 07.10 section 5.4.6.3.1 * table 3. */ static int gsm_encode_params(const struct gsm_dlci *dlci, struct gsm_dlci_param_bits *params) { const struct gsm_mux *gsm = dlci->gsm; unsigned int i, cl; switch (dlci->ftype) { case UIH: i = 0; /* UIH */ break; case UI: i = 1; /* UI */ break; default: pr_debug("unsupported frame type %d\n", dlci->ftype); return -EINVAL; } switch (dlci->adaption) { case 1: /* Unstructured */ cl = 0; /* convergence layer type 1 */ break; case 2: /* Unstructured with modem bits. 
*/ cl = 1; /* convergence layer type 2 */ break; default: pr_debug("unsupported adaption %d\n", dlci->adaption); return -EINVAL; } params->d_bits = FIELD_PREP(PN_D_FIELD_DLCI, dlci->addr); /* UIH, convergence layer type 1 */ params->i_cl_bits = FIELD_PREP(PN_I_CL_FIELD_FTYPE, i) | FIELD_PREP(PN_I_CL_FIELD_ADAPTION, cl); params->p_bits = FIELD_PREP(PN_P_FIELD_PRIO, dlci->prio); params->t_bits = FIELD_PREP(PN_T_FIELD_T1, gsm->t1); params->n_bits = cpu_to_le16(FIELD_PREP(PN_N_FIELD_N1, dlci->mtu)); params->na_bits = FIELD_PREP(PN_NA_FIELD_N2, gsm->n2); params->k_bits = FIELD_PREP(PN_K_FIELD_K, dlci->k); return 0; } /** * gsm_register_devices - register all tty devices for a given mux index * * @driver: the tty driver that describes the tty devices * @index: the mux number is used to calculate the minor numbers of the * ttys for this mux and may differ from the position in the * mux array. */ static int gsm_register_devices(struct tty_driver *driver, unsigned int index) { struct device *dev; int i; unsigned int base; if (!driver || index >= MAX_MUX) return -EINVAL; base = index * NUM_DLCI; /* first minor for this index */ for (i = 1; i < NUM_DLCI; i++) { /* Don't register device 0 - this is the control channel * and not a usable tty interface */ dev = tty_register_device(gsm_tty_driver, base + i, NULL); if (IS_ERR(dev)) { if (debug & DBG_ERRORS) pr_info("%s failed to register device minor %u", __func__, base + i); for (i--; i >= 1; i--) tty_unregister_device(gsm_tty_driver, base + i); return PTR_ERR(dev); } } return 0; } /** * gsm_unregister_devices - unregister all tty devices for a given mux index * * @driver: the tty driver that describes the tty devices * @index: the mux number is used to calculate the minor numbers of the * ttys for this mux and may differ from the position in the * mux array. */ static void gsm_unregister_devices(struct tty_driver *driver, unsigned int index) { int i; unsigned int base; if (!driver || index >= MAX_MUX) return; base = index * NUM_DLCI; /* first minor for this index */ for (i = 1; i < NUM_DLCI; i++) { /* Don't unregister device 0 - this is the control * channel and not a usable tty interface */ tty_unregister_device(gsm_tty_driver, base + i); } } /** * gsm_print_packet - display a frame for debug * @hdr: header to print before decode * @addr: address EA from the frame * @cr: C/R bit seen as initiator * @control: control including PF bit * @data: following data bytes * @dlen: length of data * * Displays a packet in human readable format for debugging purposes. The * style is based on amateur radio LAP-B dump display. 
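 *
 * As an illustration (reconstructed from the format strings below, not a
 * captured trace), a queued SABM on DLCI 0 sent from the initiating side
 * is shown as:
 *
 *	Q-> 0) C: SABM(P)
 *
 * followed by a hex dump of the payload bytes when data is present.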
*/ static void gsm_print_packet(const char *hdr, int addr, int cr, u8 control, const u8 *data, int dlen) { if (!(debug & DBG_DUMP)) return; /* Only show user payload frames if debug & DBG_PAYLOAD */ if (!(debug & DBG_PAYLOAD) && addr != 0) if ((control & ~PF) == UI || (control & ~PF) == UIH) return; pr_info("%s %d) %c: ", hdr, addr, "RC"[cr]); switch (control & ~PF) { case SABM: pr_cont("SABM"); break; case UA: pr_cont("UA"); break; case DISC: pr_cont("DISC"); break; case DM: pr_cont("DM"); break; case UI: pr_cont("UI"); break; case UIH: pr_cont("UIH"); break; default: if (!(control & 0x01)) { pr_cont("I N(S)%d N(R)%d", (control & 0x0E) >> 1, (control & 0xE0) >> 5); } else switch (control & 0x0F) { case RR: pr_cont("RR(%d)", (control & 0xE0) >> 5); break; case RNR: pr_cont("RNR(%d)", (control & 0xE0) >> 5); break; case REJ: pr_cont("REJ(%d)", (control & 0xE0) >> 5); break; default: pr_cont("[%02X]", control); } } if (control & PF) pr_cont("(P)"); else pr_cont("(F)"); gsm_hex_dump_bytes(NULL, data, dlen); } /* * Link level transmission side */ /** * gsm_stuff_frame - bytestuff a packet * @input: input buffer * @output: output buffer * @len: length of input * * Expand a buffer by bytestuffing it. The worst case size change * is doubling and the caller is responsible for handing out * suitable sized buffers. */ static int gsm_stuff_frame(const u8 *input, u8 *output, int len) { int olen = 0; while (len--) { if (*input == GSM1_SOF || *input == GSM1_ESCAPE || (*input & ISO_IEC_646_MASK) == XON || (*input & ISO_IEC_646_MASK) == XOFF) { *output++ = GSM1_ESCAPE; *output++ = *input++ ^ GSM1_ESCAPE_BITS; olen++; } else *output++ = *input++; olen++; } return olen; } /** * gsm_send - send a control frame * @gsm: our GSM mux * @addr: address for control frame * @cr: command/response bit seen as initiator * @control: control byte including PF bit * * Format up and transmit a control frame. These should be transmitted * ahead of data when they are needed. */ static int gsm_send(struct gsm_mux *gsm, int addr, int cr, int control) { struct gsm_msg *msg; u8 *dp; int ocr; unsigned long flags; msg = gsm_data_alloc(gsm, addr, 0, control); if (!msg) return -ENOMEM; /* toggle C/R coding if not initiator */ ocr = cr ^ (gsm->initiator ? 0 : 1); msg->data -= 3; dp = msg->data; *dp++ = (addr << 2) | (ocr << 1) | EA; *dp++ = control; if (gsm->encoding == GSM_BASIC_OPT) *dp++ = EA; /* Length of data = 0 */ *dp = 0xFF - gsm_fcs_add_block(INIT_FCS, msg->data, dp - msg->data); msg->len = (dp - msg->data) + 1; gsm_print_packet("Q->", addr, cr, control, NULL, 0); spin_lock_irqsave(&gsm->tx_lock, flags); list_add_tail(&msg->list, &gsm->tx_ctrl_list); gsm->tx_bytes += msg->len; spin_unlock_irqrestore(&gsm->tx_lock, flags); gsmld_write_trigger(gsm); return 0; } /** * gsm_dlci_clear_queues - remove outstanding data for a DLCI * @gsm: mux * @dlci: clear for this DLCI * * Clears the data queues for a given DLCI. 
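 *
 * Called when a DLCI goes closed (see gsm_dlci_close()) so that stale
 * frames queued for the old link are dropped rather than sent after a
 * later re-open.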
*/ static void gsm_dlci_clear_queues(struct gsm_mux *gsm, struct gsm_dlci *dlci) { struct gsm_msg *msg, *nmsg; int addr = dlci->addr; unsigned long flags; /* Clear DLCI write fifo first */ spin_lock_irqsave(&dlci->lock, flags); kfifo_reset(&dlci->fifo); spin_unlock_irqrestore(&dlci->lock, flags); /* Clear data packets in MUX write queue */ spin_lock_irqsave(&gsm->tx_lock, flags); list_for_each_entry_safe(msg, nmsg, &gsm->tx_data_list, list) { if (msg->addr != addr) continue; gsm->tx_bytes -= msg->len; list_del(&msg->list); kfree(msg); } spin_unlock_irqrestore(&gsm->tx_lock, flags); } /** * gsm_response - send a control response * @gsm: our GSM mux * @addr: address for control frame * @control: control byte including PF bit * * Format up and transmit a link level response frame. */ static inline void gsm_response(struct gsm_mux *gsm, int addr, int control) { gsm_send(gsm, addr, 0, control); } /** * gsm_command - send a control command * @gsm: our GSM mux * @addr: address for control frame * @control: control byte including PF bit * * Format up and transmit a link level command frame. */ static inline void gsm_command(struct gsm_mux *gsm, int addr, int control) { gsm_send(gsm, addr, 1, control); } /* Data transmission */ #define HDR_LEN 6 /* ADDR CTRL [LEN.2] DATA FCS */ /** * gsm_data_alloc - allocate data frame * @gsm: GSM mux * @addr: DLCI address * @len: length excluding header and FCS * @ctrl: control byte * * Allocate a new data buffer for sending frames with data. Space is left * at the front for header bytes but that is treated as an implementation * detail and not for the high level code to use */ static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len, u8 ctrl) { struct gsm_msg *m = kmalloc(sizeof(struct gsm_msg) + len + HDR_LEN, GFP_ATOMIC); if (m == NULL) return NULL; m->data = m->buffer + HDR_LEN - 1; /* Allow for FCS */ m->len = len; m->addr = addr; m->ctrl = ctrl; INIT_LIST_HEAD(&m->list); return m; } /** * gsm_send_packet - sends a single packet * @gsm: GSM Mux * @msg: packet to send * * The given packet is encoded and sent out. No memory is freed. * The caller must hold the gsm tx lock. */ static int gsm_send_packet(struct gsm_mux *gsm, struct gsm_msg *msg) { int len, ret; if (gsm->encoding == GSM_BASIC_OPT) { gsm->txframe[0] = GSM0_SOF; memcpy(gsm->txframe + 1, msg->data, msg->len); gsm->txframe[msg->len + 1] = GSM0_SOF; len = msg->len + 2; } else { gsm->txframe[0] = GSM1_SOF; len = gsm_stuff_frame(msg->data, gsm->txframe + 1, msg->len); gsm->txframe[len + 1] = GSM1_SOF; len += 2; } if (debug & DBG_DATA) gsm_hex_dump_bytes(__func__, gsm->txframe, len); gsm_print_packet("-->", msg->addr, gsm->initiator, msg->ctrl, msg->data, msg->len); ret = gsmld_output(gsm, gsm->txframe, len); if (ret <= 0) return ret; /* FIXME: Can eliminate one SOF in many more cases */ gsm->tx_bytes -= msg->len; return 0; } /** * gsm_is_flow_ctrl_msg - checks if flow control message * @msg: message to check * * Returns true if the given message is a flow control command of the * control channel. False is returned in any other case. 
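 *
 * In practice this means a CMD_FCON or CMD_FCOFF command carried in a
 * UI/UIH frame on DLCI 0; anything else is not a flow control message.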
*/ static bool gsm_is_flow_ctrl_msg(struct gsm_msg *msg) { unsigned int cmd; if (msg->addr > 0) return false; switch (msg->ctrl & ~PF) { case UI: case UIH: cmd = 0; if (gsm_read_ea_val(&cmd, msg->data + 2, msg->len - 2) < 1) break; switch (cmd & ~PF) { case CMD_FCOFF: case CMD_FCON: return true; } break; } return false; } /** * gsm_data_kick - poke the queue * @gsm: GSM Mux * * The tty device has called us to indicate that room has appeared in * the transmit queue. Ram more data into the pipe if we have any. * If we have been flow-stopped by a CMD_FCOFF, then we can only * send messages on DLCI0 until CMD_FCON. The caller must hold * the gsm tx lock. */ static int gsm_data_kick(struct gsm_mux *gsm) { struct gsm_msg *msg, *nmsg; struct gsm_dlci *dlci; int ret; clear_bit(TTY_DO_WRITE_WAKEUP, &gsm->tty->flags); /* Serialize control messages and control channel messages first */ list_for_each_entry_safe(msg, nmsg, &gsm->tx_ctrl_list, list) { if (gsm->constipated && !gsm_is_flow_ctrl_msg(msg)) continue; ret = gsm_send_packet(gsm, msg); switch (ret) { case -ENOSPC: return -ENOSPC; case -ENODEV: /* ldisc not open */ gsm->tx_bytes -= msg->len; list_del(&msg->list); kfree(msg); continue; default: if (ret >= 0) { list_del(&msg->list); kfree(msg); } break; } } if (gsm->constipated) return -EAGAIN; /* Serialize other channels */ if (list_empty(&gsm->tx_data_list)) return 0; list_for_each_entry_safe(msg, nmsg, &gsm->tx_data_list, list) { dlci = gsm->dlci[msg->addr]; /* Send only messages for DLCIs with valid state */ if (dlci->state != DLCI_OPEN) { gsm->tx_bytes -= msg->len; list_del(&msg->list); kfree(msg); continue; } ret = gsm_send_packet(gsm, msg); switch (ret) { case -ENOSPC: return -ENOSPC; case -ENODEV: /* ldisc not open */ gsm->tx_bytes -= msg->len; list_del(&msg->list); kfree(msg); continue; default: if (ret >= 0) { list_del(&msg->list); kfree(msg); } break; } } return 1; } /** * __gsm_data_queue - queue a UI or UIH frame * @dlci: DLCI sending the data * @msg: message queued * * Add data to the transmit queue and try and get stuff moving * out of the mux tty if not already doing so. The Caller must hold * the gsm tx lock. 
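 *
 * For the basic option the bytes placed in front of the payload are,
 * roughly (the SOF flags are only added later in gsm_send_packet()):
 *
 *	address octet    (addr << 2) | EA, plus C/R when we are initiator
 *	control octet    msg->ctrl (UI/UIH, possibly with PF)
 *	length           (len << 1) | EA, or two octets once len >= 128
 *	payload          msg->len data bytes
 *	FCS              0xFF minus the FCS over the header (and also the
 *	                 payload for UI frames)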
*/ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg) { struct gsm_mux *gsm = dlci->gsm; u8 *dp = msg->data; u8 *fcs = dp + msg->len; /* Fill in the header */ if (gsm->encoding == GSM_BASIC_OPT) { if (msg->len < 128) *--dp = (msg->len << 1) | EA; else { *--dp = (msg->len >> 7); /* bits 7 - 15 */ *--dp = (msg->len & 127) << 1; /* bits 0 - 6 */ } } *--dp = msg->ctrl; if (gsm->initiator) *--dp = (msg->addr << 2) | CR | EA; else *--dp = (msg->addr << 2) | EA; *fcs = gsm_fcs_add_block(INIT_FCS, dp , msg->data - dp); /* Ugly protocol layering violation */ if (msg->ctrl == UI || msg->ctrl == (UI|PF)) *fcs = gsm_fcs_add_block(*fcs, msg->data, msg->len); *fcs = 0xFF - *fcs; gsm_print_packet("Q> ", msg->addr, gsm->initiator, msg->ctrl, msg->data, msg->len); /* Move the header back and adjust the length, also allow for the FCS now tacked on the end */ msg->len += (msg->data - dp) + 1; msg->data = dp; /* Add to the actual output queue */ switch (msg->ctrl & ~PF) { case UI: case UIH: if (msg->addr > 0) { list_add_tail(&msg->list, &gsm->tx_data_list); break; } fallthrough; default: list_add_tail(&msg->list, &gsm->tx_ctrl_list); break; } gsm->tx_bytes += msg->len; gsmld_write_trigger(gsm); mod_timer(&gsm->kick_timer, jiffies + 10 * gsm->t1 * HZ / 100); } /** * gsm_data_queue - queue a UI or UIH frame * @dlci: DLCI sending the data * @msg: message queued * * Add data to the transmit queue and try and get stuff moving * out of the mux tty if not already doing so. Take the * the gsm tx lock and dlci lock. */ static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg) { unsigned long flags; spin_lock_irqsave(&dlci->gsm->tx_lock, flags); __gsm_data_queue(dlci, msg); spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags); } /** * gsm_dlci_data_output - try and push data out of a DLCI * @gsm: mux * @dlci: the DLCI to pull data from * * Pull data from a DLCI and send it into the transmit queue if there * is data. Keep to the MRU of the mux. This path handles the usual tty * interface which is a byte stream with optional modem data. * * Caller must hold the tx_lock of the mux. */ static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci) { struct gsm_msg *msg; u8 *dp; int h, len, size; /* for modem bits without break data */ h = ((dlci->adaption == 1) ? 0 : 1); len = kfifo_len(&dlci->fifo); if (len == 0) return 0; /* MTU/MRU count only the data bits but watch adaption mode */ if ((len + h) > dlci->mtu) len = dlci->mtu - h; size = len + h; msg = gsm_data_alloc(gsm, dlci->addr, size, dlci->ftype); if (!msg) return -ENOMEM; dp = msg->data; switch (dlci->adaption) { case 1: /* Unstructured */ break; case 2: /* Unstructured with modem bits. * Always one byte as we never send inline break data */ *dp++ = (gsm_encode_modem(dlci) << 1) | EA; break; default: pr_err("%s: unsupported adaption %d\n", __func__, dlci->adaption); break; } WARN_ON(len != kfifo_out_locked(&dlci->fifo, dp, len, &dlci->lock)); /* Notify upper layer about available send space. */ tty_port_tty_wakeup(&dlci->port); __gsm_data_queue(dlci, msg); /* Bytes of data we used up */ return size; } /** * gsm_dlci_data_output_framed - try and push data out of a DLCI * @gsm: mux * @dlci: the DLCI to pull data from * * Pull data from a DLCI and send it into the transmit queue if there * is data. Keep to the MRU of the mux. This path handles framed data * queued as skbuffs to the DLCI. * * Caller must hold the tx_lock of the mux. 
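 *
 * For adaption 4 each chunk is prefixed with a flag octet carrying the
 * first/last markers, built below as (last << 7) | (first << 6) | EA.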
*/ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm, struct gsm_dlci *dlci) { struct gsm_msg *msg; u8 *dp; int len, size; int last = 0, first = 0; int overhead = 0; /* One byte per frame is used for B/F flags */ if (dlci->adaption == 4) overhead = 1; /* dlci->skb is locked by tx_lock */ if (dlci->skb == NULL) { dlci->skb = skb_dequeue_tail(&dlci->skb_list); if (dlci->skb == NULL) return 0; first = 1; } len = dlci->skb->len + overhead; /* MTU/MRU count only the data bits */ if (len > dlci->mtu) { if (dlci->adaption == 3) { /* Over long frame, bin it */ dev_kfree_skb_any(dlci->skb); dlci->skb = NULL; return 0; } len = dlci->mtu; } else last = 1; size = len + overhead; msg = gsm_data_alloc(gsm, dlci->addr, size, dlci->ftype); if (msg == NULL) { skb_queue_tail(&dlci->skb_list, dlci->skb); dlci->skb = NULL; return -ENOMEM; } dp = msg->data; if (dlci->adaption == 4) { /* Interruptible framed (Packetised Data) */ /* Flag byte to carry the start/end info */ *dp++ = last << 7 | first << 6 | 1; /* EA */ len--; } memcpy(dp, dlci->skb->data, len); skb_pull(dlci->skb, len); __gsm_data_queue(dlci, msg); if (last) { dev_kfree_skb_any(dlci->skb); dlci->skb = NULL; } return size; } /** * gsm_dlci_modem_output - try and push modem status out of a DLCI * @gsm: mux * @dlci: the DLCI to pull modem status from * @brk: break signal * * Push an empty frame in to the transmit queue to update the modem status * bits and to transmit an optional break. * * Caller must hold the tx_lock of the mux. */ static int gsm_dlci_modem_output(struct gsm_mux *gsm, struct gsm_dlci *dlci, u8 brk) { u8 *dp = NULL; struct gsm_msg *msg; int size = 0; /* for modem bits without break data */ switch (dlci->adaption) { case 1: /* Unstructured */ break; case 2: /* Unstructured with modem bits. */ size++; if (brk > 0) size++; break; default: pr_err("%s: unsupported adaption %d\n", __func__, dlci->adaption); return -EINVAL; } msg = gsm_data_alloc(gsm, dlci->addr, size, dlci->ftype); if (!msg) { pr_err("%s: gsm_data_alloc error", __func__); return -ENOMEM; } dp = msg->data; switch (dlci->adaption) { case 1: /* Unstructured */ break; case 2: /* Unstructured with modem bits. */ if (brk == 0) { *dp++ = (gsm_encode_modem(dlci) << 1) | EA; } else { *dp++ = gsm_encode_modem(dlci) << 1; *dp++ = (brk << 4) | 2 | EA; /* Length, Break, EA */ } break; default: /* Handled above */ break; } __gsm_data_queue(dlci, msg); return size; } /** * gsm_dlci_data_sweep - look for data to send * @gsm: the GSM mux * * Sweep the GSM mux channels in priority order looking for ones with * data to send. We could do with optimising this scan a bit. We aim * to fill the queue totally or up to TX_THRESH_HI bytes. Once we hit * TX_THRESH_LO we get called again * * FIXME: We should round robin between groups and in theory you can * renegotiate DLCI priorities with optional stuff. Needs optimising. 
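 *
 * Note that the scan below restarts at DLCI 1 after every frame queued,
 * so a busy low numbered DLCI can starve higher ones (see the comment in
 * the loop).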
*/ static int gsm_dlci_data_sweep(struct gsm_mux *gsm) { /* Priority ordering: We should do priority with RR of the groups */ int i, len, ret = 0; bool sent; struct gsm_dlci *dlci; while (gsm->tx_bytes < TX_THRESH_HI) { for (sent = false, i = 1; i < NUM_DLCI; i++) { dlci = gsm->dlci[i]; /* skip unused or blocked channel */ if (!dlci || dlci->constipated) continue; /* skip channels with invalid state */ if (dlci->state != DLCI_OPEN) continue; /* count the sent data per adaption */ if (dlci->adaption < 3 && !dlci->net) len = gsm_dlci_data_output(gsm, dlci); else len = gsm_dlci_data_output_framed(gsm, dlci); /* on error exit */ if (len < 0) return ret; if (len > 0) { ret++; sent = true; /* The lower DLCs can starve the higher DLCs! */ break; } /* try next */ } if (!sent) break; } return ret; } /** * gsm_dlci_data_kick - transmit if possible * @dlci: DLCI to kick * * Transmit data from this DLCI if the queue is empty. We can't rely on * a tty wakeup except when we filled the pipe so we need to fire off * new data ourselves in other cases. */ static void gsm_dlci_data_kick(struct gsm_dlci *dlci) { unsigned long flags; int sweep; if (dlci->constipated) return; spin_lock_irqsave(&dlci->gsm->tx_lock, flags); /* If we have nothing running then we need to fire up */ sweep = (dlci->gsm->tx_bytes < TX_THRESH_LO); if (dlci->gsm->tx_bytes == 0) { if (dlci->net) gsm_dlci_data_output_framed(dlci->gsm, dlci); else gsm_dlci_data_output(dlci->gsm, dlci); } if (sweep) gsm_dlci_data_sweep(dlci->gsm); spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags); } /* * Control message processing */ /** * gsm_control_command - send a command frame to a control * @gsm: gsm channel * @cmd: the command to use * @data: data to follow encoded info * @dlen: length of data * * Encode up and queue a UI/UIH frame containing our command. */ static int gsm_control_command(struct gsm_mux *gsm, int cmd, const u8 *data, int dlen) { struct gsm_msg *msg; struct gsm_dlci *dlci = gsm->dlci[0]; msg = gsm_data_alloc(gsm, 0, dlen + 2, dlci->ftype); if (msg == NULL) return -ENOMEM; msg->data[0] = (cmd << 1) | CR | EA; /* Set C/R */ msg->data[1] = (dlen << 1) | EA; memcpy(msg->data + 2, data, dlen); gsm_data_queue(dlci, msg); return 0; } /** * gsm_control_reply - send a response frame to a control * @gsm: gsm channel * @cmd: the command to use * @data: data to follow encoded info * @dlen: length of data * * Encode up and queue a UI/UIH frame containing our response. */ static void gsm_control_reply(struct gsm_mux *gsm, int cmd, const u8 *data, int dlen) { struct gsm_msg *msg; struct gsm_dlci *dlci = gsm->dlci[0]; msg = gsm_data_alloc(gsm, 0, dlen + 2, dlci->ftype); if (msg == NULL) return; msg->data[0] = (cmd & 0xFE) << 1 | EA; /* Clear C/R */ msg->data[1] = (dlen << 1) | EA; memcpy(msg->data + 2, data, dlen); gsm_data_queue(dlci, msg); } /** * gsm_process_modem - process received modem status * @tty: virtual tty bound to the DLCI * @dlci: DLCI to affect * @modem: modem bits (full EA) * @slen: number of signal octets * * Used when a modem control message or line state inline in adaption * layer 2 is processed. Sort out the local modem state and throttles */ static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci, u32 modem, int slen) { int mlines = 0; u8 brk = 0; int fc; /* The modem status command can either contain one octet (V.24 signals) * or two octets (V.24 signals + break signals). This is specified in * section 5.4.6.3.7 of the 07.10 mux spec. 
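 *
 * The value passed in has already had its EA bits stripped by
 * gsm_read_ea_val(). Worked example with made up values: for slen == 1,
 * modem == 0x46 means DV | RTR | RTC and is mapped to TIOCM_CD,
 * TIOCM_RTS/TIOCM_CTS and TIOCM_DSR/TIOCM_DTR below; for slen == 2 the
 * low seven bits carry the break octet instead and the V.24 signals sit
 * in bits 7-13.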
*/ if (slen == 1) modem = modem & 0x7f; else { brk = modem & 0x7f; modem = (modem >> 7) & 0x7f; } /* Flow control/ready to communicate */ fc = (modem & MDM_FC) || !(modem & MDM_RTR); if (fc && !dlci->constipated) { /* Need to throttle our output on this device */ dlci->constipated = true; } else if (!fc && dlci->constipated) { dlci->constipated = false; gsm_dlci_data_kick(dlci); } /* Map modem bits */ if (modem & MDM_RTC) mlines |= TIOCM_DSR | TIOCM_DTR; if (modem & MDM_RTR) mlines |= TIOCM_RTS | TIOCM_CTS; if (modem & MDM_IC) mlines |= TIOCM_RI; if (modem & MDM_DV) mlines |= TIOCM_CD; /* Carrier drop -> hangup */ if (tty) { if ((mlines & TIOCM_CD) == 0 && (dlci->modem_rx & TIOCM_CD)) if (!C_CLOCAL(tty)) tty_hangup(tty); } if (brk & 0x01) tty_insert_flip_char(&dlci->port, 0, TTY_BREAK); dlci->modem_rx = mlines; wake_up_interruptible(&dlci->gsm->event); } /** * gsm_process_negotiation - process received parameters * @gsm: GSM channel * @addr: DLCI address * @cr: command/response * @params: encoded parameters from the parameter negotiation message * * Used when the response for our parameter negotiation command was * received. */ static int gsm_process_negotiation(struct gsm_mux *gsm, unsigned int addr, unsigned int cr, const struct gsm_dlci_param_bits *params) { struct gsm_dlci *dlci = gsm->dlci[addr]; unsigned int ftype, i, adaption, prio, n1, k; i = FIELD_GET(PN_I_CL_FIELD_FTYPE, params->i_cl_bits); adaption = FIELD_GET(PN_I_CL_FIELD_ADAPTION, params->i_cl_bits) + 1; prio = FIELD_GET(PN_P_FIELD_PRIO, params->p_bits); n1 = FIELD_GET(PN_N_FIELD_N1, get_unaligned_le16(¶ms->n_bits)); k = FIELD_GET(PN_K_FIELD_K, params->k_bits); if (n1 < MIN_MTU) { if (debug & DBG_ERRORS) pr_info("%s N1 out of range in PN\n", __func__); return -EINVAL; } switch (i) { case 0x00: ftype = UIH; break; case 0x01: ftype = UI; break; case 0x02: /* I frames are not supported */ if (debug & DBG_ERRORS) pr_info("%s unsupported I frame request in PN\n", __func__); gsm->unsupported++; return -EINVAL; default: if (debug & DBG_ERRORS) pr_info("%s i out of range in PN\n", __func__); return -EINVAL; } if (!cr && gsm->initiator) { if (adaption != dlci->adaption) { if (debug & DBG_ERRORS) pr_info("%s invalid adaption %d in PN\n", __func__, adaption); return -EINVAL; } if (prio != dlci->prio) { if (debug & DBG_ERRORS) pr_info("%s invalid priority %d in PN", __func__, prio); return -EINVAL; } if (n1 > gsm->mru || n1 > dlci->mtu) { /* We requested a frame size but the other party wants * to send larger frames. The standard allows only a * smaller response value than requested (5.4.6.3.1). */ if (debug & DBG_ERRORS) pr_info("%s invalid N1 %d in PN\n", __func__, n1); return -EINVAL; } dlci->mtu = n1; if (ftype != dlci->ftype) { if (debug & DBG_ERRORS) pr_info("%s invalid i %d in PN\n", __func__, i); return -EINVAL; } if (ftype != UI && ftype != UIH && k > dlci->k) { if (debug & DBG_ERRORS) pr_info("%s invalid k %d in PN\n", __func__, k); return -EINVAL; } dlci->k = k; } else if (cr && !gsm->initiator) { /* Only convergence layer type 1 and 2 are supported. 
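 * Adaption 1 is the plain unstructured byte stream (convergence layer
 * type 1); adaption 2 additionally carries a modem status octet in each
 * frame (type 2). See gsm_encode_params() above.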
*/ if (adaption != 1 && adaption != 2) { if (debug & DBG_ERRORS) pr_info("%s invalid adaption %d in PN\n", __func__, adaption); return -EINVAL; } dlci->adaption = adaption; if (n1 > gsm->mru) { /* Propose a smaller value */ dlci->mtu = gsm->mru; } else if (n1 > MAX_MTU) { /* Propose a smaller value */ dlci->mtu = MAX_MTU; } else { dlci->mtu = n1; } dlci->prio = prio; dlci->ftype = ftype; dlci->k = k; } else { return -EINVAL; } return 0; } /** * gsm_control_modem - modem status received * @gsm: GSM channel * @data: data following command * @clen: command length * * We have received a modem status control message. This is used by * the GSM mux protocol to pass virtual modem line status and optionally * to indicate break signals. Unpack it, convert to Linux representation * and if need be stuff a break message down the tty. */ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen) { unsigned int addr = 0; unsigned int modem = 0; struct gsm_dlci *dlci; int len = clen; int cl = clen; const u8 *dp = data; struct tty_struct *tty; len = gsm_read_ea_val(&addr, data, cl); if (len < 1) return; addr >>= 1; /* Closed port, or invalid ? */ if (addr == 0 || addr >= NUM_DLCI || gsm->dlci[addr] == NULL) return; dlci = gsm->dlci[addr]; /* Must be at least one byte following the EA */ if ((cl - len) < 1) return; dp += len; cl -= len; /* get the modem status */ len = gsm_read_ea_val(&modem, dp, cl); if (len < 1) return; tty = tty_port_tty_get(&dlci->port); gsm_process_modem(tty, dlci, modem, cl); if (tty) { tty_wakeup(tty); tty_kref_put(tty); } gsm_control_reply(gsm, CMD_MSC, data, clen); } /** * gsm_control_negotiation - parameter negotiation received * @gsm: GSM channel * @cr: command/response flag * @data: data following command * @dlen: data length * * We have received a parameter negotiation message. This is used by * the GSM mux protocol to configure protocol parameters for a new DLCI. */ static void gsm_control_negotiation(struct gsm_mux *gsm, unsigned int cr, const u8 *data, unsigned int dlen) { unsigned int addr; struct gsm_dlci_param_bits pn_reply; struct gsm_dlci *dlci; struct gsm_dlci_param_bits *params; if (dlen < sizeof(struct gsm_dlci_param_bits)) { gsm->open_error++; return; } /* Invalid DLCI? */ params = (struct gsm_dlci_param_bits *)data; addr = FIELD_GET(PN_D_FIELD_DLCI, params->d_bits); if (addr == 0 || addr >= NUM_DLCI || !gsm->dlci[addr]) { gsm->open_error++; return; } dlci = gsm->dlci[addr]; /* Too late for parameter negotiation? */ if ((!cr && dlci->state == DLCI_OPENING) || dlci->state == DLCI_OPEN) { gsm->open_error++; return; } /* Process the received parameters */ if (gsm_process_negotiation(gsm, addr, cr, params) != 0) { /* Negotiation failed. Close the link. */ if (debug & DBG_ERRORS) pr_info("%s PN failed\n", __func__); gsm->open_error++; gsm_dlci_close(dlci); return; } if (cr) { /* Reply command with accepted parameters. 
*/ if (gsm_encode_params(dlci, &pn_reply) == 0) gsm_control_reply(gsm, CMD_PN, (const u8 *)&pn_reply, sizeof(pn_reply)); else if (debug & DBG_ERRORS) pr_info("%s PN invalid\n", __func__); } else if (dlci->state == DLCI_CONFIGURE) { /* Proceed with link setup by sending SABM before UA */ dlci->state = DLCI_OPENING; gsm_command(gsm, dlci->addr, SABM|PF); mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); } else { if (debug & DBG_ERRORS) pr_info("%s PN in invalid state\n", __func__); gsm->open_error++; } } /** * gsm_control_rls - remote line status * @gsm: GSM channel * @data: data bytes * @clen: data length * * The modem sends us a two byte message on the control channel whenever * it wishes to send us an error state from the virtual link. Stuff * this into the uplink tty if present */ static void gsm_control_rls(struct gsm_mux *gsm, const u8 *data, int clen) { struct tty_port *port; unsigned int addr = 0; u8 bits; int len = clen; const u8 *dp = data; while (gsm_read_ea(&addr, *dp++) == 0) { len--; if (len == 0) return; } /* Must be at least one byte following ea */ len--; if (len <= 0) return; addr >>= 1; /* Closed port, or invalid ? */ if (addr == 0 || addr >= NUM_DLCI || gsm->dlci[addr] == NULL) return; /* No error ? */ bits = *dp; if ((bits & 1) == 0) return; port = &gsm->dlci[addr]->port; if (bits & 2) tty_insert_flip_char(port, 0, TTY_OVERRUN); if (bits & 4) tty_insert_flip_char(port, 0, TTY_PARITY); if (bits & 8) tty_insert_flip_char(port, 0, TTY_FRAME); tty_flip_buffer_push(port); gsm_control_reply(gsm, CMD_RLS, data, clen); } static void gsm_dlci_begin_close(struct gsm_dlci *dlci); /** * gsm_control_message - DLCI 0 control processing * @gsm: our GSM mux * @command: the command EA * @data: data beyond the command/length EAs * @clen: length * * Input processor for control messages from the other end of the link. 
* Processes the incoming request and queues a response frame or an * NSC response if not supported */ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command, const u8 *data, int clen) { u8 buf[1]; switch (command) { case CMD_CLD: { struct gsm_dlci *dlci = gsm->dlci[0]; /* Modem wishes to close down */ if (dlci) { dlci->dead = true; gsm->dead = true; gsm_dlci_begin_close(dlci); } } break; case CMD_TEST: /* Modem wishes to test, reply with the data */ gsm_control_reply(gsm, CMD_TEST, data, clen); break; case CMD_FCON: /* Modem can accept data again */ gsm->constipated = false; gsm_control_reply(gsm, CMD_FCON, NULL, 0); /* Kick the link in case it is idling */ gsmld_write_trigger(gsm); break; case CMD_FCOFF: /* Modem wants us to STFU */ gsm->constipated = true; gsm_control_reply(gsm, CMD_FCOFF, NULL, 0); break; case CMD_MSC: /* Out of band modem line change indicator for a DLCI */ gsm_control_modem(gsm, data, clen); break; case CMD_RLS: /* Out of band error reception for a DLCI */ gsm_control_rls(gsm, data, clen); break; case CMD_PSC: /* Modem wishes to enter power saving state */ gsm_control_reply(gsm, CMD_PSC, NULL, 0); break; /* Optional commands */ case CMD_PN: /* Modem sends a parameter negotiation command */ gsm_control_negotiation(gsm, 1, data, clen); break; /* Optional unsupported commands */ case CMD_RPN: /* Remote port negotiation */ case CMD_SNC: /* Service negotiation command */ gsm->unsupported++; fallthrough; default: /* Reply to bad commands with an NSC */ buf[0] = command; gsm_control_reply(gsm, CMD_NSC, buf, 1); break; } } /** * gsm_control_response - process a response to our control * @gsm: our GSM mux * @command: the command (response) EA * @data: data beyond the command/length EA * @clen: length * * Process a response to an outstanding command. We only allow a single * control message in flight so this is fairly easy. All the clean up * is done by the caller, we just update the fields, flag it as done * and return */ static void gsm_control_response(struct gsm_mux *gsm, unsigned int command, const u8 *data, int clen) { struct gsm_control *ctrl; struct gsm_dlci *dlci; unsigned long flags; spin_lock_irqsave(&gsm->control_lock, flags); ctrl = gsm->pending_cmd; dlci = gsm->dlci[0]; command |= 1; /* Does the reply match our command */ if (ctrl != NULL && (command == ctrl->cmd || command == CMD_NSC)) { /* Our command was replied to, kill the retry timer */ del_timer(&gsm->t2_timer); gsm->pending_cmd = NULL; /* Rejected by the other end */ if (command == CMD_NSC) ctrl->error = -EOPNOTSUPP; ctrl->done = 1; wake_up(&gsm->event); /* Or did we receive the PN response to our PN command */ } else if (command == CMD_PN) { gsm_control_negotiation(gsm, 0, data, clen); /* Or did we receive the TEST response to our TEST command */ } else if (command == CMD_TEST && clen == 1 && *data == gsm->ka_num) { gsm->ka_retries = -1; /* trigger new keep-alive message */ if (dlci && !dlci->dead) mod_timer(&gsm->ka_timer, jiffies + gsm->keep_alive * HZ / 100); } spin_unlock_irqrestore(&gsm->control_lock, flags); } /** * gsm_control_keep_alive - check timeout or start keep-alive * @t: timer contained in our gsm object * * Called off the keep-alive timer expiry signaling that our link * partner is not responding anymore. Link will be closed. * This is also called to startup our timer. 
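 *
 * The keep-alive itself is a CMD_TEST command carrying the rolling
 * pattern byte ka_num which the link partner is expected to echo back
 * (matched in gsm_control_response()). The command is retried up to n2
 * times at T2 intervals before DLCI 0 is taken down.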
*/ static void gsm_control_keep_alive(struct timer_list *t) { struct gsm_mux *gsm = from_timer(gsm, t, ka_timer); unsigned long flags; spin_lock_irqsave(&gsm->control_lock, flags); if (gsm->ka_num && gsm->ka_retries == 0) { /* Keep-alive expired -> close the link */ if (debug & DBG_ERRORS) pr_debug("%s keep-alive timed out\n", __func__); spin_unlock_irqrestore(&gsm->control_lock, flags); if (gsm->dlci[0]) gsm_dlci_begin_close(gsm->dlci[0]); return; } else if (gsm->keep_alive && gsm->dlci[0] && !gsm->dlci[0]->dead) { if (gsm->ka_retries > 0) { /* T2 expired for keep-alive -> resend */ gsm->ka_retries--; } else { /* Start keep-alive timer */ gsm->ka_num++; if (!gsm->ka_num) gsm->ka_num++; gsm->ka_retries = (signed int)gsm->n2; } gsm_control_command(gsm, CMD_TEST, &gsm->ka_num, sizeof(gsm->ka_num)); mod_timer(&gsm->ka_timer, jiffies + gsm->t2 * HZ / 100); } spin_unlock_irqrestore(&gsm->control_lock, flags); } /** * gsm_control_transmit - send control packet * @gsm: gsm mux * @ctrl: frame to send * * Send out a pending control command (called under control lock) */ static void gsm_control_transmit(struct gsm_mux *gsm, struct gsm_control *ctrl) { gsm_control_command(gsm, ctrl->cmd, ctrl->data, ctrl->len); } /** * gsm_control_retransmit - retransmit a control frame * @t: timer contained in our gsm object * * Called off the T2 timer expiry in order to retransmit control frames * that have been lost in the system somewhere. The control_lock protects * us from colliding with another sender or a receive completion event. * In that situation the timer may still occur in a small window but * gsm->pending_cmd will be NULL and we just let the timer expire. */ static void gsm_control_retransmit(struct timer_list *t) { struct gsm_mux *gsm = from_timer(gsm, t, t2_timer); struct gsm_control *ctrl; unsigned long flags; spin_lock_irqsave(&gsm->control_lock, flags); ctrl = gsm->pending_cmd; if (ctrl) { if (gsm->cretries == 0 || !gsm->dlci[0] || gsm->dlci[0]->dead) { gsm->pending_cmd = NULL; ctrl->error = -ETIMEDOUT; ctrl->done = 1; spin_unlock_irqrestore(&gsm->control_lock, flags); wake_up(&gsm->event); return; } gsm->cretries--; gsm_control_transmit(gsm, ctrl); mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100); } spin_unlock_irqrestore(&gsm->control_lock, flags); } /** * gsm_control_send - send a control frame on DLCI 0 * @gsm: the GSM channel * @command: command to send including CR bit * @data: bytes of data (must be kmalloced) * @clen: length of the block to send * * Queue and dispatch a control command. Only one command can be * active at a time. In theory more can be outstanding but the matching * gets really complicated so for now stick to one outstanding. 
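 *
 * Minimal usage sketch (buf and len are placeholders, not taken from a
 * real caller in this file):
 *
 *	ctrl = gsm_control_send(gsm, CMD_MSC, buf, len);
 *	if (ctrl)
 *		err = gsm_control_wait(gsm, ctrl);	/* 0 or -ve errno */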
*/ static struct gsm_control *gsm_control_send(struct gsm_mux *gsm, unsigned int command, u8 *data, int clen) { struct gsm_control *ctrl = kzalloc(sizeof(struct gsm_control), GFP_ATOMIC); unsigned long flags; if (ctrl == NULL) return NULL; retry: wait_event(gsm->event, gsm->pending_cmd == NULL); spin_lock_irqsave(&gsm->control_lock, flags); if (gsm->pending_cmd != NULL) { spin_unlock_irqrestore(&gsm->control_lock, flags); goto retry; } ctrl->cmd = command; ctrl->data = data; ctrl->len = clen; gsm->pending_cmd = ctrl; /* If DLCI0 is in ADM mode skip retries, it won't respond */ if (gsm->dlci[0]->mode == DLCI_MODE_ADM) gsm->cretries = 0; else gsm->cretries = gsm->n2; mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100); gsm_control_transmit(gsm, ctrl); spin_unlock_irqrestore(&gsm->control_lock, flags); return ctrl; } /** * gsm_control_wait - wait for a control to finish * @gsm: GSM mux * @control: control we are waiting on * * Waits for the control to complete or time out. Frees any used * resources and returns 0 for success, or an error if the remote * rejected or ignored the request. */ static int gsm_control_wait(struct gsm_mux *gsm, struct gsm_control *control) { int err; wait_event(gsm->event, control->done == 1); err = control->error; kfree(control); return err; } /* * DLCI level handling: Needs krefs */ /* * State transitions and timers */ /** * gsm_dlci_close - a DLCI has closed * @dlci: DLCI that closed * * Perform processing when moving a DLCI into closed state. If there * is an attached tty this is hung up */ static void gsm_dlci_close(struct gsm_dlci *dlci) { del_timer(&dlci->t1); if (debug & DBG_ERRORS) pr_debug("DLCI %d goes closed.\n", dlci->addr); dlci->state = DLCI_CLOSED; /* Prevent us from sending data before the link is up again */ dlci->constipated = true; if (dlci->addr != 0) { tty_port_tty_hangup(&dlci->port, false); gsm_dlci_clear_queues(dlci->gsm, dlci); /* Ensure that gsmtty_open() can return. */ tty_port_set_initialized(&dlci->port, false); wake_up_interruptible(&dlci->port.open_wait); } else { del_timer(&dlci->gsm->ka_timer); dlci->gsm->dead = true; } /* A DLCI 0 close is a MUX termination so we need to kick that back to userspace somehow */ gsm_dlci_data_kick(dlci); wake_up_all(&dlci->gsm->event); } /** * gsm_dlci_open - a DLCI has opened * @dlci: DLCI that opened * * Perform processing when moving a DLCI into open state. */ static void gsm_dlci_open(struct gsm_dlci *dlci) { struct gsm_mux *gsm = dlci->gsm; /* Note that SABM UA .. SABM UA first UA lost can mean that we go open -> open */ del_timer(&dlci->t1); /* This will let a tty open continue */ dlci->state = DLCI_OPEN; dlci->constipated = false; if (debug & DBG_ERRORS) pr_debug("DLCI %d goes open.\n", dlci->addr); /* Send current modem state */ if (dlci->addr) { gsm_modem_update(dlci, 0); } else { /* Start keep-alive control */ gsm->ka_num = 0; gsm->ka_retries = -1; mod_timer(&gsm->ka_timer, jiffies + gsm->keep_alive * HZ / 100); } gsm_dlci_data_kick(dlci); wake_up(&dlci->gsm->event); } /** * gsm_dlci_negotiate - start parameter negotiation * @dlci: DLCI to open * * Starts the parameter negotiation for the new DLCI. This needs to be done * before the DLCI initialized the channel via SABM. */ static int gsm_dlci_negotiate(struct gsm_dlci *dlci) { struct gsm_mux *gsm = dlci->gsm; struct gsm_dlci_param_bits params; int ret; ret = gsm_encode_params(dlci, ¶ms); if (ret != 0) return ret; /* We cannot asynchronous wait for the command response with * gsm_command() and gsm_control_wait() at this point. 
*/ ret = gsm_control_command(gsm, CMD_PN, (const u8 *)¶ms, sizeof(params)); return ret; } /** * gsm_dlci_t1 - T1 timer expiry * @t: timer contained in the DLCI that opened * * The T1 timer handles retransmits of control frames (essentially of * SABM and DISC). We resend the command until the retry count runs out * in which case an opening port goes back to closed and a closing port * is simply put into closed state (any further frames from the other * end will get a DM response) * * Some control dlci can stay in ADM mode with other dlci working just * fine. In that case we can just keep the control dlci open after the * DLCI_OPENING retries time out. */ static void gsm_dlci_t1(struct timer_list *t) { struct gsm_dlci *dlci = from_timer(dlci, t, t1); struct gsm_mux *gsm = dlci->gsm; switch (dlci->state) { case DLCI_CONFIGURE: if (dlci->retries && gsm_dlci_negotiate(dlci) == 0) { dlci->retries--; mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); } else { gsm->open_error++; gsm_dlci_begin_close(dlci); /* prevent half open link */ } break; case DLCI_OPENING: if (dlci->retries) { dlci->retries--; gsm_command(dlci->gsm, dlci->addr, SABM|PF); mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); } else if (!dlci->addr && gsm->control == (DM | PF)) { if (debug & DBG_ERRORS) pr_info("DLCI %d opening in ADM mode.\n", dlci->addr); dlci->mode = DLCI_MODE_ADM; gsm_dlci_open(dlci); } else { gsm->open_error++; gsm_dlci_begin_close(dlci); /* prevent half open link */ } break; case DLCI_CLOSING: if (dlci->retries) { dlci->retries--; gsm_command(dlci->gsm, dlci->addr, DISC|PF); mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); } else gsm_dlci_close(dlci); break; default: pr_debug("%s: unhandled state: %d\n", __func__, dlci->state); break; } } /** * gsm_dlci_begin_open - start channel open procedure * @dlci: DLCI to open * * Commence opening a DLCI from the Linux side. We issue SABM messages * to the modem which should then reply with a UA or ADM, at which point * we will move into open state. Opening is done asynchronously with retry * running off timers and the responses. * Parameter negotiation is performed before SABM if required. */ static void gsm_dlci_begin_open(struct gsm_dlci *dlci) { struct gsm_mux *gsm = dlci ? dlci->gsm : NULL; bool need_pn = false; if (!gsm) return; if (dlci->addr != 0) { if (gsm->adaption != 1 || gsm->adaption != dlci->adaption) need_pn = true; if (dlci->prio != (roundup(dlci->addr + 1, 8) - 1)) need_pn = true; if (gsm->ftype != dlci->ftype) need_pn = true; } switch (dlci->state) { case DLCI_CLOSED: case DLCI_WAITING_CONFIG: case DLCI_CLOSING: dlci->retries = gsm->n2; if (!need_pn) { dlci->state = DLCI_OPENING; gsm_command(gsm, dlci->addr, SABM|PF); } else { /* Configure DLCI before setup */ dlci->state = DLCI_CONFIGURE; if (gsm_dlci_negotiate(dlci) != 0) { gsm_dlci_close(dlci); return; } } mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); break; default: break; } } /** * gsm_dlci_set_opening - change state to opening * @dlci: DLCI to open * * Change internal state to wait for DLCI open from initiator side. * We set off timers and responses upon reception of an SABM. */ static void gsm_dlci_set_opening(struct gsm_dlci *dlci) { switch (dlci->state) { case DLCI_CLOSED: case DLCI_WAITING_CONFIG: case DLCI_CLOSING: dlci->state = DLCI_OPENING; break; default: break; } } /** * gsm_dlci_set_wait_config - wait for channel configuration * @dlci: DLCI to configure * * Wait for a DLCI configuration from the application. 
*/ static void gsm_dlci_set_wait_config(struct gsm_dlci *dlci) { switch (dlci->state) { case DLCI_CLOSED: case DLCI_CLOSING: dlci->state = DLCI_WAITING_CONFIG; break; default: break; } } /** * gsm_dlci_begin_close - start channel open procedure * @dlci: DLCI to open * * Commence closing a DLCI from the Linux side. We issue DISC messages * to the modem which should then reply with a UA, at which point we * will move into closed state. Closing is done asynchronously with retry * off timers. We may also receive a DM reply from the other end which * indicates the channel was already closed. */ static void gsm_dlci_begin_close(struct gsm_dlci *dlci) { struct gsm_mux *gsm = dlci->gsm; if (dlci->state == DLCI_CLOSED || dlci->state == DLCI_CLOSING) return; dlci->retries = gsm->n2; dlci->state = DLCI_CLOSING; gsm_command(dlci->gsm, dlci->addr, DISC|PF); mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); wake_up_interruptible(&gsm->event); } /** * gsm_dlci_data - data arrived * @dlci: channel * @data: block of bytes received * @clen: length of received block * * A UI or UIH frame has arrived which contains data for a channel * other than the control channel. If the relevant virtual tty is * open we shovel the bits down it, if not we drop them. */ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen) { /* krefs .. */ struct tty_port *port = &dlci->port; struct tty_struct *tty; unsigned int modem = 0; int len; if (debug & DBG_TTY) pr_debug("%d bytes for tty\n", clen); switch (dlci->adaption) { /* Unsupported types */ case 4: /* Packetised interruptible data */ break; case 3: /* Packetised uininterruptible voice/data */ break; case 2: /* Asynchronous serial with line state in each frame */ len = gsm_read_ea_val(&modem, data, clen); if (len < 1) return; tty = tty_port_tty_get(port); if (tty) { gsm_process_modem(tty, dlci, modem, len); tty_wakeup(tty); tty_kref_put(tty); } /* Skip processed modem data */ data += len; clen -= len; fallthrough; case 1: /* Line state will go via DLCI 0 controls only */ default: tty_insert_flip_string(port, data, clen); tty_flip_buffer_push(port); } } /** * gsm_dlci_command - data arrived on control channel * @dlci: channel * @data: block of bytes received * @len: length of received block * * A UI or UIH frame has arrived which contains data for DLCI 0 the * control channel. This should contain a command EA followed by * control data bytes. The command EA contains a command/response bit * and we divide up the work accordingly. */ static void gsm_dlci_command(struct gsm_dlci *dlci, const u8 *data, int len) { /* See what command is involved */ unsigned int command = 0; unsigned int clen = 0; unsigned int dlen; /* read the command */ dlen = gsm_read_ea_val(&command, data, len); len -= dlen; data += dlen; /* read any control data */ dlen = gsm_read_ea_val(&clen, data, len); len -= dlen; data += dlen; /* Malformed command? */ if (clen > len) { dlci->gsm->malformed++; return; } if (command & 1) gsm_control_message(dlci->gsm, command, data, clen); else gsm_control_response(dlci->gsm, command, data, clen); } /** * gsm_kick_timer - transmit if possible * @t: timer contained in our gsm object * * Transmit data from DLCIs if the queue is empty. We can't rely on * a tty wakeup except when we filled the pipe so we need to fire off * new data ourselves in other cases. 
*/ static void gsm_kick_timer(struct timer_list *t) { struct gsm_mux *gsm = from_timer(gsm, t, kick_timer); unsigned long flags; int sent = 0; spin_lock_irqsave(&gsm->tx_lock, flags); /* If we have nothing running then we need to fire up */ if (gsm->tx_bytes < TX_THRESH_LO) sent = gsm_dlci_data_sweep(gsm); spin_unlock_irqrestore(&gsm->tx_lock, flags); if (sent && debug & DBG_DATA) pr_info("%s TX queue stalled\n", __func__); } /** * gsm_dlci_copy_config_values - copy DLCI configuration * @dlci: source DLCI * @dc: configuration structure to fill */ static void gsm_dlci_copy_config_values(struct gsm_dlci *dlci, struct gsm_dlci_config *dc) { memset(dc, 0, sizeof(*dc)); dc->channel = (u32)dlci->addr; dc->adaption = (u32)dlci->adaption; dc->mtu = (u32)dlci->mtu; dc->priority = (u32)dlci->prio; if (dlci->ftype == UIH) dc->i = 1; else dc->i = 2; dc->k = (u32)dlci->k; } /** * gsm_dlci_config - configure DLCI from configuration * @dlci: DLCI to configure * @dc: DLCI configuration * @open: open DLCI after configuration? */ static int gsm_dlci_config(struct gsm_dlci *dlci, struct gsm_dlci_config *dc, int open) { struct gsm_mux *gsm; bool need_restart = false; bool need_open = false; unsigned int i; /* * Check that userspace doesn't put stuff in here to prevent breakages * in the future. */ for (i = 0; i < ARRAY_SIZE(dc->reserved); i++) if (dc->reserved[i]) return -EINVAL; if (!dlci) return -EINVAL; gsm = dlci->gsm; /* Stuff we don't support yet - I frame transport */ if (dc->adaption != 1 && dc->adaption != 2) return -EOPNOTSUPP; if (dc->mtu > MAX_MTU || dc->mtu < MIN_MTU || dc->mtu > gsm->mru) return -EINVAL; if (dc->priority >= 64) return -EINVAL; if (dc->i == 0 || dc->i > 2) /* UIH and UI only */ return -EINVAL; if (dc->k > 7) return -EINVAL; if (dc->flags & ~GSM_FL_RESTART) /* allow future extensions */ return -EINVAL; /* * See what is needed for reconfiguration */ /* Framing fields */ if (dc->adaption != dlci->adaption) need_restart = true; if (dc->mtu != dlci->mtu) need_restart = true; if (dc->i != dlci->ftype) need_restart = true; /* Requires care */ if (dc->priority != dlci->prio) need_restart = true; if (dc->flags & GSM_FL_RESTART) need_restart = true; if ((open && gsm->wait_config) || need_restart) need_open = true; if (dlci->state == DLCI_WAITING_CONFIG) { need_restart = false; need_open = true; } /* * Close down what is needed, restart and initiate the new * configuration. */ if (need_restart) { gsm_dlci_begin_close(dlci); wait_event_interruptible(gsm->event, dlci->state == DLCI_CLOSED); if (signal_pending(current)) return -EINTR; } /* * Setup the new configuration values */ dlci->adaption = (int)dc->adaption; if (dc->mtu) dlci->mtu = (unsigned int)dc->mtu; else dlci->mtu = gsm->mtu; if (dc->priority) dlci->prio = (u8)dc->priority; else dlci->prio = roundup(dlci->addr + 1, 8) - 1; if (dc->i == 1) dlci->ftype = UIH; else if (dc->i == 2) dlci->ftype = UI; if (dc->k) dlci->k = (u8)dc->k; else dlci->k = gsm->k; if (need_open) { if (gsm->initiator) gsm_dlci_begin_open(dlci); else gsm_dlci_set_opening(dlci); } return 0; } /* * Allocate/Free DLCI channels */ /** * gsm_dlci_alloc - allocate a DLCI * @gsm: GSM mux * @addr: address of the DLCI * * Allocate and install a new DLCI object into the GSM mux. 
* * FIXME: review locking races */ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr) { struct gsm_dlci *dlci = kzalloc(sizeof(struct gsm_dlci), GFP_ATOMIC); if (dlci == NULL) return NULL; spin_lock_init(&dlci->lock); mutex_init(&dlci->mutex); if (kfifo_alloc(&dlci->fifo, TX_SIZE, GFP_KERNEL) < 0) { kfree(dlci); return NULL; } skb_queue_head_init(&dlci->skb_list); timer_setup(&dlci->t1, gsm_dlci_t1, 0); tty_port_init(&dlci->port); dlci->port.ops = &gsm_port_ops; dlci->gsm = gsm; dlci->addr = addr; dlci->adaption = gsm->adaption; dlci->mtu = gsm->mtu; if (addr == 0) dlci->prio = 0; else dlci->prio = roundup(addr + 1, 8) - 1; dlci->ftype = gsm->ftype; dlci->k = gsm->k; dlci->state = DLCI_CLOSED; if (addr) { dlci->data = gsm_dlci_data; /* Prevent us from sending data before the link is up */ dlci->constipated = true; } else { dlci->data = gsm_dlci_command; } gsm->dlci[addr] = dlci; return dlci; } /** * gsm_dlci_free - free DLCI * @port: tty port for DLCI to free * * Free up a DLCI. * * Can sleep. */ static void gsm_dlci_free(struct tty_port *port) { struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port); timer_shutdown_sync(&dlci->t1); dlci->gsm->dlci[dlci->addr] = NULL; kfifo_free(&dlci->fifo); while ((dlci->skb = skb_dequeue(&dlci->skb_list))) dev_kfree_skb(dlci->skb); kfree(dlci); } static inline void dlci_get(struct gsm_dlci *dlci) { tty_port_get(&dlci->port); } static inline void dlci_put(struct gsm_dlci *dlci) { tty_port_put(&dlci->port); } static void gsm_destroy_network(struct gsm_dlci *dlci); /** * gsm_dlci_release - release DLCI * @dlci: DLCI to destroy * * Release a DLCI. Actual free is deferred until either * mux is closed or tty is closed - whichever is last. * * Can sleep. */ static void gsm_dlci_release(struct gsm_dlci *dlci) { struct tty_struct *tty = tty_port_tty_get(&dlci->port); if (tty) { mutex_lock(&dlci->mutex); gsm_destroy_network(dlci); mutex_unlock(&dlci->mutex); /* We cannot use tty_hangup() because in tty_kref_put() the tty * driver assumes that the hangup queue is free and reuses it to * queue release_one_tty() -> NULL pointer panic in * process_one_work(). */ tty_vhangup(tty); tty_port_tty_set(&dlci->port, NULL); tty_kref_put(tty); } dlci->state = DLCI_CLOSED; dlci_put(dlci); } /* * LAPBish link layer logic */ /** * gsm_queue - a GSM frame is ready to process * @gsm: pointer to our gsm mux * * At this point in time a frame has arrived and been demangled from * the line encoding. All the differences between the encodings have * been handled below us and the frame is unpacked into the structures. * The fcs holds the header FCS but any data FCS must be added here. */ static void gsm_queue(struct gsm_mux *gsm) { struct gsm_dlci *dlci; u8 cr; int address; if (gsm->fcs != GOOD_FCS) { gsm->bad_fcs++; if (debug & DBG_DATA) pr_debug("BAD FCS %02x\n", gsm->fcs); return; } address = gsm->address >> 1; if (address >= NUM_DLCI) goto invalid; cr = gsm->address & 1; /* C/R bit */ cr ^= gsm->initiator ? 
0 : 1; /* Flip so 1 always means command */ gsm_print_packet("<--", address, cr, gsm->control, gsm->buf, gsm->len); dlci = gsm->dlci[address]; switch (gsm->control) { case SABM|PF: if (cr == 1) { gsm->open_error++; goto invalid; } if (dlci == NULL) dlci = gsm_dlci_alloc(gsm, address); if (dlci == NULL) { gsm->open_error++; return; } if (dlci->dead) gsm_response(gsm, address, DM|PF); else { gsm_response(gsm, address, UA|PF); gsm_dlci_open(dlci); } break; case DISC|PF: if (cr == 1) goto invalid; if (dlci == NULL || dlci->state == DLCI_CLOSED) { gsm_response(gsm, address, DM|PF); return; } /* Real close complete */ gsm_response(gsm, address, UA|PF); gsm_dlci_close(dlci); break; case UA|PF: if (cr == 0 || dlci == NULL) break; switch (dlci->state) { case DLCI_CLOSING: gsm_dlci_close(dlci); break; case DLCI_OPENING: gsm_dlci_open(dlci); break; default: pr_debug("%s: unhandled state: %d\n", __func__, dlci->state); break; } break; case DM: /* DM can be valid unsolicited */ case DM|PF: if (cr) goto invalid; if (dlci == NULL) return; gsm_dlci_close(dlci); break; case UI: case UI|PF: case UIH: case UIH|PF: if (dlci == NULL || dlci->state != DLCI_OPEN) { gsm_response(gsm, address, DM|PF); return; } dlci->data(dlci, gsm->buf, gsm->len); break; default: goto invalid; } return; invalid: gsm->malformed++; return; } /** * gsm0_receive - perform processing for non-transparency * @gsm: gsm data for this ldisc instance * @c: character * * Receive bytes in gsm mode 0 */ static void gsm0_receive(struct gsm_mux *gsm, unsigned char c) { unsigned int len; switch (gsm->state) { case GSM_SEARCH: /* SOF marker */ if (c == GSM0_SOF) { gsm->state = GSM_ADDRESS; gsm->address = 0; gsm->len = 0; gsm->fcs = INIT_FCS; } break; case GSM_ADDRESS: /* Address EA */ gsm->fcs = gsm_fcs_add(gsm->fcs, c); if (gsm_read_ea(&gsm->address, c)) gsm->state = GSM_CONTROL; break; case GSM_CONTROL: /* Control Byte */ gsm->fcs = gsm_fcs_add(gsm->fcs, c); gsm->control = c; gsm->state = GSM_LEN0; break; case GSM_LEN0: /* Length EA */ gsm->fcs = gsm_fcs_add(gsm->fcs, c); if (gsm_read_ea(&gsm->len, c)) { if (gsm->len > gsm->mru) { gsm->bad_size++; gsm->state = GSM_SEARCH; break; } gsm->count = 0; if (!gsm->len) gsm->state = GSM_FCS; else gsm->state = GSM_DATA; break; } gsm->state = GSM_LEN1; break; case GSM_LEN1: gsm->fcs = gsm_fcs_add(gsm->fcs, c); len = c; gsm->len |= len << 7; if (gsm->len > gsm->mru) { gsm->bad_size++; gsm->state = GSM_SEARCH; break; } gsm->count = 0; if (!gsm->len) gsm->state = GSM_FCS; else gsm->state = GSM_DATA; break; case GSM_DATA: /* Data */ gsm->buf[gsm->count++] = c; if (gsm->count == gsm->len) { /* Calculate final FCS for UI frames over all data */ if ((gsm->control & ~PF) != UIH) { gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, gsm->count); } gsm->state = GSM_FCS; } break; case GSM_FCS: /* FCS follows the packet */ gsm->fcs = gsm_fcs_add(gsm->fcs, c); gsm->state = GSM_SSOF; break; case GSM_SSOF: gsm->state = GSM_SEARCH; if (c == GSM0_SOF) gsm_queue(gsm); else gsm->bad_size++; break; default: pr_debug("%s: unhandled state: %d\n", __func__, gsm->state); break; } } /** * gsm1_receive - perform processing for non-transparency * @gsm: gsm data for this ldisc instance * @c: character * * Receive bytes in mode 1 (Advanced option) */ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c) { /* handle XON/XOFF */ if ((c & ISO_IEC_646_MASK) == XON) { gsm->constipated = true; return; } else if ((c & ISO_IEC_646_MASK) == XOFF) { gsm->constipated = false; /* Kick the link in case it is idling */ 
gsmld_write_trigger(gsm); return; } if (c == GSM1_SOF) { /* EOF is only valid in frame if we have got to the data state */ if (gsm->state == GSM_DATA) { if (gsm->count < 1) { /* Missing FSC */ gsm->malformed++; gsm->state = GSM_START; return; } /* Remove the FCS from data */ gsm->count--; if ((gsm->control & ~PF) != UIH) { /* Calculate final FCS for UI frames over all * data but FCS */ gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, gsm->count); } /* Add the FCS itself to test against GOOD_FCS */ gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->buf[gsm->count]); gsm->len = gsm->count; gsm_queue(gsm); gsm->state = GSM_START; return; } /* Any partial frame was a runt so go back to start */ if (gsm->state != GSM_START) { if (gsm->state != GSM_SEARCH) gsm->malformed++; gsm->state = GSM_START; } /* A SOF in GSM_START means we are still reading idling or framing bytes */ return; } if (c == GSM1_ESCAPE) { gsm->escape = true; return; } /* Only an unescaped SOF gets us out of GSM search */ if (gsm->state == GSM_SEARCH) return; if (gsm->escape) { c ^= GSM1_ESCAPE_BITS; gsm->escape = false; } switch (gsm->state) { case GSM_START: /* First byte after SOF */ gsm->address = 0; gsm->state = GSM_ADDRESS; gsm->fcs = INIT_FCS; fallthrough; case GSM_ADDRESS: /* Address continuation */ gsm->fcs = gsm_fcs_add(gsm->fcs, c); if (gsm_read_ea(&gsm->address, c)) gsm->state = GSM_CONTROL; break; case GSM_CONTROL: /* Control Byte */ gsm->fcs = gsm_fcs_add(gsm->fcs, c); gsm->control = c; gsm->count = 0; gsm->state = GSM_DATA; break; case GSM_DATA: /* Data */ if (gsm->count > gsm->mru) { /* Allow one for the FCS */ gsm->state = GSM_OVERRUN; gsm->bad_size++; } else gsm->buf[gsm->count++] = c; break; case GSM_OVERRUN: /* Over-long - eg a dropped SOF */ break; default: pr_debug("%s: unhandled state: %d\n", __func__, gsm->state); break; } } /** * gsm_error - handle tty error * @gsm: ldisc data * * Handle an error in the receipt of data for a frame. Currently we just * go back to hunting for a SOF. * * FIXME: better diagnostics ? */ static void gsm_error(struct gsm_mux *gsm) { gsm->state = GSM_SEARCH; gsm->io_error++; } /** * gsm_cleanup_mux - generic GSM protocol cleanup * @gsm: our mux * @disc: disconnect link? * * Clean up the bits of the mux which are the same for all framing * protocols. Remove the mux from the mux table, stop all the timers * and then shut down each device hanging up the channels as we go. 
*/ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc) { int i; struct gsm_dlci *dlci; struct gsm_msg *txq, *ntxq; gsm->dead = true; mutex_lock(&gsm->mutex); dlci = gsm->dlci[0]; if (dlci) { if (disc && dlci->state != DLCI_CLOSED) { gsm_dlci_begin_close(dlci); wait_event(gsm->event, dlci->state == DLCI_CLOSED); } dlci->dead = true; } /* Finish outstanding timers, making sure they are done */ del_timer_sync(&gsm->kick_timer); del_timer_sync(&gsm->t2_timer); del_timer_sync(&gsm->ka_timer); /* Finish writing to ldisc */ flush_work(&gsm->tx_work); /* Free up any link layer users and finally the control channel */ if (gsm->has_devices) { gsm_unregister_devices(gsm_tty_driver, gsm->num); gsm->has_devices = false; } for (i = NUM_DLCI - 1; i >= 0; i--) if (gsm->dlci[i]) gsm_dlci_release(gsm->dlci[i]); mutex_unlock(&gsm->mutex); /* Now wipe the queues */ tty_ldisc_flush(gsm->tty); list_for_each_entry_safe(txq, ntxq, &gsm->tx_ctrl_list, list) kfree(txq); INIT_LIST_HEAD(&gsm->tx_ctrl_list); list_for_each_entry_safe(txq, ntxq, &gsm->tx_data_list, list) kfree(txq); INIT_LIST_HEAD(&gsm->tx_data_list); } /** * gsm_activate_mux - generic GSM setup * @gsm: our mux * * Set up the bits of the mux which are the same for all framing * protocols. Add the mux to the mux table so it can be opened and * finally kick off connecting to DLCI 0 on the modem. */ static int gsm_activate_mux(struct gsm_mux *gsm) { struct gsm_dlci *dlci; int ret; dlci = gsm_dlci_alloc(gsm, 0); if (dlci == NULL) return -ENOMEM; if (gsm->encoding == GSM_BASIC_OPT) gsm->receive = gsm0_receive; else gsm->receive = gsm1_receive; ret = gsm_register_devices(gsm_tty_driver, gsm->num); if (ret) return ret; gsm->has_devices = true; gsm->dead = false; /* Tty opens are now permissible */ return 0; } /** * gsm_free_mux - free up a mux * @gsm: mux to free * * Dispose of allocated resources for a dead mux */ static void gsm_free_mux(struct gsm_mux *gsm) { int i; for (i = 0; i < MAX_MUX; i++) { if (gsm == gsm_mux[i]) { gsm_mux[i] = NULL; break; } } mutex_destroy(&gsm->mutex); kfree(gsm->txframe); kfree(gsm->buf); kfree(gsm); } /** * gsm_free_muxr - free up a mux * @ref: kreference to the mux to free * * Dispose of allocated resources for a dead mux */ static void gsm_free_muxr(struct kref *ref) { struct gsm_mux *gsm = container_of(ref, struct gsm_mux, ref); gsm_free_mux(gsm); } static inline void mux_get(struct gsm_mux *gsm) { unsigned long flags; spin_lock_irqsave(&gsm_mux_lock, flags); kref_get(&gsm->ref); spin_unlock_irqrestore(&gsm_mux_lock, flags); } static inline void mux_put(struct gsm_mux *gsm) { unsigned long flags; spin_lock_irqsave(&gsm_mux_lock, flags); kref_put(&gsm->ref, gsm_free_muxr); spin_unlock_irqrestore(&gsm_mux_lock, flags); } static inline unsigned int mux_num_to_base(struct gsm_mux *gsm) { return gsm->num * NUM_DLCI; } static inline unsigned int mux_line_to_num(unsigned int line) { return line / NUM_DLCI; } /** * gsm_alloc_mux - allocate a mux * * Creates a new mux ready for activation. 
*/ static struct gsm_mux *gsm_alloc_mux(void) { int i; struct gsm_mux *gsm = kzalloc(sizeof(struct gsm_mux), GFP_KERNEL); if (gsm == NULL) return NULL; gsm->buf = kmalloc(MAX_MRU + 1, GFP_KERNEL); if (gsm->buf == NULL) { kfree(gsm); return NULL; } gsm->txframe = kmalloc(2 * (MAX_MTU + PROT_OVERHEAD - 1), GFP_KERNEL); if (gsm->txframe == NULL) { kfree(gsm->buf); kfree(gsm); return NULL; } spin_lock_init(&gsm->lock); mutex_init(&gsm->mutex); kref_init(&gsm->ref); INIT_LIST_HEAD(&gsm->tx_ctrl_list); INIT_LIST_HEAD(&gsm->tx_data_list); timer_setup(&gsm->kick_timer, gsm_kick_timer, 0); timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0); timer_setup(&gsm->ka_timer, gsm_control_keep_alive, 0); INIT_WORK(&gsm->tx_work, gsmld_write_task); init_waitqueue_head(&gsm->event); spin_lock_init(&gsm->control_lock); spin_lock_init(&gsm->tx_lock); gsm->t1 = T1; gsm->t2 = T2; gsm->t3 = T3; gsm->n2 = N2; gsm->k = K; gsm->ftype = UIH; gsm->adaption = 1; gsm->encoding = GSM_ADV_OPT; gsm->mru = 64; /* Default to encoding 1 so these should be 64 */ gsm->mtu = 64; gsm->dead = true; /* Avoid early tty opens */ gsm->wait_config = false; /* Disabled */ gsm->keep_alive = 0; /* Disabled */ /* Store the instance to the mux array or abort if no space is * available. */ spin_lock(&gsm_mux_lock); for (i = 0; i < MAX_MUX; i++) { if (!gsm_mux[i]) { gsm_mux[i] = gsm; gsm->num = i; break; } } spin_unlock(&gsm_mux_lock); if (i == MAX_MUX) { mutex_destroy(&gsm->mutex); kfree(gsm->txframe); kfree(gsm->buf); kfree(gsm); return NULL; } return gsm; } static void gsm_copy_config_values(struct gsm_mux *gsm, struct gsm_config *c) { memset(c, 0, sizeof(*c)); c->adaption = gsm->adaption; c->encapsulation = gsm->encoding; c->initiator = gsm->initiator; c->t1 = gsm->t1; c->t2 = gsm->t2; c->t3 = gsm->t3; c->n2 = gsm->n2; if (gsm->ftype == UIH) c->i = 1; else c->i = 2; pr_debug("Ftype %d i %d\n", gsm->ftype, c->i); c->mru = gsm->mru; c->mtu = gsm->mtu; c->k = gsm->k; } static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c) { int need_close = 0; int need_restart = 0; /* Stuff we don't support yet - UI or I frame transport */ if (c->adaption != 1 && c->adaption != 2) return -EOPNOTSUPP; /* Check the MRU/MTU range looks sane */ if (c->mru < MIN_MTU || c->mtu < MIN_MTU) return -EINVAL; if (c->mru > MAX_MRU || c->mtu > MAX_MTU) return -EINVAL; if (c->t3 > MAX_T3) return -EINVAL; if (c->n2 > 255) return -EINVAL; if (c->encapsulation > 1) /* Basic, advanced, no I */ return -EINVAL; if (c->initiator > 1) return -EINVAL; if (c->k > MAX_WINDOW_SIZE) return -EINVAL; if (c->i == 0 || c->i > 2) /* UIH and UI only */ return -EINVAL; /* * See what is needed for reconfiguration */ /* Timing fields */ if (c->t1 != 0 && c->t1 != gsm->t1) need_restart = 1; if (c->t2 != 0 && c->t2 != gsm->t2) need_restart = 1; if (c->encapsulation != gsm->encoding) need_restart = 1; if (c->adaption != gsm->adaption) need_restart = 1; /* Requires care */ if (c->initiator != gsm->initiator) need_close = 1; if (c->mru != gsm->mru) need_restart = 1; if (c->mtu != gsm->mtu) need_restart = 1; /* * Close down what is needed, restart and initiate the new * configuration. On the first time there is no DLCI[0] * and closing or cleaning up is not necessary. */ if (need_close || need_restart) gsm_cleanup_mux(gsm, true); gsm->initiator = c->initiator; gsm->mru = c->mru; gsm->mtu = c->mtu; gsm->encoding = c->encapsulation ? 
GSM_ADV_OPT : GSM_BASIC_OPT; gsm->adaption = c->adaption; gsm->n2 = c->n2; if (c->i == 1) gsm->ftype = UIH; else if (c->i == 2) gsm->ftype = UI; if (c->t1) gsm->t1 = c->t1; if (c->t2) gsm->t2 = c->t2; if (c->t3) gsm->t3 = c->t3; if (c->k) gsm->k = c->k; /* * FIXME: We need to separate activation/deactivation from adding * and removing from the mux array */ if (gsm->dead) { int ret = gsm_activate_mux(gsm); if (ret) return ret; if (gsm->initiator) gsm_dlci_begin_open(gsm->dlci[0]); } return 0; } static void gsm_copy_config_ext_values(struct gsm_mux *gsm, struct gsm_config_ext *ce) { memset(ce, 0, sizeof(*ce)); ce->wait_config = gsm->wait_config ? 1 : 0; ce->keep_alive = gsm->keep_alive; } static int gsm_config_ext(struct gsm_mux *gsm, struct gsm_config_ext *ce) { bool need_restart = false; unsigned int i; /* * Check that userspace doesn't put stuff in here to prevent breakages * in the future. */ for (i = 0; i < ARRAY_SIZE(ce->reserved); i++) if (ce->reserved[i]) return -EINVAL; if (ce->flags & ~GSM_FL_RESTART) return -EINVAL; /* Requires care */ if (ce->flags & GSM_FL_RESTART) need_restart = true; /* * Close down what is needed, restart and initiate the new * configuration. On the first time there is no DLCI[0] * and closing or cleaning up is not necessary. */ if (need_restart) gsm_cleanup_mux(gsm, true); /* * Setup the new configuration values */ gsm->wait_config = ce->wait_config ? true : false; gsm->keep_alive = ce->keep_alive; if (gsm->dead) { int ret = gsm_activate_mux(gsm); if (ret) return ret; if (gsm->initiator) gsm_dlci_begin_open(gsm->dlci[0]); } return 0; } /** * gsmld_output - write to link * @gsm: our mux * @data: bytes to output * @len: size * * Write a block of data from the GSM mux to the data channel. This * will eventually be serialized from above but at the moment isn't. */ static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len) { if (tty_write_room(gsm->tty) < len) { set_bit(TTY_DO_WRITE_WAKEUP, &gsm->tty->flags); return -ENOSPC; } if (debug & DBG_DATA) gsm_hex_dump_bytes(__func__, data, len); return gsm->tty->ops->write(gsm->tty, data, len); } /** * gsmld_write_trigger - schedule ldisc write task * @gsm: our mux */ static void gsmld_write_trigger(struct gsm_mux *gsm) { if (!gsm || !gsm->dlci[0] || gsm->dlci[0]->dead) return; schedule_work(&gsm->tx_work); } /** * gsmld_write_task - ldisc write task * @work: our tx write work * * Writes out data to the ldisc if possible. We are doing this here to * avoid dead-locking. This returns if no space or data is left for output. */ static void gsmld_write_task(struct work_struct *work) { struct gsm_mux *gsm = container_of(work, struct gsm_mux, tx_work); unsigned long flags; int i, ret; /* All outstanding control channel and control messages and one data * frame is sent. */ ret = -ENODEV; spin_lock_irqsave(&gsm->tx_lock, flags); if (gsm->tty) ret = gsm_data_kick(gsm); spin_unlock_irqrestore(&gsm->tx_lock, flags); if (ret >= 0) for (i = 0; i < NUM_DLCI; i++) if (gsm->dlci[i]) tty_port_tty_wakeup(&gsm->dlci[i]->port); } /** * gsmld_attach_gsm - mode set up * @tty: our tty structure * @gsm: our mux * * Set up the MUX for basic mode and commence connecting to the * modem. Currently called from the line discipline set up but * will need moving to an ioctl path. */ static void gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm) { gsm->tty = tty_kref_get(tty); /* Turn off tty XON/XOFF handling to handle it explicitly. 
*/ gsm->old_c_iflag = tty->termios.c_iflag; tty->termios.c_iflag &= (IXON | IXOFF); } /** * gsmld_detach_gsm - stop doing 0710 mux * @tty: tty attached to the mux * @gsm: mux * * Shutdown and then clean up the resources used by the line discipline */ static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm) { WARN_ON(tty != gsm->tty); /* Restore tty XON/XOFF handling. */ gsm->tty->termios.c_iflag = gsm->old_c_iflag; tty_kref_put(gsm->tty); gsm->tty = NULL; } static void gsmld_receive_buf(struct tty_struct *tty, const u8 *cp, const u8 *fp, size_t count) { struct gsm_mux *gsm = tty->disc_data; char flags = TTY_NORMAL; if (debug & DBG_DATA) gsm_hex_dump_bytes(__func__, cp, count); for (; count; count--, cp++) { if (fp) flags = *fp++; switch (flags) { case TTY_NORMAL: if (gsm->receive) gsm->receive(gsm, *cp); break; case TTY_OVERRUN: case TTY_BREAK: case TTY_PARITY: case TTY_FRAME: gsm_error(gsm); break; default: WARN_ONCE(1, "%s: unknown flag %d\n", tty_name(tty), flags); break; } } /* FASYNC if needed ? */ /* If clogged call tty_throttle(tty); */ } /** * gsmld_flush_buffer - clean input queue * @tty: terminal device * * Flush the input buffer. Called when the line discipline is * being closed, when the tty layer wants the buffer flushed (eg * at hangup). */ static void gsmld_flush_buffer(struct tty_struct *tty) { } /** * gsmld_close - close the ldisc for this tty * @tty: device * * Called from the terminal layer when this line discipline is * being shut down, either because of a close or becsuse of a * discipline change. The function will not be called while other * ldisc methods are in progress. */ static void gsmld_close(struct tty_struct *tty) { struct gsm_mux *gsm = tty->disc_data; /* The ldisc locks and closes the port before calling our close. This * means we have no way to do a proper disconnect. We will not bother * to do one. */ gsm_cleanup_mux(gsm, false); gsmld_detach_gsm(tty, gsm); gsmld_flush_buffer(tty); /* Do other clean up here */ mux_put(gsm); } /** * gsmld_open - open an ldisc * @tty: terminal to open * * Called when this line discipline is being attached to the * terminal device. Can sleep. Called serialized so that no * other events will occur in parallel. No further open will occur * until a close. */ static int gsmld_open(struct tty_struct *tty) { struct gsm_mux *gsm; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (tty->ops->write == NULL) return -EINVAL; /* Attach our ldisc data */ gsm = gsm_alloc_mux(); if (gsm == NULL) return -ENOMEM; tty->disc_data = gsm; tty->receive_room = 65536; /* Attach the initial passive connection */ gsmld_attach_gsm(tty, gsm); /* The mux will not be activated yet, we wait for correct * configuration first. */ if (gsm->encoding == GSM_BASIC_OPT) gsm->receive = gsm0_receive; else gsm->receive = gsm1_receive; return 0; } /** * gsmld_write_wakeup - asynchronous I/O notifier * @tty: tty device * * Required for the ptys, serial driver etc. since processes * that attach themselves to the master and rely on ASYNC * IO must be woken up */ static void gsmld_write_wakeup(struct tty_struct *tty) { struct gsm_mux *gsm = tty->disc_data; /* Queue poll */ gsmld_write_trigger(gsm); } /** * gsmld_read - read function for tty * @tty: tty device * @file: file object * @buf: userspace buffer pointer * @nr: size of I/O * @cookie: unused * @offset: unused * * Perform reads for the line discipline. We are guaranteed that the * line discipline will not be closed under us but we may get multiple * parallel readers and must handle this ourselves. 
We may also get * a hangup. Always called in user context, may sleep. * * This code must be sure never to sleep through a hangup. */ static ssize_t gsmld_read(struct tty_struct *tty, struct file *file, u8 *buf, size_t nr, void **cookie, unsigned long offset) { return -EOPNOTSUPP; } /** * gsmld_write - write function for tty * @tty: tty device * @file: file object * @buf: userspace buffer pointer * @nr: size of I/O * * Called when the owner of the device wants to send a frame * itself (or some other control data). The data is transferred * as-is and must be properly framed and checksummed as appropriate * by userspace. Frames are either sent whole or not at all as this * avoids pain user side. */ static ssize_t gsmld_write(struct tty_struct *tty, struct file *file, const u8 *buf, size_t nr) { struct gsm_mux *gsm = tty->disc_data; unsigned long flags; int space; int ret; if (!gsm) return -ENODEV; ret = -ENOBUFS; spin_lock_irqsave(&gsm->tx_lock, flags); space = tty_write_room(tty); if (space >= nr) ret = tty->ops->write(tty, buf, nr); else set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); spin_unlock_irqrestore(&gsm->tx_lock, flags); return ret; } /** * gsmld_poll - poll method for N_GSM0710 * @tty: terminal device * @file: file accessing it * @wait: poll table * * Called when the line discipline is asked to poll() for data or * for special events. This code is not serialized with respect to * other events save open/close. * * This code must be sure never to sleep through a hangup. * Called without the kernel lock held - fine */ static __poll_t gsmld_poll(struct tty_struct *tty, struct file *file, poll_table *wait) { __poll_t mask = 0; struct gsm_mux *gsm = tty->disc_data; poll_wait(file, &tty->read_wait, wait); poll_wait(file, &tty->write_wait, wait); if (gsm->dead) mask |= EPOLLHUP; if (tty_hung_up_p(file)) mask |= EPOLLHUP; if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) mask |= EPOLLHUP; if (!tty_is_writelocked(tty) && tty_write_room(tty) > 0) mask |= EPOLLOUT | EPOLLWRNORM; return mask; } static int gsmld_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct gsm_config c; struct gsm_config_ext ce; struct gsm_dlci_config dc; struct gsm_mux *gsm = tty->disc_data; unsigned int base, addr; struct gsm_dlci *dlci; switch (cmd) { case GSMIOC_GETCONF: gsm_copy_config_values(gsm, &c); if (copy_to_user((void __user *)arg, &c, sizeof(c))) return -EFAULT; return 0; case GSMIOC_SETCONF: if (copy_from_user(&c, (void __user *)arg, sizeof(c))) return -EFAULT; return gsm_config(gsm, &c); case GSMIOC_GETFIRST: base = mux_num_to_base(gsm); return put_user(base + 1, (__u32 __user *)arg); case GSMIOC_GETCONF_EXT: gsm_copy_config_ext_values(gsm, &ce); if (copy_to_user((void __user *)arg, &ce, sizeof(ce))) return -EFAULT; return 0; case GSMIOC_SETCONF_EXT: if (copy_from_user(&ce, (void __user *)arg, sizeof(ce))) return -EFAULT; return gsm_config_ext(gsm, &ce); case GSMIOC_GETCONF_DLCI: if (copy_from_user(&dc, (void __user *)arg, sizeof(dc))) return -EFAULT; if (dc.channel == 0 || dc.channel >= NUM_DLCI) return -EINVAL; addr = array_index_nospec(dc.channel, NUM_DLCI); dlci = gsm->dlci[addr]; if (!dlci) { dlci = gsm_dlci_alloc(gsm, addr); if (!dlci) return -ENOMEM; } gsm_dlci_copy_config_values(dlci, &dc); if (copy_to_user((void __user *)arg, &dc, sizeof(dc))) return -EFAULT; return 0; case GSMIOC_SETCONF_DLCI: if (copy_from_user(&dc, (void __user *)arg, sizeof(dc))) return -EFAULT; if (dc.channel == 0 || dc.channel >= NUM_DLCI) return -EINVAL; addr = array_index_nospec(dc.channel, NUM_DLCI); dlci = 
gsm->dlci[addr]; if (!dlci) { dlci = gsm_dlci_alloc(gsm, addr); if (!dlci) return -ENOMEM; } return gsm_dlci_config(dlci, &dc, 0); default: return n_tty_ioctl_helper(tty, cmd, arg); } } /* * Network interface * */ static int gsm_mux_net_open(struct net_device *net) { pr_debug("%s called\n", __func__); netif_start_queue(net); return 0; } static int gsm_mux_net_close(struct net_device *net) { netif_stop_queue(net); return 0; } static void dlci_net_free(struct gsm_dlci *dlci) { if (!dlci->net) { WARN_ON(1); return; } dlci->adaption = dlci->prev_adaption; dlci->data = dlci->prev_data; free_netdev(dlci->net); dlci->net = NULL; } static void net_free(struct kref *ref) { struct gsm_mux_net *mux_net; struct gsm_dlci *dlci; mux_net = container_of(ref, struct gsm_mux_net, ref); dlci = mux_net->dlci; if (dlci->net) { unregister_netdev(dlci->net); dlci_net_free(dlci); } } static inline void muxnet_get(struct gsm_mux_net *mux_net) { kref_get(&mux_net->ref); } static inline void muxnet_put(struct gsm_mux_net *mux_net) { kref_put(&mux_net->ref, net_free); } static netdev_tx_t gsm_mux_net_start_xmit(struct sk_buff *skb, struct net_device *net) { struct gsm_mux_net *mux_net = netdev_priv(net); struct gsm_dlci *dlci = mux_net->dlci; muxnet_get(mux_net); skb_queue_head(&dlci->skb_list, skb); net->stats.tx_packets++; net->stats.tx_bytes += skb->len; gsm_dlci_data_kick(dlci); /* And tell the kernel when the last transmit started. */ netif_trans_update(net); muxnet_put(mux_net); return NETDEV_TX_OK; } /* called when a packet did not ack after watchdogtimeout */ static void gsm_mux_net_tx_timeout(struct net_device *net, unsigned int txqueue) { /* Tell syslog we are hosed. */ dev_dbg(&net->dev, "Tx timed out.\n"); /* Update statistics */ net->stats.tx_errors++; } static void gsm_mux_rx_netchar(struct gsm_dlci *dlci, const unsigned char *in_buf, int size) { struct net_device *net = dlci->net; struct sk_buff *skb; struct gsm_mux_net *mux_net = netdev_priv(net); muxnet_get(mux_net); /* Allocate an sk_buff */ skb = dev_alloc_skb(size + NET_IP_ALIGN); if (!skb) { /* We got no receive buffer. 
*/ net->stats.rx_dropped++; muxnet_put(mux_net); return; } skb_reserve(skb, NET_IP_ALIGN); skb_put_data(skb, in_buf, size); skb->dev = net; skb->protocol = htons(ETH_P_IP); /* Ship it off to the kernel */ netif_rx(skb); /* update out statistics */ net->stats.rx_packets++; net->stats.rx_bytes += size; muxnet_put(mux_net); return; } static void gsm_mux_net_init(struct net_device *net) { static const struct net_device_ops gsm_netdev_ops = { .ndo_open = gsm_mux_net_open, .ndo_stop = gsm_mux_net_close, .ndo_start_xmit = gsm_mux_net_start_xmit, .ndo_tx_timeout = gsm_mux_net_tx_timeout, }; net->netdev_ops = &gsm_netdev_ops; /* fill in the other fields */ net->watchdog_timeo = GSM_NET_TX_TIMEOUT; net->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; net->type = ARPHRD_NONE; net->tx_queue_len = 10; } /* caller holds the dlci mutex */ static void gsm_destroy_network(struct gsm_dlci *dlci) { struct gsm_mux_net *mux_net; pr_debug("destroy network interface\n"); if (!dlci->net) return; mux_net = netdev_priv(dlci->net); muxnet_put(mux_net); } /* caller holds the dlci mutex */ static int gsm_create_network(struct gsm_dlci *dlci, struct gsm_netconfig *nc) { char *netname; int retval = 0; struct net_device *net; struct gsm_mux_net *mux_net; if (!capable(CAP_NET_ADMIN)) return -EPERM; /* Already in a non tty mode */ if (dlci->adaption > 2) return -EBUSY; if (nc->protocol != htons(ETH_P_IP)) return -EPROTONOSUPPORT; if (nc->adaption != 3 && nc->adaption != 4) return -EPROTONOSUPPORT; pr_debug("create network interface\n"); netname = "gsm%d"; if (nc->if_name[0] != '\0') netname = nc->if_name; net = alloc_netdev(sizeof(struct gsm_mux_net), netname, NET_NAME_UNKNOWN, gsm_mux_net_init); if (!net) { pr_err("alloc_netdev failed\n"); return -ENOMEM; } net->mtu = dlci->mtu; net->min_mtu = MIN_MTU; net->max_mtu = dlci->mtu; mux_net = netdev_priv(net); mux_net->dlci = dlci; kref_init(&mux_net->ref); strncpy(nc->if_name, net->name, IFNAMSIZ); /* return net name */ /* reconfigure dlci for network */ dlci->prev_adaption = dlci->adaption; dlci->prev_data = dlci->data; dlci->adaption = nc->adaption; dlci->data = gsm_mux_rx_netchar; dlci->net = net; pr_debug("register netdev\n"); retval = register_netdev(net); if (retval) { pr_err("network register fail %d\n", retval); dlci_net_free(dlci); return retval; } return net->ifindex; /* return network index */ } /* Line discipline for real tty */ static struct tty_ldisc_ops tty_ldisc_packet = { .owner = THIS_MODULE, .num = N_GSM0710, .name = "n_gsm", .open = gsmld_open, .close = gsmld_close, .flush_buffer = gsmld_flush_buffer, .read = gsmld_read, .write = gsmld_write, .ioctl = gsmld_ioctl, .poll = gsmld_poll, .receive_buf = gsmld_receive_buf, .write_wakeup = gsmld_write_wakeup }; /* * Virtual tty side */ /** * gsm_modem_upd_via_data - send modem bits via convergence layer * @dlci: channel * @brk: break signal * * Send an empty frame to signal mobile state changes and to transmit the * break signal for adaption 2. 
*/ static void gsm_modem_upd_via_data(struct gsm_dlci *dlci, u8 brk) { struct gsm_mux *gsm = dlci->gsm; unsigned long flags; if (dlci->state != DLCI_OPEN || dlci->adaption != 2) return; spin_lock_irqsave(&gsm->tx_lock, flags); gsm_dlci_modem_output(gsm, dlci, brk); spin_unlock_irqrestore(&gsm->tx_lock, flags); } /** * gsm_modem_upd_via_msc - send modem bits via control frame * @dlci: channel * @brk: break signal */ static int gsm_modem_upd_via_msc(struct gsm_dlci *dlci, u8 brk) { u8 modembits[3]; struct gsm_control *ctrl; int len = 2; if (dlci->gsm->encoding != GSM_BASIC_OPT) return 0; modembits[0] = (dlci->addr << 2) | 2 | EA; /* DLCI, Valid, EA */ if (!brk) { modembits[1] = (gsm_encode_modem(dlci) << 1) | EA; } else { modembits[1] = gsm_encode_modem(dlci) << 1; modembits[2] = (brk << 4) | 2 | EA; /* Length, Break, EA */ len++; } ctrl = gsm_control_send(dlci->gsm, CMD_MSC, modembits, len); if (ctrl == NULL) return -ENOMEM; return gsm_control_wait(dlci->gsm, ctrl); } /** * gsm_modem_update - send modem status line state * @dlci: channel * @brk: break signal */ static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk) { if (dlci->gsm->dead) return -EL2HLT; if (dlci->adaption == 2) { /* Send convergence layer type 2 empty data frame. */ gsm_modem_upd_via_data(dlci, brk); return 0; } else if (dlci->gsm->encoding == GSM_BASIC_OPT) { /* Send as MSC control message. */ return gsm_modem_upd_via_msc(dlci, brk); } /* Modem status lines are not supported. */ return -EPROTONOSUPPORT; } /** * gsm_wait_modem_change - wait for modem status line change * @dlci: channel * @mask: modem status line bits * * The function returns if: * - any given modem status line bit changed * - the wait event function got interrupted (e.g. by a signal) * - the underlying DLCI was closed * - the underlying ldisc device was removed */ static int gsm_wait_modem_change(struct gsm_dlci *dlci, u32 mask) { struct gsm_mux *gsm = dlci->gsm; u32 old = dlci->modem_rx; int ret; ret = wait_event_interruptible(gsm->event, gsm->dead || dlci->state != DLCI_OPEN || (old ^ dlci->modem_rx) & mask); if (gsm->dead) return -ENODEV; if (dlci->state != DLCI_OPEN) return -EL2NSYNC; return ret; } static bool gsm_carrier_raised(struct tty_port *port) { struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port); struct gsm_mux *gsm = dlci->gsm; /* Not yet open so no carrier info */ if (dlci->state != DLCI_OPEN) return false; if (debug & DBG_CD_ON) return true; /* * Basic mode with control channel in ADM mode may not respond * to CMD_MSC at all and modem_rx is empty. 
*/ if (gsm->encoding == GSM_BASIC_OPT && gsm->dlci[0]->mode == DLCI_MODE_ADM && !dlci->modem_rx) return true; return dlci->modem_rx & TIOCM_CD; } static void gsm_dtr_rts(struct tty_port *port, bool active) { struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port); unsigned int modem_tx = dlci->modem_tx; if (active) modem_tx |= TIOCM_DTR | TIOCM_RTS; else modem_tx &= ~(TIOCM_DTR | TIOCM_RTS); if (modem_tx != dlci->modem_tx) { dlci->modem_tx = modem_tx; gsm_modem_update(dlci, 0); } } static const struct tty_port_operations gsm_port_ops = { .carrier_raised = gsm_carrier_raised, .dtr_rts = gsm_dtr_rts, .destruct = gsm_dlci_free, }; static int gsmtty_install(struct tty_driver *driver, struct tty_struct *tty) { struct gsm_mux *gsm; struct gsm_dlci *dlci; unsigned int line = tty->index; unsigned int mux = mux_line_to_num(line); bool alloc = false; int ret; line = line & 0x3F; if (mux >= MAX_MUX) return -ENXIO; /* FIXME: we need to lock gsm_mux for lifetimes of ttys eventually */ if (gsm_mux[mux] == NULL) return -EUNATCH; if (line == 0 || line > 61) /* 62/63 reserved */ return -ECHRNG; gsm = gsm_mux[mux]; if (gsm->dead) return -EL2HLT; /* If DLCI 0 is not yet fully open return an error. This is ok from a locking perspective as we don't have to worry about this if DLCI0 is lost */ mutex_lock(&gsm->mutex); if (gsm->dlci[0] && gsm->dlci[0]->state != DLCI_OPEN) { mutex_unlock(&gsm->mutex); return -EL2NSYNC; } dlci = gsm->dlci[line]; if (dlci == NULL) { alloc = true; dlci = gsm_dlci_alloc(gsm, line); } if (dlci == NULL) { mutex_unlock(&gsm->mutex); return -ENOMEM; } ret = tty_port_install(&dlci->port, driver, tty); if (ret) { if (alloc) dlci_put(dlci); mutex_unlock(&gsm->mutex); return ret; } dlci_get(dlci); dlci_get(gsm->dlci[0]); mux_get(gsm); tty->driver_data = dlci; mutex_unlock(&gsm->mutex); return 0; } static int gsmtty_open(struct tty_struct *tty, struct file *filp) { struct gsm_dlci *dlci = tty->driver_data; struct tty_port *port = &dlci->port; port->count++; tty_port_tty_set(port, tty); dlci->modem_rx = 0; /* We could in theory open and close before we wait - eg if we get a DM straight back. 
This is ok as that will have caused a hangup */ tty_port_set_initialized(port, true); /* Start sending off SABM messages */ if (!dlci->gsm->wait_config) { /* Start sending off SABM messages */ if (dlci->gsm->initiator) gsm_dlci_begin_open(dlci); else gsm_dlci_set_opening(dlci); } else { gsm_dlci_set_wait_config(dlci); } /* And wait for virtual carrier */ return tty_port_block_til_ready(port, tty, filp); } static void gsmtty_close(struct tty_struct *tty, struct file *filp) { struct gsm_dlci *dlci = tty->driver_data; if (dlci == NULL) return; if (dlci->state == DLCI_CLOSED) return; mutex_lock(&dlci->mutex); gsm_destroy_network(dlci); mutex_unlock(&dlci->mutex); if (tty_port_close_start(&dlci->port, tty, filp) == 0) return; gsm_dlci_begin_close(dlci); if (tty_port_initialized(&dlci->port) && C_HUPCL(tty)) tty_port_lower_dtr_rts(&dlci->port); tty_port_close_end(&dlci->port, tty); tty_port_tty_set(&dlci->port, NULL); return; } static void gsmtty_hangup(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; if (dlci->state == DLCI_CLOSED) return; tty_port_hangup(&dlci->port); gsm_dlci_begin_close(dlci); } static ssize_t gsmtty_write(struct tty_struct *tty, const u8 *buf, size_t len) { int sent; struct gsm_dlci *dlci = tty->driver_data; if (dlci->state == DLCI_CLOSED) return -EINVAL; /* Stuff the bytes into the fifo queue */ sent = kfifo_in_locked(&dlci->fifo, buf, len, &dlci->lock); /* Need to kick the channel */ gsm_dlci_data_kick(dlci); return sent; } static unsigned int gsmtty_write_room(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; if (dlci->state == DLCI_CLOSED) return 0; return kfifo_avail(&dlci->fifo); } static unsigned int gsmtty_chars_in_buffer(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; if (dlci->state == DLCI_CLOSED) return 0; return kfifo_len(&dlci->fifo); } static void gsmtty_flush_buffer(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; unsigned long flags; if (dlci->state == DLCI_CLOSED) return; /* Caution needed: If we implement reliable transport classes then the data being transmitted can't simply be junked once it has first hit the stack. Until then we can just blow it away */ spin_lock_irqsave(&dlci->lock, flags); kfifo_reset(&dlci->fifo); spin_unlock_irqrestore(&dlci->lock, flags); /* Need to unhook this DLCI from the transmit queue logic */ } static void gsmtty_wait_until_sent(struct tty_struct *tty, int timeout) { /* The FIFO handles the queue so the kernel will do the right thing waiting on chars_in_buffer before calling us. 
No work to do here */ } static int gsmtty_tiocmget(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; if (dlci->state == DLCI_CLOSED) return -EINVAL; return dlci->modem_rx; } static int gsmtty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct gsm_dlci *dlci = tty->driver_data; unsigned int modem_tx = dlci->modem_tx; if (dlci->state == DLCI_CLOSED) return -EINVAL; modem_tx &= ~clear; modem_tx |= set; if (modem_tx != dlci->modem_tx) { dlci->modem_tx = modem_tx; return gsm_modem_update(dlci, 0); } return 0; } static int gsmtty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct gsm_dlci *dlci = tty->driver_data; struct gsm_netconfig nc; struct gsm_dlci_config dc; int index; if (dlci->state == DLCI_CLOSED) return -EINVAL; switch (cmd) { case GSMIOC_ENABLE_NET: if (copy_from_user(&nc, (void __user *)arg, sizeof(nc))) return -EFAULT; nc.if_name[IFNAMSIZ-1] = '\0'; /* return net interface index or error code */ mutex_lock(&dlci->mutex); index = gsm_create_network(dlci, &nc); mutex_unlock(&dlci->mutex); if (copy_to_user((void __user *)arg, &nc, sizeof(nc))) return -EFAULT; return index; case GSMIOC_DISABLE_NET: if (!capable(CAP_NET_ADMIN)) return -EPERM; mutex_lock(&dlci->mutex); gsm_destroy_network(dlci); mutex_unlock(&dlci->mutex); return 0; case GSMIOC_GETCONF_DLCI: if (copy_from_user(&dc, (void __user *)arg, sizeof(dc))) return -EFAULT; if (dc.channel != dlci->addr) return -EPERM; gsm_dlci_copy_config_values(dlci, &dc); if (copy_to_user((void __user *)arg, &dc, sizeof(dc))) return -EFAULT; return 0; case GSMIOC_SETCONF_DLCI: if (copy_from_user(&dc, (void __user *)arg, sizeof(dc))) return -EFAULT; if (dc.channel >= NUM_DLCI) return -EINVAL; if (dc.channel != 0 && dc.channel != dlci->addr) return -EPERM; return gsm_dlci_config(dlci, &dc, 1); case TIOCMIWAIT: return gsm_wait_modem_change(dlci, (u32)arg); default: return -ENOIOCTLCMD; } } static void gsmtty_set_termios(struct tty_struct *tty, const struct ktermios *old) { struct gsm_dlci *dlci = tty->driver_data; if (dlci->state == DLCI_CLOSED) return; /* For the moment its fixed. In actual fact the speed information for the virtual channel can be propogated in both directions by the RPN control message. This however rapidly gets nasty as we then have to remap modem signals each way according to whether our virtual cable is null modem etc .. 
 */
	tty_termios_copy_hw(&tty->termios, old);
}

static void gsmtty_throttle(struct tty_struct *tty)
{
	struct gsm_dlci *dlci = tty->driver_data;
	if (dlci->state == DLCI_CLOSED)
		return;
	if (C_CRTSCTS(tty))
		dlci->modem_tx &= ~TIOCM_RTS;
	dlci->throttled = true;
	/* Send an MSC with RTS cleared */
	gsm_modem_update(dlci, 0);
}

static void gsmtty_unthrottle(struct tty_struct *tty)
{
	struct gsm_dlci *dlci = tty->driver_data;
	if (dlci->state == DLCI_CLOSED)
		return;
	if (C_CRTSCTS(tty))
		dlci->modem_tx |= TIOCM_RTS;
	dlci->throttled = false;
	/* Send an MSC with RTS set */
	gsm_modem_update(dlci, 0);
}

static int gsmtty_break_ctl(struct tty_struct *tty, int state)
{
	struct gsm_dlci *dlci = tty->driver_data;
	int encode = 0;	/* Off */
	if (dlci->state == DLCI_CLOSED)
		return -EINVAL;

	if (state == -1)	/* "On indefinitely" - we can't encode this
				    properly */
		encode = 0x0F;
	else if (state > 0) {
		encode = state / 200;	/* mS to encoding */
		if (encode > 0x0F)
			encode = 0x0F;	/* Best effort */
	}
	return gsm_modem_update(dlci, encode);
}

static void gsmtty_cleanup(struct tty_struct *tty)
{
	struct gsm_dlci *dlci = tty->driver_data;
	struct gsm_mux *gsm = dlci->gsm;

	dlci_put(dlci);
	dlci_put(gsm->dlci[0]);
	mux_put(gsm);
}

/* Virtual ttys for the demux */
static const struct tty_operations gsmtty_ops = {
	.install		= gsmtty_install,
	.open			= gsmtty_open,
	.close			= gsmtty_close,
	.write			= gsmtty_write,
	.write_room		= gsmtty_write_room,
	.chars_in_buffer	= gsmtty_chars_in_buffer,
	.flush_buffer		= gsmtty_flush_buffer,
	.ioctl			= gsmtty_ioctl,
	.throttle		= gsmtty_throttle,
	.unthrottle		= gsmtty_unthrottle,
	.set_termios		= gsmtty_set_termios,
	.hangup			= gsmtty_hangup,
	.wait_until_sent	= gsmtty_wait_until_sent,
	.tiocmget		= gsmtty_tiocmget,
	.tiocmset		= gsmtty_tiocmset,
	.break_ctl		= gsmtty_break_ctl,
	.cleanup		= gsmtty_cleanup,
};

static int __init gsm_init(void)
{
	/* Fill in our line protocol discipline, and register it */
	int status = tty_register_ldisc(&tty_ldisc_packet);

	if (status != 0) {
		pr_err("n_gsm: can't register line discipline (err = %d)\n",
								status);
		return status;
	}

	gsm_tty_driver = tty_alloc_driver(GSM_TTY_MINORS, TTY_DRIVER_REAL_RAW |
			TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK);
	if (IS_ERR(gsm_tty_driver)) {
		pr_err("gsm_init: tty allocation failed.\n");
		status = PTR_ERR(gsm_tty_driver);
		goto err_unreg_ldisc;
	}
	gsm_tty_driver->driver_name	= "gsmtty";
	gsm_tty_driver->name		= "gsmtty";
	gsm_tty_driver->major		= 0;	/* Dynamic */
	gsm_tty_driver->minor_start	= 0;
	gsm_tty_driver->type		= TTY_DRIVER_TYPE_SERIAL;
	gsm_tty_driver->subtype		= SERIAL_TYPE_NORMAL;
	gsm_tty_driver->init_termios	= tty_std_termios;
	/* Fixme */
	gsm_tty_driver->init_termios.c_lflag &= ~ECHO;
	tty_set_operations(gsm_tty_driver, &gsmtty_ops);

	if (tty_register_driver(gsm_tty_driver)) {
		pr_err("gsm_init: tty registration failed.\n");
		status = -EBUSY;
		goto err_put_driver;
	}
	pr_debug("gsm_init: loaded as %d,%d.\n",
			gsm_tty_driver->major, gsm_tty_driver->minor_start);
	return 0;
err_put_driver:
	tty_driver_kref_put(gsm_tty_driver);
err_unreg_ldisc:
	tty_unregister_ldisc(&tty_ldisc_packet);
	return status;
}

static void __exit gsm_exit(void)
{
	tty_unregister_ldisc(&tty_ldisc_packet);
	tty_unregister_driver(gsm_tty_driver);
	tty_driver_kref_put(gsm_tty_driver);
}

module_init(gsm_init);
module_exit(gsm_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_GSM0710);
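The GSMIOC_* ioctls implemented by gsmld_ioctl() and gsmtty_ioctl() above are normally exercised from a small userspace setup program before any gsmtty node is used. The sketch below follows the documented n_gsm usage pattern; the device path, the AT+CMUX command and the 127-byte MRU/MTU are modem-specific assumptions for illustration only, not values required by the driver.

/* Illustrative userspace sketch (not part of n_gsm.c): attach the n_gsm
 * line discipline to a modem tty and configure the mux as initiator.
 * "/dev/ttyUSB0" and "AT+CMUX=0" are assumptions for this example.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/gsmmux.h>
#include <linux/tty.h>

int main(void)
{
	struct gsm_config c;
	int ldisc = N_GSM0710;
	int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);

	if (fd < 0)
		return 1;
	/* Switch the modem into CMUX mode (command is modem-specific) */
	write(fd, "AT+CMUX=0\r", strlen("AT+CMUX=0\r"));
	sleep(2);
	/* Attach the n_gsm line discipline, then adjust its configuration */
	if (ioctl(fd, TIOCSETD, &ldisc) < 0)
		return 1;
	ioctl(fd, GSMIOC_GETCONF, &c);
	c.initiator = 1;	/* we open DLCI 0, see gsm_config() above */
	c.encapsulation = 0;	/* basic option framing */
	c.mru = 127;		/* example values; must suit the modem */
	c.mtu = 127;
	ioctl(fd, GSMIOC_SETCONF, &c);
	/* Keep the fd open; /dev/gsmtty* nodes can now be used for DLCIs */
	pause();
	close(fd);
	return 0;
}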
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_LINUX_DCCP_H
#define _UAPI_LINUX_DCCP_H

#include <linux/types.h>
#include <asm/byteorder.h>

/**
 * struct dccp_hdr - generic part of DCCP packet header
 *
 * @dccph_sport - Relevant port on the endpoint that sent this packet
 * @dccph_dport - Relevant port on the other endpoint
 * @dccph_doff - Data Offset from the start of the DCCP header, in 32-bit words
 * @dccph_ccval - Used by the HC-Sender CCID
 * @dccph_cscov - Parts of the packet that are covered by the Checksum field
 * @dccph_checksum - Internet checksum, depends on dccph_cscov
 * @dccph_x - 0 = 24 bit sequence number, 1 = 48
 * @dccph_type - packet type, see DCCP_PKT_ prefixed macros
 * @dccph_seq - sequence number high or low order 24 bits, depends on dccph_x
 */
struct dccp_hdr {
	__be16	dccph_sport,
		dccph_dport;
	__u8	dccph_doff;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8	dccph_cscov:4,
		dccph_ccval:4;
#elif defined(__BIG_ENDIAN_BITFIELD)
	__u8	dccph_ccval:4,
		dccph_cscov:4;
#else
#error "Adjust your <asm/byteorder.h> defines"
#endif
	__sum16	dccph_checksum;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	__u8	dccph_x:1,
		dccph_type:4,
		dccph_reserved:3;
#elif defined(__BIG_ENDIAN_BITFIELD)
	__u8	dccph_reserved:3,
		dccph_type:4,
		dccph_x:1;
#else
#error "Adjust your <asm/byteorder.h> defines"
#endif
	__u8	dccph_seq2;
	__be16	dccph_seq;
};

/**
 * struct dccp_hdr_ext - the low bits of a 48 bit seq packet
 *
 * @dccph_seq_low - low 24 bits of a 48 bit seq packet
 */
struct dccp_hdr_ext {
	__be32	dccph_seq_low;
};

/**
 * struct dccp_hdr_request - Connection initiation request header
 *
 * @dccph_req_service - Service to which the client app wants to connect
 */
struct dccp_hdr_request {
	__be32	dccph_req_service;
};

/**
 * struct dccp_hdr_ack_bits - acknowledgment bits common to most packets
 *
 * @dccph_resp_ack_nr_high - 48 bit ack number high order bits, contains GSR
 * @dccph_resp_ack_nr_low - 48 bit ack number low order bits, contains GSR
 */
struct dccp_hdr_ack_bits {
	__be16	dccph_reserved1;
	__be16	dccph_ack_nr_high;
	__be32	dccph_ack_nr_low;
};

/**
 * struct dccp_hdr_response - Connection initiation response header
 *
 * @dccph_resp_ack - 48 bit Acknowledgment Number Subheader (5.3)
 * @dccph_resp_service - Echoes the Service Code on a received DCCP-Request
 */
struct dccp_hdr_response {
	struct dccp_hdr_ack_bits	dccph_resp_ack;
	__be32				dccph_resp_service;
};

/**
 * struct dccp_hdr_reset - Unconditionally shut down a connection
 *
 * @dccph_reset_ack - 48 bit Acknowledgment Number Subheader (5.6)
 * @dccph_reset_code - one of %dccp_reset_codes
 * @dccph_reset_data - the Data 1 ... Data 3 fields from 5.6
 */
struct dccp_hdr_reset {
	struct dccp_hdr_ack_bits	dccph_reset_ack;
	__u8				dccph_reset_code,
					dccph_reset_data[3];
};

enum dccp_pkt_type {
	DCCP_PKT_REQUEST = 0,
	DCCP_PKT_RESPONSE,
	DCCP_PKT_DATA,
	DCCP_PKT_ACK,
	DCCP_PKT_DATAACK,
	DCCP_PKT_CLOSEREQ,
	DCCP_PKT_CLOSE,
	DCCP_PKT_RESET,
	DCCP_PKT_SYNC,
	DCCP_PKT_SYNCACK,
	DCCP_PKT_INVALID,
};

#define DCCP_NR_PKT_TYPES DCCP_PKT_INVALID

static inline unsigned int dccp_packet_hdr_len(const __u8 type)
{
	if (type == DCCP_PKT_DATA)
		return 0;
	if (type == DCCP_PKT_DATAACK ||
	    type == DCCP_PKT_ACK ||
	    type == DCCP_PKT_SYNC ||
	    type == DCCP_PKT_SYNCACK ||
	    type == DCCP_PKT_CLOSE ||
	    type == DCCP_PKT_CLOSEREQ)
		return sizeof(struct dccp_hdr_ack_bits);
	if (type == DCCP_PKT_REQUEST)
		return sizeof(struct dccp_hdr_request);
	if (type == DCCP_PKT_RESPONSE)
		return sizeof(struct dccp_hdr_response);
	return sizeof(struct dccp_hdr_reset);
}

enum dccp_reset_codes {
	DCCP_RESET_CODE_UNSPECIFIED = 0,
	DCCP_RESET_CODE_CLOSED,
	DCCP_RESET_CODE_ABORTED,
	DCCP_RESET_CODE_NO_CONNECTION,
	DCCP_RESET_CODE_PACKET_ERROR,
	DCCP_RESET_CODE_OPTION_ERROR,
	DCCP_RESET_CODE_MANDATORY_ERROR,
	DCCP_RESET_CODE_CONNECTION_REFUSED,
	DCCP_RESET_CODE_BAD_SERVICE_CODE,
	DCCP_RESET_CODE_TOO_BUSY,
	DCCP_RESET_CODE_BAD_INIT_COOKIE,
	DCCP_RESET_CODE_AGGRESSION_PENALTY,

	DCCP_MAX_RESET_CODES		/* Leave at the end! */
};

/* DCCP options */
enum {
	DCCPO_PADDING = 0,
	DCCPO_MANDATORY = 1,
	DCCPO_MIN_RESERVED = 3,
	DCCPO_MAX_RESERVED = 31,
	DCCPO_CHANGE_L = 32,
	DCCPO_CONFIRM_L = 33,
	DCCPO_CHANGE_R = 34,
	DCCPO_CONFIRM_R = 35,
	DCCPO_NDP_COUNT = 37,
	DCCPO_ACK_VECTOR_0 = 38,
	DCCPO_ACK_VECTOR_1 = 39,
	DCCPO_TIMESTAMP = 41,
	DCCPO_TIMESTAMP_ECHO = 42,
	DCCPO_ELAPSED_TIME = 43,
	DCCPO_MAX = 45,
	DCCPO_MIN_RX_CCID_SPECIFIC = 128,	/* from sender to receiver */
	DCCPO_MAX_RX_CCID_SPECIFIC = 191,
	DCCPO_MIN_TX_CCID_SPECIFIC = 192,	/* from receiver to sender */
	DCCPO_MAX_TX_CCID_SPECIFIC = 255,
};
/* maximum size of a single TLV-encoded DCCP option (sans type/len bytes) */
#define DCCP_SINGLE_OPT_MAXLEN	253

/* DCCP CCIDS */
enum {
	DCCPC_CCID2 = 2,
	DCCPC_CCID3 = 3,
};

/* DCCP features (RFC 4340 section 6.4) */
enum dccp_feature_numbers {
	DCCPF_RESERVED = 0,
	DCCPF_CCID = 1,
	DCCPF_SHORT_SEQNOS = 2,
	DCCPF_SEQUENCE_WINDOW = 3,
	DCCPF_ECN_INCAPABLE = 4,
	DCCPF_ACK_RATIO = 5,
	DCCPF_SEND_ACK_VECTOR = 6,
	DCCPF_SEND_NDP_COUNT = 7,
	DCCPF_MIN_CSUM_COVER = 8,
	DCCPF_DATA_CHECKSUM = 9,
	/* 10-127 reserved */
	DCCPF_MIN_CCID_SPECIFIC = 128,
	DCCPF_SEND_LEV_RATE = 192,	/* RFC 4342, sec. 8.4 */
	DCCPF_MAX_CCID_SPECIFIC = 255,
};

/* DCCP socket control message types for cmsg */
enum dccp_cmsg_type {
	DCCP_SCM_PRIORITY = 1,
	DCCP_SCM_QPOLICY_MAX = 0xFFFF,
	/* ^-- Up to here reserved exclusively for qpolicy parameters */
	DCCP_SCM_MAX
};

/* DCCP priorities for outgoing/queued packets */
enum dccp_packet_dequeueing_policy {
	DCCPQ_POLICY_SIMPLE,
	DCCPQ_POLICY_PRIO,
	DCCPQ_POLICY_MAX
};

/* DCCP socket options */
#define DCCP_SOCKOPT_PACKET_SIZE	1 /* XXX deprecated, without effect */
#define DCCP_SOCKOPT_SERVICE		2
#define DCCP_SOCKOPT_CHANGE_L		3
#define DCCP_SOCKOPT_CHANGE_R		4
#define DCCP_SOCKOPT_GET_CUR_MPS	5
#define DCCP_SOCKOPT_SERVER_TIMEWAIT	6
#define DCCP_SOCKOPT_SEND_CSCOV		10
#define DCCP_SOCKOPT_RECV_CSCOV		11
#define DCCP_SOCKOPT_AVAILABLE_CCIDS	12
#define DCCP_SOCKOPT_CCID		13
#define DCCP_SOCKOPT_TX_CCID		14
#define DCCP_SOCKOPT_RX_CCID		15
#define DCCP_SOCKOPT_QPOLICY_ID		16
#define DCCP_SOCKOPT_QPOLICY_TXQLEN	17
#define DCCP_SOCKOPT_CCID_RX_INFO	128
#define DCCP_SOCKOPT_CCID_TX_INFO	192

/* maximum number of services provided on the same listening port */
#define DCCP_SERVICE_LIST_MAX_LEN 32

#endif /* _UAPI_LINUX_DCCP_H */
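dccp_packet_hdr_len() above only accounts for the per-type subheader, so locating options and payload in a raw packet also involves the generic header, the optional 48-bit sequence extension and dccph_doff. A minimal sketch of that arithmetic, assuming the caller has already bounds-checked the buffer:

/* Illustrative sketch (not part of the UAPI header): locate the variable
 * parts of a raw DCCP packet using the definitions above. `buf` is assumed
 * to point at a complete, length-checked DCCP header.
 */
#include <stddef.h>
#include <linux/dccp.h>

/* Fixed part: generic header, optional 48-bit sequence extension, then the
 * per-type subheader returned by dccp_packet_hdr_len(); options follow it.
 */
static size_t dccp_fixed_hdr_len(const unsigned char *buf)
{
	const struct dccp_hdr *dh = (const struct dccp_hdr *)buf;
	size_t len = sizeof(struct dccp_hdr);

	if (dh->dccph_x)	/* 48-bit sequence numbers */
		len += sizeof(struct dccp_hdr_ext);

	return len + dccp_packet_hdr_len(dh->dccph_type);
}

/* dccph_doff counts header plus options in 32-bit words, so this is where
 * the application data starts.
 */
static size_t dccp_data_offset(const unsigned char *buf)
{
	const struct dccp_hdr *dh = (const struct dccp_hdr *)buf;

	return (size_t)dh->dccph_doff * 4;
}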
// SPDX-License-Identifier: GPL-2.0
#include <linux/fs.h>
#include <linux/export.h>

/*
 * fs on-disk file type to dirent file type conversion
 */
static const unsigned char fs_dtype_by_ftype[FT_MAX] = {
	[FT_UNKNOWN]	= DT_UNKNOWN,
	[FT_REG_FILE]	= DT_REG,
	[FT_DIR]	= DT_DIR,
	[FT_CHRDEV]	= DT_CHR,
	[FT_BLKDEV]	= DT_BLK,
	[FT_FIFO]	= DT_FIFO,
	[FT_SOCK]	= DT_SOCK,
	[FT_SYMLINK]	= DT_LNK
};

/**
 * fs_ftype_to_dtype() - fs on-disk file type to dirent type.
 * @filetype: The on-disk file type to convert.
 *
 * This function converts the on-disk file type value (FT_*) to the directory
 * entry type (DT_*).
 *
 * Context: Any context.
 * Return:
 * * DT_UNKNOWN		- Unknown type
 * * DT_FIFO		- FIFO
 * * DT_CHR		- Character device
 * * DT_DIR		- Directory
 * * DT_BLK		- Block device
 * * DT_REG		- Regular file
 * * DT_LNK		- Symbolic link
 * * DT_SOCK		- Local-domain socket
 */
unsigned char fs_ftype_to_dtype(unsigned int filetype)
{
	if (filetype >= FT_MAX)
		return DT_UNKNOWN;

	return fs_dtype_by_ftype[filetype];
}
EXPORT_SYMBOL_GPL(fs_ftype_to_dtype);

/*
 * dirent file type to fs on-disk file type conversion
 * Values not initialized explicitly are FT_UNKNOWN (0).
 */
static const unsigned char fs_ftype_by_dtype[DT_MAX] = {
	[DT_REG]	= FT_REG_FILE,
	[DT_DIR]	= FT_DIR,
	[DT_LNK]	= FT_SYMLINK,
	[DT_CHR]	= FT_CHRDEV,
	[DT_BLK]	= FT_BLKDEV,
	[DT_FIFO]	= FT_FIFO,
	[DT_SOCK]	= FT_SOCK,
};

/**
 * fs_umode_to_ftype() - file mode to on-disk file type.
 * @mode: The file mode to convert.
 *
 * This function converts the file mode value to the on-disk file type (FT_*).
 *
 * Context: Any context.
 * Return:
 * * FT_UNKNOWN		- Unknown type
 * * FT_REG_FILE	- Regular file
 * * FT_DIR		- Directory
 * * FT_CHRDEV		- Character device
 * * FT_BLKDEV		- Block device
 * * FT_FIFO		- FIFO
 * * FT_SOCK		- Local-domain socket
 * * FT_SYMLINK		- Symbolic link
 */
unsigned char fs_umode_to_ftype(umode_t mode)
{
	return fs_ftype_by_dtype[S_DT(mode)];
}
EXPORT_SYMBOL_GPL(fs_umode_to_ftype);

/**
 * fs_umode_to_dtype() - file mode to dirent file type.
 * @mode: The file mode to convert.
 *
 * This function converts the file mode value to the directory
 * entry type (DT_*).
 *
 * Context: Any context.
 * Return:
 * * DT_UNKNOWN		- Unknown type
 * * DT_FIFO		- FIFO
 * * DT_CHR		- Character device
 * * DT_DIR		- Directory
 * * DT_BLK		- Block device
 * * DT_REG		- Regular file
 * * DT_LNK		- Symbolic link
 * * DT_SOCK		- Local-domain socket
 */
unsigned char fs_umode_to_dtype(umode_t mode)
{
	return fs_ftype_to_dtype(fs_umode_to_ftype(mode));
}
EXPORT_SYMBOL_GPL(fs_umode_to_dtype);
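/*
 * Illustrative only -- not part of the file above.  A minimal sketch of how a
 * filesystem might use those helpers: store fs_umode_to_ftype() in the
 * on-disk entry at create time, and feed the stored FT_* value through
 * fs_ftype_to_dtype() when emitting entries in ->iterate_shared().
 * "struct foo_dirent" and its fields are hypothetical; real filesystems have
 * their own on-disk layouts.
 */
#include <linux/fs.h>

struct foo_dirent {			/* hypothetical on-disk entry */
	__le64	inode;
	__u8	file_type;		/* stores an FT_* value */
	__u8	name_len;
	char	name[];
};

/* At create time: derive the on-disk type from the inode mode. */
static void foo_set_dirent_type(struct foo_dirent *de, struct inode *inode)
{
	de->file_type = fs_umode_to_ftype(inode->i_mode);
}

/* At readdir time: convert the stored FT_* value to a DT_* value. */
static bool foo_emit_dirent(struct dir_context *ctx, struct foo_dirent *de)
{
	return dir_emit(ctx, de->name, de->name_len, le64_to_cpu(de->inode),
			fs_ftype_to_dtype(de->file_type));
}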
7 1 1 1 2 1 1 2 1 1 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 | // SPDX-License-Identifier: GPL-2.0-only #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/capability.h> #include <linux/if.h> #include <linux/inetdevice.h> #include <linux/ip.h> #include <linux/list.h> #include <linux/rculist.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/tcp.h> #include <net/ip.h> #include <net/tcp.h> #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/x_tables.h> #include <net/netfilter/nf_log.h> #include <linux/netfilter/nfnetlink_osf.h> /* * Indexed by dont-fragment bit. * It is the only constant value in the fingerprint. */ struct list_head nf_osf_fingers[2]; EXPORT_SYMBOL_GPL(nf_osf_fingers); static inline int nf_osf_ttl(const struct sk_buff *skb, int ttl_check, unsigned char f_ttl) { struct in_device *in_dev = __in_dev_get_rcu(skb->dev); const struct iphdr *ip = ip_hdr(skb); const struct in_ifaddr *ifa; int ret = 0; if (ttl_check == NF_OSF_TTL_TRUE) return ip->ttl == f_ttl; if (ttl_check == NF_OSF_TTL_NOCHECK) return 1; else if (ip->ttl <= f_ttl) return 1; in_dev_for_each_ifa_rcu(ifa, in_dev) { if (inet_ifa_match(ip->saddr, ifa)) { ret = (ip->ttl == f_ttl); break; } } return ret; } struct nf_osf_hdr_ctx { bool df; u16 window; u16 totlen; const unsigned char *optp; unsigned int optsize; }; static bool nf_osf_match_one(const struct sk_buff *skb, const struct nf_osf_user_finger *f, int ttl_check, struct nf_osf_hdr_ctx *ctx) { const __u8 *optpinit = ctx->optp; unsigned int check_WSS = 0; int fmatch = FMATCH_WRONG; int foptsize, optnum; u16 mss = 0; if (ctx->totlen != f->ss || !nf_osf_ttl(skb, ttl_check, f->ttl)) return false; /* * Should not happen if userspace parser was written correctly. 
*/ if (f->wss.wc >= OSF_WSS_MAX) return false; /* Check options */ foptsize = 0; for (optnum = 0; optnum < f->opt_num; ++optnum) foptsize += f->opt[optnum].length; if (foptsize > MAX_IPOPTLEN || ctx->optsize > MAX_IPOPTLEN || ctx->optsize != foptsize) return false; check_WSS = f->wss.wc; for (optnum = 0; optnum < f->opt_num; ++optnum) { if (f->opt[optnum].kind == *ctx->optp) { __u32 len = f->opt[optnum].length; const __u8 *optend = ctx->optp + len; fmatch = FMATCH_OK; switch (*ctx->optp) { case OSFOPT_MSS: mss = ctx->optp[3]; mss <<= 8; mss |= ctx->optp[2]; mss = ntohs((__force __be16)mss); break; case OSFOPT_TS: break; } ctx->optp = optend; } else fmatch = FMATCH_OPT_WRONG; if (fmatch != FMATCH_OK) break; } if (fmatch != FMATCH_OPT_WRONG) { fmatch = FMATCH_WRONG; switch (check_WSS) { case OSF_WSS_PLAIN: if (f->wss.val == 0 || ctx->window == f->wss.val) fmatch = FMATCH_OK; break; case OSF_WSS_MSS: /* * Some smart modems decrease mangle MSS to * SMART_MSS_2, so we check standard, decreased * and the one provided in the fingerprint MSS * values. */ #define SMART_MSS_1 1460 #define SMART_MSS_2 1448 if (ctx->window == f->wss.val * mss || ctx->window == f->wss.val * SMART_MSS_1 || ctx->window == f->wss.val * SMART_MSS_2) fmatch = FMATCH_OK; break; case OSF_WSS_MTU: if (ctx->window == f->wss.val * (mss + 40) || ctx->window == f->wss.val * (SMART_MSS_1 + 40) || ctx->window == f->wss.val * (SMART_MSS_2 + 40)) fmatch = FMATCH_OK; break; case OSF_WSS_MODULO: if ((ctx->window % f->wss.val) == 0) fmatch = FMATCH_OK; break; } } if (fmatch != FMATCH_OK) ctx->optp = optpinit; return fmatch == FMATCH_OK; } static const struct tcphdr *nf_osf_hdr_ctx_init(struct nf_osf_hdr_ctx *ctx, const struct sk_buff *skb, const struct iphdr *ip, unsigned char *opts, struct tcphdr *_tcph) { const struct tcphdr *tcp; tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), _tcph); if (!tcp) return NULL; if (!tcp->syn) return NULL; ctx->totlen = ntohs(ip->tot_len); ctx->df = ntohs(ip->frag_off) & IP_DF; ctx->window = ntohs(tcp->window); if (tcp->doff * 4 > sizeof(struct tcphdr)) { ctx->optsize = tcp->doff * 4 - sizeof(struct tcphdr); ctx->optp = skb_header_pointer(skb, ip_hdrlen(skb) + sizeof(struct tcphdr), ctx->optsize, opts); if (!ctx->optp) return NULL; } return tcp; } bool nf_osf_match(const struct sk_buff *skb, u_int8_t family, int hooknum, struct net_device *in, struct net_device *out, const struct nf_osf_info *info, struct net *net, const struct list_head *nf_osf_fingers) { const struct iphdr *ip = ip_hdr(skb); const struct nf_osf_user_finger *f; unsigned char opts[MAX_IPOPTLEN]; const struct nf_osf_finger *kf; int fcount = 0, ttl_check; int fmatch = FMATCH_WRONG; struct nf_osf_hdr_ctx ctx; const struct tcphdr *tcp; struct tcphdr _tcph; memset(&ctx, 0, sizeof(ctx)); tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph); if (!tcp) return false; ttl_check = (info->flags & NF_OSF_TTL) ? 
info->ttl : 0; list_for_each_entry_rcu(kf, &nf_osf_fingers[ctx.df], finger_entry) { f = &kf->finger; if (!(info->flags & NF_OSF_LOG) && strcmp(info->genre, f->genre)) continue; if (!nf_osf_match_one(skb, f, ttl_check, &ctx)) continue; fmatch = FMATCH_OK; fcount++; if (info->flags & NF_OSF_LOG) nf_log_packet(net, family, hooknum, skb, in, out, NULL, "%s [%s:%s] : %pI4:%d -> %pI4:%d hops=%d\n", f->genre, f->version, f->subtype, &ip->saddr, ntohs(tcp->source), &ip->daddr, ntohs(tcp->dest), f->ttl - ip->ttl); if ((info->flags & NF_OSF_LOG) && info->loglevel == NF_OSF_LOGLEVEL_FIRST) break; } if (!fcount && (info->flags & NF_OSF_LOG)) nf_log_packet(net, family, hooknum, skb, in, out, NULL, "Remote OS is not known: %pI4:%u -> %pI4:%u\n", &ip->saddr, ntohs(tcp->source), &ip->daddr, ntohs(tcp->dest)); if (fcount) fmatch = FMATCH_OK; return fmatch == FMATCH_OK; } EXPORT_SYMBOL_GPL(nf_osf_match); bool nf_osf_find(const struct sk_buff *skb, const struct list_head *nf_osf_fingers, const int ttl_check, struct nf_osf_data *data) { const struct iphdr *ip = ip_hdr(skb); const struct nf_osf_user_finger *f; unsigned char opts[MAX_IPOPTLEN]; const struct nf_osf_finger *kf; struct nf_osf_hdr_ctx ctx; const struct tcphdr *tcp; struct tcphdr _tcph; bool found = false; memset(&ctx, 0, sizeof(ctx)); tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph); if (!tcp) return false; list_for_each_entry_rcu(kf, &nf_osf_fingers[ctx.df], finger_entry) { f = &kf->finger; if (!nf_osf_match_one(skb, f, ttl_check, &ctx)) continue; data->genre = f->genre; data->version = f->version; found = true; break; } return found; } EXPORT_SYMBOL_GPL(nf_osf_find); static const struct nla_policy nfnl_osf_policy[OSF_ATTR_MAX + 1] = { [OSF_ATTR_FINGER] = { .len = sizeof(struct nf_osf_user_finger) }, }; static int nfnl_osf_add_callback(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const osf_attrs[]) { struct nf_osf_user_finger *f; struct nf_osf_finger *kf = NULL, *sf; int err = 0; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (!osf_attrs[OSF_ATTR_FINGER]) return -EINVAL; if (!(info->nlh->nlmsg_flags & NLM_F_CREATE)) return -EINVAL; f = nla_data(osf_attrs[OSF_ATTR_FINGER]); if (f->opt_num > ARRAY_SIZE(f->opt)) return -EINVAL; if (!memchr(f->genre, 0, MAXGENRELEN) || !memchr(f->subtype, 0, MAXGENRELEN) || !memchr(f->version, 0, MAXGENRELEN)) return -EINVAL; kf = kmalloc(sizeof(struct nf_osf_finger), GFP_KERNEL); if (!kf) return -ENOMEM; memcpy(&kf->finger, f, sizeof(struct nf_osf_user_finger)); list_for_each_entry(sf, &nf_osf_fingers[!!f->df], finger_entry) { if (memcmp(&sf->finger, f, sizeof(struct nf_osf_user_finger))) continue; kfree(kf); kf = NULL; if (info->nlh->nlmsg_flags & NLM_F_EXCL) err = -EEXIST; break; } /* * We are protected by nfnl mutex. */ if (kf) list_add_tail_rcu(&kf->finger_entry, &nf_osf_fingers[!!f->df]); return err; } static int nfnl_osf_remove_callback(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const osf_attrs[]) { struct nf_osf_user_finger *f; struct nf_osf_finger *sf; int err = -ENOENT; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (!osf_attrs[OSF_ATTR_FINGER]) return -EINVAL; f = nla_data(osf_attrs[OSF_ATTR_FINGER]); list_for_each_entry(sf, &nf_osf_fingers[!!f->df], finger_entry) { if (memcmp(&sf->finger, f, sizeof(struct nf_osf_user_finger))) continue; /* * We are protected by nfnl mutex. 
*/ list_del_rcu(&sf->finger_entry); kfree_rcu(sf, rcu_head); err = 0; break; } return err; } static const struct nfnl_callback nfnl_osf_callbacks[OSF_MSG_MAX] = { [OSF_MSG_ADD] = { .call = nfnl_osf_add_callback, .type = NFNL_CB_MUTEX, .attr_count = OSF_ATTR_MAX, .policy = nfnl_osf_policy, }, [OSF_MSG_REMOVE] = { .call = nfnl_osf_remove_callback, .type = NFNL_CB_MUTEX, .attr_count = OSF_ATTR_MAX, .policy = nfnl_osf_policy, }, }; static const struct nfnetlink_subsystem nfnl_osf_subsys = { .name = "osf", .subsys_id = NFNL_SUBSYS_OSF, .cb_count = OSF_MSG_MAX, .cb = nfnl_osf_callbacks, }; static int __init nfnl_osf_init(void) { int err = -EINVAL; int i; for (i = 0; i < ARRAY_SIZE(nf_osf_fingers); ++i) INIT_LIST_HEAD(&nf_osf_fingers[i]); err = nfnetlink_subsys_register(&nfnl_osf_subsys); if (err < 0) { pr_err("Failed to register OSF nsfnetlink helper (%d)\n", err); goto err_out_exit; } return 0; err_out_exit: return err; } static void __exit nfnl_osf_fini(void) { struct nf_osf_finger *f; int i; nfnetlink_subsys_unregister(&nfnl_osf_subsys); rcu_read_lock(); for (i = 0; i < ARRAY_SIZE(nf_osf_fingers); ++i) { list_for_each_entry_rcu(f, &nf_osf_fingers[i], finger_entry) { list_del_rcu(&f->finger_entry); kfree_rcu(f, rcu_head); } } rcu_read_unlock(); rcu_barrier(); } module_init(nfnl_osf_init); module_exit(nfnl_osf_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Passive OS fingerprint matching"); MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_OSF); |
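/*
 * Illustrative only -- not part of the file above.  A minimal sketch of a
 * caller: roughly how an xtables/nftables match would hand a TCP SYN to
 * nf_osf_match() against the fingerprint lists populated by the nfnetlink
 * callbacks above.  The function name is hypothetical; the xt_*() accessors
 * come from x_tables.h.
 */
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/nfnetlink_osf.h>

static bool example_osf_match(const struct sk_buff *skb,
			      struct xt_action_param *p)
{
	const struct nf_osf_info *info = p->matchinfo;

	/* Only TCP SYNs carry the options the fingerprints describe. */
	return nf_osf_match(skb, xt_family(p), xt_hooknum(p),
			    xt_in(p), xt_out(p), info, xt_net(p),
			    nf_osf_fingers);
}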
135 608 4031 1411 251 1874 88 155 2016 16 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_DCACHE_H #define __LINUX_DCACHE_H #include <linux/atomic.h> #include <linux/list.h> #include <linux/math.h> #include <linux/rculist.h> #include <linux/rculist_bl.h> #include <linux/spinlock.h> #include <linux/seqlock.h> #include <linux/cache.h> #include <linux/rcupdate.h> #include <linux/lockref.h> #include <linux/stringhash.h> #include <linux/wait.h> struct path; struct file; struct vfsmount; /* * linux/include/linux/dcache.h * * Dirent cache data structures * * (C) Copyright 1997 Thomas Schoebel-Theuer, * with heavy changes by Linus Torvalds */ #define IS_ROOT(x) ((x) == (x)->d_parent) /* The hash is always the low bits of hash_len */ #ifdef __LITTLE_ENDIAN #define HASH_LEN_DECLARE u32 hash; u32 len #define bytemask_from_count(cnt) (~(~0ul << (cnt)*8)) #else #define HASH_LEN_DECLARE u32 len; u32 hash #define bytemask_from_count(cnt) (~(~0ul >> (cnt)*8)) #endif /* * "quick string" -- eases parameter passing, but more importantly * saves "metadata" about the string (ie length and the hash). * * hash comes first so it snuggles against d_parent in the * dentry. 
*/ struct qstr { union { struct { HASH_LEN_DECLARE; }; u64 hash_len; }; const unsigned char *name; }; #define QSTR_INIT(n,l) { { { .len = l } }, .name = n } extern const struct qstr empty_name; extern const struct qstr slash_name; extern const struct qstr dotdot_name; /* * Try to keep struct dentry aligned on 64 byte cachelines (this will * give reasonable cacheline footprint with larger lines without the * large memory footprint increase). */ #ifdef CONFIG_64BIT # define DNAME_INLINE_LEN 32 /* 192 bytes */ #else # ifdef CONFIG_SMP # define DNAME_INLINE_LEN 36 /* 128 bytes */ # else # define DNAME_INLINE_LEN 40 /* 128 bytes */ # endif #endif #define d_lock d_lockref.lock struct dentry { /* RCU lookup touched fields */ unsigned int d_flags; /* protected by d_lock */ seqcount_spinlock_t d_seq; /* per dentry seqlock */ struct hlist_bl_node d_hash; /* lookup hash list */ struct dentry *d_parent; /* parent directory */ struct qstr d_name; struct inode *d_inode; /* Where the name belongs to - NULL is * negative */ unsigned char d_iname[DNAME_INLINE_LEN]; /* small names */ /* Ref lookup also touches following */ struct lockref d_lockref; /* per-dentry lock and refcount */ const struct dentry_operations *d_op; struct super_block *d_sb; /* The root of the dentry tree */ unsigned long d_time; /* used by d_revalidate */ void *d_fsdata; /* fs-specific data */ union { struct list_head d_lru; /* LRU list */ wait_queue_head_t *d_wait; /* in-lookup ones only */ }; struct list_head d_child; /* child of parent list */ struct list_head d_subdirs; /* our children */ /* * d_alias and d_rcu can share memory */ union { struct hlist_node d_alias; /* inode alias list */ struct hlist_bl_node d_in_lookup_hash; /* only for in-lookup ones */ struct rcu_head d_rcu; } d_u; } __randomize_layout; /* * dentry->d_lock spinlock nesting subclasses: * * 0: normal * 1: nested */ enum dentry_d_lock_class { DENTRY_D_LOCK_NORMAL, /* implicitly used by plain spin_lock() APIs. */ DENTRY_D_LOCK_NESTED }; struct dentry_operations { int (*d_revalidate)(struct dentry *, unsigned int); int (*d_weak_revalidate)(struct dentry *, unsigned int); int (*d_hash)(const struct dentry *, struct qstr *); int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *); int (*d_delete)(const struct dentry *); int (*d_init)(struct dentry *); void (*d_release)(struct dentry *); void (*d_prune)(struct dentry *); void (*d_iput)(struct dentry *, struct inode *); char *(*d_dname)(struct dentry *, char *, int); struct vfsmount *(*d_automount)(struct path *); int (*d_manage)(const struct path *, bool); struct dentry *(*d_real)(struct dentry *, const struct inode *); } ____cacheline_aligned; /* * Locking rules for dentry_operations callbacks are to be found in * Documentation/filesystems/locking.rst. Keep it updated! * * FUrther descriptions are found in Documentation/filesystems/vfs.rst. * Keep it updated too! */ /* d_flags entries */ #define DCACHE_OP_HASH 0x00000001 #define DCACHE_OP_COMPARE 0x00000002 #define DCACHE_OP_REVALIDATE 0x00000004 #define DCACHE_OP_DELETE 0x00000008 #define DCACHE_OP_PRUNE 0x00000010 #define DCACHE_DISCONNECTED 0x00000020 /* This dentry is possibly not currently connected to the dcache tree, in * which case its parent will either be itself, or will have this flag as * well. nfsd will not use a dentry with this bit set, but will first * endeavour to clear the bit either by discovering that it is connected, * or by performing lookup operations. 
Any filesystem which supports * nfsd_operations MUST have a lookup function which, if it finds a * directory inode with a DCACHE_DISCONNECTED dentry, will d_move that * dentry into place and return that dentry rather than the passed one, * typically using d_splice_alias. */ #define DCACHE_REFERENCED 0x00000040 /* Recently used, don't discard. */ #define DCACHE_DONTCACHE 0x00000080 /* Purge from memory on final dput() */ #define DCACHE_CANT_MOUNT 0x00000100 #define DCACHE_GENOCIDE 0x00000200 #define DCACHE_SHRINK_LIST 0x00000400 #define DCACHE_OP_WEAK_REVALIDATE 0x00000800 #define DCACHE_NFSFS_RENAMED 0x00001000 /* this dentry has been "silly renamed" and has to be deleted on the last * dput() */ #define DCACHE_COOKIE 0x00002000 /* For use by dcookie subsystem */ #define DCACHE_FSNOTIFY_PARENT_WATCHED 0x00004000 /* Parent inode is watched by some fsnotify listener */ #define DCACHE_DENTRY_KILLED 0x00008000 #define DCACHE_MOUNTED 0x00010000 /* is a mountpoint */ #define DCACHE_NEED_AUTOMOUNT 0x00020000 /* handle automount on this dir */ #define DCACHE_MANAGE_TRANSIT 0x00040000 /* manage transit from this dirent */ #define DCACHE_MANAGED_DENTRY \ (DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT) #define DCACHE_LRU_LIST 0x00080000 #define DCACHE_ENTRY_TYPE 0x00700000 #define DCACHE_MISS_TYPE 0x00000000 /* Negative dentry (maybe fallthru to nowhere) */ #define DCACHE_WHITEOUT_TYPE 0x00100000 /* Whiteout dentry (stop pathwalk) */ #define DCACHE_DIRECTORY_TYPE 0x00200000 /* Normal directory */ #define DCACHE_AUTODIR_TYPE 0x00300000 /* Lookupless directory (presumed automount) */ #define DCACHE_REGULAR_TYPE 0x00400000 /* Regular file type (or fallthru to such) */ #define DCACHE_SPECIAL_TYPE 0x00500000 /* Other file type (or fallthru to such) */ #define DCACHE_SYMLINK_TYPE 0x00600000 /* Symlink (or fallthru to such) */ #define DCACHE_MAY_FREE 0x00800000 #define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */ #define DCACHE_NOKEY_NAME 0x02000000 /* Encrypted name encoded without key */ #define DCACHE_OP_REAL 0x04000000 #define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */ #define DCACHE_DENTRY_CURSOR 0x20000000 #define DCACHE_NORCU 0x40000000 /* No RCU delay for freeing */ extern seqlock_t rename_lock; /* * These are the low-level FS interfaces to the dcache.. 
*/ extern void d_instantiate(struct dentry *, struct inode *); extern void d_instantiate_new(struct dentry *, struct inode *); extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *); extern void __d_drop(struct dentry *dentry); extern void d_drop(struct dentry *dentry); extern void d_delete(struct dentry *); extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op); /* allocate/de-allocate */ extern struct dentry * d_alloc(struct dentry *, const struct qstr *); extern struct dentry * d_alloc_anon(struct super_block *); extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *, wait_queue_head_t *); extern struct dentry * d_splice_alias(struct inode *, struct dentry *); extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *); extern bool d_same_name(const struct dentry *dentry, const struct dentry *parent, const struct qstr *name); extern struct dentry * d_exact_alias(struct dentry *, struct inode *); extern struct dentry *d_find_any_alias(struct inode *inode); extern struct dentry * d_obtain_alias(struct inode *); extern struct dentry * d_obtain_root(struct inode *); extern void shrink_dcache_sb(struct super_block *); extern void shrink_dcache_parent(struct dentry *); extern void shrink_dcache_for_umount(struct super_block *); extern void d_invalidate(struct dentry *); /* only used at mount-time */ extern struct dentry * d_make_root(struct inode *); /* <clickety>-<click> the ramfs-type tree */ extern void d_genocide(struct dentry *); extern void d_mark_tmpfile(struct file *, struct inode *); extern void d_tmpfile(struct file *, struct inode *); extern struct dentry *d_find_alias(struct inode *); extern void d_prune_aliases(struct inode *); extern struct dentry *d_find_alias_rcu(struct inode *); /* test whether we have any submounts in a subdir tree */ extern int path_has_submounts(const struct path *); /* * This adds the entry to the hash queues. */ extern void d_rehash(struct dentry *); extern void d_add(struct dentry *, struct inode *); /* used for rename() and baskets */ extern void d_move(struct dentry *, struct dentry *); extern void d_exchange(struct dentry *, struct dentry *); extern struct dentry *d_ancestor(struct dentry *, struct dentry *); /* appendix may either be NULL or be used for transname suffixes */ extern struct dentry *d_lookup(const struct dentry *, const struct qstr *); extern struct dentry *d_hash_and_lookup(struct dentry *, struct qstr *); extern struct dentry *__d_lookup(const struct dentry *, const struct qstr *); extern struct dentry *__d_lookup_rcu(const struct dentry *parent, const struct qstr *name, unsigned *seq); static inline unsigned d_count(const struct dentry *dentry) { return dentry->d_lockref.count; } /* * helper function for dentry_operations.d_dname() members */ extern __printf(3, 4) char *dynamic_dname(char *, int, const char *, ...); extern char *__d_path(const struct path *, const struct path *, char *, int); extern char *d_absolute_path(const struct path *, char *, int); extern char *d_path(const struct path *, char *, int); extern char *dentry_path_raw(const struct dentry *, char *, int); extern char *dentry_path(const struct dentry *, char *, int); /* Allocation counts.. */ /** * dget, dget_dlock - get a reference to a dentry * @dentry: dentry to get a reference to * * Given a dentry or %NULL pointer increment the reference count * if appropriate and return the dentry. 
A dentry will not be * destroyed when it has references. */ static inline struct dentry *dget_dlock(struct dentry *dentry) { if (dentry) dentry->d_lockref.count++; return dentry; } static inline struct dentry *dget(struct dentry *dentry) { if (dentry) lockref_get(&dentry->d_lockref); return dentry; } extern struct dentry *dget_parent(struct dentry *dentry); /** * d_unhashed - is dentry hashed * @dentry: entry to check * * Returns true if the dentry passed is not currently hashed. */ static inline int d_unhashed(const struct dentry *dentry) { return hlist_bl_unhashed(&dentry->d_hash); } static inline int d_unlinked(const struct dentry *dentry) { return d_unhashed(dentry) && !IS_ROOT(dentry); } static inline int cant_mount(const struct dentry *dentry) { return (dentry->d_flags & DCACHE_CANT_MOUNT); } static inline void dont_mount(struct dentry *dentry) { spin_lock(&dentry->d_lock); dentry->d_flags |= DCACHE_CANT_MOUNT; spin_unlock(&dentry->d_lock); } extern void __d_lookup_unhash_wake(struct dentry *dentry); static inline int d_in_lookup(const struct dentry *dentry) { return dentry->d_flags & DCACHE_PAR_LOOKUP; } static inline void d_lookup_done(struct dentry *dentry) { if (unlikely(d_in_lookup(dentry))) __d_lookup_unhash_wake(dentry); } extern void dput(struct dentry *); static inline bool d_managed(const struct dentry *dentry) { return dentry->d_flags & DCACHE_MANAGED_DENTRY; } static inline bool d_mountpoint(const struct dentry *dentry) { return dentry->d_flags & DCACHE_MOUNTED; } /* * Directory cache entry type accessor functions. */ static inline unsigned __d_entry_type(const struct dentry *dentry) { return dentry->d_flags & DCACHE_ENTRY_TYPE; } static inline bool d_is_miss(const struct dentry *dentry) { return __d_entry_type(dentry) == DCACHE_MISS_TYPE; } static inline bool d_is_whiteout(const struct dentry *dentry) { return __d_entry_type(dentry) == DCACHE_WHITEOUT_TYPE; } static inline bool d_can_lookup(const struct dentry *dentry) { return __d_entry_type(dentry) == DCACHE_DIRECTORY_TYPE; } static inline bool d_is_autodir(const struct dentry *dentry) { return __d_entry_type(dentry) == DCACHE_AUTODIR_TYPE; } static inline bool d_is_dir(const struct dentry *dentry) { return d_can_lookup(dentry) || d_is_autodir(dentry); } static inline bool d_is_symlink(const struct dentry *dentry) { return __d_entry_type(dentry) == DCACHE_SYMLINK_TYPE; } static inline bool d_is_reg(const struct dentry *dentry) { return __d_entry_type(dentry) == DCACHE_REGULAR_TYPE; } static inline bool d_is_special(const struct dentry *dentry) { return __d_entry_type(dentry) == DCACHE_SPECIAL_TYPE; } static inline bool d_is_file(const struct dentry *dentry) { return d_is_reg(dentry) || d_is_special(dentry); } static inline bool d_is_negative(const struct dentry *dentry) { // TODO: check d_is_whiteout(dentry) also. return d_is_miss(dentry); } static inline bool d_flags_negative(unsigned flags) { return (flags & DCACHE_ENTRY_TYPE) == DCACHE_MISS_TYPE; } static inline bool d_is_positive(const struct dentry *dentry) { return !d_is_negative(dentry); } /** * d_really_is_negative - Determine if a dentry is really negative (ignoring fallthroughs) * @dentry: The dentry in question * * Returns true if the dentry represents either an absent name or a name that * doesn't map to an inode (ie. ->d_inode is NULL). The dentry could represent * a true miss, a whiteout that isn't represented by a 0,0 chardev or a * fallthrough marker in an opaque directory. * * Note! 
(1) This should be used *only* by a filesystem to examine its own * dentries. It should not be used to look at some other filesystem's * dentries. (2) It should also be used in combination with d_inode() to get * the inode. (3) The dentry may have something attached to ->d_lower and the * type field of the flags may be set to something other than miss or whiteout. */ static inline bool d_really_is_negative(const struct dentry *dentry) { return dentry->d_inode == NULL; } /** * d_really_is_positive - Determine if a dentry is really positive (ignoring fallthroughs) * @dentry: The dentry in question * * Returns true if the dentry represents a name that maps to an inode * (ie. ->d_inode is not NULL). The dentry might still represent a whiteout if * that is represented on medium as a 0,0 chardev. * * Note! (1) This should be used *only* by a filesystem to examine its own * dentries. It should not be used to look at some other filesystem's * dentries. (2) It should also be used in combination with d_inode() to get * the inode. */ static inline bool d_really_is_positive(const struct dentry *dentry) { return dentry->d_inode != NULL; } static inline int simple_positive(const struct dentry *dentry) { return d_really_is_positive(dentry) && !d_unhashed(dentry); } extern void d_set_fallthru(struct dentry *dentry); static inline bool d_is_fallthru(const struct dentry *dentry) { return dentry->d_flags & DCACHE_FALLTHRU; } extern int sysctl_vfs_cache_pressure; static inline unsigned long vfs_pressure_ratio(unsigned long val) { return mult_frac(val, sysctl_vfs_cache_pressure, 100); } /** * d_inode - Get the actual inode of this dentry * @dentry: The dentry to query * * This is the helper normal filesystems should use to get at their own inodes * in their own dentries and ignore the layering superimposed upon them. */ static inline struct inode *d_inode(const struct dentry *dentry) { return dentry->d_inode; } /** * d_inode_rcu - Get the actual inode of this dentry with READ_ONCE() * @dentry: The dentry to query * * This is the helper normal filesystems should use to get at their own inodes * in their own dentries and ignore the layering superimposed upon them. */ static inline struct inode *d_inode_rcu(const struct dentry *dentry) { return READ_ONCE(dentry->d_inode); } /** * d_backing_inode - Get upper or lower inode we should be using * @upper: The upper layer * * This is the helper that should be used to get at the inode that will be used * if this dentry were to be opened as a file. The inode may be on the upper * dentry or it may be on a lower dentry pinned by the upper. * * Normal filesystems should not use this to access their own inodes. */ static inline struct inode *d_backing_inode(const struct dentry *upper) { struct inode *inode = upper->d_inode; return inode; } /** * d_backing_dentry - Get upper or lower dentry we should be using * @upper: The upper layer * * This is the helper that should be used to get the dentry of the inode that * will be used if this dentry were opened as a file. It may be the upper * dentry or it may be a lower dentry pinned by the upper. * * Normal filesystems should not use this to access their own dentries. */ static inline struct dentry *d_backing_dentry(struct dentry *upper) { return upper; } /** * d_real - Return the real dentry * @dentry: the dentry to query * @inode: inode to select the dentry from multiple layers (can be NULL) * * If dentry is on a union/overlay, then return the underlying, real dentry. * Otherwise return the dentry itself. 
* * See also: Documentation/filesystems/vfs.rst */ static inline struct dentry *d_real(struct dentry *dentry, const struct inode *inode) { if (unlikely(dentry->d_flags & DCACHE_OP_REAL)) return dentry->d_op->d_real(dentry, inode); else return dentry; } /** * d_real_inode - Return the real inode * @dentry: The dentry to query * * If dentry is on a union/overlay, then return the underlying, real inode. * Otherwise return d_inode(). */ static inline struct inode *d_real_inode(const struct dentry *dentry) { /* This usage of d_real() results in const dentry */ return d_backing_inode(d_real((struct dentry *) dentry, NULL)); } struct name_snapshot { struct qstr name; unsigned char inline_name[DNAME_INLINE_LEN]; }; void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *); void release_dentry_name_snapshot(struct name_snapshot *); #endif /* __LINUX_DCACHE_H */ |
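/*
 * Illustrative only -- not part of this header.  A minimal sketch of the
 * common ->lookup() pattern built on the declarations above: look the name
 * up in the filesystem's own metadata, then let d_splice_alias() attach the
 * inode (or hash a negative dentry when inode is NULL), possibly returning a
 * different dentry to use.  foo_inode_by_name() and the name-length limit
 * are hypothetical.
 */
#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/fs.h>

static struct inode *foo_inode_by_name(struct inode *dir,
				       const struct qstr *name); /* hypothetical */

static struct dentry *foo_lookup(struct inode *dir, struct dentry *dentry,
				 unsigned int flags)
{
	struct inode *inode;

	if (dentry->d_name.len > 255)		/* hypothetical on-disk limit */
		return ERR_PTR(-ENAMETOOLONG);

	inode = foo_inode_by_name(dir, &dentry->d_name);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	/* A NULL inode is fine: d_splice_alias() hashes a negative dentry. */
	return d_splice_alias(inode, dentry);
}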
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Portions of this file
 * Copyright(c) 2016-2017 Intel Deutschland GmbH
 * Copyright (C) 2018, 2021-2023 Intel Corporation
 */
#ifndef __CFG80211_RDEV_OPS
#define __CFG80211_RDEV_OPS

#include <linux/rtnetlink.h>
#include <net/cfg80211.h>
#include "core.h"
#include "trace.h"

static inline int rdev_suspend(struct cfg80211_registered_device *rdev,
			       struct cfg80211_wowlan *wowlan)
{
	int ret;
	trace_rdev_suspend(&rdev->wiphy, wowlan);
	ret = rdev->ops->suspend(&rdev->wiphy, wowlan);
trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_resume(struct cfg80211_registered_device *rdev) { int ret; trace_rdev_resume(&rdev->wiphy); ret = rdev->ops->resume(&rdev->wiphy); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_set_wakeup(struct cfg80211_registered_device *rdev, bool enabled) { trace_rdev_set_wakeup(&rdev->wiphy, enabled); rdev->ops->set_wakeup(&rdev->wiphy, enabled); trace_rdev_return_void(&rdev->wiphy); } static inline struct wireless_dev *rdev_add_virtual_intf(struct cfg80211_registered_device *rdev, char *name, unsigned char name_assign_type, enum nl80211_iftype type, struct vif_params *params) { struct wireless_dev *ret; trace_rdev_add_virtual_intf(&rdev->wiphy, name, type); ret = rdev->ops->add_virtual_intf(&rdev->wiphy, name, name_assign_type, type, params); trace_rdev_return_wdev(&rdev->wiphy, ret); return ret; } static inline int rdev_del_virtual_intf(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { int ret; trace_rdev_del_virtual_intf(&rdev->wiphy, wdev); ret = rdev->ops->del_virtual_intf(&rdev->wiphy, wdev); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_change_virtual_intf(struct cfg80211_registered_device *rdev, struct net_device *dev, enum nl80211_iftype type, struct vif_params *params) { int ret; trace_rdev_change_virtual_intf(&rdev->wiphy, dev, type); ret = rdev->ops->change_virtual_intf(&rdev->wiphy, dev, type, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_add_key(struct cfg80211_registered_device *rdev, struct net_device *netdev, int link_id, u8 key_index, bool pairwise, const u8 *mac_addr, struct key_params *params) { int ret; trace_rdev_add_key(&rdev->wiphy, netdev, link_id, key_index, pairwise, mac_addr, params->mode); ret = rdev->ops->add_key(&rdev->wiphy, netdev, link_id, key_index, pairwise, mac_addr, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_get_key(struct cfg80211_registered_device *rdev, struct net_device *netdev, int link_id, u8 key_index, bool pairwise, const u8 *mac_addr, void *cookie, void (*callback)(void *cookie, struct key_params*)) { int ret; trace_rdev_get_key(&rdev->wiphy, netdev, link_id, key_index, pairwise, mac_addr); ret = rdev->ops->get_key(&rdev->wiphy, netdev, link_id, key_index, pairwise, mac_addr, cookie, callback); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_del_key(struct cfg80211_registered_device *rdev, struct net_device *netdev, int link_id, u8 key_index, bool pairwise, const u8 *mac_addr) { int ret; trace_rdev_del_key(&rdev->wiphy, netdev, link_id, key_index, pairwise, mac_addr); ret = rdev->ops->del_key(&rdev->wiphy, netdev, link_id, key_index, pairwise, mac_addr); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_default_key(struct cfg80211_registered_device *rdev, struct net_device *netdev, int link_id, u8 key_index, bool unicast, bool multicast) { int ret; trace_rdev_set_default_key(&rdev->wiphy, netdev, link_id, key_index, unicast, multicast); ret = rdev->ops->set_default_key(&rdev->wiphy, netdev, link_id, key_index, unicast, multicast); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_default_mgmt_key(struct cfg80211_registered_device *rdev, struct net_device *netdev, int link_id, u8 key_index) { int ret; trace_rdev_set_default_mgmt_key(&rdev->wiphy, netdev, link_id, key_index); ret = 
rdev->ops->set_default_mgmt_key(&rdev->wiphy, netdev, link_id, key_index); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_default_beacon_key(struct cfg80211_registered_device *rdev, struct net_device *netdev, int link_id, u8 key_index) { int ret; trace_rdev_set_default_beacon_key(&rdev->wiphy, netdev, link_id, key_index); ret = rdev->ops->set_default_beacon_key(&rdev->wiphy, netdev, link_id, key_index); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_start_ap(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_ap_settings *settings) { int ret; trace_rdev_start_ap(&rdev->wiphy, dev, settings); ret = rdev->ops->start_ap(&rdev->wiphy, dev, settings); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_change_beacon(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_ap_update *info) { int ret; trace_rdev_change_beacon(&rdev->wiphy, dev, info); ret = rdev->ops->change_beacon(&rdev->wiphy, dev, info); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_stop_ap(struct cfg80211_registered_device *rdev, struct net_device *dev, unsigned int link_id) { int ret; trace_rdev_stop_ap(&rdev->wiphy, dev, link_id); ret = rdev->ops->stop_ap(&rdev->wiphy, dev, link_id); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_add_station(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 *mac, struct station_parameters *params) { int ret; trace_rdev_add_station(&rdev->wiphy, dev, mac, params); ret = rdev->ops->add_station(&rdev->wiphy, dev, mac, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_del_station(struct cfg80211_registered_device *rdev, struct net_device *dev, struct station_del_parameters *params) { int ret; trace_rdev_del_station(&rdev->wiphy, dev, params); ret = rdev->ops->del_station(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_change_station(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 *mac, struct station_parameters *params) { int ret; trace_rdev_change_station(&rdev->wiphy, dev, mac, params); ret = rdev->ops->change_station(&rdev->wiphy, dev, mac, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_get_station(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *mac, struct station_info *sinfo) { int ret; trace_rdev_get_station(&rdev->wiphy, dev, mac); ret = rdev->ops->get_station(&rdev->wiphy, dev, mac, sinfo); trace_rdev_return_int_station_info(&rdev->wiphy, ret, sinfo); return ret; } static inline int rdev_dump_station(struct cfg80211_registered_device *rdev, struct net_device *dev, int idx, u8 *mac, struct station_info *sinfo) { int ret; trace_rdev_dump_station(&rdev->wiphy, dev, idx, mac); ret = rdev->ops->dump_station(&rdev->wiphy, dev, idx, mac, sinfo); trace_rdev_return_int_station_info(&rdev->wiphy, ret, sinfo); return ret; } static inline int rdev_add_mpath(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 *dst, u8 *next_hop) { int ret; trace_rdev_add_mpath(&rdev->wiphy, dev, dst, next_hop); ret = rdev->ops->add_mpath(&rdev->wiphy, dev, dst, next_hop); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_del_mpath(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 *dst) { int ret; trace_rdev_del_mpath(&rdev->wiphy, dev, dst); ret 
= rdev->ops->del_mpath(&rdev->wiphy, dev, dst); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_change_mpath(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 *dst, u8 *next_hop) { int ret; trace_rdev_change_mpath(&rdev->wiphy, dev, dst, next_hop); ret = rdev->ops->change_mpath(&rdev->wiphy, dev, dst, next_hop); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_get_mpath(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 *dst, u8 *next_hop, struct mpath_info *pinfo) { int ret; trace_rdev_get_mpath(&rdev->wiphy, dev, dst, next_hop); ret = rdev->ops->get_mpath(&rdev->wiphy, dev, dst, next_hop, pinfo); trace_rdev_return_int_mpath_info(&rdev->wiphy, ret, pinfo); return ret; } static inline int rdev_get_mpp(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 *dst, u8 *mpp, struct mpath_info *pinfo) { int ret; trace_rdev_get_mpp(&rdev->wiphy, dev, dst, mpp); ret = rdev->ops->get_mpp(&rdev->wiphy, dev, dst, mpp, pinfo); trace_rdev_return_int_mpath_info(&rdev->wiphy, ret, pinfo); return ret; } static inline int rdev_dump_mpath(struct cfg80211_registered_device *rdev, struct net_device *dev, int idx, u8 *dst, u8 *next_hop, struct mpath_info *pinfo) { int ret; trace_rdev_dump_mpath(&rdev->wiphy, dev, idx, dst, next_hop); ret = rdev->ops->dump_mpath(&rdev->wiphy, dev, idx, dst, next_hop, pinfo); trace_rdev_return_int_mpath_info(&rdev->wiphy, ret, pinfo); return ret; } static inline int rdev_dump_mpp(struct cfg80211_registered_device *rdev, struct net_device *dev, int idx, u8 *dst, u8 *mpp, struct mpath_info *pinfo) { int ret; trace_rdev_dump_mpp(&rdev->wiphy, dev, idx, dst, mpp); ret = rdev->ops->dump_mpp(&rdev->wiphy, dev, idx, dst, mpp, pinfo); trace_rdev_return_int_mpath_info(&rdev->wiphy, ret, pinfo); return ret; } static inline int rdev_get_mesh_config(struct cfg80211_registered_device *rdev, struct net_device *dev, struct mesh_config *conf) { int ret; trace_rdev_get_mesh_config(&rdev->wiphy, dev); ret = rdev->ops->get_mesh_config(&rdev->wiphy, dev, conf); trace_rdev_return_int_mesh_config(&rdev->wiphy, ret, conf); return ret; } static inline int rdev_update_mesh_config(struct cfg80211_registered_device *rdev, struct net_device *dev, u32 mask, const struct mesh_config *nconf) { int ret; trace_rdev_update_mesh_config(&rdev->wiphy, dev, mask, nconf); ret = rdev->ops->update_mesh_config(&rdev->wiphy, dev, mask, nconf); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_join_mesh(struct cfg80211_registered_device *rdev, struct net_device *dev, const struct mesh_config *conf, const struct mesh_setup *setup) { int ret; trace_rdev_join_mesh(&rdev->wiphy, dev, conf, setup); ret = rdev->ops->join_mesh(&rdev->wiphy, dev, conf, setup); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_leave_mesh(struct cfg80211_registered_device *rdev, struct net_device *dev) { int ret; trace_rdev_leave_mesh(&rdev->wiphy, dev); ret = rdev->ops->leave_mesh(&rdev->wiphy, dev); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_join_ocb(struct cfg80211_registered_device *rdev, struct net_device *dev, struct ocb_setup *setup) { int ret; trace_rdev_join_ocb(&rdev->wiphy, dev, setup); ret = rdev->ops->join_ocb(&rdev->wiphy, dev, setup); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_leave_ocb(struct cfg80211_registered_device *rdev, struct net_device *dev) { int ret; trace_rdev_leave_ocb(&rdev->wiphy, 
dev); ret = rdev->ops->leave_ocb(&rdev->wiphy, dev); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_change_bss(struct cfg80211_registered_device *rdev, struct net_device *dev, struct bss_parameters *params) { int ret; trace_rdev_change_bss(&rdev->wiphy, dev, params); ret = rdev->ops->change_bss(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_inform_bss(struct cfg80211_registered_device *rdev, struct cfg80211_bss *bss, const struct cfg80211_bss_ies *ies, void *drv_data) { trace_rdev_inform_bss(&rdev->wiphy, bss); if (rdev->ops->inform_bss) rdev->ops->inform_bss(&rdev->wiphy, bss, ies, drv_data); trace_rdev_return_void(&rdev->wiphy); } static inline int rdev_set_txq_params(struct cfg80211_registered_device *rdev, struct net_device *dev, struct ieee80211_txq_params *params) { int ret; trace_rdev_set_txq_params(&rdev->wiphy, dev, params); ret = rdev->ops->set_txq_params(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_libertas_set_mesh_channel(struct cfg80211_registered_device *rdev, struct net_device *dev, struct ieee80211_channel *chan) { int ret; trace_rdev_libertas_set_mesh_channel(&rdev->wiphy, dev, chan); ret = rdev->ops->libertas_set_mesh_channel(&rdev->wiphy, dev, chan); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_monitor_channel(struct cfg80211_registered_device *rdev, struct cfg80211_chan_def *chandef) { int ret; trace_rdev_set_monitor_channel(&rdev->wiphy, chandef); ret = rdev->ops->set_monitor_channel(&rdev->wiphy, chandef); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_scan(struct cfg80211_registered_device *rdev, struct cfg80211_scan_request *request) { int ret; trace_rdev_scan(&rdev->wiphy, request); ret = rdev->ops->scan(&rdev->wiphy, request); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_abort_scan(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { trace_rdev_abort_scan(&rdev->wiphy, wdev); rdev->ops->abort_scan(&rdev->wiphy, wdev); trace_rdev_return_void(&rdev->wiphy); } static inline int rdev_auth(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_auth_request *req) { int ret; trace_rdev_auth(&rdev->wiphy, dev, req); ret = rdev->ops->auth(&rdev->wiphy, dev, req); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_assoc(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_assoc_request *req) { int ret; trace_rdev_assoc(&rdev->wiphy, dev, req); ret = rdev->ops->assoc(&rdev->wiphy, dev, req); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_deauth(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_deauth_request *req) { int ret; trace_rdev_deauth(&rdev->wiphy, dev, req); ret = rdev->ops->deauth(&rdev->wiphy, dev, req); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_disassoc(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_disassoc_request *req) { int ret; trace_rdev_disassoc(&rdev->wiphy, dev, req); ret = rdev->ops->disassoc(&rdev->wiphy, dev, req); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_connect(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_connect_params *sme) { int ret; trace_rdev_connect(&rdev->wiphy, dev, sme); ret = 
rdev->ops->connect(&rdev->wiphy, dev, sme); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_update_connect_params(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_connect_params *sme, u32 changed) { int ret; trace_rdev_update_connect_params(&rdev->wiphy, dev, sme, changed); ret = rdev->ops->update_connect_params(&rdev->wiphy, dev, sme, changed); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_disconnect(struct cfg80211_registered_device *rdev, struct net_device *dev, u16 reason_code) { int ret; trace_rdev_disconnect(&rdev->wiphy, dev, reason_code); ret = rdev->ops->disconnect(&rdev->wiphy, dev, reason_code); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_join_ibss(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_ibss_params *params) { int ret; trace_rdev_join_ibss(&rdev->wiphy, dev, params); ret = rdev->ops->join_ibss(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_leave_ibss(struct cfg80211_registered_device *rdev, struct net_device *dev) { int ret; trace_rdev_leave_ibss(&rdev->wiphy, dev); ret = rdev->ops->leave_ibss(&rdev->wiphy, dev); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_wiphy_params(struct cfg80211_registered_device *rdev, u32 changed) { int ret; if (!rdev->ops->set_wiphy_params) return -EOPNOTSUPP; trace_rdev_set_wiphy_params(&rdev->wiphy, changed); ret = rdev->ops->set_wiphy_params(&rdev->wiphy, changed); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_tx_power(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, enum nl80211_tx_power_setting type, int mbm) { int ret; trace_rdev_set_tx_power(&rdev->wiphy, wdev, type, mbm); ret = rdev->ops->set_tx_power(&rdev->wiphy, wdev, type, mbm); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_get_tx_power(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, int *dbm) { int ret; trace_rdev_get_tx_power(&rdev->wiphy, wdev); ret = rdev->ops->get_tx_power(&rdev->wiphy, wdev, dbm); trace_rdev_return_int_int(&rdev->wiphy, ret, *dbm); return ret; } static inline int rdev_set_multicast_to_unicast(struct cfg80211_registered_device *rdev, struct net_device *dev, const bool enabled) { int ret; trace_rdev_set_multicast_to_unicast(&rdev->wiphy, dev, enabled); ret = rdev->ops->set_multicast_to_unicast(&rdev->wiphy, dev, enabled); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_get_txq_stats(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_txq_stats *txqstats) { int ret; trace_rdev_get_txq_stats(&rdev->wiphy, wdev); ret = rdev->ops->get_txq_stats(&rdev->wiphy, wdev, txqstats); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_rfkill_poll(struct cfg80211_registered_device *rdev) { trace_rdev_rfkill_poll(&rdev->wiphy); rdev->ops->rfkill_poll(&rdev->wiphy); trace_rdev_return_void(&rdev->wiphy); } #ifdef CONFIG_NL80211_TESTMODE static inline int rdev_testmode_cmd(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, void *data, int len) { int ret; trace_rdev_testmode_cmd(&rdev->wiphy, wdev); ret = rdev->ops->testmode_cmd(&rdev->wiphy, wdev, data, len); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_testmode_dump(struct cfg80211_registered_device *rdev, struct sk_buff 
*skb, struct netlink_callback *cb, void *data, int len) { int ret; trace_rdev_testmode_dump(&rdev->wiphy); ret = rdev->ops->testmode_dump(&rdev->wiphy, skb, cb, data, len); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } #endif static inline int rdev_set_bitrate_mask(struct cfg80211_registered_device *rdev, struct net_device *dev, unsigned int link_id, const u8 *peer, const struct cfg80211_bitrate_mask *mask) { int ret; trace_rdev_set_bitrate_mask(&rdev->wiphy, dev, link_id, peer, mask); ret = rdev->ops->set_bitrate_mask(&rdev->wiphy, dev, link_id, peer, mask); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_dump_survey(struct cfg80211_registered_device *rdev, struct net_device *netdev, int idx, struct survey_info *info) { int ret; trace_rdev_dump_survey(&rdev->wiphy, netdev, idx); ret = rdev->ops->dump_survey(&rdev->wiphy, netdev, idx, info); if (ret < 0) trace_rdev_return_int(&rdev->wiphy, ret); else trace_rdev_return_int_survey_info(&rdev->wiphy, ret, info); return ret; } static inline int rdev_set_pmksa(struct cfg80211_registered_device *rdev, struct net_device *netdev, struct cfg80211_pmksa *pmksa) { int ret; trace_rdev_set_pmksa(&rdev->wiphy, netdev, pmksa); ret = rdev->ops->set_pmksa(&rdev->wiphy, netdev, pmksa); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_del_pmksa(struct cfg80211_registered_device *rdev, struct net_device *netdev, struct cfg80211_pmksa *pmksa) { int ret; trace_rdev_del_pmksa(&rdev->wiphy, netdev, pmksa); ret = rdev->ops->del_pmksa(&rdev->wiphy, netdev, pmksa); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_flush_pmksa(struct cfg80211_registered_device *rdev, struct net_device *netdev) { int ret; trace_rdev_flush_pmksa(&rdev->wiphy, netdev); ret = rdev->ops->flush_pmksa(&rdev->wiphy, netdev); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_remain_on_channel(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct ieee80211_channel *chan, unsigned int duration, u64 *cookie) { int ret; trace_rdev_remain_on_channel(&rdev->wiphy, wdev, chan, duration); ret = rdev->ops->remain_on_channel(&rdev->wiphy, wdev, chan, duration, cookie); trace_rdev_return_int_cookie(&rdev->wiphy, ret, *cookie); return ret; } static inline int rdev_cancel_remain_on_channel(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, u64 cookie) { int ret; trace_rdev_cancel_remain_on_channel(&rdev->wiphy, wdev, cookie); ret = rdev->ops->cancel_remain_on_channel(&rdev->wiphy, wdev, cookie); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_mgmt_tx(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_mgmt_tx_params *params, u64 *cookie) { int ret; trace_rdev_mgmt_tx(&rdev->wiphy, wdev, params); ret = rdev->ops->mgmt_tx(&rdev->wiphy, wdev, params, cookie); trace_rdev_return_int_cookie(&rdev->wiphy, ret, *cookie); return ret; } static inline int rdev_tx_control_port(struct cfg80211_registered_device *rdev, struct net_device *dev, const void *buf, size_t len, const u8 *dest, __be16 proto, const bool noencrypt, int link, u64 *cookie) { int ret; trace_rdev_tx_control_port(&rdev->wiphy, dev, buf, len, dest, proto, noencrypt, link); ret = rdev->ops->tx_control_port(&rdev->wiphy, dev, buf, len, dest, proto, noencrypt, link, cookie); if (cookie) trace_rdev_return_int_cookie(&rdev->wiphy, ret, *cookie); else trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int 
rdev_mgmt_tx_cancel_wait(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, u64 cookie) { int ret; trace_rdev_mgmt_tx_cancel_wait(&rdev->wiphy, wdev, cookie); ret = rdev->ops->mgmt_tx_cancel_wait(&rdev->wiphy, wdev, cookie); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_power_mgmt(struct cfg80211_registered_device *rdev, struct net_device *dev, bool enabled, int timeout) { int ret; trace_rdev_set_power_mgmt(&rdev->wiphy, dev, enabled, timeout); ret = rdev->ops->set_power_mgmt(&rdev->wiphy, dev, enabled, timeout); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_cqm_rssi_config(struct cfg80211_registered_device *rdev, struct net_device *dev, s32 rssi_thold, u32 rssi_hyst) { int ret; trace_rdev_set_cqm_rssi_config(&rdev->wiphy, dev, rssi_thold, rssi_hyst); ret = rdev->ops->set_cqm_rssi_config(&rdev->wiphy, dev, rssi_thold, rssi_hyst); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_cqm_rssi_range_config(struct cfg80211_registered_device *rdev, struct net_device *dev, s32 low, s32 high) { int ret; trace_rdev_set_cqm_rssi_range_config(&rdev->wiphy, dev, low, high); ret = rdev->ops->set_cqm_rssi_range_config(&rdev->wiphy, dev, low, high); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_cqm_txe_config(struct cfg80211_registered_device *rdev, struct net_device *dev, u32 rate, u32 pkts, u32 intvl) { int ret; trace_rdev_set_cqm_txe_config(&rdev->wiphy, dev, rate, pkts, intvl); ret = rdev->ops->set_cqm_txe_config(&rdev->wiphy, dev, rate, pkts, intvl); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_update_mgmt_frame_registrations(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct mgmt_frame_regs *upd) { might_sleep(); trace_rdev_update_mgmt_frame_registrations(&rdev->wiphy, wdev, upd); if (rdev->ops->update_mgmt_frame_registrations) rdev->ops->update_mgmt_frame_registrations(&rdev->wiphy, wdev, upd); trace_rdev_return_void(&rdev->wiphy); } static inline int rdev_set_antenna(struct cfg80211_registered_device *rdev, u32 tx_ant, u32 rx_ant) { int ret; trace_rdev_set_antenna(&rdev->wiphy, tx_ant, rx_ant); ret = rdev->ops->set_antenna(&rdev->wiphy, tx_ant, rx_ant); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_get_antenna(struct cfg80211_registered_device *rdev, u32 *tx_ant, u32 *rx_ant) { int ret; trace_rdev_get_antenna(&rdev->wiphy); ret = rdev->ops->get_antenna(&rdev->wiphy, tx_ant, rx_ant); if (ret) trace_rdev_return_int(&rdev->wiphy, ret); else trace_rdev_return_int_tx_rx(&rdev->wiphy, ret, *tx_ant, *rx_ant); return ret; } static inline int rdev_sched_scan_start(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_sched_scan_request *request) { int ret; trace_rdev_sched_scan_start(&rdev->wiphy, dev, request->reqid); ret = rdev->ops->sched_scan_start(&rdev->wiphy, dev, request); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_sched_scan_stop(struct cfg80211_registered_device *rdev, struct net_device *dev, u64 reqid) { int ret; trace_rdev_sched_scan_stop(&rdev->wiphy, dev, reqid); ret = rdev->ops->sched_scan_stop(&rdev->wiphy, dev, reqid); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_rekey_data(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_gtk_rekey_data *data) { int ret; trace_rdev_set_rekey_data(&rdev->wiphy, dev); ret = 
rdev->ops->set_rekey_data(&rdev->wiphy, dev, data); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_tdls_mgmt(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 *peer, int link_id, u8 action_code, u8 dialog_token, u16 status_code, u32 peer_capability, bool initiator, const u8 *buf, size_t len) { int ret; trace_rdev_tdls_mgmt(&rdev->wiphy, dev, peer, link_id, action_code, dialog_token, status_code, peer_capability, initiator, buf, len); ret = rdev->ops->tdls_mgmt(&rdev->wiphy, dev, peer, link_id, action_code, dialog_token, status_code, peer_capability, initiator, buf, len); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_tdls_oper(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 *peer, enum nl80211_tdls_operation oper) { int ret; trace_rdev_tdls_oper(&rdev->wiphy, dev, peer, oper); ret = rdev->ops->tdls_oper(&rdev->wiphy, dev, peer, oper); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_probe_client(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *peer, u64 *cookie) { int ret; trace_rdev_probe_client(&rdev->wiphy, dev, peer); ret = rdev->ops->probe_client(&rdev->wiphy, dev, peer, cookie); trace_rdev_return_int_cookie(&rdev->wiphy, ret, *cookie); return ret; } static inline int rdev_set_noack_map(struct cfg80211_registered_device *rdev, struct net_device *dev, u16 noack_map) { int ret; trace_rdev_set_noack_map(&rdev->wiphy, dev, noack_map); ret = rdev->ops->set_noack_map(&rdev->wiphy, dev, noack_map); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_get_channel(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, unsigned int link_id, struct cfg80211_chan_def *chandef) { int ret; trace_rdev_get_channel(&rdev->wiphy, wdev, link_id); ret = rdev->ops->get_channel(&rdev->wiphy, wdev, link_id, chandef); trace_rdev_return_chandef(&rdev->wiphy, ret, chandef); return ret; } static inline int rdev_start_p2p_device(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { int ret; trace_rdev_start_p2p_device(&rdev->wiphy, wdev); ret = rdev->ops->start_p2p_device(&rdev->wiphy, wdev); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_stop_p2p_device(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { trace_rdev_stop_p2p_device(&rdev->wiphy, wdev); rdev->ops->stop_p2p_device(&rdev->wiphy, wdev); trace_rdev_return_void(&rdev->wiphy); } static inline int rdev_start_nan(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_nan_conf *conf) { int ret; trace_rdev_start_nan(&rdev->wiphy, wdev, conf); ret = rdev->ops->start_nan(&rdev->wiphy, wdev, conf); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_stop_nan(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { trace_rdev_stop_nan(&rdev->wiphy, wdev); rdev->ops->stop_nan(&rdev->wiphy, wdev); trace_rdev_return_void(&rdev->wiphy); } static inline int rdev_add_nan_func(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_nan_func *nan_func) { int ret; trace_rdev_add_nan_func(&rdev->wiphy, wdev, nan_func); ret = rdev->ops->add_nan_func(&rdev->wiphy, wdev, nan_func); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_del_nan_func(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, u64 cookie) { trace_rdev_del_nan_func(&rdev->wiphy, wdev, cookie); 
rdev->ops->del_nan_func(&rdev->wiphy, wdev, cookie); trace_rdev_return_void(&rdev->wiphy); } static inline int rdev_nan_change_conf(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_nan_conf *conf, u32 changes) { int ret; trace_rdev_nan_change_conf(&rdev->wiphy, wdev, conf, changes); if (rdev->ops->nan_change_conf) ret = rdev->ops->nan_change_conf(&rdev->wiphy, wdev, conf, changes); else ret = -ENOTSUPP; trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_mac_acl(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_acl_data *params) { int ret; trace_rdev_set_mac_acl(&rdev->wiphy, dev, params); ret = rdev->ops->set_mac_acl(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_update_ft_ies(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_update_ft_ies_params *ftie) { int ret; trace_rdev_update_ft_ies(&rdev->wiphy, dev, ftie); ret = rdev->ops->update_ft_ies(&rdev->wiphy, dev, ftie); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_crit_proto_start(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, enum nl80211_crit_proto_id protocol, u16 duration) { int ret; trace_rdev_crit_proto_start(&rdev->wiphy, wdev, protocol, duration); ret = rdev->ops->crit_proto_start(&rdev->wiphy, wdev, protocol, duration); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_crit_proto_stop(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev) { trace_rdev_crit_proto_stop(&rdev->wiphy, wdev); rdev->ops->crit_proto_stop(&rdev->wiphy, wdev); trace_rdev_return_void(&rdev->wiphy); } static inline int rdev_channel_switch(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_csa_settings *params) { int ret; trace_rdev_channel_switch(&rdev->wiphy, dev, params); ret = rdev->ops->channel_switch(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_qos_map(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_qos_map *qos_map) { int ret = -EOPNOTSUPP; if (rdev->ops->set_qos_map) { trace_rdev_set_qos_map(&rdev->wiphy, dev, qos_map); ret = rdev->ops->set_qos_map(&rdev->wiphy, dev, qos_map); trace_rdev_return_int(&rdev->wiphy, ret); } return ret; } static inline int rdev_set_ap_chanwidth(struct cfg80211_registered_device *rdev, struct net_device *dev, unsigned int link_id, struct cfg80211_chan_def *chandef) { int ret; trace_rdev_set_ap_chanwidth(&rdev->wiphy, dev, link_id, chandef); ret = rdev->ops->set_ap_chanwidth(&rdev->wiphy, dev, link_id, chandef); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_add_tx_ts(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 tsid, const u8 *peer, u8 user_prio, u16 admitted_time) { int ret = -EOPNOTSUPP; trace_rdev_add_tx_ts(&rdev->wiphy, dev, tsid, peer, user_prio, admitted_time); if (rdev->ops->add_tx_ts) ret = rdev->ops->add_tx_ts(&rdev->wiphy, dev, tsid, peer, user_prio, admitted_time); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_del_tx_ts(struct cfg80211_registered_device *rdev, struct net_device *dev, u8 tsid, const u8 *peer) { int ret = -EOPNOTSUPP; trace_rdev_del_tx_ts(&rdev->wiphy, dev, tsid, peer); if (rdev->ops->del_tx_ts) ret = rdev->ops->del_tx_ts(&rdev->wiphy, dev, tsid, peer); trace_rdev_return_int(&rdev->wiphy, ret); return 
ret; } static inline int rdev_tdls_channel_switch(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *addr, u8 oper_class, struct cfg80211_chan_def *chandef) { int ret; trace_rdev_tdls_channel_switch(&rdev->wiphy, dev, addr, oper_class, chandef); ret = rdev->ops->tdls_channel_switch(&rdev->wiphy, dev, addr, oper_class, chandef); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_tdls_cancel_channel_switch(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *addr) { trace_rdev_tdls_cancel_channel_switch(&rdev->wiphy, dev, addr); rdev->ops->tdls_cancel_channel_switch(&rdev->wiphy, dev, addr); trace_rdev_return_void(&rdev->wiphy); } static inline int rdev_start_radar_detection(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_chan_def *chandef, u32 cac_time_ms) { int ret = -ENOTSUPP; trace_rdev_start_radar_detection(&rdev->wiphy, dev, chandef, cac_time_ms); if (rdev->ops->start_radar_detection) ret = rdev->ops->start_radar_detection(&rdev->wiphy, dev, chandef, cac_time_ms); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_end_cac(struct cfg80211_registered_device *rdev, struct net_device *dev) { trace_rdev_end_cac(&rdev->wiphy, dev); if (rdev->ops->end_cac) rdev->ops->end_cac(&rdev->wiphy, dev); trace_rdev_return_void(&rdev->wiphy); } static inline int rdev_set_mcast_rate(struct cfg80211_registered_device *rdev, struct net_device *dev, int mcast_rate[NUM_NL80211_BANDS]) { int ret = -ENOTSUPP; trace_rdev_set_mcast_rate(&rdev->wiphy, dev, mcast_rate); if (rdev->ops->set_mcast_rate) ret = rdev->ops->set_mcast_rate(&rdev->wiphy, dev, mcast_rate); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_coalesce(struct cfg80211_registered_device *rdev, struct cfg80211_coalesce *coalesce) { int ret = -ENOTSUPP; trace_rdev_set_coalesce(&rdev->wiphy, coalesce); if (rdev->ops->set_coalesce) ret = rdev->ops->set_coalesce(&rdev->wiphy, coalesce); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_pmk(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_pmk_conf *pmk_conf) { int ret = -EOPNOTSUPP; trace_rdev_set_pmk(&rdev->wiphy, dev, pmk_conf); if (rdev->ops->set_pmk) ret = rdev->ops->set_pmk(&rdev->wiphy, dev, pmk_conf); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_del_pmk(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *aa) { int ret = -EOPNOTSUPP; trace_rdev_del_pmk(&rdev->wiphy, dev, aa); if (rdev->ops->del_pmk) ret = rdev->ops->del_pmk(&rdev->wiphy, dev, aa); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_external_auth(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_external_auth_params *params) { int ret = -EOPNOTSUPP; trace_rdev_external_auth(&rdev->wiphy, dev, params); if (rdev->ops->external_auth) ret = rdev->ops->external_auth(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_get_ftm_responder_stats(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_ftm_responder_stats *ftm_stats) { int ret = -EOPNOTSUPP; trace_rdev_get_ftm_responder_stats(&rdev->wiphy, dev, ftm_stats); if (rdev->ops->get_ftm_responder_stats) ret = rdev->ops->get_ftm_responder_stats(&rdev->wiphy, dev, ftm_stats); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int 
rdev_start_pmsr(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_pmsr_request *request) { int ret = -EOPNOTSUPP; trace_rdev_start_pmsr(&rdev->wiphy, wdev, request->cookie); if (rdev->ops->start_pmsr) ret = rdev->ops->start_pmsr(&rdev->wiphy, wdev, request); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_abort_pmsr(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct cfg80211_pmsr_request *request) { trace_rdev_abort_pmsr(&rdev->wiphy, wdev, request->cookie); if (rdev->ops->abort_pmsr) rdev->ops->abort_pmsr(&rdev->wiphy, wdev, request); trace_rdev_return_void(&rdev->wiphy); } static inline int rdev_update_owe_info(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_update_owe_info *oweinfo) { int ret = -EOPNOTSUPP; trace_rdev_update_owe_info(&rdev->wiphy, dev, oweinfo); if (rdev->ops->update_owe_info) ret = rdev->ops->update_owe_info(&rdev->wiphy, dev, oweinfo); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_probe_mesh_link(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *dest, const void *buf, size_t len) { int ret; trace_rdev_probe_mesh_link(&rdev->wiphy, dev, dest, buf, len); ret = rdev->ops->probe_mesh_link(&rdev->wiphy, dev, buf, len); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_tid_config(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_tid_config *tid_conf) { int ret; trace_rdev_set_tid_config(&rdev->wiphy, dev, tid_conf); ret = rdev->ops->set_tid_config(&rdev->wiphy, dev, tid_conf); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_reset_tid_config(struct cfg80211_registered_device *rdev, struct net_device *dev, const u8 *peer, u8 tids) { int ret; trace_rdev_reset_tid_config(&rdev->wiphy, dev, peer, tids); ret = rdev->ops->reset_tid_config(&rdev->wiphy, dev, peer, tids); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_sar_specs(struct cfg80211_registered_device *rdev, struct cfg80211_sar_specs *sar) { int ret; trace_rdev_set_sar_specs(&rdev->wiphy, sar); ret = rdev->ops->set_sar_specs(&rdev->wiphy, sar); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_color_change(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_color_change_settings *params) { int ret; trace_rdev_color_change(&rdev->wiphy, dev, params); ret = rdev->ops->color_change(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_fils_aad(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_fils_aad *fils_aad) { int ret = -EOPNOTSUPP; trace_rdev_set_fils_aad(&rdev->wiphy, dev, fils_aad); if (rdev->ops->set_fils_aad) ret = rdev->ops->set_fils_aad(&rdev->wiphy, dev, fils_aad); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_radar_background(struct cfg80211_registered_device *rdev, struct cfg80211_chan_def *chandef) { struct wiphy *wiphy = &rdev->wiphy; int ret; if (!rdev->ops->set_radar_background) return -EOPNOTSUPP; trace_rdev_set_radar_background(wiphy, chandef); ret = rdev->ops->set_radar_background(wiphy, chandef); trace_rdev_return_int(wiphy, ret); return ret; } static inline int rdev_add_intf_link(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, unsigned int link_id) { int ret = 0; 
trace_rdev_add_intf_link(&rdev->wiphy, wdev, link_id); if (rdev->ops->add_intf_link) ret = rdev->ops->add_intf_link(&rdev->wiphy, wdev, link_id); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline void rdev_del_intf_link(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, unsigned int link_id) { trace_rdev_del_intf_link(&rdev->wiphy, wdev, link_id); if (rdev->ops->del_intf_link) rdev->ops->del_intf_link(&rdev->wiphy, wdev, link_id); trace_rdev_return_void(&rdev->wiphy); } static inline int rdev_add_link_station(struct cfg80211_registered_device *rdev, struct net_device *dev, struct link_station_parameters *params) { int ret; if (!rdev->ops->add_link_station) return -EOPNOTSUPP; trace_rdev_add_link_station(&rdev->wiphy, dev, params); ret = rdev->ops->add_link_station(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_mod_link_station(struct cfg80211_registered_device *rdev, struct net_device *dev, struct link_station_parameters *params) { int ret; if (!rdev->ops->mod_link_station) return -EOPNOTSUPP; trace_rdev_mod_link_station(&rdev->wiphy, dev, params); ret = rdev->ops->mod_link_station(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_del_link_station(struct cfg80211_registered_device *rdev, struct net_device *dev, struct link_station_del_parameters *params) { int ret; if (!rdev->ops->del_link_station) return -EOPNOTSUPP; trace_rdev_del_link_station(&rdev->wiphy, dev, params); ret = rdev->ops->del_link_station(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } static inline int rdev_set_hw_timestamp(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_set_hw_timestamp *hwts) { struct wiphy *wiphy = &rdev->wiphy; int ret; if (!rdev->ops->set_hw_timestamp) return -EOPNOTSUPP; trace_rdev_set_hw_timestamp(wiphy, dev, hwts); ret = rdev->ops->set_hw_timestamp(wiphy, dev, hwts); trace_rdev_return_int(wiphy, ret); return ret; } #endif /* __CFG80211_RDEV_OPS */ |
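/*
 * Illustrative sketch (not part of the original header): how a caller is
 * expected to use these wrappers. Unlike e.g. rdev_set_wiphy_params(),
 * rdev_scan() does not check for a missing ->scan callback itself, so the
 * caller verifies that before invoking the wrapper. "example_trigger_scan"
 * is a hypothetical caller; rdev_scan() and rdev->ops->scan are taken from
 * the code above.
 */
static int example_trigger_scan(struct cfg80211_registered_device *rdev,
				struct cfg80211_scan_request *request)
{
	/* An unset ->scan callback is treated as "not supported". */
	if (!rdev->ops->scan)
		return -EOPNOTSUPP;

	/* The wrapper brackets the driver call with entry/return tracepoints. */
	return rdev_scan(rdev, request);
}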
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FILEATTR_H
#define _LINUX_FILEATTR_H

/* Flags shared between flags/xflags */
#define FS_COMMON_FL \
	(FS_SYNC_FL | FS_IMMUTABLE_FL | FS_APPEND_FL | \
	 FS_NODUMP_FL | FS_NOATIME_FL | FS_DAX_FL | \
	 FS_PROJINHERIT_FL)

#define FS_XFLAG_COMMON \
	(FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | FS_XFLAG_APPEND | \
	 FS_XFLAG_NODUMP | FS_XFLAG_NOATIME | FS_XFLAG_DAX | \
	 FS_XFLAG_PROJINHERIT)

/*
 * Merged interface for miscellaneous file attributes. 'flags' originates from
 * ext* and 'fsx_flags' from xfs. There's some overlap between the two, which
 * is handled by the VFS helpers, so filesystems are free to implement just one
 * or both of these sub-interfaces.
 */
struct fileattr {
	u32	flags;		/* flags (FS_IOC_GETFLAGS/FS_IOC_SETFLAGS) */
	/* struct fsxattr: */
	u32	fsx_xflags;	/* xflags field value (get/set) */
	u32	fsx_extsize;	/* extsize field value (get/set)*/
	u32	fsx_nextents;	/* nextents field value (get) */
	u32	fsx_projid;	/* project identifier (get/set) */
	u32	fsx_cowextsize;	/* CoW extsize field value (get/set)*/
	/* selectors: */
	bool	flags_valid:1;
	bool	fsx_valid:1;
};

int copy_fsxattr_to_user(const struct fileattr *fa, struct fsxattr __user *ufa);

void fileattr_fill_xflags(struct fileattr *fa, u32 xflags);
void fileattr_fill_flags(struct fileattr *fa, u32 flags);

/**
 * fileattr_has_fsx - check for extended flags/attributes
 * @fa: fileattr pointer
 *
 * Return: true if any attributes are present that are not represented in
 * ->flags.
 */
static inline bool fileattr_has_fsx(const struct fileattr *fa)
{
	return fa->fsx_valid &&
		((fa->fsx_xflags & ~FS_XFLAG_COMMON) || fa->fsx_extsize != 0 ||
		 fa->fsx_projid != 0 || fa->fsx_cowextsize != 0);
}

int vfs_fileattr_get(struct dentry *dentry, struct fileattr *fa);
int vfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry,
		     struct fileattr *fa);

#endif /* _LINUX_FILEATTR_H */
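/*
 * Illustrative sketch (not part of the original header), showing how a
 * filesystem that only stores the ext*-style FS_*_FL flags might wire up
 * the two halves of this interface. The "foofs_*" names and the flag
 * conversion helpers are hypothetical; the fileattr helpers and the
 * get/set prototypes mirror the declarations above.
 */
static int foofs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);

	/* Publishes fa->flags and sets fa->flags_valid. */
	fileattr_fill_flags(fa, foofs_inode_to_fsflags(inode));
	return 0;
}

static int foofs_fileattr_set(struct mnt_idmap *idmap,
			      struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);

	/*
	 * Reject xflags-only attributes (extent size hints, project ID, ...)
	 * that this filesystem cannot store; the common bits have already
	 * been translated into fa->flags by the VFS helpers.
	 */
	if (fileattr_has_fsx(fa))
		return -EOPNOTSUPP;

	foofs_set_fsflags(inode, fa->flags);
	return 0;
}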
/* * Copyright (c) 2016 Intel Corporation * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that copyright * notice and this permission notice appear in supporting documentation, and * that the name of the copyright holders not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. The copyright holders make no representations * about the suitability of this software for any purpose. It is provided "as * is" without express or implied warranty.
* * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. */ #include <linux/uaccess.h> #include <drm/drm_drv.h> #include <drm/drm_encoder.h> #include <drm/drm_file.h> #include <drm/drm_framebuffer.h> #include <drm/drm_managed.h> #include <drm/drm_mode_config.h> #include <drm/drm_print.h> #include <linux/dma-resv.h> #include "drm_crtc_internal.h" #include "drm_internal.h" int drm_modeset_register_all(struct drm_device *dev) { int ret; ret = drm_plane_register_all(dev); if (ret) goto err_plane; ret = drm_crtc_register_all(dev); if (ret) goto err_crtc; ret = drm_encoder_register_all(dev); if (ret) goto err_encoder; ret = drm_connector_register_all(dev); if (ret) goto err_connector; return 0; err_connector: drm_encoder_unregister_all(dev); err_encoder: drm_crtc_unregister_all(dev); err_crtc: drm_plane_unregister_all(dev); err_plane: return ret; } void drm_modeset_unregister_all(struct drm_device *dev) { drm_connector_unregister_all(dev); drm_encoder_unregister_all(dev); drm_crtc_unregister_all(dev); drm_plane_unregister_all(dev); } /** * drm_mode_getresources - get graphics configuration * @dev: drm device for the ioctl * @data: data pointer for the ioctl * @file_priv: drm file for the ioctl call * * Construct a set of configuration description structures and return * them to the user, including CRTC, connector and framebuffer configuration. * * Called by the user via ioctl. * * Returns: * Zero on success, negative errno on failure. 
*/ int drm_mode_getresources(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_card_res *card_res = data; struct drm_framebuffer *fb; struct drm_connector *connector; struct drm_crtc *crtc; struct drm_encoder *encoder; int count, ret = 0; uint32_t __user *fb_id; uint32_t __user *crtc_id; uint32_t __user *connector_id; uint32_t __user *encoder_id; struct drm_connector_list_iter conn_iter; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; mutex_lock(&file_priv->fbs_lock); count = 0; fb_id = u64_to_user_ptr(card_res->fb_id_ptr); list_for_each_entry(fb, &file_priv->fbs, filp_head) { if (count < card_res->count_fbs && put_user(fb->base.id, fb_id + count)) { mutex_unlock(&file_priv->fbs_lock); return -EFAULT; } count++; } card_res->count_fbs = count; mutex_unlock(&file_priv->fbs_lock); card_res->max_height = dev->mode_config.max_height; card_res->min_height = dev->mode_config.min_height; card_res->max_width = dev->mode_config.max_width; card_res->min_width = dev->mode_config.min_width; count = 0; crtc_id = u64_to_user_ptr(card_res->crtc_id_ptr); drm_for_each_crtc(crtc, dev) { if (drm_lease_held(file_priv, crtc->base.id)) { if (count < card_res->count_crtcs && put_user(crtc->base.id, crtc_id + count)) return -EFAULT; count++; } } card_res->count_crtcs = count; count = 0; encoder_id = u64_to_user_ptr(card_res->encoder_id_ptr); drm_for_each_encoder(encoder, dev) { if (count < card_res->count_encoders && put_user(encoder->base.id, encoder_id + count)) return -EFAULT; count++; } card_res->count_encoders = count; drm_connector_list_iter_begin(dev, &conn_iter); count = 0; connector_id = u64_to_user_ptr(card_res->connector_id_ptr); drm_for_each_connector_iter(connector, &conn_iter) { /* only expose writeback connectors if userspace understands them */ if (!file_priv->writeback_connectors && (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)) continue; if (drm_lease_held(file_priv, connector->base.id)) { if (count < card_res->count_connectors && put_user(connector->base.id, connector_id + count)) { drm_connector_list_iter_end(&conn_iter); return -EFAULT; } count++; } } card_res->count_connectors = count; drm_connector_list_iter_end(&conn_iter); return ret; } /** * drm_mode_config_reset - call ->reset callbacks * @dev: drm device * * This functions calls all the crtc's, encoder's and connector's ->reset * callback. Drivers can use this in e.g. their driver load or resume code to * reset hardware and software state. 
*/ void drm_mode_config_reset(struct drm_device *dev) { struct drm_crtc *crtc; struct drm_plane *plane; struct drm_encoder *encoder; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; drm_for_each_plane(plane, dev) if (plane->funcs->reset) plane->funcs->reset(plane); drm_for_each_crtc(crtc, dev) if (crtc->funcs->reset) crtc->funcs->reset(crtc); drm_for_each_encoder(encoder, dev) if (encoder->funcs && encoder->funcs->reset) encoder->funcs->reset(encoder); drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) if (connector->funcs->reset) connector->funcs->reset(connector); drm_connector_list_iter_end(&conn_iter); } EXPORT_SYMBOL(drm_mode_config_reset); /* * Global properties */ static const struct drm_prop_enum_list drm_plane_type_enum_list[] = { { DRM_PLANE_TYPE_OVERLAY, "Overlay" }, { DRM_PLANE_TYPE_PRIMARY, "Primary" }, { DRM_PLANE_TYPE_CURSOR, "Cursor" }, }; static int drm_mode_create_standard_properties(struct drm_device *dev) { struct drm_property *prop; int ret; ret = drm_connector_create_standard_properties(dev); if (ret) return ret; prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE, "type", drm_plane_type_enum_list, ARRAY_SIZE(drm_plane_type_enum_list)); if (!prop) return -ENOMEM; dev->mode_config.plane_type_property = prop; prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "SRC_X", 0, UINT_MAX); if (!prop) return -ENOMEM; dev->mode_config.prop_src_x = prop; prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "SRC_Y", 0, UINT_MAX); if (!prop) return -ENOMEM; dev->mode_config.prop_src_y = prop; prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "SRC_W", 0, UINT_MAX); if (!prop) return -ENOMEM; dev->mode_config.prop_src_w = prop; prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "SRC_H", 0, UINT_MAX); if (!prop) return -ENOMEM; dev->mode_config.prop_src_h = prop; prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC, "CRTC_X", INT_MIN, INT_MAX); if (!prop) return -ENOMEM; dev->mode_config.prop_crtc_x = prop; prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC, "CRTC_Y", INT_MIN, INT_MAX); if (!prop) return -ENOMEM; dev->mode_config.prop_crtc_y = prop; prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "CRTC_W", 0, INT_MAX); if (!prop) return -ENOMEM; dev->mode_config.prop_crtc_w = prop; prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "CRTC_H", 0, INT_MAX); if (!prop) return -ENOMEM; dev->mode_config.prop_crtc_h = prop; prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC, "FB_ID", DRM_MODE_OBJECT_FB); if (!prop) return -ENOMEM; dev->mode_config.prop_fb_id = prop; prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC, "IN_FENCE_FD", -1, INT_MAX); if (!prop) return -ENOMEM; dev->mode_config.prop_in_fence_fd = prop; prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "OUT_FENCE_PTR", 0, U64_MAX); if (!prop) return -ENOMEM; dev->mode_config.prop_out_fence_ptr = prop; prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC, "CRTC_ID", DRM_MODE_OBJECT_CRTC); if (!prop) return -ENOMEM; dev->mode_config.prop_crtc_id = prop; prop = drm_property_create(dev, DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB, "FB_DAMAGE_CLIPS", 0); if (!prop) return -ENOMEM; dev->mode_config.prop_fb_damage_clips = prop; prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC, "ACTIVE"); if (!prop) return -ENOMEM; dev->mode_config.prop_active = prop; prop = drm_property_create(dev, DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB, 
"MODE_ID", 0); if (!prop) return -ENOMEM; dev->mode_config.prop_mode_id = prop; prop = drm_property_create_bool(dev, 0, "VRR_ENABLED"); if (!prop) return -ENOMEM; dev->mode_config.prop_vrr_enabled = prop; prop = drm_property_create(dev, DRM_MODE_PROP_BLOB, "DEGAMMA_LUT", 0); if (!prop) return -ENOMEM; dev->mode_config.degamma_lut_property = prop; prop = drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "DEGAMMA_LUT_SIZE", 0, UINT_MAX); if (!prop) return -ENOMEM; dev->mode_config.degamma_lut_size_property = prop; prop = drm_property_create(dev, DRM_MODE_PROP_BLOB, "CTM", 0); if (!prop) return -ENOMEM; dev->mode_config.ctm_property = prop; prop = drm_property_create(dev, DRM_MODE_PROP_BLOB, "GAMMA_LUT", 0); if (!prop) return -ENOMEM; dev->mode_config.gamma_lut_property = prop; prop = drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "GAMMA_LUT_SIZE", 0, UINT_MAX); if (!prop) return -ENOMEM; dev->mode_config.gamma_lut_size_property = prop; prop = drm_property_create(dev, DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_BLOB, "IN_FORMATS", 0); if (!prop) return -ENOMEM; dev->mode_config.modifiers_property = prop; return 0; } static void drm_mode_config_init_release(struct drm_device *dev, void *ptr) { drm_mode_config_cleanup(dev); } /** * drmm_mode_config_init - managed DRM mode_configuration structure * initialization * @dev: DRM device * * Initialize @dev's mode_config structure, used for tracking the graphics * configuration of @dev. * * Since this initializes the modeset locks, no locking is possible. Which is no * problem, since this should happen single threaded at init time. It is the * driver's problem to ensure this guarantee. * * Cleanup is automatically handled through registering drm_mode_config_cleanup * with drmm_add_action(). * * Returns: 0 on success, negative error value on failure. 
*/ int drmm_mode_config_init(struct drm_device *dev) { int ret; mutex_init(&dev->mode_config.mutex); drm_modeset_lock_init(&dev->mode_config.connection_mutex); mutex_init(&dev->mode_config.idr_mutex); mutex_init(&dev->mode_config.fb_lock); mutex_init(&dev->mode_config.blob_lock); INIT_LIST_HEAD(&dev->mode_config.fb_list); INIT_LIST_HEAD(&dev->mode_config.crtc_list); INIT_LIST_HEAD(&dev->mode_config.connector_list); INIT_LIST_HEAD(&dev->mode_config.encoder_list); INIT_LIST_HEAD(&dev->mode_config.property_list); INIT_LIST_HEAD(&dev->mode_config.property_blob_list); INIT_LIST_HEAD(&dev->mode_config.plane_list); INIT_LIST_HEAD(&dev->mode_config.privobj_list); idr_init_base(&dev->mode_config.object_idr, 1); idr_init_base(&dev->mode_config.tile_idr, 1); ida_init(&dev->mode_config.connector_ida); spin_lock_init(&dev->mode_config.connector_list_lock); init_llist_head(&dev->mode_config.connector_free_list); INIT_WORK(&dev->mode_config.connector_free_work, drm_connector_free_work_fn); ret = drm_mode_create_standard_properties(dev); if (ret) { drm_mode_config_cleanup(dev); return ret; } /* Just to be sure */ dev->mode_config.num_fb = 0; dev->mode_config.num_connector = 0; dev->mode_config.num_crtc = 0; dev->mode_config.num_encoder = 0; dev->mode_config.num_total_plane = 0; if (IS_ENABLED(CONFIG_LOCKDEP)) { struct drm_modeset_acquire_ctx modeset_ctx; struct ww_acquire_ctx resv_ctx; struct dma_resv resv; int ret; dma_resv_init(&resv); drm_modeset_acquire_init(&modeset_ctx, 0); ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &modeset_ctx); if (ret == -EDEADLK) ret = drm_modeset_backoff(&modeset_ctx); ww_acquire_init(&resv_ctx, &reservation_ww_class); ret = dma_resv_lock(&resv, &resv_ctx); if (ret == -EDEADLK) dma_resv_lock_slow(&resv, &resv_ctx); dma_resv_unlock(&resv); ww_acquire_fini(&resv_ctx); drm_modeset_drop_locks(&modeset_ctx); drm_modeset_acquire_fini(&modeset_ctx); dma_resv_fini(&resv); } return drmm_add_action_or_reset(dev, drm_mode_config_init_release, NULL); } EXPORT_SYMBOL(drmm_mode_config_init); /** * drm_mode_config_cleanup - free up DRM mode_config info * @dev: DRM device * * Free up all the connectors and CRTCs associated with this DRM device, then * free up the framebuffers and associated buffer objects. * * Note that since this /should/ happen single-threaded at driver/device * teardown time, no locking is required. It's the driver's job to ensure that * this guarantee actually holds true. * * FIXME: With the managed drmm_mode_config_init() it is no longer necessary for * drivers to explicitly call this function. */ void drm_mode_config_cleanup(struct drm_device *dev) { struct drm_connector *connector; struct drm_connector_list_iter conn_iter; struct drm_crtc *crtc, *ct; struct drm_encoder *encoder, *enct; struct drm_framebuffer *fb, *fbt; struct drm_property *property, *pt; struct drm_property_blob *blob, *bt; struct drm_plane *plane, *plt; list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list, head) { encoder->funcs->destroy(encoder); } drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { /* drm_connector_list_iter holds an full reference to the * current connector itself, which means it is inherently safe * against unreferencing the current connector - but not against * deleting it right away. */ drm_connector_put(connector); } drm_connector_list_iter_end(&conn_iter); /* connector_iter drops references in a work item. 
*/ flush_work(&dev->mode_config.connector_free_work); if (WARN_ON(!list_empty(&dev->mode_config.connector_list))) { drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) DRM_ERROR("connector %s leaked!\n", connector->name); drm_connector_list_iter_end(&conn_iter); } list_for_each_entry_safe(property, pt, &dev->mode_config.property_list, head) { drm_property_destroy(dev, property); } list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list, head) { plane->funcs->destroy(plane); } list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) { crtc->funcs->destroy(crtc); } list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list, head_global) { drm_property_blob_put(blob); } /* * Single-threaded teardown context, so it's not required to grab the * fb_lock to protect against concurrent fb_list access. Contrary, it * would actually deadlock with the drm_framebuffer_cleanup function. * * Also, if there are any framebuffers left, that's a driver leak now, * so politely WARN about this. */ WARN_ON(!list_empty(&dev->mode_config.fb_list)); list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) { struct drm_printer p = drm_debug_printer("[leaked fb]"); drm_printf(&p, "framebuffer[%u]:\n", fb->base.id); drm_framebuffer_print_info(&p, 1, fb); drm_framebuffer_free(&fb->base.refcount); } ida_destroy(&dev->mode_config.connector_ida); idr_destroy(&dev->mode_config.tile_idr); idr_destroy(&dev->mode_config.object_idr); drm_modeset_lock_fini(&dev->mode_config.connection_mutex); } EXPORT_SYMBOL(drm_mode_config_cleanup); static u32 full_encoder_mask(struct drm_device *dev) { struct drm_encoder *encoder; u32 encoder_mask = 0; drm_for_each_encoder(encoder, dev) encoder_mask |= drm_encoder_mask(encoder); return encoder_mask; } /* * For some reason we want the encoder itself included in * possible_clones. Make life easy for drivers by allowing them * to leave possible_clones unset if no cloning is possible. */ static void fixup_encoder_possible_clones(struct drm_encoder *encoder) { if (encoder->possible_clones == 0) encoder->possible_clones = drm_encoder_mask(encoder); } static void validate_encoder_possible_clones(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; u32 encoder_mask = full_encoder_mask(dev); struct drm_encoder *other; drm_for_each_encoder(other, dev) { WARN(!!(encoder->possible_clones & drm_encoder_mask(other)) != !!(other->possible_clones & drm_encoder_mask(encoder)), "possible_clones mismatch: " "[ENCODER:%d:%s] mask=0x%x possible_clones=0x%x vs. 
" "[ENCODER:%d:%s] mask=0x%x possible_clones=0x%x\n", encoder->base.id, encoder->name, drm_encoder_mask(encoder), encoder->possible_clones, other->base.id, other->name, drm_encoder_mask(other), other->possible_clones); } WARN((encoder->possible_clones & drm_encoder_mask(encoder)) == 0 || (encoder->possible_clones & ~encoder_mask) != 0, "Bogus possible_clones: " "[ENCODER:%d:%s] possible_clones=0x%x (full encoder mask=0x%x)\n", encoder->base.id, encoder->name, encoder->possible_clones, encoder_mask); } static u32 full_crtc_mask(struct drm_device *dev) { struct drm_crtc *crtc; u32 crtc_mask = 0; drm_for_each_crtc(crtc, dev) crtc_mask |= drm_crtc_mask(crtc); return crtc_mask; } static void validate_encoder_possible_crtcs(struct drm_encoder *encoder) { u32 crtc_mask = full_crtc_mask(encoder->dev); WARN((encoder->possible_crtcs & crtc_mask) == 0 || (encoder->possible_crtcs & ~crtc_mask) != 0, "Bogus possible_crtcs: " "[ENCODER:%d:%s] possible_crtcs=0x%x (full crtc mask=0x%x)\n", encoder->base.id, encoder->name, encoder->possible_crtcs, crtc_mask); } void drm_mode_config_validate(struct drm_device *dev) { struct drm_encoder *encoder; struct drm_crtc *crtc; struct drm_plane *plane; u32 primary_with_crtc = 0, cursor_with_crtc = 0; unsigned int num_primary = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return; drm_for_each_encoder(encoder, dev) fixup_encoder_possible_clones(encoder); drm_for_each_encoder(encoder, dev) { validate_encoder_possible_clones(encoder); validate_encoder_possible_crtcs(encoder); } drm_for_each_crtc(crtc, dev) { WARN(!crtc->primary, "Missing primary plane on [CRTC:%d:%s]\n", crtc->base.id, crtc->name); WARN(crtc->cursor && crtc->funcs->cursor_set, "[CRTC:%d:%s] must not have both a cursor plane and a cursor_set func", crtc->base.id, crtc->name); WARN(crtc->cursor && crtc->funcs->cursor_set2, "[CRTC:%d:%s] must not have both a cursor plane and a cursor_set2 func", crtc->base.id, crtc->name); WARN(crtc->cursor && crtc->funcs->cursor_move, "[CRTC:%d:%s] must not have both a cursor plane and a cursor_move func", crtc->base.id, crtc->name); if (crtc->primary) { WARN(!(crtc->primary->possible_crtcs & drm_crtc_mask(crtc)), "Bogus primary plane possible_crtcs: [PLANE:%d:%s] must be compatible with [CRTC:%d:%s]\n", crtc->primary->base.id, crtc->primary->name, crtc->base.id, crtc->name); WARN(primary_with_crtc & drm_plane_mask(crtc->primary), "Primary plane [PLANE:%d:%s] used for multiple CRTCs", crtc->primary->base.id, crtc->primary->name); primary_with_crtc |= drm_plane_mask(crtc->primary); } if (crtc->cursor) { WARN(!(crtc->cursor->possible_crtcs & drm_crtc_mask(crtc)), "Bogus cursor plane possible_crtcs: [PLANE:%d:%s] must be compatible with [CRTC:%d:%s]\n", crtc->cursor->base.id, crtc->cursor->name, crtc->base.id, crtc->name); WARN(cursor_with_crtc & drm_plane_mask(crtc->cursor), "Cursor plane [PLANE:%d:%s] used for multiple CRTCs", crtc->cursor->base.id, crtc->cursor->name); cursor_with_crtc |= drm_plane_mask(crtc->cursor); } } drm_for_each_plane(plane, dev) { if (plane->type == DRM_PLANE_TYPE_PRIMARY) num_primary++; } WARN(num_primary != dev->mode_config.num_crtc, "Must have as many primary planes as there are CRTCs, but have %u primary planes and %u CRTCs", num_primary, dev->mode_config.num_crtc); } |
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * include/net/dsa.h - Driver for Distributed Switch Architecture switch chips * Copyright (c) 2008-2009 Marvell Semiconductor */ #ifndef __LINUX_NET_DSA_H #define __LINUX_NET_DSA_H #include <linux/if.h> #include <linux/if_ether.h> #include <linux/list.h> #include <linux/notifier.h> #include <linux/timer.h> #include <linux/workqueue.h> #include <linux/of.h> #include <linux/ethtool.h> #include <linux/net_tstamp.h> #include <linux/phy.h> #include <linux/platform_data/dsa.h> #include <linux/phylink.h> #include <net/devlink.h> #include <net/switchdev.h> struct dsa_8021q_context; struct tc_action; struct phy_device; struct fixed_phy_status; struct phylink_link_state; #define DSA_TAG_PROTO_NONE_VALUE 0 #define DSA_TAG_PROTO_BRCM_VALUE 1 #define DSA_TAG_PROTO_BRCM_PREPEND_VALUE 2 #define DSA_TAG_PROTO_DSA_VALUE 3 #define DSA_TAG_PROTO_EDSA_VALUE 4 #define DSA_TAG_PROTO_GSWIP_VALUE 5 #define DSA_TAG_PROTO_KSZ9477_VALUE 6 #define DSA_TAG_PROTO_KSZ9893_VALUE 7 #define DSA_TAG_PROTO_LAN9303_VALUE 8 #define DSA_TAG_PROTO_MTK_VALUE 9 #define DSA_TAG_PROTO_QCA_VALUE 10 #define DSA_TAG_PROTO_TRAILER_VALUE 11 #define DSA_TAG_PROTO_8021Q_VALUE 12 #define DSA_TAG_PROTO_SJA1105_VALUE 13 #define
DSA_TAG_PROTO_KSZ8795_VALUE 14 #define DSA_TAG_PROTO_OCELOT_VALUE 15 #define DSA_TAG_PROTO_AR9331_VALUE 16 #define DSA_TAG_PROTO_RTL4_A_VALUE 17 #define DSA_TAG_PROTO_HELLCREEK_VALUE 18 #define DSA_TAG_PROTO_XRS700X_VALUE 19 #define DSA_TAG_PROTO_OCELOT_8021Q_VALUE 20 #define DSA_TAG_PROTO_SEVILLE_VALUE 21 #define DSA_TAG_PROTO_BRCM_LEGACY_VALUE 22 #define DSA_TAG_PROTO_SJA1110_VALUE 23 #define DSA_TAG_PROTO_RTL8_4_VALUE 24 #define DSA_TAG_PROTO_RTL8_4T_VALUE 25 #define DSA_TAG_PROTO_RZN1_A5PSW_VALUE 26 #define DSA_TAG_PROTO_LAN937X_VALUE 27 enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE, DSA_TAG_PROTO_BRCM = DSA_TAG_PROTO_BRCM_VALUE, DSA_TAG_PROTO_BRCM_LEGACY = DSA_TAG_PROTO_BRCM_LEGACY_VALUE, DSA_TAG_PROTO_BRCM_PREPEND = DSA_TAG_PROTO_BRCM_PREPEND_VALUE, DSA_TAG_PROTO_DSA = DSA_TAG_PROTO_DSA_VALUE, DSA_TAG_PROTO_EDSA = DSA_TAG_PROTO_EDSA_VALUE, DSA_TAG_PROTO_GSWIP = DSA_TAG_PROTO_GSWIP_VALUE, DSA_TAG_PROTO_KSZ9477 = DSA_TAG_PROTO_KSZ9477_VALUE, DSA_TAG_PROTO_KSZ9893 = DSA_TAG_PROTO_KSZ9893_VALUE, DSA_TAG_PROTO_LAN9303 = DSA_TAG_PROTO_LAN9303_VALUE, DSA_TAG_PROTO_MTK = DSA_TAG_PROTO_MTK_VALUE, DSA_TAG_PROTO_QCA = DSA_TAG_PROTO_QCA_VALUE, DSA_TAG_PROTO_TRAILER = DSA_TAG_PROTO_TRAILER_VALUE, DSA_TAG_PROTO_8021Q = DSA_TAG_PROTO_8021Q_VALUE, DSA_TAG_PROTO_SJA1105 = DSA_TAG_PROTO_SJA1105_VALUE, DSA_TAG_PROTO_KSZ8795 = DSA_TAG_PROTO_KSZ8795_VALUE, DSA_TAG_PROTO_OCELOT = DSA_TAG_PROTO_OCELOT_VALUE, DSA_TAG_PROTO_AR9331 = DSA_TAG_PROTO_AR9331_VALUE, DSA_TAG_PROTO_RTL4_A = DSA_TAG_PROTO_RTL4_A_VALUE, DSA_TAG_PROTO_HELLCREEK = DSA_TAG_PROTO_HELLCREEK_VALUE, DSA_TAG_PROTO_XRS700X = DSA_TAG_PROTO_XRS700X_VALUE, DSA_TAG_PROTO_OCELOT_8021Q = DSA_TAG_PROTO_OCELOT_8021Q_VALUE, DSA_TAG_PROTO_SEVILLE = DSA_TAG_PROTO_SEVILLE_VALUE, DSA_TAG_PROTO_SJA1110 = DSA_TAG_PROTO_SJA1110_VALUE, DSA_TAG_PROTO_RTL8_4 = DSA_TAG_PROTO_RTL8_4_VALUE, DSA_TAG_PROTO_RTL8_4T = DSA_TAG_PROTO_RTL8_4T_VALUE, DSA_TAG_PROTO_RZN1_A5PSW = DSA_TAG_PROTO_RZN1_A5PSW_VALUE, DSA_TAG_PROTO_LAN937X = DSA_TAG_PROTO_LAN937X_VALUE, }; struct dsa_switch; struct dsa_device_ops { struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev); struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev); void (*flow_dissect)(const struct sk_buff *skb, __be16 *proto, int *offset); int (*connect)(struct dsa_switch *ds); void (*disconnect)(struct dsa_switch *ds); unsigned int needed_headroom; unsigned int needed_tailroom; const char *name; enum dsa_tag_protocol proto; /* Some tagging protocols either mangle or shift the destination MAC * address, in which case the DSA conduit would drop packets on ingress * if what it understands out of the destination MAC address is not in * its RX filter. */ bool promisc_on_conduit; }; struct dsa_lag { struct net_device *dev; unsigned int id; struct mutex fdb_lock; struct list_head fdbs; refcount_t refcount; }; struct dsa_switch_tree { struct list_head list; /* List of switch ports */ struct list_head ports; /* Notifier chain for switch-wide events */ struct raw_notifier_head nh; /* Tree identifier */ unsigned int index; /* Number of switches attached to this tree */ struct kref refcount; /* Maps offloaded LAG netdevs to a zero-based linear ID for * drivers that need it. */ struct dsa_lag **lags; /* Tagging protocol operations */ const struct dsa_device_ops *tag_ops; /* Default tagging protocol preferred by the switches in this * tree. */ enum dsa_tag_protocol default_proto; /* Has this tree been applied to the hardware? 
*/ bool setup; /* * Configuration data for the platform device that owns * this dsa switch tree instance. */ struct dsa_platform_data *pd; /* List of DSA links composing the routing table */ struct list_head rtable; /* Length of "lags" array */ unsigned int lags_len; /* Track the largest switch index within a tree */ unsigned int last_switch; }; /* LAG IDs are one-based, the dst->lags array is zero-based */ #define dsa_lags_foreach_id(_id, _dst) \ for ((_id) = 1; (_id) <= (_dst)->lags_len; (_id)++) \ if ((_dst)->lags[(_id) - 1]) #define dsa_lag_foreach_port(_dp, _dst, _lag) \ list_for_each_entry((_dp), &(_dst)->ports, list) \ if (dsa_port_offloads_lag((_dp), (_lag))) #define dsa_hsr_foreach_port(_dp, _ds, _hsr) \ list_for_each_entry((_dp), &(_ds)->dst->ports, list) \ if ((_dp)->ds == (_ds) && (_dp)->hsr_dev == (_hsr)) static inline struct dsa_lag *dsa_lag_by_id(struct dsa_switch_tree *dst, unsigned int id) { /* DSA LAG IDs are one-based, dst->lags is zero-based */ return dst->lags[id - 1]; } static inline int dsa_lag_id(struct dsa_switch_tree *dst, struct net_device *lag_dev) { unsigned int id; dsa_lags_foreach_id(id, dst) { struct dsa_lag *lag = dsa_lag_by_id(dst, id); if (lag->dev == lag_dev) return lag->id; } return -ENODEV; } /* TC matchall action types */ enum dsa_port_mall_action_type { DSA_PORT_MALL_MIRROR, DSA_PORT_MALL_POLICER, }; /* TC mirroring entry */ struct dsa_mall_mirror_tc_entry { u8 to_local_port; bool ingress; }; /* TC port policer entry */ struct dsa_mall_policer_tc_entry { u32 burst; u64 rate_bytes_per_sec; }; /* TC matchall entry */ struct dsa_mall_tc_entry { struct list_head list; unsigned long cookie; enum dsa_port_mall_action_type type; union { struct dsa_mall_mirror_tc_entry mirror; struct dsa_mall_policer_tc_entry policer; }; }; struct dsa_bridge { struct net_device *dev; unsigned int num; bool tx_fwd_offload; refcount_t refcount; }; struct dsa_port { /* A CPU port is physically connected to a conduit device. A user port * exposes a network device to user-space, called 'user' here. */ union { struct net_device *conduit; struct net_device *user; }; /* Copy of the tagging protocol operations, for quicker access * in the data path. Valid only for the CPU ports. */ const struct dsa_device_ops *tag_ops; /* Copies for faster access in conduit receive hot path */ struct dsa_switch_tree *dst; struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev); struct dsa_switch *ds; unsigned int index; enum { DSA_PORT_TYPE_UNUSED = 0, DSA_PORT_TYPE_CPU, DSA_PORT_TYPE_DSA, DSA_PORT_TYPE_USER, } type; const char *name; struct dsa_port *cpu_dp; u8 mac[ETH_ALEN]; u8 stp_state; /* Warning: the following bit fields are not atomic, and updating them * can only be done from code paths where concurrency is not possible * (probe time or under rtnl_lock). */ u8 vlan_filtering:1; /* Managed by DSA on user ports and by drivers on CPU and DSA ports */ u8 learning:1; u8 lag_tx_enabled:1; /* conduit state bits, valid only on CPU ports */ u8 conduit_admin_up:1; u8 conduit_oper_up:1; /* Valid only on user ports */ u8 cpu_port_in_lag:1; u8 setup:1; struct device_node *dn; unsigned int ageing_time; struct dsa_bridge *bridge; struct devlink_port devlink_port; struct phylink *pl; struct phylink_config pl_config; struct dsa_lag *lag; struct net_device *hsr_dev; struct list_head list; /* * Original copy of the conduit netdev ethtool_ops */ const struct ethtool_ops *orig_ethtool_ops; /* List of MAC addresses that must be forwarded on this port. * These are only valid on CPU ports and DSA links. 
*/ struct mutex addr_lists_lock; struct list_head fdbs; struct list_head mdbs; struct mutex vlans_lock; union { /* List of VLANs that CPU and DSA ports are members of. * Access to this is serialized by the sleepable @vlans_lock. */ struct list_head vlans; /* List of VLANs that user ports are members of. * Access to this is serialized by netif_addr_lock_bh(). */ struct list_head user_vlans; }; }; /* TODO: ideally DSA ports would have a single dp->link_dp member, * and no dst->rtable nor this struct dsa_link would be needed, * but this would require some more complex tree walking, * so keep it stupid at the moment and list them all. */ struct dsa_link { struct dsa_port *dp; struct dsa_port *link_dp; struct list_head list; }; enum dsa_db_type { DSA_DB_PORT, DSA_DB_LAG, DSA_DB_BRIDGE, }; struct dsa_db { enum dsa_db_type type; union { const struct dsa_port *dp; struct dsa_lag lag; struct dsa_bridge bridge; }; }; struct dsa_mac_addr { unsigned char addr[ETH_ALEN]; u16 vid; refcount_t refcount; struct list_head list; struct dsa_db db; }; struct dsa_vlan { u16 vid; refcount_t refcount; struct list_head list; }; struct dsa_switch { struct device *dev; /* * Parent switch tree, and switch index. */ struct dsa_switch_tree *dst; unsigned int index; /* Warning: the following bit fields are not atomic, and updating them * can only be done from code paths where concurrency is not possible * (probe time or under rtnl_lock). */ u32 setup:1; /* Disallow bridge core from requesting different VLAN awareness * settings on ports if not hardware-supported */ u32 vlan_filtering_is_global:1; /* Keep VLAN filtering enabled on ports not offloading any upper */ u32 needs_standalone_vlan_filtering:1; /* Pass .port_vlan_add and .port_vlan_del to drivers even for bridges * that have vlan_filtering=0. All drivers should ideally set this (and * then the option would get removed), but it is unknown whether this * would break things or not. */ u32 configure_vlan_while_not_filtering:1; /* If the switch driver always programs the CPU port as egress tagged * despite the VLAN configuration indicating otherwise, then setting * @untag_bridge_pvid will force the DSA receive path to pop the * bridge's default_pvid VLAN tagged frames to offer a consistent * behavior between a vlan_filtering=0 and vlan_filtering=1 bridge * device. */ u32 untag_bridge_pvid:1; /* Let DSA manage the FDB entries towards the * CPU, based on the software bridge database. */ u32 assisted_learning_on_cpu_port:1; /* In case vlan_filtering_is_global is set, the VLAN awareness state * should be retrieved from here and not from the per-port settings. */ u32 vlan_filtering:1; /* For switches that only have the MRU configurable. To ensure the * configured MTU is not exceeded, normalization of MRU on all bridged * interfaces is needed. */ u32 mtu_enforcement_ingress:1; /* Drivers that isolate the FDBs of multiple bridges must set this * to true to receive the bridge as an argument in .port_fdb_{add,del} * and .port_mdb_{add,del}. Otherwise, the bridge.num will always be * passed as zero. */ u32 fdb_isolation:1; /* Listener for switch fabric events */ struct notifier_block nb; /* * Give the switch driver somewhere to hang its private data * structure. */ void *priv; void *tagger_data; /* * Configuration data for this switch. */ struct dsa_chip_data *cd; /* * The switch operations. */ const struct dsa_switch_ops *ops; /* * User mii_bus and devices for the individual ports. 
*/ u32 phys_mii_mask; struct mii_bus *user_mii_bus; /* Ageing Time limits in msecs */ unsigned int ageing_time_min; unsigned int ageing_time_max; /* Storage for drivers using tag_8021q */ struct dsa_8021q_context *tag_8021q_ctx; /* devlink used to represent this switch device */ struct devlink *devlink; /* Number of switch port queues */ unsigned int num_tx_queues; /* Drivers that benefit from having an ID associated with each * offloaded LAG should set this to the maximum number of * supported IDs. DSA will then maintain a mapping of _at * least_ these many IDs, accessible to drivers via * dsa_lag_id(). */ unsigned int num_lag_ids; /* Drivers that support bridge forwarding offload or FDB isolation * should set this to the maximum number of bridges spanning the same * switch tree (or all trees, in the case of cross-tree bridging * support) that can be offloaded. */ unsigned int max_num_bridges; unsigned int num_ports; }; static inline struct dsa_port *dsa_to_port(struct dsa_switch *ds, int p) { struct dsa_switch_tree *dst = ds->dst; struct dsa_port *dp; list_for_each_entry(dp, &dst->ports, list) if (dp->ds == ds && dp->index == p) return dp; return NULL; } static inline bool dsa_port_is_dsa(struct dsa_port *port) { return port->type == DSA_PORT_TYPE_DSA; } static inline bool dsa_port_is_cpu(struct dsa_port *port) { return port->type == DSA_PORT_TYPE_CPU; } static inline bool dsa_port_is_user(struct dsa_port *dp) { return dp->type == DSA_PORT_TYPE_USER; } static inline bool dsa_port_is_unused(struct dsa_port *dp) { return dp->type == DSA_PORT_TYPE_UNUSED; } static inline bool dsa_port_conduit_is_operational(struct dsa_port *dp) { return dsa_port_is_cpu(dp) && dp->conduit_admin_up && dp->conduit_oper_up; } static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p) { return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED; } static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p) { return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_CPU; } static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p) { return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_DSA; } static inline bool dsa_is_user_port(struct dsa_switch *ds, int p) { return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_USER; } #define dsa_tree_for_each_user_port(_dp, _dst) \ list_for_each_entry((_dp), &(_dst)->ports, list) \ if (dsa_port_is_user((_dp))) #define dsa_tree_for_each_user_port_continue_reverse(_dp, _dst) \ list_for_each_entry_continue_reverse((_dp), &(_dst)->ports, list) \ if (dsa_port_is_user((_dp))) #define dsa_tree_for_each_cpu_port(_dp, _dst) \ list_for_each_entry((_dp), &(_dst)->ports, list) \ if (dsa_port_is_cpu((_dp))) #define dsa_switch_for_each_port(_dp, _ds) \ list_for_each_entry((_dp), &(_ds)->dst->ports, list) \ if ((_dp)->ds == (_ds)) #define dsa_switch_for_each_port_safe(_dp, _next, _ds) \ list_for_each_entry_safe((_dp), (_next), &(_ds)->dst->ports, list) \ if ((_dp)->ds == (_ds)) #define dsa_switch_for_each_port_continue_reverse(_dp, _ds) \ list_for_each_entry_continue_reverse((_dp), &(_ds)->dst->ports, list) \ if ((_dp)->ds == (_ds)) #define dsa_switch_for_each_available_port(_dp, _ds) \ dsa_switch_for_each_port((_dp), (_ds)) \ if (!dsa_port_is_unused((_dp))) #define dsa_switch_for_each_user_port(_dp, _ds) \ dsa_switch_for_each_port((_dp), (_ds)) \ if (dsa_port_is_user((_dp))) #define dsa_switch_for_each_cpu_port(_dp, _ds) \ dsa_switch_for_each_port((_dp), (_ds)) \ if (dsa_port_is_cpu((_dp))) #define dsa_switch_for_each_cpu_port_continue_reverse(_dp, _ds) \ 
dsa_switch_for_each_port_continue_reverse((_dp), (_ds)) \ if (dsa_port_is_cpu((_dp))) static inline u32 dsa_user_ports(struct dsa_switch *ds) { struct dsa_port *dp; u32 mask = 0; dsa_switch_for_each_user_port(dp, ds) mask |= BIT(dp->index); return mask; } static inline u32 dsa_cpu_ports(struct dsa_switch *ds) { struct dsa_port *cpu_dp; u32 mask = 0; dsa_switch_for_each_cpu_port(cpu_dp, ds) mask |= BIT(cpu_dp->index); return mask; } /* Return the local port used to reach an arbitrary switch device */ static inline unsigned int dsa_routing_port(struct dsa_switch *ds, int device) { struct dsa_switch_tree *dst = ds->dst; struct dsa_link *dl; list_for_each_entry(dl, &dst->rtable, list) if (dl->dp->ds == ds && dl->link_dp->ds->index == device) return dl->dp->index; return ds->num_ports; } /* Return the local port used to reach an arbitrary switch port */ static inline unsigned int dsa_towards_port(struct dsa_switch *ds, int device, int port) { if (device == ds->index) return port; else return dsa_routing_port(ds, device); } /* Return the local port used to reach the dedicated CPU port */ static inline unsigned int dsa_upstream_port(struct dsa_switch *ds, int port) { const struct dsa_port *dp = dsa_to_port(ds, port); const struct dsa_port *cpu_dp = dp->cpu_dp; if (!cpu_dp) return port; return dsa_towards_port(ds, cpu_dp->ds->index, cpu_dp->index); } /* Return true if this is the local port used to reach the CPU port */ static inline bool dsa_is_upstream_port(struct dsa_switch *ds, int port) { if (dsa_is_unused_port(ds, port)) return false; return port == dsa_upstream_port(ds, port); } /* Return true if this is a DSA port leading away from the CPU */ static inline bool dsa_is_downstream_port(struct dsa_switch *ds, int port) { return dsa_is_dsa_port(ds, port) && !dsa_is_upstream_port(ds, port); } /* Return the local port used to reach the CPU port */ static inline unsigned int dsa_switch_upstream_port(struct dsa_switch *ds) { struct dsa_port *dp; dsa_switch_for_each_available_port(dp, ds) { return dsa_upstream_port(ds, dp->index); } return ds->num_ports; } /* Return true if @upstream_ds is an upstream switch of @downstream_ds, meaning * that the routing port from @downstream_ds to @upstream_ds is also the port * which @downstream_ds uses to reach its dedicated CPU. */ static inline bool dsa_switch_is_upstream_of(struct dsa_switch *upstream_ds, struct dsa_switch *downstream_ds) { int routing_port; if (upstream_ds == downstream_ds) return true; routing_port = dsa_routing_port(downstream_ds, upstream_ds->index); return dsa_is_upstream_port(downstream_ds, routing_port); } static inline bool dsa_port_is_vlan_filtering(const struct dsa_port *dp) { const struct dsa_switch *ds = dp->ds; if (ds->vlan_filtering_is_global) return ds->vlan_filtering; else return dp->vlan_filtering; } static inline unsigned int dsa_port_lag_id_get(struct dsa_port *dp) { return dp->lag ? dp->lag->id : 0; } static inline struct net_device *dsa_port_lag_dev_get(struct dsa_port *dp) { return dp->lag ? 
dp->lag->dev : NULL; } static inline bool dsa_port_offloads_lag(struct dsa_port *dp, const struct dsa_lag *lag) { return dsa_port_lag_dev_get(dp) == lag->dev; } static inline struct net_device *dsa_port_to_conduit(const struct dsa_port *dp) { if (dp->cpu_port_in_lag) return dsa_port_lag_dev_get(dp->cpu_dp); return dp->cpu_dp->conduit; } static inline struct net_device *dsa_port_to_bridge_port(const struct dsa_port *dp) { if (!dp->bridge) return NULL; if (dp->lag) return dp->lag->dev; else if (dp->hsr_dev) return dp->hsr_dev; return dp->user; } static inline struct net_device * dsa_port_bridge_dev_get(const struct dsa_port *dp) { return dp->bridge ? dp->bridge->dev : NULL; } static inline unsigned int dsa_port_bridge_num_get(struct dsa_port *dp) { return dp->bridge ? dp->bridge->num : 0; } static inline bool dsa_port_bridge_same(const struct dsa_port *a, const struct dsa_port *b) { struct net_device *br_a = dsa_port_bridge_dev_get(a); struct net_device *br_b = dsa_port_bridge_dev_get(b); /* Standalone ports are not in the same bridge with one another */ return (!br_a || !br_b) ? false : (br_a == br_b); } static inline bool dsa_port_offloads_bridge_port(struct dsa_port *dp, const struct net_device *dev) { return dsa_port_to_bridge_port(dp) == dev; } static inline bool dsa_port_offloads_bridge_dev(struct dsa_port *dp, const struct net_device *bridge_dev) { /* DSA ports connected to a bridge, and event was emitted * for the bridge. */ return dsa_port_bridge_dev_get(dp) == bridge_dev; } static inline bool dsa_port_offloads_bridge(struct dsa_port *dp, const struct dsa_bridge *bridge) { return dsa_port_bridge_dev_get(dp) == bridge->dev; } /* Returns true if any port of this tree offloads the given net_device */ static inline bool dsa_tree_offloads_bridge_port(struct dsa_switch_tree *dst, const struct net_device *dev) { struct dsa_port *dp; list_for_each_entry(dp, &dst->ports, list) if (dsa_port_offloads_bridge_port(dp, dev)) return true; return false; } /* Returns true if any port of this tree offloads the given bridge */ static inline bool dsa_tree_offloads_bridge_dev(struct dsa_switch_tree *dst, const struct net_device *bridge_dev) { struct dsa_port *dp; list_for_each_entry(dp, &dst->ports, list) if (dsa_port_offloads_bridge_dev(dp, bridge_dev)) return true; return false; } static inline bool dsa_port_tree_same(const struct dsa_port *a, const struct dsa_port *b) { return a->ds->dst == b->ds->dst; } typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid, bool is_static, void *data); struct dsa_switch_ops { /* * Tagging protocol helpers called for the CPU ports and DSA links. * @get_tag_protocol retrieves the initial tagging protocol and is * mandatory. Switches which can operate using multiple tagging * protocols should implement @change_tag_protocol and report in * @get_tag_protocol the tagger in current use. */ enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds, int port, enum dsa_tag_protocol mprot); int (*change_tag_protocol)(struct dsa_switch *ds, enum dsa_tag_protocol proto); /* * Method for switch drivers to connect to the tagging protocol driver * in current use. The switch driver can provide handlers for certain * types of packets for switch management. 
*/ int (*connect_tag_protocol)(struct dsa_switch *ds, enum dsa_tag_protocol proto); int (*port_change_conduit)(struct dsa_switch *ds, int port, struct net_device *conduit, struct netlink_ext_ack *extack); /* Optional switch-wide initialization and destruction methods */ int (*setup)(struct dsa_switch *ds); void (*teardown)(struct dsa_switch *ds); /* Per-port initialization and destruction methods. Mandatory if the * driver registers devlink port regions, optional otherwise. */ int (*port_setup)(struct dsa_switch *ds, int port); void (*port_teardown)(struct dsa_switch *ds, int port); u32 (*get_phy_flags)(struct dsa_switch *ds, int port); /* * Access to the switch's PHY registers. */ int (*phy_read)(struct dsa_switch *ds, int port, int regnum); int (*phy_write)(struct dsa_switch *ds, int port, int regnum, u16 val); /* * Link state adjustment (called from libphy) */ void (*adjust_link)(struct dsa_switch *ds, int port, struct phy_device *phydev); void (*fixed_link_update)(struct dsa_switch *ds, int port, struct fixed_phy_status *st); /* * PHYLINK integration */ void (*phylink_get_caps)(struct dsa_switch *ds, int port, struct phylink_config *config); struct phylink_pcs *(*phylink_mac_select_pcs)(struct dsa_switch *ds, int port, phy_interface_t iface); int (*phylink_mac_prepare)(struct dsa_switch *ds, int port, unsigned int mode, phy_interface_t interface); void (*phylink_mac_config)(struct dsa_switch *ds, int port, unsigned int mode, const struct phylink_link_state *state); int (*phylink_mac_finish)(struct dsa_switch *ds, int port, unsigned int mode, phy_interface_t interface); void (*phylink_mac_link_down)(struct dsa_switch *ds, int port, unsigned int mode, phy_interface_t interface); void (*phylink_mac_link_up)(struct dsa_switch *ds, int port, unsigned int mode, phy_interface_t interface, struct phy_device *phydev, int speed, int duplex, bool tx_pause, bool rx_pause); void (*phylink_fixed_state)(struct dsa_switch *ds, int port, struct phylink_link_state *state); /* * Port statistics counters. 
*/ void (*get_strings)(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data); void (*get_ethtool_stats)(struct dsa_switch *ds, int port, uint64_t *data); int (*get_sset_count)(struct dsa_switch *ds, int port, int sset); void (*get_ethtool_phy_stats)(struct dsa_switch *ds, int port, uint64_t *data); void (*get_eth_phy_stats)(struct dsa_switch *ds, int port, struct ethtool_eth_phy_stats *phy_stats); void (*get_eth_mac_stats)(struct dsa_switch *ds, int port, struct ethtool_eth_mac_stats *mac_stats); void (*get_eth_ctrl_stats)(struct dsa_switch *ds, int port, struct ethtool_eth_ctrl_stats *ctrl_stats); void (*get_rmon_stats)(struct dsa_switch *ds, int port, struct ethtool_rmon_stats *rmon_stats, const struct ethtool_rmon_hist_range **ranges); void (*get_stats64)(struct dsa_switch *ds, int port, struct rtnl_link_stats64 *s); void (*get_pause_stats)(struct dsa_switch *ds, int port, struct ethtool_pause_stats *pause_stats); void (*self_test)(struct dsa_switch *ds, int port, struct ethtool_test *etest, u64 *data); /* * ethtool Wake-on-LAN */ void (*get_wol)(struct dsa_switch *ds, int port, struct ethtool_wolinfo *w); int (*set_wol)(struct dsa_switch *ds, int port, struct ethtool_wolinfo *w); /* * ethtool timestamp info */ int (*get_ts_info)(struct dsa_switch *ds, int port, struct ethtool_ts_info *ts); /* * ethtool MAC merge layer */ int (*get_mm)(struct dsa_switch *ds, int port, struct ethtool_mm_state *state); int (*set_mm)(struct dsa_switch *ds, int port, struct ethtool_mm_cfg *cfg, struct netlink_ext_ack *extack); void (*get_mm_stats)(struct dsa_switch *ds, int port, struct ethtool_mm_stats *stats); /* * DCB ops */ int (*port_get_default_prio)(struct dsa_switch *ds, int port); int (*port_set_default_prio)(struct dsa_switch *ds, int port, u8 prio); int (*port_get_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp); int (*port_add_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp, u8 prio); int (*port_del_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp, u8 prio); /* * Suspend and resume */ int (*suspend)(struct dsa_switch *ds); int (*resume)(struct dsa_switch *ds); /* * Port enable/disable */ int (*port_enable)(struct dsa_switch *ds, int port, struct phy_device *phy); void (*port_disable)(struct dsa_switch *ds, int port); /* * Notification for MAC address changes on user ports. Drivers can * currently only veto operations. They should not use the method to * program the hardware, since the operation is not rolled back in case * of other errors. */ int (*port_set_mac_address)(struct dsa_switch *ds, int port, const unsigned char *addr); /* * Compatibility between device trees defining multiple CPU ports and * drivers which are not OK to use by default the numerically smallest * CPU port of a switch for its local ports. This can return NULL, * meaning "don't know/don't care". */ struct dsa_port *(*preferred_default_local_cpu_port)(struct dsa_switch *ds); /* * Port's MAC EEE settings */ int (*set_mac_eee)(struct dsa_switch *ds, int port, struct ethtool_eee *e); int (*get_mac_eee)(struct dsa_switch *ds, int port, struct ethtool_eee *e); /* EEPROM access */ int (*get_eeprom_len)(struct dsa_switch *ds); int (*get_eeprom)(struct dsa_switch *ds, struct ethtool_eeprom *eeprom, u8 *data); int (*set_eeprom)(struct dsa_switch *ds, struct ethtool_eeprom *eeprom, u8 *data); /* * Register access. */ int (*get_regs_len)(struct dsa_switch *ds, int port); void (*get_regs)(struct dsa_switch *ds, int port, struct ethtool_regs *regs, void *p); /* * Upper device tracking. 
*/ int (*port_prechangeupper)(struct dsa_switch *ds, int port, struct netdev_notifier_changeupper_info *info); /* * Bridge integration */ int (*set_ageing_time)(struct dsa_switch *ds, unsigned int msecs); int (*port_bridge_join)(struct dsa_switch *ds, int port, struct dsa_bridge bridge, bool *tx_fwd_offload, struct netlink_ext_ack *extack); void (*port_bridge_leave)(struct dsa_switch *ds, int port, struct dsa_bridge bridge); void (*port_stp_state_set)(struct dsa_switch *ds, int port, u8 state); int (*port_mst_state_set)(struct dsa_switch *ds, int port, const struct switchdev_mst_state *state); void (*port_fast_age)(struct dsa_switch *ds, int port); int (*port_vlan_fast_age)(struct dsa_switch *ds, int port, u16 vid); int (*port_pre_bridge_flags)(struct dsa_switch *ds, int port, struct switchdev_brport_flags flags, struct netlink_ext_ack *extack); int (*port_bridge_flags)(struct dsa_switch *ds, int port, struct switchdev_brport_flags flags, struct netlink_ext_ack *extack); void (*port_set_host_flood)(struct dsa_switch *ds, int port, bool uc, bool mc); /* * VLAN support */ int (*port_vlan_filtering)(struct dsa_switch *ds, int port, bool vlan_filtering, struct netlink_ext_ack *extack); int (*port_vlan_add)(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan, struct netlink_ext_ack *extack); int (*port_vlan_del)(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan); int (*vlan_msti_set)(struct dsa_switch *ds, struct dsa_bridge bridge, const struct switchdev_vlan_msti *msti); /* * Forwarding database */ int (*port_fdb_add)(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid, struct dsa_db db); int (*port_fdb_del)(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid, struct dsa_db db); int (*port_fdb_dump)(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb, void *data); int (*lag_fdb_add)(struct dsa_switch *ds, struct dsa_lag lag, const unsigned char *addr, u16 vid, struct dsa_db db); int (*lag_fdb_del)(struct dsa_switch *ds, struct dsa_lag lag, const unsigned char *addr, u16 vid, struct dsa_db db); /* * Multicast database */ int (*port_mdb_add)(struct dsa_switch *ds, int port, const struct switchdev_obj_port_mdb *mdb, struct dsa_db db); int (*port_mdb_del)(struct dsa_switch *ds, int port, const struct switchdev_obj_port_mdb *mdb, struct dsa_db db); /* * RXNFC */ int (*get_rxnfc)(struct dsa_switch *ds, int port, struct ethtool_rxnfc *nfc, u32 *rule_locs); int (*set_rxnfc)(struct dsa_switch *ds, int port, struct ethtool_rxnfc *nfc); /* * TC integration */ int (*cls_flower_add)(struct dsa_switch *ds, int port, struct flow_cls_offload *cls, bool ingress); int (*cls_flower_del)(struct dsa_switch *ds, int port, struct flow_cls_offload *cls, bool ingress); int (*cls_flower_stats)(struct dsa_switch *ds, int port, struct flow_cls_offload *cls, bool ingress); int (*port_mirror_add)(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror, bool ingress, struct netlink_ext_ack *extack); void (*port_mirror_del)(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror); int (*port_policer_add)(struct dsa_switch *ds, int port, struct dsa_mall_policer_tc_entry *policer); void (*port_policer_del)(struct dsa_switch *ds, int port); int (*port_setup_tc)(struct dsa_switch *ds, int port, enum tc_setup_type type, void *type_data); /* * Cross-chip operations */ int (*crosschip_bridge_join)(struct dsa_switch *ds, int tree_index, int sw_index, int port, struct dsa_bridge bridge, struct netlink_ext_ack 
*extack); void (*crosschip_bridge_leave)(struct dsa_switch *ds, int tree_index, int sw_index, int port, struct dsa_bridge bridge); int (*crosschip_lag_change)(struct dsa_switch *ds, int sw_index, int port); int (*crosschip_lag_join)(struct dsa_switch *ds, int sw_index, int port, struct dsa_lag lag, struct netdev_lag_upper_info *info, struct netlink_ext_ack *extack); int (*crosschip_lag_leave)(struct dsa_switch *ds, int sw_index, int port, struct dsa_lag lag); /* * PTP functionality */ int (*port_hwtstamp_get)(struct dsa_switch *ds, int port, struct ifreq *ifr); int (*port_hwtstamp_set)(struct dsa_switch *ds, int port, struct ifreq *ifr); void (*port_txtstamp)(struct dsa_switch *ds, int port, struct sk_buff *skb); bool (*port_rxtstamp)(struct dsa_switch *ds, int port, struct sk_buff *skb, unsigned int type); /* Devlink parameters, etc */ int (*devlink_param_get)(struct dsa_switch *ds, u32 id, struct devlink_param_gset_ctx *ctx); int (*devlink_param_set)(struct dsa_switch *ds, u32 id, struct devlink_param_gset_ctx *ctx); int (*devlink_info_get)(struct dsa_switch *ds, struct devlink_info_req *req, struct netlink_ext_ack *extack); int (*devlink_sb_pool_get)(struct dsa_switch *ds, unsigned int sb_index, u16 pool_index, struct devlink_sb_pool_info *pool_info); int (*devlink_sb_pool_set)(struct dsa_switch *ds, unsigned int sb_index, u16 pool_index, u32 size, enum devlink_sb_threshold_type threshold_type, struct netlink_ext_ack *extack); int (*devlink_sb_port_pool_get)(struct dsa_switch *ds, int port, unsigned int sb_index, u16 pool_index, u32 *p_threshold); int (*devlink_sb_port_pool_set)(struct dsa_switch *ds, int port, unsigned int sb_index, u16 pool_index, u32 threshold, struct netlink_ext_ack *extack); int (*devlink_sb_tc_pool_bind_get)(struct dsa_switch *ds, int port, unsigned int sb_index, u16 tc_index, enum devlink_sb_pool_type pool_type, u16 *p_pool_index, u32 *p_threshold); int (*devlink_sb_tc_pool_bind_set)(struct dsa_switch *ds, int port, unsigned int sb_index, u16 tc_index, enum devlink_sb_pool_type pool_type, u16 pool_index, u32 threshold, struct netlink_ext_ack *extack); int (*devlink_sb_occ_snapshot)(struct dsa_switch *ds, unsigned int sb_index); int (*devlink_sb_occ_max_clear)(struct dsa_switch *ds, unsigned int sb_index); int (*devlink_sb_occ_port_pool_get)(struct dsa_switch *ds, int port, unsigned int sb_index, u16 pool_index, u32 *p_cur, u32 *p_max); int (*devlink_sb_occ_tc_port_bind_get)(struct dsa_switch *ds, int port, unsigned int sb_index, u16 tc_index, enum devlink_sb_pool_type pool_type, u32 *p_cur, u32 *p_max); /* * MTU change functionality. Switches can also adjust their MRU through * this method. By MTU, one understands the SDU (L2 payload) length. * If the switch needs to account for the DSA tag on the CPU port, this * method needs to do so privately. 
*/ int (*port_change_mtu)(struct dsa_switch *ds, int port, int new_mtu); int (*port_max_mtu)(struct dsa_switch *ds, int port); /* * LAG integration */ int (*port_lag_change)(struct dsa_switch *ds, int port); int (*port_lag_join)(struct dsa_switch *ds, int port, struct dsa_lag lag, struct netdev_lag_upper_info *info, struct netlink_ext_ack *extack); int (*port_lag_leave)(struct dsa_switch *ds, int port, struct dsa_lag lag); /* * HSR integration */ int (*port_hsr_join)(struct dsa_switch *ds, int port, struct net_device *hsr, struct netlink_ext_ack *extack); int (*port_hsr_leave)(struct dsa_switch *ds, int port, struct net_device *hsr); /* * MRP integration */ int (*port_mrp_add)(struct dsa_switch *ds, int port, const struct switchdev_obj_mrp *mrp); int (*port_mrp_del)(struct dsa_switch *ds, int port, const struct switchdev_obj_mrp *mrp); int (*port_mrp_add_ring_role)(struct dsa_switch *ds, int port, const struct switchdev_obj_ring_role_mrp *mrp); int (*port_mrp_del_ring_role)(struct dsa_switch *ds, int port, const struct switchdev_obj_ring_role_mrp *mrp); /* * tag_8021q operations */ int (*tag_8021q_vlan_add)(struct dsa_switch *ds, int port, u16 vid, u16 flags); int (*tag_8021q_vlan_del)(struct dsa_switch *ds, int port, u16 vid); /* * DSA conduit tracking operations */ void (*conduit_state_change)(struct dsa_switch *ds, const struct net_device *conduit, bool operational); }; #define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes) \ DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes, \ dsa_devlink_param_get, dsa_devlink_param_set, NULL) int dsa_devlink_param_get(struct devlink *dl, u32 id, struct devlink_param_gset_ctx *ctx); int dsa_devlink_param_set(struct devlink *dl, u32 id, struct devlink_param_gset_ctx *ctx); int dsa_devlink_params_register(struct dsa_switch *ds, const struct devlink_param *params, size_t params_count); void dsa_devlink_params_unregister(struct dsa_switch *ds, const struct devlink_param *params, size_t params_count); int dsa_devlink_resource_register(struct dsa_switch *ds, const char *resource_name, u64 resource_size, u64 resource_id, u64 parent_resource_id, const struct devlink_resource_size_params *size_params); void dsa_devlink_resources_unregister(struct dsa_switch *ds); void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds, u64 resource_id, devlink_resource_occ_get_t *occ_get, void *occ_get_priv); void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds, u64 resource_id); struct devlink_region * dsa_devlink_region_create(struct dsa_switch *ds, const struct devlink_region_ops *ops, u32 region_max_snapshots, u64 region_size); struct devlink_region * dsa_devlink_port_region_create(struct dsa_switch *ds, int port, const struct devlink_port_region_ops *ops, u32 region_max_snapshots, u64 region_size); void dsa_devlink_region_destroy(struct devlink_region *region); struct dsa_port *dsa_port_from_netdev(struct net_device *netdev); struct dsa_devlink_priv { struct dsa_switch *ds; }; static inline struct dsa_switch *dsa_devlink_to_ds(struct devlink *dl) { struct dsa_devlink_priv *dl_priv = devlink_priv(dl); return dl_priv->ds; } static inline struct dsa_switch *dsa_devlink_port_to_ds(struct devlink_port *port) { struct devlink *dl = port->devlink; struct dsa_devlink_priv *dl_priv = devlink_priv(dl); return dl_priv->ds; } static inline int dsa_devlink_port_to_port(struct devlink_port *port) { return port->index; } struct dsa_switch_driver { struct list_head list; const struct dsa_switch_ops *ops; }; bool dsa_fdb_present_in_other_db(struct dsa_switch 
*ds, int port, const unsigned char *addr, u16 vid, struct dsa_db db); bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port, const struct switchdev_obj_port_mdb *mdb, struct dsa_db db); /* Keep inline for faster access in hot path */ static inline bool netdev_uses_dsa(const struct net_device *dev) { #if IS_ENABLED(CONFIG_NET_DSA) return dev->dsa_ptr && dev->dsa_ptr->rcv; #endif return false; } /* All DSA tags that push the EtherType to the right (basically all except tail * tags, which don't break dissection) can be treated the same from the * perspective of the flow dissector. * * We need to return: * - offset: the (B - A) difference between: * A. the position of the real EtherType and * B. the current skb->data (aka ETH_HLEN bytes into the frame, aka 2 bytes * after the normal EtherType was supposed to be) * The offset in bytes is exactly equal to the tagger overhead (and half of * that, in __be16 shorts). * * - proto: the value of the real EtherType. */ static inline void dsa_tag_generic_flow_dissect(const struct sk_buff *skb, __be16 *proto, int *offset) { #if IS_ENABLED(CONFIG_NET_DSA) const struct dsa_device_ops *ops = skb->dev->dsa_ptr->tag_ops; int tag_len = ops->needed_headroom; *offset = tag_len; *proto = ((__be16 *)skb->data)[(tag_len / 2) - 1]; #endif } void dsa_unregister_switch(struct dsa_switch *ds); int dsa_register_switch(struct dsa_switch *ds); void dsa_switch_shutdown(struct dsa_switch *ds); struct dsa_switch *dsa_switch_find(int tree_index, int sw_index); void dsa_flush_workqueue(void); #ifdef CONFIG_PM_SLEEP int dsa_switch_suspend(struct dsa_switch *ds); int dsa_switch_resume(struct dsa_switch *ds); #else static inline int dsa_switch_suspend(struct dsa_switch *ds) { return 0; } static inline int dsa_switch_resume(struct dsa_switch *ds) { return 0; } #endif /* CONFIG_PM_SLEEP */ #if IS_ENABLED(CONFIG_NET_DSA) bool dsa_user_dev_check(const struct net_device *dev); #else static inline bool dsa_user_dev_check(const struct net_device *dev) { return false; } #endif netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev); void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up); #endif |
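/*
 * Editor's illustration (not part of the upstream header): a minimal sketch of
 * how a tagging-protocol module might fill in struct dsa_device_ops for a
 * tagger that inserts a 4-byte header between the source MAC address and the
 * EtherType. The names "foo_tag_xmit" and "foo_netdev_ops" are hypothetical
 * and the tag layout is made up; a real tagger must also provide a .rcv
 * handler that decodes the source port, sets skb->dev to the matching user
 * netdevice and strips the tag again. Kept under #if 0 so it does not alter
 * the header itself.
 */
#if 0
static struct sk_buff *foo_tag_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u8 *tag;

	/* Make room for the tag; .needed_headroom below guarantees that the
	 * headroom already exists, so no reallocation is needed here.
	 */
	skb_push(skb, 4);

	/* Move the destination and source MAC addresses to the front so the
	 * 4-byte gap ends up right before the original EtherType.
	 */
	memmove(skb->data, skb->data + 4, 2 * ETH_ALEN);
	tag = skb->data + 2 * ETH_ALEN;

	/* A real tagger would encode the destination port here, typically
	 * looked up through the tagger-private helpers under net/dsa/.
	 */
	memset(tag, 0, 4);

	return skb;
}

static const struct dsa_device_ops foo_netdev_ops = {
	.name			= "foo",
	.proto			= DSA_TAG_PROTO_NONE,	/* a real tagger uses its own value */
	.xmit			= foo_tag_xmit,
	/* .rcv not shown: it must undo what .xmit did */
	.flow_dissect		= dsa_tag_generic_flow_dissect,
	.needed_headroom	= 4,	/* matches the skb_push() above */
};
#endif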
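/*
 * Editor's illustration (not part of the upstream header): a sketch of how a
 * switch driver's ->setup() callback could combine the port iterators and
 * mask helpers declared above to derive a per-port forwarding mask.
 * "foo_setup" and the fwd_mask[] array are hypothetical; a real driver would
 * program the result into its hardware. Kept under #if 0 so it does not alter
 * the header itself.
 */
#if 0
static int foo_setup(struct dsa_switch *ds)
{
	u32 fwd_mask[32] = { };	/* one entry per port, sized generously */
	struct dsa_port *dp;

	if (ds->num_ports > ARRAY_SIZE(fwd_mask))
		return -EINVAL;

	/* User ports may only send towards their upstream (CPU-facing) port,
	 * while CPU ports may reach every user port.
	 */
	dsa_switch_for_each_user_port(dp, ds)
		fwd_mask[dp->index] = BIT(dsa_upstream_port(ds, dp->index));

	dsa_switch_for_each_cpu_port(dp, ds)
		fwd_mask[dp->index] = dsa_user_ports(ds);

	/* A real driver would now write fwd_mask[] to the switch, and could
	 * additionally use dsa_routing_port() for cross-chip (DSA) links.
	 */
	return 0;
}
#endif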
// SPDX-License-Identifier: GPL-2.0 /* * /proc/sys support */ #include <linux/init.h> #include <linux/sysctl.h> #include <linux/poll.h> #include <linux/proc_fs.h> #include <linux/printk.h> #include <linux/security.h> #include <linux/sched.h> #include <linux/cred.h> #include <linux/namei.h> #include <linux/mm.h> #include <linux/uio.h> #include <linux/module.h> #include <linux/bpf-cgroup.h> #include <linux/mount.h> #include <linux/kmemleak.h> #include "internal.h" #define list_for_each_table_entry(entry, header) \ entry = header->ctl_table; \ for (size_t i = 0 ; i < header->ctl_table_size && entry->procname; ++i, entry++) static const struct dentry_operations proc_sys_dentry_operations; static const struct file_operations proc_sys_file_operations; static const struct inode_operations proc_sys_inode_operations; static const struct file_operations proc_sys_dir_file_operations; static const struct inode_operations proc_sys_dir_operations; /* Support for permanently empty directories */ static struct ctl_table sysctl_mount_point[] = { {.type = SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY } }; /** * register_sysctl_mount_point() - registers a sysctl mount point * @path: path for the mount point * * Used to create a permanently empty directory to serve as mount point. * There are some subtle but important permission checks this allows in the * case of unprivileged mounts.
*/ struct ctl_table_header *register_sysctl_mount_point(const char *path) { return register_sysctl_sz(path, sysctl_mount_point, 0); } EXPORT_SYMBOL(register_sysctl_mount_point); #define sysctl_is_perm_empty_ctl_table(tptr) \ (tptr[0].type == SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY) #define sysctl_is_perm_empty_ctl_header(hptr) \ (sysctl_is_perm_empty_ctl_table(hptr->ctl_table)) #define sysctl_set_perm_empty_ctl_header(hptr) \ (hptr->ctl_table[0].type = SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY) #define sysctl_clear_perm_empty_ctl_header(hptr) \ (hptr->ctl_table[0].type = SYSCTL_TABLE_TYPE_DEFAULT) void proc_sys_poll_notify(struct ctl_table_poll *poll) { if (!poll) return; atomic_inc(&poll->event); wake_up_interruptible(&poll->wait); } static struct ctl_table root_table[] = { { .procname = "", .mode = S_IFDIR|S_IRUGO|S_IXUGO, }, { } }; static struct ctl_table_root sysctl_table_root = { .default_set.dir.header = { {{.count = 1, .nreg = 1, .ctl_table = root_table }}, .ctl_table_arg = root_table, .root = &sysctl_table_root, .set = &sysctl_table_root.default_set, }, }; static DEFINE_SPINLOCK(sysctl_lock); static void drop_sysctl_table(struct ctl_table_header *header); static int sysctl_follow_link(struct ctl_table_header **phead, struct ctl_table **pentry); static int insert_links(struct ctl_table_header *head); static void put_links(struct ctl_table_header *header); static void sysctl_print_dir(struct ctl_dir *dir) { if (dir->header.parent) sysctl_print_dir(dir->header.parent); pr_cont("%s/", dir->header.ctl_table[0].procname); } static int namecmp(const char *name1, int len1, const char *name2, int len2) { int cmp; cmp = memcmp(name1, name2, min(len1, len2)); if (cmp == 0) cmp = len1 - len2; return cmp; } /* Called under sysctl_lock */ static struct ctl_table *find_entry(struct ctl_table_header **phead, struct ctl_dir *dir, const char *name, int namelen) { struct ctl_table_header *head; struct ctl_table *entry; struct rb_node *node = dir->root.rb_node; while (node) { struct ctl_node *ctl_node; const char *procname; int cmp; ctl_node = rb_entry(node, struct ctl_node, node); head = ctl_node->header; entry = &head->ctl_table[ctl_node - head->node]; procname = entry->procname; cmp = namecmp(name, namelen, procname, strlen(procname)); if (cmp < 0) node = node->rb_left; else if (cmp > 0) node = node->rb_right; else { *phead = head; return entry; } } return NULL; } static int insert_entry(struct ctl_table_header *head, struct ctl_table *entry) { struct rb_node *node = &head->node[entry - head->ctl_table].node; struct rb_node **p = &head->parent->root.rb_node; struct rb_node *parent = NULL; const char *name = entry->procname; int namelen = strlen(name); while (*p) { struct ctl_table_header *parent_head; struct ctl_table *parent_entry; struct ctl_node *parent_node; const char *parent_name; int cmp; parent = *p; parent_node = rb_entry(parent, struct ctl_node, node); parent_head = parent_node->header; parent_entry = &parent_head->ctl_table[parent_node - parent_head->node]; parent_name = parent_entry->procname; cmp = namecmp(name, namelen, parent_name, strlen(parent_name)); if (cmp < 0) p = &(*p)->rb_left; else if (cmp > 0) p = &(*p)->rb_right; else { pr_err("sysctl duplicate entry: "); sysctl_print_dir(head->parent); pr_cont("%s\n", entry->procname); return -EEXIST; } } rb_link_node(node, parent, p); rb_insert_color(node, &head->parent->root); return 0; } static void erase_entry(struct ctl_table_header *head, struct ctl_table *entry) { struct rb_node *node = &head->node[entry - head->ctl_table].node; 
rb_erase(node, &head->parent->root); } static void init_header(struct ctl_table_header *head, struct ctl_table_root *root, struct ctl_table_set *set, struct ctl_node *node, struct ctl_table *table, size_t table_size) { head->ctl_table = table; head->ctl_table_size = table_size; head->ctl_table_arg = table; head->used = 0; head->count = 1; head->nreg = 1; head->unregistering = NULL; head->root = root; head->set = set; head->parent = NULL; head->node = node; INIT_HLIST_HEAD(&head->inodes); if (node) { struct ctl_table *entry; list_for_each_table_entry(entry, head) { node->header = head; node++; } } } static void erase_header(struct ctl_table_header *head) { struct ctl_table *entry; list_for_each_table_entry(entry, head) erase_entry(head, entry); } static int insert_header(struct ctl_dir *dir, struct ctl_table_header *header) { struct ctl_table *entry; struct ctl_table_header *dir_h = &dir->header; int err; /* Is this a permanently empty directory? */ if (sysctl_is_perm_empty_ctl_header(dir_h)) return -EROFS; /* Am I creating a permanently empty directory? */ if (sysctl_is_perm_empty_ctl_table(header->ctl_table)) { if (!RB_EMPTY_ROOT(&dir->root)) return -EINVAL; sysctl_set_perm_empty_ctl_header(dir_h); } dir_h->nreg++; header->parent = dir; err = insert_links(header); if (err) goto fail_links; list_for_each_table_entry(entry, header) { err = insert_entry(header, entry); if (err) goto fail; } return 0; fail: erase_header(header); put_links(header); fail_links: if (header->ctl_table == sysctl_mount_point) sysctl_clear_perm_empty_ctl_header(dir_h); header->parent = NULL; drop_sysctl_table(dir_h); return err; } /* called under sysctl_lock */ static int use_table(struct ctl_table_header *p) { if (unlikely(p->unregistering)) return 0; p->used++; return 1; } /* called under sysctl_lock */ static void unuse_table(struct ctl_table_header *p) { if (!--p->used) if (unlikely(p->unregistering)) complete(p->unregistering); } static void proc_sys_invalidate_dcache(struct ctl_table_header *head) { proc_invalidate_siblings_dcache(&head->inodes, &sysctl_lock); } /* called under sysctl_lock, will reacquire if has to wait */ static void start_unregistering(struct ctl_table_header *p) { /* * if p->used is 0, nobody will ever touch that entry again; * we'll eliminate all paths to it before dropping sysctl_lock */ if (unlikely(p->used)) { struct completion wait; init_completion(&wait); p->unregistering = &wait; spin_unlock(&sysctl_lock); wait_for_completion(&wait); } else { /* anything non-NULL; we'll never dereference it */ p->unregistering = ERR_PTR(-EINVAL); spin_unlock(&sysctl_lock); } /* * Invalidate dentries for unregistered sysctls: namespaced sysctls * can have duplicate names and contaminate dcache very badly. */ proc_sys_invalidate_dcache(p); /* * do not remove from the list until nobody holds it; walking the * list in do_sysctl() relies on that. 
*/ spin_lock(&sysctl_lock); erase_header(p); } static struct ctl_table_header *sysctl_head_grab(struct ctl_table_header *head) { BUG_ON(!head); spin_lock(&sysctl_lock); if (!use_table(head)) head = ERR_PTR(-ENOENT); spin_unlock(&sysctl_lock); return head; } static void sysctl_head_finish(struct ctl_table_header *head) { if (!head) return; spin_lock(&sysctl_lock); unuse_table(head); spin_unlock(&sysctl_lock); } static struct ctl_table_set * lookup_header_set(struct ctl_table_root *root) { struct ctl_table_set *set = &root->default_set; if (root->lookup) set = root->lookup(root); return set; } static struct ctl_table *lookup_entry(struct ctl_table_header **phead, struct ctl_dir *dir, const char *name, int namelen) { struct ctl_table_header *head; struct ctl_table *entry; spin_lock(&sysctl_lock); entry = find_entry(&head, dir, name, namelen); if (entry && use_table(head)) *phead = head; else entry = NULL; spin_unlock(&sysctl_lock); return entry; } static struct ctl_node *first_usable_entry(struct rb_node *node) { struct ctl_node *ctl_node; for (;node; node = rb_next(node)) { ctl_node = rb_entry(node, struct ctl_node, node); if (use_table(ctl_node->header)) return ctl_node; } return NULL; } static void first_entry(struct ctl_dir *dir, struct ctl_table_header **phead, struct ctl_table **pentry) { struct ctl_table_header *head = NULL; struct ctl_table *entry = NULL; struct ctl_node *ctl_node; spin_lock(&sysctl_lock); ctl_node = first_usable_entry(rb_first(&dir->root)); spin_unlock(&sysctl_lock); if (ctl_node) { head = ctl_node->header; entry = &head->ctl_table[ctl_node - head->node]; } *phead = head; *pentry = entry; } static void next_entry(struct ctl_table_header **phead, struct ctl_table **pentry) { struct ctl_table_header *head = *phead; struct ctl_table *entry = *pentry; struct ctl_node *ctl_node = &head->node[entry - head->ctl_table]; spin_lock(&sysctl_lock); unuse_table(head); ctl_node = first_usable_entry(rb_next(&ctl_node->node)); spin_unlock(&sysctl_lock); head = NULL; if (ctl_node) { head = ctl_node->header; entry = &head->ctl_table[ctl_node - head->node]; } *phead = head; *pentry = entry; } /* * sysctl_perm does NOT grant the superuser all rights automatically, because * some sysctl variables are readonly even to root. 
*/ static int test_perm(int mode, int op) { if (uid_eq(current_euid(), GLOBAL_ROOT_UID)) mode >>= 6; else if (in_egroup_p(GLOBAL_ROOT_GID)) mode >>= 3; if ((op & ~mode & (MAY_READ|MAY_WRITE|MAY_EXEC)) == 0) return 0; return -EACCES; } static int sysctl_perm(struct ctl_table_header *head, struct ctl_table *table, int op) { struct ctl_table_root *root = head->root; int mode; if (root->permissions) mode = root->permissions(head, table); else mode = table->mode; return test_perm(mode, op); } static struct inode *proc_sys_make_inode(struct super_block *sb, struct ctl_table_header *head, struct ctl_table *table) { struct ctl_table_root *root = head->root; struct inode *inode; struct proc_inode *ei; inode = new_inode(sb); if (!inode) return ERR_PTR(-ENOMEM); inode->i_ino = get_next_ino(); ei = PROC_I(inode); spin_lock(&sysctl_lock); if (unlikely(head->unregistering)) { spin_unlock(&sysctl_lock); iput(inode); return ERR_PTR(-ENOENT); } ei->sysctl = head; ei->sysctl_entry = table; hlist_add_head_rcu(&ei->sibling_inodes, &head->inodes); head->count++; spin_unlock(&sysctl_lock); simple_inode_init_ts(inode); inode->i_mode = table->mode; if (!S_ISDIR(table->mode)) { inode->i_mode |= S_IFREG; inode->i_op = &proc_sys_inode_operations; inode->i_fop = &proc_sys_file_operations; } else { inode->i_mode |= S_IFDIR; inode->i_op = &proc_sys_dir_operations; inode->i_fop = &proc_sys_dir_file_operations; if (sysctl_is_perm_empty_ctl_header(head)) make_empty_dir_inode(inode); } if (root->set_ownership) root->set_ownership(head, table, &inode->i_uid, &inode->i_gid); else { inode->i_uid = GLOBAL_ROOT_UID; inode->i_gid = GLOBAL_ROOT_GID; } return inode; } void proc_sys_evict_inode(struct inode *inode, struct ctl_table_header *head) { spin_lock(&sysctl_lock); hlist_del_init_rcu(&PROC_I(inode)->sibling_inodes); if (!--head->count) kfree_rcu(head, rcu); spin_unlock(&sysctl_lock); } static struct ctl_table_header *grab_header(struct inode *inode) { struct ctl_table_header *head = PROC_I(inode)->sysctl; if (!head) head = &sysctl_table_root.default_set.dir.header; return sysctl_head_grab(head); } static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct ctl_table_header *head = grab_header(dir); struct ctl_table_header *h = NULL; const struct qstr *name = &dentry->d_name; struct ctl_table *p; struct inode *inode; struct dentry *err = ERR_PTR(-ENOENT); struct ctl_dir *ctl_dir; int ret; if (IS_ERR(head)) return ERR_CAST(head); ctl_dir = container_of(head, struct ctl_dir, header); p = lookup_entry(&h, ctl_dir, name->name, name->len); if (!p) goto out; if (S_ISLNK(p->mode)) { ret = sysctl_follow_link(&h, &p); err = ERR_PTR(ret); if (ret) goto out; } inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p); if (IS_ERR(inode)) { err = ERR_CAST(inode); goto out; } d_set_d_op(dentry, &proc_sys_dentry_operations); err = d_splice_alias(inode, dentry); out: if (h) sysctl_head_finish(h); sysctl_head_finish(head); return err; } static ssize_t proc_sys_call_handler(struct kiocb *iocb, struct iov_iter *iter, int write) { struct inode *inode = file_inode(iocb->ki_filp); struct ctl_table_header *head = grab_header(inode); struct ctl_table *table = PROC_I(inode)->sysctl_entry; size_t count = iov_iter_count(iter); char *kbuf; ssize_t error; if (IS_ERR(head)) return PTR_ERR(head); /* * At this point we know that the sysctl was not unregistered * and won't be until we finish. */ error = -EPERM; if (sysctl_perm(head, table, write ? 
MAY_WRITE : MAY_READ)) goto out; /* if that can happen at all, it should be -EINVAL, not -EISDIR */ error = -EINVAL; if (!table->proc_handler) goto out; /* don't even try if the size is too large */ error = -ENOMEM; if (count >= KMALLOC_MAX_SIZE) goto out; kbuf = kvzalloc(count + 1, GFP_KERNEL); if (!kbuf) goto out; if (write) { error = -EFAULT; if (!copy_from_iter_full(kbuf, count, iter)) goto out_free_buf; kbuf[count] = '\0'; } error = BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, &kbuf, &count, &iocb->ki_pos); if (error) goto out_free_buf; /* careful: calling conventions are nasty here */ error = table->proc_handler(table, write, kbuf, &count, &iocb->ki_pos); if (error) goto out_free_buf; if (!write) { error = -EFAULT; if (copy_to_iter(kbuf, count, iter) < count) goto out_free_buf; } error = count; out_free_buf: kvfree(kbuf); out: sysctl_head_finish(head); return error; } static ssize_t proc_sys_read(struct kiocb *iocb, struct iov_iter *iter) { return proc_sys_call_handler(iocb, iter, 0); } static ssize_t proc_sys_write(struct kiocb *iocb, struct iov_iter *iter) { return proc_sys_call_handler(iocb, iter, 1); } static int proc_sys_open(struct inode *inode, struct file *filp) { struct ctl_table_header *head = grab_header(inode); struct ctl_table *table = PROC_I(inode)->sysctl_entry; /* sysctl was unregistered */ if (IS_ERR(head)) return PTR_ERR(head); if (table->poll) filp->private_data = proc_sys_poll_event(table->poll); sysctl_head_finish(head); return 0; } static __poll_t proc_sys_poll(struct file *filp, poll_table *wait) { struct inode *inode = file_inode(filp); struct ctl_table_header *head = grab_header(inode); struct ctl_table *table = PROC_I(inode)->sysctl_entry; __poll_t ret = DEFAULT_POLLMASK; unsigned long event; /* sysctl was unregistered */ if (IS_ERR(head)) return EPOLLERR | EPOLLHUP; if (!table->proc_handler) goto out; if (!table->poll) goto out; event = (unsigned long)filp->private_data; poll_wait(filp, &table->poll->wait, wait); if (event != atomic_read(&table->poll->event)) { filp->private_data = proc_sys_poll_event(table->poll); ret = EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI; } out: sysctl_head_finish(head); return ret; } static bool proc_sys_fill_cache(struct file *file, struct dir_context *ctx, struct ctl_table_header *head, struct ctl_table *table) { struct dentry *child, *dir = file->f_path.dentry; struct inode *inode; struct qstr qname; ino_t ino = 0; unsigned type = DT_UNKNOWN; qname.name = table->procname; qname.len = strlen(table->procname); qname.hash = full_name_hash(dir, qname.name, qname.len); child = d_lookup(dir, &qname); if (!child) { DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); child = d_alloc_parallel(dir, &qname, &wq); if (IS_ERR(child)) return false; if (d_in_lookup(child)) { struct dentry *res; inode = proc_sys_make_inode(dir->d_sb, head, table); if (IS_ERR(inode)) { d_lookup_done(child); dput(child); return false; } d_set_d_op(child, &proc_sys_dentry_operations); res = d_splice_alias(inode, child); d_lookup_done(child); if (unlikely(res)) { if (IS_ERR(res)) { dput(child); return false; } dput(child); child = res; } } } inode = d_inode(child); ino = inode->i_ino; type = inode->i_mode >> 12; dput(child); return dir_emit(ctx, qname.name, qname.len, ino, type); } static bool proc_sys_link_fill_cache(struct file *file, struct dir_context *ctx, struct ctl_table_header *head, struct ctl_table *table) { bool ret = true; head = sysctl_head_grab(head); if (IS_ERR(head)) return false; /* It is not an error if we can not follow the link ignore it */ if 
(sysctl_follow_link(&head, &table)) goto out; ret = proc_sys_fill_cache(file, ctx, head, table); out: sysctl_head_finish(head); return ret; } static int scan(struct ctl_table_header *head, struct ctl_table *table, unsigned long *pos, struct file *file, struct dir_context *ctx) { bool res; if ((*pos)++ < ctx->pos) return true; if (unlikely(S_ISLNK(table->mode))) res = proc_sys_link_fill_cache(file, ctx, head, table); else res = proc_sys_fill_cache(file, ctx, head, table); if (res) ctx->pos = *pos; return res; } static int proc_sys_readdir(struct file *file, struct dir_context *ctx) { struct ctl_table_header *head = grab_header(file_inode(file)); struct ctl_table_header *h = NULL; struct ctl_table *entry; struct ctl_dir *ctl_dir; unsigned long pos; if (IS_ERR(head)) return PTR_ERR(head); ctl_dir = container_of(head, struct ctl_dir, header); if (!dir_emit_dots(file, ctx)) goto out; pos = 2; for (first_entry(ctl_dir, &h, &entry); h; next_entry(&h, &entry)) { if (!scan(h, entry, &pos, file, ctx)) { sysctl_head_finish(h); break; } } out: sysctl_head_finish(head); return 0; } static int proc_sys_permission(struct mnt_idmap *idmap, struct inode *inode, int mask) { /* * sysctl entries that are not writeable, * are _NOT_ writeable, capabilities or not. */ struct ctl_table_header *head; struct ctl_table *table; int error; /* Executable files are not allowed under /proc/sys/ */ if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) return -EACCES; head = grab_header(inode); if (IS_ERR(head)) return PTR_ERR(head); table = PROC_I(inode)->sysctl_entry; if (!table) /* global root - r-xr-xr-x */ error = mask & MAY_WRITE ? -EACCES : 0; else /* Use the permissions on the sysctl table entry */ error = sysctl_perm(head, table, mask & ~MAY_NOT_BLOCK); sysctl_head_finish(head); return error; } static int proc_sys_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) { struct inode *inode = d_inode(dentry); int error; if (attr->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)) return -EPERM; error = setattr_prepare(&nop_mnt_idmap, dentry, attr); if (error) return error; setattr_copy(&nop_mnt_idmap, inode, attr); return 0; } static int proc_sys_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int query_flags) { struct inode *inode = d_inode(path->dentry); struct ctl_table_header *head = grab_header(inode); struct ctl_table *table = PROC_I(inode)->sysctl_entry; if (IS_ERR(head)) return PTR_ERR(head); generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat); if (table) stat->mode = (stat->mode & S_IFMT) | table->mode; sysctl_head_finish(head); return 0; } static const struct file_operations proc_sys_file_operations = { .open = proc_sys_open, .poll = proc_sys_poll, .read_iter = proc_sys_read, .write_iter = proc_sys_write, .splice_read = copy_splice_read, .splice_write = iter_file_splice_write, .llseek = default_llseek, }; static const struct file_operations proc_sys_dir_file_operations = { .read = generic_read_dir, .iterate_shared = proc_sys_readdir, .llseek = generic_file_llseek, }; static const struct inode_operations proc_sys_inode_operations = { .permission = proc_sys_permission, .setattr = proc_sys_setattr, .getattr = proc_sys_getattr, }; static const struct inode_operations proc_sys_dir_operations = { .lookup = proc_sys_lookup, .permission = proc_sys_permission, .setattr = proc_sys_setattr, .getattr = proc_sys_getattr, }; static int proc_sys_revalidate(struct dentry *dentry, unsigned int flags) { if (flags & LOOKUP_RCU) return -ECHILD; 
return !PROC_I(d_inode(dentry))->sysctl->unregistering; } static int proc_sys_delete(const struct dentry *dentry) { return !!PROC_I(d_inode(dentry))->sysctl->unregistering; } static int sysctl_is_seen(struct ctl_table_header *p) { struct ctl_table_set *set = p->set; int res; spin_lock(&sysctl_lock); if (p->unregistering) res = 0; else if (!set->is_seen) res = 1; else res = set->is_seen(set); spin_unlock(&sysctl_lock); return res; } static int proc_sys_compare(const struct dentry *dentry, unsigned int len, const char *str, const struct qstr *name) { struct ctl_table_header *head; struct inode *inode; /* Although proc doesn't have negative dentries, rcu-walk means * that inode here can be NULL */ /* AV: can it, indeed? */ inode = d_inode_rcu(dentry); if (!inode) return 1; if (name->len != len) return 1; if (memcmp(name->name, str, len)) return 1; head = rcu_dereference(PROC_I(inode)->sysctl); return !head || !sysctl_is_seen(head); } static const struct dentry_operations proc_sys_dentry_operations = { .d_revalidate = proc_sys_revalidate, .d_delete = proc_sys_delete, .d_compare = proc_sys_compare, }; static struct ctl_dir *find_subdir(struct ctl_dir *dir, const char *name, int namelen) { struct ctl_table_header *head; struct ctl_table *entry; entry = find_entry(&head, dir, name, namelen); if (!entry) return ERR_PTR(-ENOENT); if (!S_ISDIR(entry->mode)) return ERR_PTR(-ENOTDIR); return container_of(head, struct ctl_dir, header); } static struct ctl_dir *new_dir(struct ctl_table_set *set, const char *name, int namelen) { struct ctl_table *table; struct ctl_dir *new; struct ctl_node *node; char *new_name; new = kzalloc(sizeof(*new) + sizeof(struct ctl_node) + sizeof(struct ctl_table)*2 + namelen + 1, GFP_KERNEL); if (!new) return NULL; node = (struct ctl_node *)(new + 1); table = (struct ctl_table *)(node + 1); new_name = (char *)(table + 2); memcpy(new_name, name, namelen); table[0].procname = new_name; table[0].mode = S_IFDIR|S_IRUGO|S_IXUGO; init_header(&new->header, set->dir.header.root, set, node, table, 1); return new; } /** * get_subdir - find or create a subdir with the specified name. * @dir: Directory to create the subdirectory in * @name: The name of the subdirectory to find or create * @namelen: The length of name * * Takes a directory with an elevated reference count so we know that * if we drop the lock the directory will not go away. Upon success * the reference is moved from @dir to the returned subdirectory. * Upon error an error code is returned and the reference on @dir is * simply dropped. */ static struct ctl_dir *get_subdir(struct ctl_dir *dir, const char *name, int namelen) { struct ctl_table_set *set = dir->header.set; struct ctl_dir *subdir, *new = NULL; int err; spin_lock(&sysctl_lock); subdir = find_subdir(dir, name, namelen); if (!IS_ERR(subdir)) goto found; if (PTR_ERR(subdir) != -ENOENT) goto failed; spin_unlock(&sysctl_lock); new = new_dir(set, name, namelen); spin_lock(&sysctl_lock); subdir = ERR_PTR(-ENOMEM); if (!new) goto failed; /* Was the subdir added while we dropped the lock? */ subdir = find_subdir(dir, name, namelen); if (!IS_ERR(subdir)) goto found; if (PTR_ERR(subdir) != -ENOENT) goto failed; /* Nope. Use the our freshly made directory entry. 
*/ err = insert_header(dir, &new->header); subdir = ERR_PTR(err); if (err) goto failed; subdir = new; found: subdir->header.nreg++; failed: if (IS_ERR(subdir)) { pr_err("sysctl could not get directory: "); sysctl_print_dir(dir); pr_cont("%*.*s %ld\n", namelen, namelen, name, PTR_ERR(subdir)); } drop_sysctl_table(&dir->header); if (new) drop_sysctl_table(&new->header); spin_unlock(&sysctl_lock); return subdir; } static struct ctl_dir *xlate_dir(struct ctl_table_set *set, struct ctl_dir *dir) { struct ctl_dir *parent; const char *procname; if (!dir->header.parent) return &set->dir; parent = xlate_dir(set, dir->header.parent); if (IS_ERR(parent)) return parent; procname = dir->header.ctl_table[0].procname; return find_subdir(parent, procname, strlen(procname)); } static int sysctl_follow_link(struct ctl_table_header **phead, struct ctl_table **pentry) { struct ctl_table_header *head; struct ctl_table_root *root; struct ctl_table_set *set; struct ctl_table *entry; struct ctl_dir *dir; int ret; spin_lock(&sysctl_lock); root = (*pentry)->data; set = lookup_header_set(root); dir = xlate_dir(set, (*phead)->parent); if (IS_ERR(dir)) ret = PTR_ERR(dir); else { const char *procname = (*pentry)->procname; head = NULL; entry = find_entry(&head, dir, procname, strlen(procname)); ret = -ENOENT; if (entry && use_table(head)) { unuse_table(*phead); *phead = head; *pentry = entry; ret = 0; } } spin_unlock(&sysctl_lock); return ret; } static int sysctl_err(const char *path, struct ctl_table *table, char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; pr_err("sysctl table check failed: %s/%s %pV\n", path, table->procname, &vaf); va_end(args); return -EINVAL; } static int sysctl_check_table_array(const char *path, struct ctl_table *table) { int err = 0; if ((table->proc_handler == proc_douintvec) || (table->proc_handler == proc_douintvec_minmax)) { if (table->maxlen != sizeof(unsigned int)) err |= sysctl_err(path, table, "array not allowed"); } if (table->proc_handler == proc_dou8vec_minmax) { if (table->maxlen != sizeof(u8)) err |= sysctl_err(path, table, "array not allowed"); } if (table->proc_handler == proc_dobool) { if (table->maxlen != sizeof(bool)) err |= sysctl_err(path, table, "array not allowed"); } return err; } static int sysctl_check_table(const char *path, struct ctl_table_header *header) { struct ctl_table *entry; int err = 0; list_for_each_table_entry(entry, header) { if ((entry->proc_handler == proc_dostring) || (entry->proc_handler == proc_dobool) || (entry->proc_handler == proc_dointvec) || (entry->proc_handler == proc_douintvec) || (entry->proc_handler == proc_douintvec_minmax) || (entry->proc_handler == proc_dointvec_minmax) || (entry->proc_handler == proc_dou8vec_minmax) || (entry->proc_handler == proc_dointvec_jiffies) || (entry->proc_handler == proc_dointvec_userhz_jiffies) || (entry->proc_handler == proc_dointvec_ms_jiffies) || (entry->proc_handler == proc_doulongvec_minmax) || (entry->proc_handler == proc_doulongvec_ms_jiffies_minmax)) { if (!entry->data) err |= sysctl_err(path, entry, "No data"); if (!entry->maxlen) err |= sysctl_err(path, entry, "No maxlen"); else err |= sysctl_check_table_array(path, entry); } if (!entry->proc_handler) err |= sysctl_err(path, entry, "No proc_handler"); if ((entry->mode & (S_IRUGO|S_IWUGO)) != entry->mode) err |= sysctl_err(path, entry, "bogus .mode 0%o", entry->mode); } return err; } static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table_header *head) { struct ctl_table 
*link_table, *entry, *link; struct ctl_table_header *links; struct ctl_node *node; char *link_name; int nr_entries, name_bytes; name_bytes = 0; nr_entries = 0; list_for_each_table_entry(entry, head) { nr_entries++; name_bytes += strlen(entry->procname) + 1; } links = kzalloc(sizeof(struct ctl_table_header) + sizeof(struct ctl_node)*nr_entries + sizeof(struct ctl_table)*(nr_entries + 1) + name_bytes, GFP_KERNEL); if (!links) return NULL; node = (struct ctl_node *)(links + 1); link_table = (struct ctl_table *)(node + nr_entries); link_name = (char *)&link_table[nr_entries + 1]; link = link_table; list_for_each_table_entry(entry, head) { int len = strlen(entry->procname) + 1; memcpy(link_name, entry->procname, len); link->procname = link_name; link->mode = S_IFLNK|S_IRWXUGO; link->data = head->root; link_name += len; link++; } init_header(links, dir->header.root, dir->header.set, node, link_table, head->ctl_table_size); links->nreg = nr_entries; return links; } static bool get_links(struct ctl_dir *dir, struct ctl_table_header *header, struct ctl_table_root *link_root) { struct ctl_table_header *tmp_head; struct ctl_table *entry, *link; /* Are there links available for every entry in table? */ list_for_each_table_entry(entry, header) { const char *procname = entry->procname; link = find_entry(&tmp_head, dir, procname, strlen(procname)); if (!link) return false; if (S_ISDIR(link->mode) && S_ISDIR(entry->mode)) continue; if (S_ISLNK(link->mode) && (link->data == link_root)) continue; return false; } /* The checks passed. Increase the registration count on the links */ list_for_each_table_entry(entry, header) { const char *procname = entry->procname; link = find_entry(&tmp_head, dir, procname, strlen(procname)); tmp_head->nreg++; } return true; } static int insert_links(struct ctl_table_header *head) { struct ctl_table_set *root_set = &sysctl_table_root.default_set; struct ctl_dir *core_parent; struct ctl_table_header *links; int err; if (head->set == root_set) return 0; core_parent = xlate_dir(root_set, head->parent); if (IS_ERR(core_parent)) return 0; if (get_links(core_parent, head, head->root)) return 0; core_parent->header.nreg++; spin_unlock(&sysctl_lock); links = new_links(core_parent, head); spin_lock(&sysctl_lock); err = -ENOMEM; if (!links) goto out; err = 0; if (get_links(core_parent, head, head->root)) { kfree(links); goto out; } err = insert_header(core_parent, links); if (err) kfree(links); out: drop_sysctl_table(&core_parent->header); return err; } /* Find the directory for the ctl_table. If one is not found create it. */ static struct ctl_dir *sysctl_mkdir_p(struct ctl_dir *dir, const char *path) { const char *name, *nextname; for (name = path; name; name = nextname) { int namelen; nextname = strchr(name, '/'); if (nextname) { namelen = nextname - name; nextname++; } else { namelen = strlen(name); } if (namelen == 0) continue; /* * namelen ensures if name is "foo/bar/yay" only foo is * registered first. We traverse as if using mkdir -p and * return a ctl_dir for the last directory entry. */ dir = get_subdir(dir, name, namelen); if (IS_ERR(dir)) break; } return dir; } /** * __register_sysctl_table - register a leaf sysctl table * @set: Sysctl tree to register on * @path: The path to the directory the sysctl table is in. * @table: the top-level table structure without any child. This table * should not be free'd after registration. So it should not be * used on stack. It can either be a global or dynamically allocated * by the caller and free'd later after sysctl unregistration. 
* @table_size : The number of elements in table * * Register a sysctl table hierarchy. @table should be a filled in ctl_table * array. A completely 0 filled entry terminates the table. * * The members of the &struct ctl_table structure are used as follows: * * procname - the name of the sysctl file under /proc/sys. Set to %NULL to not * enter a sysctl file * * data - a pointer to data for use by proc_handler * * maxlen - the maximum size in bytes of the data * * mode - the file permissions for the /proc/sys file * * child - must be %NULL. * * proc_handler - the text handler routine (described below) * * extra1, extra2 - extra pointers usable by the proc handler routines * XXX: we should eventually modify these to use long min / max [0] * [0] https://lkml.kernel.org/87zgpte9o4.fsf@email.froward.int.ebiederm.org * * Leaf nodes in the sysctl tree will be represented by a single file * under /proc; non-leaf nodes (where child is not NULL) are not allowed, * sysctl_check_table() verifies this. * * There must be a proc_handler routine for any terminal nodes. * Several default handlers are available to cover common cases - * * proc_dostring(), proc_dointvec(), proc_dointvec_jiffies(), * proc_dointvec_userhz_jiffies(), proc_dointvec_minmax(), * proc_doulongvec_ms_jiffies_minmax(), proc_doulongvec_minmax() * * It is the handler's job to read the input buffer from user memory * and process it. The handler should return 0 on success. * * This routine returns %NULL on a failure to register, and a pointer * to the table header on success. */ struct ctl_table_header *__register_sysctl_table( struct ctl_table_set *set, const char *path, struct ctl_table *table, size_t table_size) { struct ctl_table_root *root = set->dir.header.root; struct ctl_table_header *header; struct ctl_dir *dir; struct ctl_node *node; header = kzalloc(sizeof(struct ctl_table_header) + sizeof(struct ctl_node)*table_size, GFP_KERNEL_ACCOUNT); if (!header) return NULL; node = (struct ctl_node *)(header + 1); init_header(header, root, set, node, table, table_size); if (sysctl_check_table(path, header)) goto fail; spin_lock(&sysctl_lock); dir = &set->dir; /* Reference moved down the directory tree get_subdir */ dir->header.nreg++; spin_unlock(&sysctl_lock); dir = sysctl_mkdir_p(dir, path); if (IS_ERR(dir)) goto fail; spin_lock(&sysctl_lock); if (insert_header(dir, header)) goto fail_put_dir_locked; drop_sysctl_table(&dir->header); spin_unlock(&sysctl_lock); return header; fail_put_dir_locked: drop_sysctl_table(&dir->header); spin_unlock(&sysctl_lock); fail: kfree(header); return NULL; } /** * register_sysctl_sz - register a sysctl table * @path: The path to the directory the sysctl table is in. If the path * doesn't exist we will create it for you. * @table: the table structure. The calller must ensure the life of the @table * will be kept during the lifetime use of the syctl. It must not be freed * until unregister_sysctl_table() is called with the given returned table * with this registration. If your code is non modular then you don't need * to call unregister_sysctl_table() and can instead use something like * register_sysctl_init() which does not care for the result of the syctl * registration. * @table_size: The number of elements in table. * * Register a sysctl table. @table should be a filled in ctl_table * array. A completely 0 filled entry terminates the table. * * See __register_sysctl_table for more details. 
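 *
 * A minimal registration sketch (illustrative only: example_value,
 * example_table and the "dev/example" path are not part of this file):
 *
 *	static int example_value;
 *	static struct ctl_table example_table[] = {
 *		{
 *			.procname	= "example_value",
 *			.data		= &example_value,
 *			.maxlen		= sizeof(int),
 *			.mode		= 0644,
 *			.proc_handler	= proc_dointvec,
 *		},
 *	};
 *
 *	hdr = register_sysctl_sz("dev/example", example_table,
 *				 ARRAY_SIZE(example_table));
 *	if (!hdr)
 *		return -ENOMEM;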
*/ struct ctl_table_header *register_sysctl_sz(const char *path, struct ctl_table *table, size_t table_size) { return __register_sysctl_table(&sysctl_table_root.default_set, path, table, table_size); } EXPORT_SYMBOL(register_sysctl_sz); /** * __register_sysctl_init() - register sysctl table to path * @path: path name for sysctl base. If that path doesn't exist we will create * it for you. * @table: This is the sysctl table that needs to be registered to the path. * The caller must ensure the life of the @table will be kept during the * lifetime use of the sysctl. * @table_name: The name of sysctl table, only used for log printing when * registration fails * @table_size: The number of elements in table * * The sysctl interface is used by userspace to query or modify at runtime * a predefined value set on a variable. These variables however have default * values pre-set. Code which depends on these variables will always work even * if register_sysctl() fails. If register_sysctl() fails you'd just loose the * ability to query or modify the sysctls dynamically at run time. Chances of * register_sysctl() failing on init are extremely low, and so for both reasons * this function does not return any error as it is used by initialization code. * * Context: if your base directory does not exist it will be created for you. */ void __init __register_sysctl_init(const char *path, struct ctl_table *table, const char *table_name, size_t table_size) { struct ctl_table_header *hdr = register_sysctl_sz(path, table, table_size); if (unlikely(!hdr)) { pr_err("failed when register_sysctl_sz %s to %s\n", table_name, path); return; } kmemleak_not_leak(hdr); } static void put_links(struct ctl_table_header *header) { struct ctl_table_set *root_set = &sysctl_table_root.default_set; struct ctl_table_root *root = header->root; struct ctl_dir *parent = header->parent; struct ctl_dir *core_parent; struct ctl_table *entry; if (header->set == root_set) return; core_parent = xlate_dir(root_set, parent); if (IS_ERR(core_parent)) return; list_for_each_table_entry(entry, header) { struct ctl_table_header *link_head; struct ctl_table *link; const char *name = entry->procname; link = find_entry(&link_head, core_parent, name, strlen(name)); if (link && ((S_ISDIR(link->mode) && S_ISDIR(entry->mode)) || (S_ISLNK(link->mode) && (link->data == root)))) { drop_sysctl_table(link_head); } else { pr_err("sysctl link missing during unregister: "); sysctl_print_dir(parent); pr_cont("%s\n", name); } } } static void drop_sysctl_table(struct ctl_table_header *header) { struct ctl_dir *parent = header->parent; if (--header->nreg) return; if (parent) { put_links(header); start_unregistering(header); } if (!--header->count) kfree_rcu(header, rcu); if (parent) drop_sysctl_table(&parent->header); } /** * unregister_sysctl_table - unregister a sysctl table hierarchy * @header: the header returned from register_sysctl or __register_sysctl_table * * Unregisters the sysctl table and all children. proc entries may not * actually be removed until they are no longer used by anyone. 
*/ void unregister_sysctl_table(struct ctl_table_header * header) { might_sleep(); if (header == NULL) return; spin_lock(&sysctl_lock); drop_sysctl_table(header); spin_unlock(&sysctl_lock); } EXPORT_SYMBOL(unregister_sysctl_table); void setup_sysctl_set(struct ctl_table_set *set, struct ctl_table_root *root, int (*is_seen)(struct ctl_table_set *)) { memset(set, 0, sizeof(*set)); set->is_seen = is_seen; init_header(&set->dir.header, root, set, NULL, root_table, 1); } void retire_sysctl_set(struct ctl_table_set *set) { WARN_ON(!RB_EMPTY_ROOT(&set->dir.root)); } int __init proc_sys_init(void) { struct proc_dir_entry *proc_sys_root; proc_sys_root = proc_mkdir("sys", NULL); proc_sys_root->proc_iops = &proc_sys_dir_operations; proc_sys_root->proc_dir_ops = &proc_sys_dir_file_operations; proc_sys_root->nlink = 0; return sysctl_init_bases(); } struct sysctl_alias { const char *kernel_param; const char *sysctl_param; }; /* * Historically some settings had both sysctl and a command line parameter. * With the generic sysctl. parameter support, we can handle them at a single * place and only keep the historical name for compatibility. This is not meant * to add brand new aliases. When adding existing aliases, consider whether * the possibly different moment of changing the value (e.g. from early_param * to the moment do_sysctl_args() is called) is an issue for the specific * parameter. */ static const struct sysctl_alias sysctl_aliases[] = { {"hardlockup_all_cpu_backtrace", "kernel.hardlockup_all_cpu_backtrace" }, {"hung_task_panic", "kernel.hung_task_panic" }, {"numa_zonelist_order", "vm.numa_zonelist_order" }, {"softlockup_all_cpu_backtrace", "kernel.softlockup_all_cpu_backtrace" }, { } }; static const char *sysctl_find_alias(char *param) { const struct sysctl_alias *alias; for (alias = &sysctl_aliases[0]; alias->kernel_param != NULL; alias++) { if (strcmp(alias->kernel_param, param) == 0) return alias->sysctl_param; } return NULL; } bool sysctl_is_alias(char *param) { const char *alias = sysctl_find_alias(param); return alias != NULL; } /* Set sysctl value passed on kernel command line. */ static int process_sysctl_arg(char *param, char *val, const char *unused, void *arg) { char *path; struct vfsmount **proc_mnt = arg; struct file_system_type *proc_fs_type; struct file *file; int len; int err; loff_t pos = 0; ssize_t wret; if (strncmp(param, "sysctl", sizeof("sysctl") - 1) == 0) { param += sizeof("sysctl") - 1; if (param[0] != '/' && param[0] != '.') return 0; param++; } else { param = (char *) sysctl_find_alias(param); if (!param) return 0; } if (!val) return -EINVAL; len = strlen(val); if (len == 0) return -EINVAL; /* * To set sysctl options, we use a temporary mount of proc, look up the * respective sys/ file and write to it. To avoid mounting it when no * options were given, we mount it only when the first sysctl option is * found. Why not a persistent mount? There are problems with a * persistent mount of proc in that it forces userspace not to use any * proc mount options. 
*/ if (!*proc_mnt) { proc_fs_type = get_fs_type("proc"); if (!proc_fs_type) { pr_err("Failed to find procfs to set sysctl from command line\n"); return 0; } *proc_mnt = kern_mount(proc_fs_type); put_filesystem(proc_fs_type); if (IS_ERR(*proc_mnt)) { pr_err("Failed to mount procfs to set sysctl from command line\n"); return 0; } } path = kasprintf(GFP_KERNEL, "sys/%s", param); if (!path) panic("%s: Failed to allocate path for %s\n", __func__, param); strreplace(path, '.', '/'); file = file_open_root_mnt(*proc_mnt, path, O_WRONLY, 0); if (IS_ERR(file)) { err = PTR_ERR(file); if (err == -ENOENT) pr_err("Failed to set sysctl parameter '%s=%s': parameter not found\n", param, val); else if (err == -EACCES) pr_err("Failed to set sysctl parameter '%s=%s': permission denied (read-only?)\n", param, val); else pr_err("Error %pe opening proc file to set sysctl parameter '%s=%s'\n", file, param, val); goto out; } wret = kernel_write(file, val, len, &pos); if (wret < 0) { err = wret; if (err == -EINVAL) pr_err("Failed to set sysctl parameter '%s=%s': invalid value\n", param, val); else pr_err("Error %pe writing to proc file to set sysctl parameter '%s=%s'\n", ERR_PTR(err), param, val); } else if (wret != len) { pr_err("Wrote only %zd bytes of %d writing to proc file %s to set sysctl parameter '%s=%s\n", wret, len, path, param, val); } err = filp_close(file, NULL); if (err) pr_err("Error %pe closing proc file to set sysctl parameter '%s=%s\n", ERR_PTR(err), param, val); out: kfree(path); return 0; } void do_sysctl_args(void) { char *command_line; struct vfsmount *proc_mnt = NULL; command_line = kstrdup(saved_command_line, GFP_KERNEL); if (!command_line) panic("%s: Failed to allocate copy of command line\n", __func__); parse_args("Setting sysctl args", command_line, NULL, 0, -1, -1, &proc_mnt, process_sysctl_arg); if (proc_mnt) kern_unmount(proc_mnt); kfree(command_line); } |
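/*
 * Illustrative userspace sketch: a plain open()/read() of a /proc/sys file is
 * what exercises the proc_sys_open()/proc_sys_read()/proc_sys_write() handlers
 * above, which in turn dispatch to the table entry's proc_handler. The sysctl
 * used here (kernel/hostname) is just a commonly available example, not
 * something defined by the code above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	ssize_t n;
	int fd;

	/* Opening the file goes through proc_sys_open(). */
	fd = open("/proc/sys/kernel/hostname", O_RDONLY);
	if (fd < 0) {
		perror("open /proc/sys/kernel/hostname");
		return 1;
	}

	/* Reading ends up in proc_sys_read() -> proc_sys_call_handler(). */
	n = read(fd, buf, sizeof(buf) - 1);
	if (n < 0) {
		perror("read");
		close(fd);
		return 1;
	}
	buf[n] = '\0';
	printf("kernel.hostname = %s", buf);

	/*
	 * A write (with sufficient privilege) would take the same path via
	 * proc_sys_write() and the entry's proc_handler.
	 */
	close(fd);
	return 0;
}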
// SPDX-License-Identifier: GPL-2.0-only
/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2006-2010 Patrick McHardy <kaber@trash.net>
 */

#include <linux/types.h>
#include <linux/timer.h>
#include <linux/netfilter.h>
#include <linux/in.h>
#include <linux/icmp.h>
#include <linux/seq_file.h>
#include <net/ip.h>
#include <net/checksum.h>
#include <linux/netfilter_ipv4.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_log.h>

#include "nf_internals.h"

static const unsigned int nf_ct_icmp_timeout = 30*HZ;

bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
		       struct net *net, struct nf_conntrack_tuple *tuple)
{
	const struct icmphdr *hp;
	struct icmphdr _hdr;

	hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
	if (hp == NULL)
		return false;

	tuple->dst.u.icmp.type = hp->type;
	tuple->src.u.icmp.id = hp->un.echo.id;
	tuple->dst.u.icmp.code = hp->code;

	return true;
}

/* Add 1; spaces filled with 0. */
static const u_int8_t invmap[] = {
	[ICMP_ECHO] = ICMP_ECHOREPLY + 1,
	[ICMP_ECHOREPLY] = ICMP_ECHO + 1,
	[ICMP_TIMESTAMP] = ICMP_TIMESTAMPREPLY + 1,
	[ICMP_TIMESTAMPREPLY] = ICMP_TIMESTAMP + 1,
	[ICMP_INFO_REQUEST] = ICMP_INFO_REPLY + 1,
	[ICMP_INFO_REPLY] = ICMP_INFO_REQUEST + 1,
	[ICMP_ADDRESS] = ICMP_ADDRESSREPLY + 1,
	[ICMP_ADDRESSREPLY] = ICMP_ADDRESS + 1
};

bool nf_conntrack_invert_icmp_tuple(struct nf_conntrack_tuple *tuple,
				    const struct nf_conntrack_tuple *orig)
{
	if (orig->dst.u.icmp.type >= sizeof(invmap) ||
	    !invmap[orig->dst.u.icmp.type])
		return false;

	tuple->src.u.icmp.id = orig->src.u.icmp.id;
	tuple->dst.u.icmp.type = invmap[orig->dst.u.icmp.type] - 1;
	tuple->dst.u.icmp.code = orig->dst.u.icmp.code;

	return true;
}

/* Returns verdict for packet, or -1 for invalid.
*/ int nf_conntrack_icmp_packet(struct nf_conn *ct, struct sk_buff *skb, enum ip_conntrack_info ctinfo, const struct nf_hook_state *state) { /* Do not immediately delete the connection after the first successful reply to avoid excessive conntrackd traffic and also to handle correctly ICMP echo reply duplicates. */ unsigned int *timeout = nf_ct_timeout_lookup(ct); static const u_int8_t valid_new[] = { [ICMP_ECHO] = 1, [ICMP_TIMESTAMP] = 1, [ICMP_INFO_REQUEST] = 1, [ICMP_ADDRESS] = 1 }; if (state->pf != NFPROTO_IPV4) return -NF_ACCEPT; if (ct->tuplehash[0].tuple.dst.u.icmp.type >= sizeof(valid_new) || !valid_new[ct->tuplehash[0].tuple.dst.u.icmp.type]) { /* Can't create a new ICMP `conn' with this. */ pr_debug("icmp: can't create new conn with type %u\n", ct->tuplehash[0].tuple.dst.u.icmp.type); nf_ct_dump_tuple_ip(&ct->tuplehash[0].tuple); return -NF_ACCEPT; } if (!timeout) timeout = &nf_icmp_pernet(nf_ct_net(ct))->timeout; nf_ct_refresh_acct(ct, ctinfo, skb, *timeout); return NF_ACCEPT; } /* Check inner header is related to any of the existing connections */ int nf_conntrack_inet_error(struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, const struct nf_hook_state *state, u8 l4proto, union nf_inet_addr *outer_daddr) { struct nf_conntrack_tuple innertuple, origtuple; const struct nf_conntrack_tuple_hash *h; const struct nf_conntrack_zone *zone; enum ip_conntrack_info ctinfo; struct nf_conntrack_zone tmp; union nf_inet_addr *ct_daddr; enum ip_conntrack_dir dir; struct nf_conn *ct; WARN_ON(skb_nfct(skb)); zone = nf_ct_zone_tmpl(tmpl, skb, &tmp); /* Are they talking about one of our connections? */ if (!nf_ct_get_tuplepr(skb, dataoff, state->pf, state->net, &origtuple)) return -NF_ACCEPT; /* Ordinarily, we'd expect the inverted tupleproto, but it's been preserved inside the ICMP. */ if (!nf_ct_invert_tuple(&innertuple, &origtuple)) return -NF_ACCEPT; h = nf_conntrack_find_get(state->net, zone, &innertuple); if (!h) return -NF_ACCEPT; /* Consider: A -> T (=This machine) -> B * Conntrack entry will look like this: * Original: A->B * Reply: B->T (SNAT case) OR A * * When this function runs, we got packet that looks like this: * iphdr|icmphdr|inner_iphdr|l4header (tcp, udp, ..). * * Above nf_conntrack_find_get() makes lookup based on inner_hdr, * so we should expect that destination of the found connection * matches outer header destination address. * * In above example, we can consider these two cases: * 1. Error coming in reply direction from B or M (middle box) to * T (SNAT case) or A. * Inner saddr will be B, dst will be T or A. * The found conntrack will be reply tuple (B->T/A). * 2. Error coming in original direction from A or M to B. * Inner saddr will be A, inner daddr will be B. * The found conntrack will be original tuple (A->B). * * In both cases, conntrack[dir].dst == inner.dst. * * A bogus packet could look like this: * Inner: B->T * Outer: B->X (other machine reachable by T). * * In this case, lookup yields connection A->B and will * set packet from B->X as *RELATED*, even though no connection * from X was ever seen. 
*/ ct = nf_ct_tuplehash_to_ctrack(h); dir = NF_CT_DIRECTION(h); ct_daddr = &ct->tuplehash[dir].tuple.dst.u3; if (!nf_inet_addr_cmp(outer_daddr, ct_daddr)) { if (state->pf == AF_INET) { nf_l4proto_log_invalid(skb, state, l4proto, "outer daddr %pI4 != inner %pI4", &outer_daddr->ip, &ct_daddr->ip); } else if (state->pf == AF_INET6) { nf_l4proto_log_invalid(skb, state, l4proto, "outer daddr %pI6 != inner %pI6", &outer_daddr->ip6, &ct_daddr->ip6); } nf_ct_put(ct); return -NF_ACCEPT; } ctinfo = IP_CT_RELATED; if (dir == IP_CT_DIR_REPLY) ctinfo += IP_CT_IS_REPLY; /* Update skb to refer to this connection */ nf_ct_set(skb, ct, ctinfo); return NF_ACCEPT; } static void icmp_error_log(const struct sk_buff *skb, const struct nf_hook_state *state, const char *msg) { nf_l4proto_log_invalid(skb, state, IPPROTO_ICMP, "%s", msg); } /* Small and modified version of icmp_rcv */ int nf_conntrack_icmpv4_error(struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, const struct nf_hook_state *state) { union nf_inet_addr outer_daddr; const struct icmphdr *icmph; struct icmphdr _ih; /* Not enough header? */ icmph = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih); if (icmph == NULL) { icmp_error_log(skb, state, "short packet"); return -NF_ACCEPT; } /* See nf_conntrack_proto_tcp.c */ if (state->net->ct.sysctl_checksum && state->hook == NF_INET_PRE_ROUTING && nf_ip_checksum(skb, state->hook, dataoff, IPPROTO_ICMP)) { icmp_error_log(skb, state, "bad hw icmp checksum"); return -NF_ACCEPT; } /* * 18 is the highest 'known' ICMP type. Anything else is a mystery * * RFC 1122: 3.2.2 Unknown ICMP messages types MUST be silently * discarded. */ if (icmph->type > NR_ICMP_TYPES) { icmp_error_log(skb, state, "invalid icmp type"); return -NF_ACCEPT; } /* Need to track icmp error message? 
*/ if (!icmp_is_err(icmph->type)) return NF_ACCEPT; memset(&outer_daddr, 0, sizeof(outer_daddr)); outer_daddr.ip = ip_hdr(skb)->daddr; dataoff += sizeof(*icmph); return nf_conntrack_inet_error(tmpl, skb, dataoff, state, IPPROTO_ICMP, &outer_daddr); } #if IS_ENABLED(CONFIG_NF_CT_NETLINK) #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/nfnetlink_conntrack.h> static int icmp_tuple_to_nlattr(struct sk_buff *skb, const struct nf_conntrack_tuple *t) { if (nla_put_be16(skb, CTA_PROTO_ICMP_ID, t->src.u.icmp.id) || nla_put_u8(skb, CTA_PROTO_ICMP_TYPE, t->dst.u.icmp.type) || nla_put_u8(skb, CTA_PROTO_ICMP_CODE, t->dst.u.icmp.code)) goto nla_put_failure; return 0; nla_put_failure: return -1; } static const struct nla_policy icmp_nla_policy[CTA_PROTO_MAX+1] = { [CTA_PROTO_ICMP_TYPE] = { .type = NLA_U8 }, [CTA_PROTO_ICMP_CODE] = { .type = NLA_U8 }, [CTA_PROTO_ICMP_ID] = { .type = NLA_U16 }, }; static int icmp_nlattr_to_tuple(struct nlattr *tb[], struct nf_conntrack_tuple *tuple, u_int32_t flags) { if (flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_TYPE)) { if (!tb[CTA_PROTO_ICMP_TYPE]) return -EINVAL; tuple->dst.u.icmp.type = nla_get_u8(tb[CTA_PROTO_ICMP_TYPE]); if (tuple->dst.u.icmp.type >= sizeof(invmap) || !invmap[tuple->dst.u.icmp.type]) return -EINVAL; } if (flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_CODE)) { if (!tb[CTA_PROTO_ICMP_CODE]) return -EINVAL; tuple->dst.u.icmp.code = nla_get_u8(tb[CTA_PROTO_ICMP_CODE]); } if (flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_ID)) { if (!tb[CTA_PROTO_ICMP_ID]) return -EINVAL; tuple->src.u.icmp.id = nla_get_be16(tb[CTA_PROTO_ICMP_ID]); } return 0; } static unsigned int icmp_nlattr_tuple_size(void) { static unsigned int size __read_mostly; if (!size) size = nla_policy_len(icmp_nla_policy, CTA_PROTO_MAX + 1); return size; } #endif #ifdef CONFIG_NF_CONNTRACK_TIMEOUT #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/nfnetlink_cttimeout.h> static int icmp_timeout_nlattr_to_obj(struct nlattr *tb[], struct net *net, void *data) { unsigned int *timeout = data; struct nf_icmp_net *in = nf_icmp_pernet(net); if (tb[CTA_TIMEOUT_ICMP_TIMEOUT]) { if (!timeout) timeout = &in->timeout; *timeout = ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMP_TIMEOUT])) * HZ; } else if (timeout) { /* Set default ICMP timeout. */ *timeout = in->timeout; } return 0; } static int icmp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) { const unsigned int *timeout = data; if (nla_put_be32(skb, CTA_TIMEOUT_ICMP_TIMEOUT, htonl(*timeout / HZ))) goto nla_put_failure; return 0; nla_put_failure: return -ENOSPC; } static const struct nla_policy icmp_timeout_nla_policy[CTA_TIMEOUT_ICMP_MAX+1] = { [CTA_TIMEOUT_ICMP_TIMEOUT] = { .type = NLA_U32 }, }; #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ void nf_conntrack_icmp_init_net(struct net *net) { struct nf_icmp_net *in = nf_icmp_pernet(net); in->timeout = nf_ct_icmp_timeout; } const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp = { .l4proto = IPPROTO_ICMP, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) .tuple_to_nlattr = icmp_tuple_to_nlattr, .nlattr_tuple_size = icmp_nlattr_tuple_size, .nlattr_to_tuple = icmp_nlattr_to_tuple, .nla_policy = icmp_nla_policy, #endif #ifdef CONFIG_NF_CONNTRACK_TIMEOUT .ctnl_timeout = { .nlattr_to_obj = icmp_timeout_nlattr_to_obj, .obj_to_nlattr = icmp_timeout_obj_to_nlattr, .nlattr_max = CTA_TIMEOUT_ICMP_MAX, .obj_size = sizeof(unsigned int), .nla_policy = icmp_timeout_nla_policy, }, #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ }; |
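/*
 * Standalone sketch (userspace, illustrative only) of the "add 1, zero means
 * invalid" convention used by the invmap[] table earlier in this file: one
 * sparse array both validates an ICMP type and yields the counterpart type
 * used when inverting a tuple. demo_invmap and invert_type are made-up names
 * for this sketch.
 */
#include <stdio.h>

#define ICMP_ECHOREPLY	0
#define ICMP_ECHO	8

static const unsigned char demo_invmap[] = {
	[ICMP_ECHO]	 = ICMP_ECHOREPLY + 1,
	[ICMP_ECHOREPLY] = ICMP_ECHO + 1,
};

/* Mirror of the kernel check: returns the inverted type, or -1 if unknown. */
static int invert_type(unsigned int type)
{
	if (type >= sizeof(demo_invmap) || !demo_invmap[type])
		return -1;
	return demo_invmap[type] - 1;
}

int main(void)
{
	printf("ICMP_ECHO (%d) inverts to %d\n", ICMP_ECHO, invert_type(ICMP_ECHO));
	printf("type 3 (dest unreachable) inverts to %d\n", invert_type(3));
	return 0;
}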
/*
 * Copyright (c) 2006-2008 Intel Corporation
 * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
 *
 * DRM core CRTC related functions
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 *
 * Authors:
 *	Keith Packard
 *	Eric Anholt <eric@anholt.net>
 *	Dave Airlie <airlied@linux.ie>
 *	Jesse Barnes <jesse.barnes@intel.com>
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/dynamic_debug.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>

#include "drm_crtc_helper_internal.h"

DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
			"DRM_UT_CORE",
			"DRM_UT_DRIVER",
			"DRM_UT_KMS",
			"DRM_UT_PRIME",
			"DRM_UT_ATOMIC",
			"DRM_UT_VBL",
			"DRM_UT_STATE",
			"DRM_UT_LEASE",
			"DRM_UT_DP",
			"DRM_UT_DRMRES");

/**
 * DOC: overview
 *
 * The CRTC modeset helper library provides a default set_config implementation
 * in drm_crtc_helper_set_config(). Plus a few other convenience functions using
 * the same callbacks which drivers can use to e.g. restore the modeset
 * configuration on resume with drm_helper_resume_force_mode().
 *
 * Note that this helper library doesn't track the current power state of CRTCs
 * and encoders. It can call callbacks like &drm_encoder_helper_funcs.dpms even
 * though the hardware is already in the desired state. This deficiency has been
 * fixed in the atomic helpers.
* * The driver callbacks are mostly compatible with the atomic modeset helpers, * except for the handling of the primary plane: Atomic helpers require that the * primary plane is implemented as a real standalone plane and not directly tied * to the CRTC state. For easier transition this library provides functions to * implement the old semantics required by the CRTC helpers using the new plane * and atomic helper callbacks. * * Drivers are strongly urged to convert to the atomic helpers (by way of first * converting to the plane helpers). New drivers must not use these functions * but need to implement the atomic interface instead, potentially using the * atomic helpers for that. * * These legacy modeset helpers use the same function table structures as * all other modesetting helpers. See the documentation for struct * &drm_crtc_helper_funcs, &struct drm_encoder_helper_funcs and struct * &drm_connector_helper_funcs. */ /** * drm_helper_encoder_in_use - check if a given encoder is in use * @encoder: encoder to check * * Checks whether @encoder is with the current mode setting output configuration * in use by any connector. This doesn't mean that it is actually enabled since * the DPMS state is tracked separately. * * Returns: * True if @encoder is used, false otherwise. */ bool drm_helper_encoder_in_use(struct drm_encoder *encoder) { struct drm_connector *connector; struct drm_connector_list_iter conn_iter; struct drm_device *dev = encoder->dev; WARN_ON(drm_drv_uses_atomic_modeset(dev)); /* * We can expect this mutex to be locked if we are not panicking. * Locking is currently fubar in the panic handler. */ if (!oops_in_progress) { WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); } drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { if (connector->encoder == encoder) { drm_connector_list_iter_end(&conn_iter); return true; } } drm_connector_list_iter_end(&conn_iter); return false; } EXPORT_SYMBOL(drm_helper_encoder_in_use); /** * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config * @crtc: CRTC to check * * Checks whether @crtc is with the current mode setting output configuration * in use by any connector. This doesn't mean that it is actually enabled since * the DPMS state is tracked separately. * * Returns: * True if @crtc is used, false otherwise. */ bool drm_helper_crtc_in_use(struct drm_crtc *crtc) { struct drm_encoder *encoder; struct drm_device *dev = crtc->dev; WARN_ON(drm_drv_uses_atomic_modeset(dev)); /* * We can expect this mutex to be locked if we are not panicking. * Locking is currently fubar in the panic handler. 
*/ if (!oops_in_progress) WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); drm_for_each_encoder(encoder, dev) if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder)) return true; return false; } EXPORT_SYMBOL(drm_helper_crtc_in_use); static void drm_encoder_disable(struct drm_encoder *encoder) { const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; if (!encoder_funcs) return; if (encoder_funcs->disable) (*encoder_funcs->disable)(encoder); else if (encoder_funcs->dpms) (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); } static void __drm_helper_disable_unused_functions(struct drm_device *dev) { struct drm_encoder *encoder; struct drm_crtc *crtc; drm_warn_on_modeset_not_all_locked(dev); drm_for_each_encoder(encoder, dev) { if (!drm_helper_encoder_in_use(encoder)) { drm_encoder_disable(encoder); /* disconnect encoder from any connector */ encoder->crtc = NULL; } } drm_for_each_crtc(crtc, dev) { const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; crtc->enabled = drm_helper_crtc_in_use(crtc); if (!crtc->enabled) { if (crtc_funcs->disable) (*crtc_funcs->disable)(crtc); else (*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF); crtc->primary->fb = NULL; } } } /** * drm_helper_disable_unused_functions - disable unused objects * @dev: DRM device * * This function walks through the entire mode setting configuration of @dev. It * will remove any CRTC links of unused encoders and encoder links of * disconnected connectors. Then it will disable all unused encoders and CRTCs * either by calling their disable callback if available or by calling their * dpms callback with DRM_MODE_DPMS_OFF. * * NOTE: * * This function is part of the legacy modeset helper library and will cause * major confusion with atomic drivers. This is because atomic helpers guarantee * to never call ->disable() hooks on a disabled function, or ->enable() hooks * on an enabled functions. drm_helper_disable_unused_functions() on the other * hand throws such guarantees into the wind and calls disable hooks * unconditionally on unused functions. */ void drm_helper_disable_unused_functions(struct drm_device *dev) { WARN_ON(drm_drv_uses_atomic_modeset(dev)); drm_modeset_lock_all(dev); __drm_helper_disable_unused_functions(dev); drm_modeset_unlock_all(dev); } EXPORT_SYMBOL(drm_helper_disable_unused_functions); /* * Check the CRTC we're going to map each output to vs. its current * CRTC. If they don't match, we have to disable the output and the CRTC * since the driver will have to re-route things. */ static void drm_crtc_prepare_encoders(struct drm_device *dev) { const struct drm_encoder_helper_funcs *encoder_funcs; struct drm_encoder *encoder; drm_for_each_encoder(encoder, dev) { encoder_funcs = encoder->helper_private; if (!encoder_funcs) continue; /* Disable unused encoders */ if (encoder->crtc == NULL) drm_encoder_disable(encoder); } } /** * drm_crtc_helper_set_mode - internal helper to set a mode * @crtc: CRTC to program * @mode: mode to use * @x: horizontal offset into the surface * @y: vertical offset into the surface * @old_fb: old framebuffer, for cleanup * * Try to set @mode on @crtc. Give @crtc and its associated connectors a chance * to fixup or reject the mode prior to trying to set it. This is an internal * helper that drivers could e.g. use to update properties that require the * entire output pipe to be disabled and re-enabled in a new configuration. For * example for changing whether audio is enabled on a hdmi link or for changing * panel fitter or dither attributes. 
It is also called by the * drm_crtc_helper_set_config() helper function to drive the mode setting * sequence. * * Returns: * True if the mode was set successfully, false otherwise. */ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, int x, int y, struct drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode; const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; const struct drm_encoder_helper_funcs *encoder_funcs; int saved_x, saved_y; bool saved_enabled; struct drm_encoder *encoder; bool ret = true; WARN_ON(drm_drv_uses_atomic_modeset(dev)); drm_warn_on_modeset_not_all_locked(dev); saved_enabled = crtc->enabled; crtc->enabled = drm_helper_crtc_in_use(crtc); if (!crtc->enabled) return true; adjusted_mode = drm_mode_duplicate(dev, mode); if (!adjusted_mode) { crtc->enabled = saved_enabled; return false; } drm_mode_init(&saved_mode, &crtc->mode); drm_mode_init(&saved_hwmode, &crtc->hwmode); saved_x = crtc->x; saved_y = crtc->y; /* Update crtc values up front so the driver can rely on them for mode * setting. */ drm_mode_copy(&crtc->mode, mode); crtc->x = x; crtc->y = y; /* Pass our mode to the connectors and the CRTC to give them a chance to * adjust it according to limitations or connector properties, and also * a chance to reject the mode entirely. */ drm_for_each_encoder(encoder, dev) { if (encoder->crtc != crtc) continue; encoder_funcs = encoder->helper_private; if (!encoder_funcs) continue; encoder_funcs = encoder->helper_private; if (encoder_funcs->mode_fixup) { if (!(ret = encoder_funcs->mode_fixup(encoder, mode, adjusted_mode))) { DRM_DEBUG_KMS("Encoder fixup failed\n"); goto done; } } } if (crtc_funcs->mode_fixup) { if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) { DRM_DEBUG_KMS("CRTC fixup failed\n"); goto done; } } DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); drm_mode_copy(&crtc->hwmode, adjusted_mode); /* Prepare the encoders and CRTCs before setting the mode. */ drm_for_each_encoder(encoder, dev) { if (encoder->crtc != crtc) continue; encoder_funcs = encoder->helper_private; if (!encoder_funcs) continue; /* Disable the encoders as the first thing we do. */ if (encoder_funcs->prepare) encoder_funcs->prepare(encoder); } drm_crtc_prepare_encoders(dev); crtc_funcs->prepare(crtc); /* Set up the DPLL and any encoders state that needs to adjust or depend * on the DPLL. */ ret = !crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb); if (!ret) goto done; drm_for_each_encoder(encoder, dev) { if (encoder->crtc != crtc) continue; encoder_funcs = encoder->helper_private; if (!encoder_funcs) continue; DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%s]\n", encoder->base.id, encoder->name, mode->name); if (encoder_funcs->mode_set) encoder_funcs->mode_set(encoder, mode, adjusted_mode); } /* Now enable the clocks, plane, pipe, and connectors that we set up. */ crtc_funcs->commit(crtc); drm_for_each_encoder(encoder, dev) { if (encoder->crtc != crtc) continue; encoder_funcs = encoder->helper_private; if (!encoder_funcs) continue; if (encoder_funcs->commit) encoder_funcs->commit(encoder); } /* Calculate and store various constants which * are later needed by vblank and swap-completion * timestamping. They are derived from true hwmode. 
*/ drm_calc_timestamping_constants(crtc, &crtc->hwmode); /* FIXME: add subpixel order */ done: drm_mode_destroy(dev, adjusted_mode); if (!ret) { crtc->enabled = saved_enabled; drm_mode_copy(&crtc->mode, &saved_mode); drm_mode_copy(&crtc->hwmode, &saved_hwmode); crtc->x = saved_x; crtc->y = saved_y; } return ret; } EXPORT_SYMBOL(drm_crtc_helper_set_mode); /** * drm_crtc_helper_atomic_check() - Helper to check CRTC atomic-state * @crtc: CRTC to check * @state: atomic state object * * Provides a default CRTC-state check handler for CRTCs that only have * one primary plane attached to it. * * This is often the case for the CRTC of simple framebuffers. See also * drm_plane_helper_atomic_check() for the respective plane-state check * helper function. * * RETURNS: * Zero on success, or an errno code otherwise. */ int drm_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); if (!new_crtc_state->enable) return 0; return drm_atomic_helper_check_crtc_primary_plane(new_crtc_state); } EXPORT_SYMBOL(drm_crtc_helper_atomic_check); static void drm_crtc_helper_disable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_connector *connector; struct drm_encoder *encoder; /* Decouple all encoders and their attached connectors from this crtc */ drm_for_each_encoder(encoder, dev) { struct drm_connector_list_iter conn_iter; if (encoder->crtc != crtc) continue; drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { if (connector->encoder != encoder) continue; connector->encoder = NULL; /* * drm_helper_disable_unused_functions() ought to be * doing this, but since we've decoupled the encoder * from the connector above, the required connection * between them is henceforth no longer available. */ connector->dpms = DRM_MODE_DPMS_OFF; /* we keep a reference while the encoder is bound */ drm_connector_put(connector); } drm_connector_list_iter_end(&conn_iter); } __drm_helper_disable_unused_functions(dev); } /* * For connectors that support multiple encoders, either the * .atomic_best_encoder() or .best_encoder() operation must be implemented. */ struct drm_encoder * drm_connector_get_single_encoder(struct drm_connector *connector) { struct drm_encoder *encoder; WARN_ON(hweight32(connector->possible_encoders) > 1); drm_connector_for_each_possible_encoder(connector, encoder) return encoder; return NULL; } /** * drm_crtc_helper_set_config - set a new config from userspace * @set: mode set configuration * @ctx: lock acquire context, not used here * * The drm_crtc_helper_set_config() helper function implements the of * &drm_crtc_funcs.set_config callback for drivers using the legacy CRTC * helpers. * * It first tries to locate the best encoder for each connector by calling the * connector @drm_connector_helper_funcs.best_encoder helper operation. * * After locating the appropriate encoders, the helper function will call the * mode_fixup encoder and CRTC helper operations to adjust the requested mode, * or reject it completely in which case an error will be returned to the * application. If the new configuration after mode adjustment is identical to * the current configuration the helper function will return without performing * any other operation. 
* * If the adjusted mode is identical to the current mode but changes to the * frame buffer need to be applied, the drm_crtc_helper_set_config() function * will call the CRTC &drm_crtc_helper_funcs.mode_set_base helper operation. * * If the adjusted mode differs from the current mode, or if the * ->mode_set_base() helper operation is not provided, the helper function * performs a full mode set sequence by calling the ->prepare(), ->mode_set() * and ->commit() CRTC and encoder helper operations, in that order. * Alternatively it can also use the dpms and disable helper operations. For * details see &struct drm_crtc_helper_funcs and struct * &drm_encoder_helper_funcs. * * This function is deprecated. New drivers must implement atomic modeset * support, for which this function is unsuitable. Instead drivers should use * drm_atomic_helper_set_config(). * * Returns: * Returns 0 on success, negative errno numbers on failure. */ int drm_crtc_helper_set_config(struct drm_mode_set *set, struct drm_modeset_acquire_ctx *ctx) { struct drm_device *dev; struct drm_crtc **save_encoder_crtcs, *new_crtc; struct drm_encoder **save_connector_encoders, *new_encoder, *encoder; bool mode_changed = false; /* if true do a full mode set */ bool fb_changed = false; /* if true and !mode_changed just do a flip */ struct drm_connector *connector; struct drm_connector_list_iter conn_iter; int count = 0, ro, fail = 0; const struct drm_crtc_helper_funcs *crtc_funcs; struct drm_mode_set save_set; int ret; int i; DRM_DEBUG_KMS("\n"); BUG_ON(!set); BUG_ON(!set->crtc); BUG_ON(!set->crtc->helper_private); /* Enforce sane interface api - has been abused by the fb helper. */ BUG_ON(!set->mode && set->fb); BUG_ON(set->fb && set->num_connectors == 0); crtc_funcs = set->crtc->helper_private; dev = set->crtc->dev; WARN_ON(drm_drv_uses_atomic_modeset(dev)); if (!set->mode) set->fb = NULL; if (set->fb) { DRM_DEBUG_KMS("[CRTC:%d:%s] [FB:%d] #connectors=%d (x y) (%i %i)\n", set->crtc->base.id, set->crtc->name, set->fb->base.id, (int)set->num_connectors, set->x, set->y); } else { DRM_DEBUG_KMS("[CRTC:%d:%s] [NOFB]\n", set->crtc->base.id, set->crtc->name); drm_crtc_helper_disable(set->crtc); return 0; } drm_warn_on_modeset_not_all_locked(dev); /* * Allocate space for the backup of all (non-pointer) encoder and * connector data. */ save_encoder_crtcs = kcalloc(dev->mode_config.num_encoder, sizeof(struct drm_crtc *), GFP_KERNEL); if (!save_encoder_crtcs) return -ENOMEM; save_connector_encoders = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_encoder *), GFP_KERNEL); if (!save_connector_encoders) { kfree(save_encoder_crtcs); return -ENOMEM; } /* * Copy data. Note that driver private data is not affected. * Should anything bad happen only the expected state is * restored, not the drivers personal bookkeeping. 
*/ count = 0; drm_for_each_encoder(encoder, dev) { save_encoder_crtcs[count++] = encoder->crtc; } count = 0; drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) save_connector_encoders[count++] = connector->encoder; drm_connector_list_iter_end(&conn_iter); save_set.crtc = set->crtc; save_set.mode = &set->crtc->mode; save_set.x = set->crtc->x; save_set.y = set->crtc->y; save_set.fb = set->crtc->primary->fb; /* We should be able to check here if the fb has the same properties * and then just flip_or_move it */ if (set->crtc->primary->fb != set->fb) { /* If we have no fb then treat it as a full mode set */ if (set->crtc->primary->fb == NULL) { DRM_DEBUG_KMS("crtc has no fb, full mode set\n"); mode_changed = true; } else if (set->fb->format != set->crtc->primary->fb->format) { mode_changed = true; } else fb_changed = true; } if (set->x != set->crtc->x || set->y != set->crtc->y) fb_changed = true; if (!drm_mode_equal(set->mode, &set->crtc->mode)) { DRM_DEBUG_KMS("modes are different, full mode set\n"); drm_mode_debug_printmodeline(&set->crtc->mode); drm_mode_debug_printmodeline(set->mode); mode_changed = true; } /* take a reference on all unbound connectors in set, reuse the * already taken reference for bound connectors */ for (ro = 0; ro < set->num_connectors; ro++) { if (set->connectors[ro]->encoder) continue; drm_connector_get(set->connectors[ro]); } /* a) traverse passed in connector list and get encoders for them */ count = 0; drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; new_encoder = connector->encoder; for (ro = 0; ro < set->num_connectors; ro++) { if (set->connectors[ro] == connector) { if (connector_funcs->best_encoder) new_encoder = connector_funcs->best_encoder(connector); else new_encoder = drm_connector_get_single_encoder(connector); /* if we can't get an encoder for a connector we are setting now - then fail */ if (new_encoder == NULL) /* don't break so fail path works correct */ fail = 1; if (connector->dpms != DRM_MODE_DPMS_ON) { DRM_DEBUG_KMS("connector dpms not on, full mode switch\n"); mode_changed = true; } break; } } if (new_encoder != connector->encoder) { DRM_DEBUG_KMS("encoder changed, full mode switch\n"); mode_changed = true; /* If the encoder is reused for another connector, then * the appropriate crtc will be set later. 
*/ if (connector->encoder) connector->encoder->crtc = NULL; connector->encoder = new_encoder; } } drm_connector_list_iter_end(&conn_iter); if (fail) { ret = -EINVAL; goto fail; } count = 0; drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { if (!connector->encoder) continue; if (connector->encoder->crtc == set->crtc) new_crtc = NULL; else new_crtc = connector->encoder->crtc; for (ro = 0; ro < set->num_connectors; ro++) { if (set->connectors[ro] == connector) new_crtc = set->crtc; } /* Make sure the new CRTC will work with the encoder */ if (new_crtc && !drm_encoder_crtc_ok(connector->encoder, new_crtc)) { ret = -EINVAL; drm_connector_list_iter_end(&conn_iter); goto fail; } if (new_crtc != connector->encoder->crtc) { DRM_DEBUG_KMS("crtc changed, full mode switch\n"); mode_changed = true; connector->encoder->crtc = new_crtc; } if (new_crtc) { DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d:%s]\n", connector->base.id, connector->name, new_crtc->base.id, new_crtc->name); } else { DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n", connector->base.id, connector->name); } } drm_connector_list_iter_end(&conn_iter); /* mode_set_base is not a required function */ if (fb_changed && !crtc_funcs->mode_set_base) mode_changed = true; if (mode_changed) { if (drm_helper_crtc_in_use(set->crtc)) { DRM_DEBUG_KMS("attempting to set mode from" " userspace\n"); drm_mode_debug_printmodeline(set->mode); set->crtc->primary->fb = set->fb; if (!drm_crtc_helper_set_mode(set->crtc, set->mode, set->x, set->y, save_set.fb)) { DRM_ERROR("failed to set mode on [CRTC:%d:%s]\n", set->crtc->base.id, set->crtc->name); set->crtc->primary->fb = save_set.fb; ret = -EINVAL; goto fail; } DRM_DEBUG_KMS("Setting connector DPMS state to on\n"); for (i = 0; i < set->num_connectors; i++) { DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id, set->connectors[i]->name); set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON); } } __drm_helper_disable_unused_functions(dev); } else if (fb_changed) { set->crtc->x = set->x; set->crtc->y = set->y; set->crtc->primary->fb = set->fb; ret = crtc_funcs->mode_set_base(set->crtc, set->x, set->y, save_set.fb); if (ret != 0) { set->crtc->x = save_set.x; set->crtc->y = save_set.y; set->crtc->primary->fb = save_set.fb; goto fail; } } kfree(save_connector_encoders); kfree(save_encoder_crtcs); return 0; fail: /* Restore all previous data. 
*/ count = 0; drm_for_each_encoder(encoder, dev) { encoder->crtc = save_encoder_crtcs[count++]; } count = 0; drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) connector->encoder = save_connector_encoders[count++]; drm_connector_list_iter_end(&conn_iter); /* after fail drop reference on all unbound connectors in set, let * bound connectors keep their reference */ for (ro = 0; ro < set->num_connectors; ro++) { if (set->connectors[ro]->encoder) continue; drm_connector_put(set->connectors[ro]); } /* Try to restore the config */ if (mode_changed && !drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x, save_set.y, save_set.fb)) DRM_ERROR("failed to restore config after modeset failure\n"); kfree(save_connector_encoders); kfree(save_encoder_crtcs); return ret; } EXPORT_SYMBOL(drm_crtc_helper_set_config); static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder) { int dpms = DRM_MODE_DPMS_OFF; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; struct drm_device *dev = encoder->dev; drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) if (connector->encoder == encoder) if (connector->dpms < dpms) dpms = connector->dpms; drm_connector_list_iter_end(&conn_iter); return dpms; } /* Helper which handles bridge ordering around encoder dpms */ static void drm_helper_encoder_dpms(struct drm_encoder *encoder, int mode) { const struct drm_encoder_helper_funcs *encoder_funcs; encoder_funcs = encoder->helper_private; if (!encoder_funcs) return; if (encoder_funcs->dpms) encoder_funcs->dpms(encoder, mode); } static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc) { int dpms = DRM_MODE_DPMS_OFF; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; struct drm_device *dev = crtc->dev; drm_connector_list_iter_begin(dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) if (connector->encoder && connector->encoder->crtc == crtc) if (connector->dpms < dpms) dpms = connector->dpms; drm_connector_list_iter_end(&conn_iter); return dpms; } /** * drm_helper_connector_dpms() - connector dpms helper implementation * @connector: affected connector * @mode: DPMS mode * * The drm_helper_connector_dpms() helper function implements the * &drm_connector_funcs.dpms callback for drivers using the legacy CRTC * helpers. * * This is the main helper function provided by the CRTC helper framework for * implementing the DPMS connector attribute. It computes the new desired DPMS * state for all encoders and CRTCs in the output mesh and calls the * &drm_crtc_helper_funcs.dpms and &drm_encoder_helper_funcs.dpms callbacks * provided by the driver. * * This function is deprecated. New drivers must implement atomic modeset * support, where DPMS is handled in the DRM core. * * Returns: * Always returns 0. */ int drm_helper_connector_dpms(struct drm_connector *connector, int mode) { struct drm_encoder *encoder = connector->encoder; struct drm_crtc *crtc = encoder ? 
encoder->crtc : NULL; int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF; WARN_ON(drm_drv_uses_atomic_modeset(connector->dev)); if (mode == connector->dpms) return 0; old_dpms = connector->dpms; connector->dpms = mode; if (encoder) encoder_dpms = drm_helper_choose_encoder_dpms(encoder); /* from off to on, do crtc then encoder */ if (mode < old_dpms) { if (crtc) { const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; if (crtc_funcs->dpms) (*crtc_funcs->dpms) (crtc, drm_helper_choose_crtc_dpms(crtc)); } if (encoder) drm_helper_encoder_dpms(encoder, encoder_dpms); } /* from on to off, do encoder then crtc */ if (mode > old_dpms) { if (encoder) drm_helper_encoder_dpms(encoder, encoder_dpms); if (crtc) { const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; if (crtc_funcs->dpms) (*crtc_funcs->dpms) (crtc, drm_helper_choose_crtc_dpms(crtc)); } } return 0; } EXPORT_SYMBOL(drm_helper_connector_dpms); /** * drm_helper_resume_force_mode - force-restore mode setting configuration * @dev: drm_device which should be restored * * Drivers which use the mode setting helpers can use this function to * force-restore the mode setting configuration e.g. on resume or when something * else might have trampled over the hw state (like some overzealous old BIOSen * tended to do). * * This helper doesn't provide a error return value since restoring the old * config should never fail due to resource allocation issues since the driver * has successfully set the restored configuration already. Hence this should * boil down to the equivalent of a few dpms on calls, which also don't provide * an error code. * * Drivers where simply restoring an old configuration again might fail (e.g. * due to slight differences in allocating shared resources when the * configuration is restored in a different order than when userspace set it up) * need to use their own restore logic. * * This function is deprecated. New drivers should implement atomic mode- * setting and use the atomic suspend/resume helpers. * * See also: * drm_atomic_helper_suspend(), drm_atomic_helper_resume() */ void drm_helper_resume_force_mode(struct drm_device *dev) { struct drm_crtc *crtc; struct drm_encoder *encoder; const struct drm_crtc_helper_funcs *crtc_funcs; int encoder_dpms; bool ret; WARN_ON(drm_drv_uses_atomic_modeset(dev)); drm_modeset_lock_all(dev); drm_for_each_crtc(crtc, dev) { if (!crtc->enabled) continue; ret = drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb); /* Restoring the old config should never fail! */ if (ret == false) DRM_ERROR("failed to set mode on crtc %p\n", crtc); /* Turn off outputs that were already powered off */ if (drm_helper_choose_crtc_dpms(crtc)) { drm_for_each_encoder(encoder, dev) { if(encoder->crtc != crtc) continue; encoder_dpms = drm_helper_choose_encoder_dpms( encoder); drm_helper_encoder_dpms(encoder, encoder_dpms); } crtc_funcs = crtc->helper_private; if (crtc_funcs->dpms) (*crtc_funcs->dpms) (crtc, drm_helper_choose_crtc_dpms(crtc)); } } /* disable the unused connectors while restoring the modesetting */ __drm_helper_disable_unused_functions(dev); drm_modeset_unlock_all(dev); } EXPORT_SYMBOL(drm_helper_resume_force_mode); /** * drm_helper_force_disable_all - Forcibly turn off all enabled CRTCs * @dev: DRM device whose CRTCs to turn off * * Drivers may want to call this on unload to ensure that all displays are * unlit and the GPU is in a consistent, low power state. Takes modeset locks. * * Note: This should only be used by non-atomic legacy drivers. 
For an atomic * version look at drm_atomic_helper_shutdown(). * * Returns: * Zero on success, error code on failure. */ int drm_helper_force_disable_all(struct drm_device *dev) { struct drm_crtc *crtc; int ret = 0; drm_modeset_lock_all(dev); drm_for_each_crtc(crtc, dev) if (crtc->enabled) { struct drm_mode_set set = { .crtc = crtc, }; ret = drm_mode_set_config_internal(&set); if (ret) goto out; } out: drm_modeset_unlock_all(dev); return ret; } EXPORT_SYMBOL(drm_helper_force_disable_all); |
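/*
 * Illustrative sketch, not part of drm_crtc_helper.c above: one way a legacy
 * (non-atomic) driver might wire up the helpers documented in this file. All
 * foo_* names are hypothetical; only drm_crtc_helper_set_config(),
 * drm_helper_connector_dpms(), drm_crtc_helper_add() and the vtable layouts
 * come from the helper API itself, and error handling is kept minimal.
 */
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_modeset_helper_vtables.h>

static void foo_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	/* gate the pipe's clocks/power according to the DPMS level */
}

static void foo_crtc_prepare(struct drm_crtc *crtc)
{
	/* called before mode_set: turn the pipe off */
	foo_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}

static void foo_crtc_commit(struct drm_crtc *crtc)
{
	/* called after mode_set: turn the pipe back on */
	foo_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
}

static int foo_crtc_mode_set(struct drm_crtc *crtc,
			     struct drm_display_mode *mode,
			     struct drm_display_mode *adjusted_mode,
			     int x, int y, struct drm_framebuffer *old_fb)
{
	/* program PLLs and scanout from adjusted_mode; return 0 on success */
	return 0;
}

static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
	.dpms		= foo_crtc_dpms,
	.prepare	= foo_crtc_prepare,
	.commit		= foo_crtc_commit,
	.mode_set	= foo_crtc_mode_set,
};

static const struct drm_crtc_funcs foo_crtc_funcs = {
	/* the legacy set_config uAPI entry point is the helper itself */
	.set_config	= drm_crtc_helper_set_config,
	.destroy	= drm_crtc_cleanup,
};

/*
 * Connectors would similarly point &drm_connector_funcs.dpms at
 * drm_helper_connector_dpms() so the DPMS property is routed through the
 * CRTC/encoder dpms helpers. At init time the driver attaches the helper
 * vtable to its CRTC:
 */
static int foo_crtc_bind(struct drm_device *dev, struct drm_crtc *crtc,
			 struct drm_plane *primary)
{
	int ret;

	ret = drm_crtc_init_with_planes(dev, crtc, primary, NULL,
					&foo_crtc_funcs, NULL);
	if (ret)
		return ret;

	drm_crtc_helper_add(crtc, &foo_crtc_helper_funcs);
	return 0;
}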
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/mount.h> #include <linux/seq_file.h> #include <linux/poll.h> #include <linux/ns_common.h> #include <linux/fs_pin.h> struct mnt_namespace { struct ns_common ns; struct mount * root; struct rb_root mounts; /* Protected by namespace_sem */ struct user_namespace *user_ns; struct ucounts *ucounts; u64 seq; /* Sequence number to prevent loops */ wait_queue_head_t poll; u64 event; unsigned int nr_mounts; /* # of mounts in the namespace */ unsigned int pending_mounts; } __randomize_layout; struct mnt_pcp { int mnt_count; int mnt_writers; }; struct mountpoint { struct hlist_node m_hash; struct dentry *m_dentry; struct hlist_head m_list; int m_count; }; struct mount { struct hlist_node mnt_hash; struct mount *mnt_parent; struct dentry *mnt_mountpoint; struct vfsmount mnt; union { struct rcu_head mnt_rcu; struct llist_node mnt_llist; }; #ifdef CONFIG_SMP struct mnt_pcp __percpu *mnt_pcp; #else int mnt_count; int mnt_writers; #endif struct list_head mnt_mounts; /* list of children, anchored here */ struct list_head mnt_child; /* and going through their mnt_child */ struct list_head mnt_instance; /* mount instance on sb->s_mounts */ const char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */ union { struct rb_node mnt_node; /* Under ns->mounts */ struct list_head mnt_list; }; struct list_head mnt_expire; /* link in fs-specific expiry list */ struct list_head mnt_share; /* circular list of shared mounts */ struct list_head mnt_slave_list;/* list of slave mounts */ struct list_head mnt_slave; /* slave list entry */ struct mount *mnt_master; /* slave is on master->mnt_slave_list */ struct mnt_namespace *mnt_ns; /* containing namespace */ struct mountpoint *mnt_mp; /* where is it mounted */ union { struct hlist_node mnt_mp_list; /* list mounts with the same mountpoint */ struct hlist_node mnt_umount; }; struct list_head mnt_umounting; /* list entry for umount propagation */ #ifdef CONFIG_FSNOTIFY struct fsnotify_mark_connector __rcu *mnt_fsnotify_marks; __u32 mnt_fsnotify_mask; #endif int mnt_id; /* mount identifier, reused */ u64 mnt_id_unique; /* mount ID unique until reboot */ int mnt_group_id; /* peer group identifier */ int mnt_expiry_mark; /* true if marked for expiry */ struct hlist_head mnt_pins; struct hlist_head mnt_stuck_children; } __randomize_layout; #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */ static inline struct mount *real_mount(struct vfsmount *mnt) { return container_of(mnt, struct mount, mnt); } static inline int mnt_has_parent(struct mount *mnt) { return mnt != mnt->mnt_parent; } static inline int is_mounted(struct vfsmount *mnt) { /* neither detached nor internal?
*/ return !IS_ERR_OR_NULL(real_mount(mnt)->mnt_ns); } extern struct mount *__lookup_mnt(struct vfsmount *, struct dentry *); extern int __legitimize_mnt(struct vfsmount *, unsigned); static inline bool __path_is_mountpoint(const struct path *path) { struct mount *m = __lookup_mnt(path->mnt, path->dentry); return m && likely(!(m->mnt.mnt_flags & MNT_SYNC_UMOUNT)); } extern void __detach_mounts(struct dentry *dentry); static inline void detach_mounts(struct dentry *dentry) { if (!d_mountpoint(dentry)) return; __detach_mounts(dentry); } static inline void get_mnt_ns(struct mnt_namespace *ns) { refcount_inc(&ns->ns.count); } extern seqlock_t mount_lock; struct proc_mounts { struct mnt_namespace *ns; struct path root; int (*show)(struct seq_file *, struct vfsmount *); }; extern const struct seq_operations mounts_op; extern bool __is_local_mountpoint(struct dentry *dentry); static inline bool is_local_mountpoint(struct dentry *dentry) { if (!d_mountpoint(dentry)) return false; return __is_local_mountpoint(dentry); } static inline bool is_anon_ns(struct mnt_namespace *ns) { return ns->seq == 0; } static inline void move_from_ns(struct mount *mnt, struct list_head *dt_list) { WARN_ON(!(mnt->mnt.mnt_flags & MNT_ONRB)); mnt->mnt.mnt_flags &= ~MNT_ONRB; rb_erase(&mnt->mnt_node, &mnt->mnt_ns->mounts); list_add_tail(&mnt->mnt_list, dt_list); } extern void mnt_cursor_del(struct mnt_namespace *ns, struct mount *cursor); |
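/*
 * Illustrative sketch, not part of mount.h above: how the container_of()-based
 * real_mount() accessor is typically combined with mnt_has_parent() to go from
 * the public struct vfsmount to the internal struct mount and walk up the
 * mount tree. walk_to_mount_root() is a hypothetical helper for demonstration
 * only, and the locking a real walker would need (mount_lock / namespace_sem)
 * is deliberately omitted.
 */
static struct mount *walk_to_mount_root(struct vfsmount *m)
{
	struct mount *mnt = real_mount(m);

	/* mnt_has_parent() is false once mnt->mnt_parent points back at mnt */
	while (mnt_has_parent(mnt))
		mnt = mnt->mnt_parent;

	return mnt;
}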
/* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/nfs_fs.h * * Copyright (C) 1992 Rick Sladkey * * OS-specific nfs filesystem definitions and declarations */ #ifndef _LINUX_NFS_FS_H #define _LINUX_NFS_FS_H #include <uapi/linux/nfs_fs.h> /* * Enable dprintk() debugging support for nfs client.
*/ #ifdef CONFIG_NFS_DEBUG # define NFS_DEBUG #endif #include <linux/in.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/rbtree.h> #include <linux/refcount.h> #include <linux/rwsem.h> #include <linux/wait.h> #include <linux/sunrpc/debug.h> #include <linux/sunrpc/auth.h> #include <linux/sunrpc/clnt.h> #ifdef CONFIG_NFS_FSCACHE #include <linux/netfs.h> #endif #include <linux/nfs.h> #include <linux/nfs2.h> #include <linux/nfs3.h> #include <linux/nfs4.h> #include <linux/nfs_xdr.h> #include <linux/nfs_fs_sb.h> #include <linux/mempool.h> /* * These are the default for number of transports to different server IPs */ #define NFS_MAX_TRANSPORTS 16 /* * Size of the NFS directory verifier */ #define NFS_DIR_VERIFIER_SIZE 2 /* * NFSv3/v4 Access mode cache entry */ struct nfs_access_entry { struct rb_node rb_node; struct list_head lru; kuid_t fsuid; kgid_t fsgid; struct group_info *group_info; u64 timestamp; __u32 mask; struct rcu_head rcu_head; }; struct nfs_lock_context { refcount_t count; struct list_head list; struct nfs_open_context *open_context; fl_owner_t lockowner; atomic_t io_count; struct rcu_head rcu_head; }; struct nfs4_state; struct nfs_open_context { struct nfs_lock_context lock_context; fl_owner_t flock_owner; struct dentry *dentry; const struct cred *cred; struct rpc_cred __rcu *ll_cred; /* low-level cred - use to check for expiry */ struct nfs4_state *state; fmode_t mode; unsigned long flags; #define NFS_CONTEXT_BAD (2) #define NFS_CONTEXT_UNLOCK (3) #define NFS_CONTEXT_FILE_OPEN (4) int error; struct list_head list; struct nfs4_threshold *mdsthreshold; struct rcu_head rcu_head; }; struct nfs_open_dir_context { struct list_head list; atomic_t cache_hits; atomic_t cache_misses; unsigned long attr_gencount; __be32 verf[NFS_DIR_VERIFIER_SIZE]; __u64 dir_cookie; __u64 last_cookie; pgoff_t page_index; unsigned int dtsize; bool force_clear; bool eof; struct rcu_head rcu_head; }; /* * NFSv4 delegation */ struct nfs_delegation; struct posix_acl; struct nfs4_xattr_cache; /* * nfs fs inode data in memory */ struct nfs_inode { /* * The 64bit 'inode number' */ __u64 fileid; /* * NFS file handle */ struct nfs_fh fh; /* * Various flags */ unsigned long flags; /* atomic bit ops */ unsigned long cache_validity; /* bit mask */ /* * read_cache_jiffies is when we started read-caching this inode. * attrtimeo is for how long the cached information is assumed * to be valid. A successful attribute revalidation doubles * attrtimeo (up to acregmax/acdirmax), a failure resets it to * acregmin/acdirmin. * * We need to revalidate the cached attrs for this inode if * * jiffies - read_cache_jiffies >= attrtimeo * * Please note the comparison is greater than or equal * so that zero timeout values can be specified. */ unsigned long read_cache_jiffies; unsigned long attrtimeo; unsigned long attrtimeo_timestamp; unsigned long attr_gencount; struct rb_root access_cache; struct list_head access_cache_entry_lru; struct list_head access_cache_inode_lru; union { /* Directory */ struct { /* "Generation counter" for the attribute cache. * This is bumped whenever we update the metadata * on the server. 
*/ unsigned long cache_change_attribute; /* * This is the cookie verifier used for NFSv3 readdir * operations */ __be32 cookieverf[NFS_DIR_VERIFIER_SIZE]; /* Readers: in-flight sillydelete RPC calls */ /* Writers: rmdir */ struct rw_semaphore rmdir_sem; }; /* Regular file */ struct { atomic_long_t nrequests; atomic_long_t redirtied_pages; struct nfs_mds_commit_info commit_info; struct mutex commit_mutex; }; }; /* Open contexts for shared mmap writes */ struct list_head open_files; /* Keep track of out-of-order replies. * The ooo array contains start/end pairs of * numbers from the changeid sequence when * the inode's iversion has been updated. * It also contains end/start pair (i.e. reverse order) * of sections of the changeid sequence that have * been seen in replies from the server. * Normally these should match and when both * A:B and B:A are found in ooo, they are both removed. * And if a reply with A:B causes an iversion update * of A:B, then neither are added. * When a reply has pre_change that doesn't match * iversion, then the changeid pair and any consequent * change in iversion ARE added. Later replies * might fill in the gaps, or possibly a gap is caused * by a change from another client. * When a file or directory is opened, if the ooo table * is not empty, then we assume the gaps were due to * another client and we invalidate the cached data. * * We can only track a limited number of concurrent gaps. * Currently that limit is 16. * We allocate the table on demand. If there is insufficient * memory, then we probably cannot cache the file anyway * so there is no loss. */ struct { int cnt; struct { u64 start, end; } gap[16]; } *ooo; #if IS_ENABLED(CONFIG_NFS_V4) struct nfs4_cached_acl *nfs4_acl; /* NFSv4 state */ struct list_head open_states; struct nfs_delegation __rcu *delegation; struct rw_semaphore rwsem; /* pNFS layout information */ struct pnfs_layout_hdr *layout; #endif /* CONFIG_NFS_V4*/ /* how many bytes have been written/read and how many bytes queued up */ __u64 write_io; __u64 read_io; #ifdef CONFIG_NFS_V4_2 struct nfs4_xattr_cache *xattr_cache; #endif union { struct inode vfs_inode; #ifdef CONFIG_NFS_FSCACHE struct netfs_inode netfs; /* netfs context and VFS inode */ #endif }; }; struct nfs4_copy_state { struct list_head copies; struct list_head src_copies; nfs4_stateid stateid; struct completion completion; uint64_t count; struct nfs_writeverf verf; int error; int flags; struct nfs4_state *parent_src_state; struct nfs4_state *parent_dst_state; }; /* * Access bit flags */ #define NFS_ACCESS_READ 0x0001 #define NFS_ACCESS_LOOKUP 0x0002 #define NFS_ACCESS_MODIFY 0x0004 #define NFS_ACCESS_EXTEND 0x0008 #define NFS_ACCESS_DELETE 0x0010 #define NFS_ACCESS_EXECUTE 0x0020 #define NFS_ACCESS_XAREAD 0x0040 #define NFS_ACCESS_XAWRITE 0x0080 #define NFS_ACCESS_XALIST 0x0100 /* * Cache validity bit flags */ #define NFS_INO_INVALID_DATA BIT(1) /* cached data is invalid */ #define NFS_INO_INVALID_ATIME BIT(2) /* cached atime is invalid */ #define NFS_INO_INVALID_ACCESS BIT(3) /* cached access cred invalid */ #define NFS_INO_INVALID_ACL BIT(4) /* cached acls are invalid */ #define NFS_INO_REVAL_FORCED BIT(6) /* force revalidation ignoring a delegation */ #define NFS_INO_INVALID_LABEL BIT(7) /* cached label is invalid */ #define NFS_INO_INVALID_CHANGE BIT(8) /* cached change is invalid */ #define NFS_INO_INVALID_CTIME BIT(9) /* cached ctime is invalid */ #define NFS_INO_INVALID_MTIME BIT(10) /* cached mtime is invalid */ #define NFS_INO_INVALID_SIZE BIT(11) /* cached size is invalid 
*/ #define NFS_INO_INVALID_OTHER BIT(12) /* other attrs are invalid */ #define NFS_INO_DATA_INVAL_DEFER \ BIT(13) /* Deferred cache invalidation */ #define NFS_INO_INVALID_BLOCKS BIT(14) /* cached blocks are invalid */ #define NFS_INO_INVALID_XATTR BIT(15) /* xattrs are invalid */ #define NFS_INO_INVALID_NLINK BIT(16) /* cached nlinks is invalid */ #define NFS_INO_INVALID_MODE BIT(17) /* cached mode is invalid */ #define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \ | NFS_INO_INVALID_CTIME \ | NFS_INO_INVALID_MTIME \ | NFS_INO_INVALID_SIZE \ | NFS_INO_INVALID_NLINK \ | NFS_INO_INVALID_MODE \ | NFS_INO_INVALID_OTHER) /* inode metadata is invalid */ /* * Bit offsets in flags field */ #define NFS_INO_STALE (1) /* possible stale inode */ #define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */ #define NFS_INO_INVALIDATING (3) /* inode is being invalidated */ #define NFS_INO_PRESERVE_UNLINKED (4) /* preserve file if removed while open */ #define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */ #define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */ #define NFS_INO_LAYOUTSTATS (11) /* layoutstats inflight */ #define NFS_INO_ODIRECT (12) /* I/O setting is O_DIRECT */ static inline struct nfs_inode *NFS_I(const struct inode *inode) { return container_of(inode, struct nfs_inode, vfs_inode); } static inline struct nfs_server *NFS_SB(const struct super_block *s) { return (struct nfs_server *)(s->s_fs_info); } static inline struct nfs_fh *NFS_FH(const struct inode *inode) { return &NFS_I(inode)->fh; } static inline struct nfs_server *NFS_SERVER(const struct inode *inode) { return NFS_SB(inode->i_sb); } static inline struct rpc_clnt *NFS_CLIENT(const struct inode *inode) { return NFS_SERVER(inode)->client; } static inline const struct nfs_rpc_ops *NFS_PROTO(const struct inode *inode) { return NFS_SERVER(inode)->nfs_client->rpc_ops; } static inline unsigned NFS_MINATTRTIMEO(const struct inode *inode) { struct nfs_server *nfss = NFS_SERVER(inode); return S_ISDIR(inode->i_mode) ? nfss->acdirmin : nfss->acregmin; } static inline unsigned NFS_MAXATTRTIMEO(const struct inode *inode) { struct nfs_server *nfss = NFS_SERVER(inode); return S_ISDIR(inode->i_mode) ? nfss->acdirmax : nfss->acregmax; } static inline int NFS_STALE(const struct inode *inode) { return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags); } static inline __u64 NFS_FILEID(const struct inode *inode) { return NFS_I(inode)->fileid; } static inline void set_nfs_fileid(struct inode *inode, __u64 fileid) { NFS_I(inode)->fileid = fileid; } static inline void nfs_mark_for_revalidate(struct inode *inode) { struct nfs_inode *nfsi = NFS_I(inode); spin_lock(&inode->i_lock); nfsi->cache_validity |= NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL | NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME | NFS_INO_INVALID_SIZE; if (S_ISDIR(inode->i_mode)) nfsi->cache_validity |= NFS_INO_INVALID_DATA; spin_unlock(&inode->i_lock); } static inline int nfs_server_capable(const struct inode *inode, int cap) { return NFS_SERVER(inode)->caps & cap; } /** * nfs_save_change_attribute - Returns the inode attribute change cookie * @dir - pointer to parent directory inode * The "cache change attribute" is updated when we need to revalidate * our dentry cache after a directory was seen to change on the server. 
*/ static inline unsigned long nfs_save_change_attribute(struct inode *dir) { return NFS_I(dir)->cache_change_attribute; } /* * linux/fs/nfs/inode.c */ extern int nfs_sync_mapping(struct address_space *mapping); extern void nfs_zap_mapping(struct inode *inode, struct address_space *mapping); extern void nfs_zap_caches(struct inode *); extern void nfs_set_inode_stale(struct inode *inode); extern void nfs_invalidate_atime(struct inode *); extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *, struct nfs_fattr *); struct inode *nfs_ilookup(struct super_block *sb, struct nfs_fattr *, struct nfs_fh *); extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *); extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_getattr(struct mnt_idmap *, const struct path *, struct kstat *, u32, unsigned int); extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *, const struct cred *); extern void nfs_access_set_mask(struct nfs_access_entry *, u32); extern int nfs_permission(struct mnt_idmap *, struct inode *, int); extern int nfs_open(struct inode *, struct file *); extern int nfs_attribute_cache_expired(struct inode *inode); extern int nfs_revalidate_inode(struct inode *inode, unsigned long flags); extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); extern int nfs_clear_invalid_mapping(struct address_space *mapping); extern bool nfs_mapping_need_revalidate_inode(struct inode *inode); extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping); extern int nfs_revalidate_mapping_rcu(struct inode *inode); extern int nfs_setattr(struct mnt_idmap *, struct dentry *, struct iattr *); extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, struct nfs_fattr *); extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr); extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); extern void put_nfs_open_context(struct nfs_open_context *ctx); extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, const struct cred *cred, fmode_t mode); extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode, struct file *filp); extern void nfs_inode_attach_open_context(struct nfs_open_context *ctx); extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx); extern void nfs_file_clear_open_context(struct file *flip); extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx); extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx); extern u64 nfs_compat_user_ino64(u64 fileid); extern void nfs_fattr_init(struct nfs_fattr *fattr); extern void nfs_fattr_set_barrier(struct nfs_fattr *fattr); extern unsigned long nfs_inc_attr_generation_counter(void); extern struct nfs_fattr *nfs_alloc_fattr(void); extern struct nfs_fattr *nfs_alloc_fattr_with_label(struct nfs_server *server); static inline void nfs4_label_free(struct nfs4_label *label) { #ifdef CONFIG_NFS_V4_SECURITY_LABEL if (label) { kfree(label->label); kfree(label); } #endif } static inline void nfs_free_fattr(const struct nfs_fattr *fattr) { if (fattr) nfs4_label_free(fattr->label); kfree(fattr); } extern struct nfs_fh *nfs_alloc_fhandle(void); static inline void nfs_free_fhandle(const 
struct nfs_fh *fh) { kfree(fh); } #ifdef NFS_DEBUG extern u32 _nfs_display_fhandle_hash(const struct nfs_fh *fh); static inline u32 nfs_display_fhandle_hash(const struct nfs_fh *fh) { return _nfs_display_fhandle_hash(fh); } extern void _nfs_display_fhandle(const struct nfs_fh *fh, const char *caption); #define nfs_display_fhandle(fh, caption) \ do { \ if (unlikely(nfs_debug & NFSDBG_FACILITY)) \ _nfs_display_fhandle(fh, caption); \ } while (0) #else static inline u32 nfs_display_fhandle_hash(const struct nfs_fh *fh) { return 0; } static inline void nfs_display_fhandle(const struct nfs_fh *fh, const char *caption) { } #endif /* * linux/fs/nfs/nfsroot.c */ extern int nfs_root_data(char **root_device, char **root_data); /*__init*/ /* linux/net/ipv4/ipconfig.c: trims ip addr off front of name, too. */ extern __be32 root_nfs_parse_addr(char *name); /*__init*/ /* * linux/fs/nfs/file.c */ extern const struct file_operations nfs_file_operations; #if IS_ENABLED(CONFIG_NFS_V4) extern const struct file_operations nfs4_file_operations; #endif /* CONFIG_NFS_V4 */ extern const struct address_space_operations nfs_file_aops; extern const struct address_space_operations nfs_dir_aops; static inline struct nfs_open_context *nfs_file_open_context(struct file *filp) { return filp->private_data; } static inline const struct cred *nfs_file_cred(struct file *file) { if (file != NULL) { struct nfs_open_context *ctx = nfs_file_open_context(file); if (ctx) return ctx->cred; } return NULL; } /* * linux/fs/nfs/direct.c */ int nfs_swap_rw(struct kiocb *iocb, struct iov_iter *iter); ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter, bool swap); ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter, bool swap); /* * linux/fs/nfs/dir.c */ extern const struct file_operations nfs_dir_operations; extern const struct dentry_operations nfs_dentry_operations; extern void nfs_force_lookup_revalidate(struct inode *dir); extern void nfs_set_verifier(struct dentry * dentry, unsigned long verf); #if IS_ENABLED(CONFIG_NFS_V4) extern void nfs_clear_verifier_delegated(struct inode *inode); #endif /* IS_ENABLED(CONFIG_NFS_V4) */ extern struct dentry *nfs_add_or_obtain(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr); extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr); extern int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags); extern void nfs_access_zap_cache(struct inode *inode); extern int nfs_access_get_cached(struct inode *inode, const struct cred *cred, u32 *mask, bool may_block); /* * linux/fs/nfs/symlink.c */ extern const struct inode_operations nfs_symlink_inode_operations; /* * linux/fs/nfs/sysctl.c */ #ifdef CONFIG_SYSCTL extern int nfs_register_sysctl(void); extern void nfs_unregister_sysctl(void); #else #define nfs_register_sysctl() 0 #define nfs_unregister_sysctl() do { } while(0) #endif /* * linux/fs/nfs/namespace.c */ extern const struct inode_operations nfs_mountpoint_inode_operations; extern const struct inode_operations nfs_referral_inode_operations; extern int nfs_mountpoint_expiry_timeout; extern void nfs_release_automount_timer(void); /* * linux/fs/nfs/unlink.c */ extern void nfs_complete_unlink(struct dentry *dentry, struct inode *); /* * linux/fs/nfs/write.c */ extern int nfs_congestion_kb; extern int nfs_writepage(struct page *page, struct writeback_control *wbc); extern int nfs_writepages(struct address_space *, struct writeback_control *); extern int nfs_flush_incompatible(struct file 
*file, struct folio *folio); extern int nfs_update_folio(struct file *file, struct folio *folio, unsigned int offset, unsigned int count); /* * Try to write back everything synchronously (but check the * return value!) */ extern int nfs_sync_inode(struct inode *inode); extern int nfs_wb_all(struct inode *inode); extern int nfs_wb_folio(struct inode *inode, struct folio *folio); int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio); extern int nfs_commit_inode(struct inode *, int); extern struct nfs_commit_data *nfs_commitdata_alloc(void); extern void nfs_commit_free(struct nfs_commit_data *data); bool nfs_commit_end(struct nfs_mds_commit_info *cinfo); static inline bool nfs_have_writebacks(const struct inode *inode) { if (S_ISREG(inode->i_mode)) return atomic_long_read(&NFS_I(inode)->nrequests) != 0; return false; } /* * linux/fs/nfs/read.c */ int nfs_read_folio(struct file *, struct folio *); void nfs_readahead(struct readahead_control *); /* * inline functions */ static inline loff_t nfs_size_to_loff_t(__u64 size) { return min_t(u64, size, OFFSET_MAX); } static inline ino_t nfs_fileid_to_ino_t(u64 fileid) { ino_t ino = (ino_t) fileid; if (sizeof(ino_t) < sizeof(u64)) ino ^= fileid >> (sizeof(u64)-sizeof(ino_t)) * 8; return ino; } static inline void nfs_ooo_clear(struct nfs_inode *nfsi) { nfsi->cache_validity &= ~NFS_INO_DATA_INVAL_DEFER; kfree(nfsi->ooo); nfsi->ooo = NULL; } static inline bool nfs_ooo_test(struct nfs_inode *nfsi) { return (nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER) || (nfsi->ooo && nfsi->ooo->cnt > 0); } #define NFS_JUKEBOX_RETRY_TIME (5 * HZ) /* We need to block new opens while a file is being unlinked. * If it is opened *before* we decide to unlink, we will silly-rename * instead. If it is opened *after*, then we need to create or will fail. * If we allow the two to race, we could end up with a file that is open * but deleted on the server resulting in ESTALE. * So use ->d_fsdata to record when the unlink is happening * and block dentry revalidation while it is set. */ #define NFS_FSDATA_BLOCKED ((void*)1) # undef ifdebug # ifdef NFS_DEBUG # define ifdebug(fac) if (unlikely(nfs_debug & NFSDBG_##fac)) # define NFS_IFDEBUG(x) x # else # define ifdebug(fac) if (0) # define NFS_IFDEBUG(x) # endif #endif |
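/*
 * Illustrative sketch, not part of nfs_fs.h above: the attribute-cache timeout
 * rule documented in struct nfs_inode ("jiffies - read_cache_jiffies >=
 * attrtimeo"), written as a wraparound-safe helper. The name
 * nfs_attr_cache_expired_example() is hypothetical; the NFS client's real
 * check lives in fs/nfs/inode.c and takes additional state (e.g. delegations)
 * into account.
 */
#include <linux/jiffies.h>

static inline bool nfs_attr_cache_expired_example(const struct inode *inode)
{
	const struct nfs_inode *nfsi = NFS_I(inode);

	/* ">=" so that attrtimeo == 0 forces revalidation on every check */
	return time_after_eq(jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo);
}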
902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 | // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 1995 Linus Torvalds * * Pentium III FXSR, SSE support * Gareth Hughes <gareth@valinux.com>, May 2000 * * X86-64 port * Andi Kleen. * * CPU hotplug support - ashok.raj@intel.com */ /* * This file handles the architecture-dependent parts of process handling.. */ #include <linux/cpu.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/sched/task.h> #include <linux/sched/task_stack.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/elfcore.h> #include <linux/smp.h> #include <linux/slab.h> #include <linux/user.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/export.h> #include <linux/ptrace.h> #include <linux/notifier.h> #include <linux/kprobes.h> #include <linux/kdebug.h> #include <linux/prctl.h> #include <linux/uaccess.h> #include <linux/io.h> #include <linux/ftrace.h> #include <linux/syscalls.h> #include <linux/iommu.h> #include <asm/processor.h> #include <asm/pkru.h> #include <asm/fpu/sched.h> #include <asm/mmu_context.h> #include <asm/prctl.h> #include <asm/desc.h> #include <asm/proto.h> #include <asm/ia32.h> #include <asm/debugreg.h> #include <asm/switch_to.h> #include <asm/xen/hypervisor.h> #include <asm/vdso.h> #include <asm/resctrl.h> #include <asm/unistd.h> #include <asm/fsgsbase.h> #ifdef CONFIG_IA32_EMULATION /* Not included via unistd.h */ #include <asm/unistd_32_ia32.h> #endif #include "process.h" /* Prints also some state that isn't saved in the pt_regs */ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode, const char *log_lvl) { unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs; unsigned long d0, d1, d2, d3, d6, d7; unsigned int fsindex, gsindex; unsigned int ds, es; show_iret_regs(regs, log_lvl); if (regs->orig_ax != -1) pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax); else pr_cont("\n"); printk("%sRAX: %016lx RBX: %016lx RCX: %016lx\n", log_lvl, regs->ax, regs->bx, regs->cx); printk("%sRDX: %016lx RSI: %016lx RDI: %016lx\n", log_lvl, regs->dx, regs->si, regs->di); printk("%sRBP: %016lx R08: %016lx R09: %016lx\n", log_lvl, regs->bp, regs->r8, regs->r9); printk("%sR10: %016lx R11: %016lx R12: %016lx\n", log_lvl, regs->r10, regs->r11, regs->r12); printk("%sR13: %016lx R14: %016lx R15: %016lx\n", log_lvl, regs->r13, regs->r14, regs->r15); if (mode == SHOW_REGS_SHORT) return; if (mode == SHOW_REGS_USER) { rdmsrl(MSR_FS_BASE, fs); rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); printk("%sFS: %016lx GS: %016lx\n", log_lvl, fs, shadowgs); return; } asm("movl %%ds,%0" : "=r" (ds)); asm("movl %%es,%0" : "=r" (es)); asm("movl %%fs,%0" : "=r" (fsindex)); asm("movl %%gs,%0" : "=r" (gsindex)); rdmsrl(MSR_FS_BASE, fs); rdmsrl(MSR_GS_BASE, gs); rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); cr0 = read_cr0(); cr2 = read_cr2(); cr3 = __read_cr3(); cr4 = __read_cr4(); printk("%sFS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", log_lvl, fs, fsindex, gs, gsindex, shadowgs); printk("%sCS: %04lx DS: %04x ES: %04x CR0: %016lx\n", log_lvl, regs->cs, ds, es, cr0); printk("%sCR2: %016lx CR3: %016lx CR4: %016lx\n", log_lvl, cr2, cr3, cr4); get_debugreg(d0, 0); get_debugreg(d1, 1); get_debugreg(d2, 2); get_debugreg(d3, 3); get_debugreg(d6, 6); get_debugreg(d7, 7); /* Only print out debug registers if they are in their non-default state. 
*/ if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) && (d6 == DR6_RESERVED) && (d7 == 0x400))) { printk("%sDR0: %016lx DR1: %016lx DR2: %016lx\n", log_lvl, d0, d1, d2); printk("%sDR3: %016lx DR6: %016lx DR7: %016lx\n", log_lvl, d3, d6, d7); } if (cpu_feature_enabled(X86_FEATURE_OSPKE)) printk("%sPKRU: %08x\n", log_lvl, read_pkru()); } void release_thread(struct task_struct *dead_task) { WARN_ON(dead_task->mm); } enum which_selector { FS, GS }; /* * Out of line to be protected from kprobes and tracing. If this would be * traced or probed than any access to a per CPU variable happens with * the wrong GS. * * It is not used on Xen paravirt. When paravirt support is needed, it * needs to be renamed with native_ prefix. */ static noinstr unsigned long __rdgsbase_inactive(void) { unsigned long gsbase; lockdep_assert_irqs_disabled(); if (!cpu_feature_enabled(X86_FEATURE_XENPV)) { native_swapgs(); gsbase = rdgsbase(); native_swapgs(); } else { instrumentation_begin(); rdmsrl(MSR_KERNEL_GS_BASE, gsbase); instrumentation_end(); } return gsbase; } /* * Out of line to be protected from kprobes and tracing. If this would be * traced or probed than any access to a per CPU variable happens with * the wrong GS. * * It is not used on Xen paravirt. When paravirt support is needed, it * needs to be renamed with native_ prefix. */ static noinstr void __wrgsbase_inactive(unsigned long gsbase) { lockdep_assert_irqs_disabled(); if (!cpu_feature_enabled(X86_FEATURE_XENPV)) { native_swapgs(); wrgsbase(gsbase); native_swapgs(); } else { instrumentation_begin(); wrmsrl(MSR_KERNEL_GS_BASE, gsbase); instrumentation_end(); } } /* * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are * not available. The goal is to be reasonably fast on non-FSGSBASE systems. * It's forcibly inlined because it'll generate better code and this function * is hot. */ static __always_inline void save_base_legacy(struct task_struct *prev_p, unsigned short selector, enum which_selector which) { if (likely(selector == 0)) { /* * On Intel (without X86_BUG_NULL_SEG), the segment base could * be the pre-existing saved base or it could be zero. On AMD * (with X86_BUG_NULL_SEG), the segment base could be almost * anything. * * This branch is very hot (it's hit twice on almost every * context switch between 64-bit programs), and avoiding * the RDMSR helps a lot, so we just assume that whatever * value is already saved is correct. This matches historical * Linux behavior, so it won't break existing applications. * * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we * report that the base is zero, it needs to actually be zero: * see the corresponding logic in load_seg_legacy. */ } else { /* * If the selector is 1, 2, or 3, then the base is zero on * !X86_BUG_NULL_SEG CPUs and could be anything on * X86_BUG_NULL_SEG CPUs. In the latter case, Linux * has never attempted to preserve the base across context * switches. * * If selector > 3, then it refers to a real segment, and * saving the base isn't necessary. */ if (which == FS) prev_p->thread.fsbase = 0; else prev_p->thread.gsbase = 0; } } static __always_inline void save_fsgs(struct task_struct *task) { savesegment(fs, task->thread.fsindex); savesegment(gs, task->thread.gsindex); if (static_cpu_has(X86_FEATURE_FSGSBASE)) { /* * If FSGSBASE is enabled, we can't make any useful guesses * about the base, and user code expects us to save the current * value. Fortunately, reading the base directly is efficient. 
*/ task->thread.fsbase = rdfsbase(); task->thread.gsbase = __rdgsbase_inactive(); } else { save_base_legacy(task, task->thread.fsindex, FS); save_base_legacy(task, task->thread.gsindex, GS); } } /* * While a process is running,current->thread.fsbase and current->thread.gsbase * may not match the corresponding CPU registers (see save_base_legacy()). */ void current_save_fsgs(void) { unsigned long flags; /* Interrupts need to be off for FSGSBASE */ local_irq_save(flags); save_fsgs(current); local_irq_restore(flags); } #if IS_ENABLED(CONFIG_KVM) EXPORT_SYMBOL_GPL(current_save_fsgs); #endif static __always_inline void loadseg(enum which_selector which, unsigned short sel) { if (which == FS) loadsegment(fs, sel); else load_gs_index(sel); } static __always_inline void load_seg_legacy(unsigned short prev_index, unsigned long prev_base, unsigned short next_index, unsigned long next_base, enum which_selector which) { if (likely(next_index <= 3)) { /* * The next task is using 64-bit TLS, is not using this * segment at all, or is having fun with arcane CPU features. */ if (next_base == 0) { /* * Nasty case: on AMD CPUs, we need to forcibly zero * the base. */ if (static_cpu_has_bug(X86_BUG_NULL_SEG)) { loadseg(which, __USER_DS); loadseg(which, next_index); } else { /* * We could try to exhaustively detect cases * under which we can skip the segment load, * but there's really only one case that matters * for performance: if both the previous and * next states are fully zeroed, we can skip * the load. * * (This assumes that prev_base == 0 has no * false positives. This is the case on * Intel-style CPUs.) */ if (likely(prev_index | next_index | prev_base)) loadseg(which, next_index); } } else { if (prev_index != next_index) loadseg(which, next_index); wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE, next_base); } } else { /* * The next task is using a real segment. Loading the selector * is sufficient. */ loadseg(which, next_index); } } /* * Store prev's PKRU value and load next's PKRU value if they differ. PKRU * is not XSTATE managed on context switch because that would require a * lookup in the task's FPU xsave buffer and require to keep that updated * in various places. */ static __always_inline void x86_pkru_load(struct thread_struct *prev, struct thread_struct *next) { if (!cpu_feature_enabled(X86_FEATURE_OSPKE)) return; /* Stash the prev task's value: */ prev->pkru = rdpkru(); /* * PKRU writes are slightly expensive. Avoid them when not * strictly necessary: */ if (prev->pkru != next->pkru) wrpkru(next->pkru); } static __always_inline void x86_fsgsbase_load(struct thread_struct *prev, struct thread_struct *next) { if (static_cpu_has(X86_FEATURE_FSGSBASE)) { /* Update the FS and GS selectors if they could have changed. */ if (unlikely(prev->fsindex || next->fsindex)) loadseg(FS, next->fsindex); if (unlikely(prev->gsindex || next->gsindex)) loadseg(GS, next->gsindex); /* Update the bases. */ wrfsbase(next->fsbase); __wrgsbase_inactive(next->gsbase); } else { load_seg_legacy(prev->fsindex, prev->fsbase, next->fsindex, next->fsbase, FS); load_seg_legacy(prev->gsindex, prev->gsbase, next->gsindex, next->gsbase, GS); } } unsigned long x86_fsgsbase_read_task(struct task_struct *task, unsigned short selector) { unsigned short idx = selector >> 3; unsigned long base; if (likely((selector & SEGMENT_TI_MASK) == 0)) { if (unlikely(idx >= GDT_ENTRIES)) return 0; /* * There are no user segments in the GDT with nonzero bases * other than the TLS segments. 
*/ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) return 0; idx -= GDT_ENTRY_TLS_MIN; base = get_desc_base(&task->thread.tls_array[idx]); } else { #ifdef CONFIG_MODIFY_LDT_SYSCALL struct ldt_struct *ldt; /* * If performance here mattered, we could protect the LDT * with RCU. This is a slow path, though, so we can just * take the mutex. */ mutex_lock(&task->mm->context.lock); ldt = task->mm->context.ldt; if (unlikely(!ldt || idx >= ldt->nr_entries)) base = 0; else base = get_desc_base(ldt->entries + idx); mutex_unlock(&task->mm->context.lock); #else base = 0; #endif } return base; } unsigned long x86_gsbase_read_cpu_inactive(void) { unsigned long gsbase; if (boot_cpu_has(X86_FEATURE_FSGSBASE)) { unsigned long flags; local_irq_save(flags); gsbase = __rdgsbase_inactive(); local_irq_restore(flags); } else { rdmsrl(MSR_KERNEL_GS_BASE, gsbase); } return gsbase; } void x86_gsbase_write_cpu_inactive(unsigned long gsbase) { if (boot_cpu_has(X86_FEATURE_FSGSBASE)) { unsigned long flags; local_irq_save(flags); __wrgsbase_inactive(gsbase); local_irq_restore(flags); } else { wrmsrl(MSR_KERNEL_GS_BASE, gsbase); } } unsigned long x86_fsbase_read_task(struct task_struct *task) { unsigned long fsbase; if (task == current) fsbase = x86_fsbase_read_cpu(); else if (boot_cpu_has(X86_FEATURE_FSGSBASE) || (task->thread.fsindex == 0)) fsbase = task->thread.fsbase; else fsbase = x86_fsgsbase_read_task(task, task->thread.fsindex); return fsbase; } unsigned long x86_gsbase_read_task(struct task_struct *task) { unsigned long gsbase; if (task == current) gsbase = x86_gsbase_read_cpu_inactive(); else if (boot_cpu_has(X86_FEATURE_FSGSBASE) || (task->thread.gsindex == 0)) gsbase = task->thread.gsbase; else gsbase = x86_fsgsbase_read_task(task, task->thread.gsindex); return gsbase; } void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase) { WARN_ON_ONCE(task == current); task->thread.fsbase = fsbase; } void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase) { WARN_ON_ONCE(task == current); task->thread.gsbase = gsbase; } static void start_thread_common(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp, unsigned int _cs, unsigned int _ss, unsigned int _ds) { WARN_ON_ONCE(regs != current_pt_regs()); if (static_cpu_has(X86_BUG_NULL_SEG)) { /* Loading zero below won't clear the base. */ loadsegment(fs, __USER_DS); load_gs_index(__USER_DS); } reset_thread_features(); loadsegment(fs, 0); loadsegment(es, _ds); loadsegment(ds, _ds); load_gs_index(0); regs->ip = new_ip; regs->sp = new_sp; regs->cs = _cs; regs->ss = _ss; regs->flags = X86_EFLAGS_IF; } void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) { start_thread_common(regs, new_ip, new_sp, __USER_CS, __USER_DS, 0); } EXPORT_SYMBOL_GPL(start_thread); #ifdef CONFIG_COMPAT void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp, bool x32) { start_thread_common(regs, new_ip, new_sp, x32 ? __USER_CS : __USER32_CS, __USER_DS, __USER_DS); } #endif /* * switch_to(x,y) should switch tasks from x to y. * * This could still be optimized: * - fold all the options into a flag word and test it with a single test. * - could test fs/gs bitsliced * * Kprobes not supported here. Set the probe on schedule instead. * Function graph tracer not supported too. 
*/ __no_kmsan_checks __visible __notrace_funcgraph struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p) { struct thread_struct *prev = &prev_p->thread; struct thread_struct *next = &next_p->thread; struct fpu *prev_fpu = &prev->fpu; int cpu = smp_processor_id(); WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) && this_cpu_read(pcpu_hot.hardirq_stack_inuse)); if (!test_thread_flag(TIF_NEED_FPU_LOAD)) switch_fpu_prepare(prev_fpu, cpu); /* We must save %fs and %gs before load_TLS() because * %fs and %gs may be cleared by load_TLS(). * * (e.g. xen_load_tls()) */ save_fsgs(prev_p); /* * Load TLS before restoring any segments so that segment loads * reference the correct GDT entries. */ load_TLS(next, cpu); /* * Leave lazy mode, flushing any hypercalls made here. This * must be done after loading TLS entries in the GDT but before * loading segments that might reference them. */ arch_end_context_switch(next_p); /* Switch DS and ES. * * Reading them only returns the selectors, but writing them (if * nonzero) loads the full descriptor from the GDT or LDT. The * LDT for next is loaded in switch_mm, and the GDT is loaded * above. * * We therefore need to write new values to the segment * registers on every context switch unless both the new and old * values are zero. * * Note that we don't need to do anything for CS and SS, as * those are saved and restored as part of pt_regs. */ savesegment(es, prev->es); if (unlikely(next->es | prev->es)) loadsegment(es, next->es); savesegment(ds, prev->ds); if (unlikely(next->ds | prev->ds)) loadsegment(ds, next->ds); x86_fsgsbase_load(prev, next); x86_pkru_load(prev, next); /* * Switch the PDA and FPU contexts. */ raw_cpu_write(pcpu_hot.current_task, next_p); raw_cpu_write(pcpu_hot.top_of_stack, task_top_of_stack(next_p)); switch_fpu_finish(); /* Reload sp0. */ update_task_stack(next_p); switch_to_extra(prev_p, next_p); if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) { /* * AMD CPUs have a misfeature: SYSRET sets the SS selector but * does not update the cached descriptor. As a result, if we * do SYSRET while SS is NULL, we'll end up in user mode with * SS apparently equal to __USER_DS but actually unusable. * * The straightforward workaround would be to fix it up just * before SYSRET, but that would slow down the system call * fast paths. Instead, we ensure that SS is never NULL in * system call context. We do this by replacing NULL SS * selectors at every context switch. SYSCALL sets up a valid * SS, so the only way to get NULL is to re-enter the kernel * from CPL 3 through an interrupt. Since that can't happen * in the same task as a running syscall, we are guaranteed to * context switch between every interrupt vector entry and a * subsequent SYSRET. * * We read SS first because SS reads are much faster than * writes. Out of caution, we force SS to __KERNEL_DS even if * it previously had a different non-NULL value. */ unsigned short ss_sel; savesegment(ss, ss_sel); if (ss_sel != __KERNEL_DS) loadsegment(ss, __KERNEL_DS); } /* Load the Intel cache allocation PQR MSR. */ resctrl_sched_in(next_p); return prev_p; } void set_personality_64bit(void) { /* inherit personality from parent */ /* Make sure to be in 64bit mode */ clear_thread_flag(TIF_ADDR32); /* Pretend that this comes from a 64bit execve */ task_pt_regs(current)->orig_ax = __NR_execve; current_thread_info()->status &= ~TS_COMPAT; if (current->mm) __set_bit(MM_CONTEXT_HAS_VSYSCALL, ¤t->mm->context.flags); /* TBD: overwrites user setup. Should have two bits. 
But 64bit processes have always behaved this way, so it's not too bad. The main problem is just that 32bit children are affected again. */ current->personality &= ~READ_IMPLIES_EXEC; } static void __set_personality_x32(void) { #ifdef CONFIG_X86_X32_ABI if (current->mm) current->mm->context.flags = 0; current->personality &= ~READ_IMPLIES_EXEC; /* * in_32bit_syscall() uses the presence of the x32 syscall bit * flag to determine compat status. The x86 mmap() code relies on * the syscall bitness so set x32 syscall bit right here to make * in_32bit_syscall() work during exec(). * * Pretend to come from a x32 execve. */ task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT; current_thread_info()->status &= ~TS_COMPAT; #endif } static void __set_personality_ia32(void) { #ifdef CONFIG_IA32_EMULATION if (current->mm) { /* * uprobes applied to this MM need to know this and * cannot use user_64bit_mode() at that time. */ __set_bit(MM_CONTEXT_UPROBE_IA32, ¤t->mm->context.flags); } current->personality |= force_personality32; /* Prepare the first "return" to user space */ task_pt_regs(current)->orig_ax = __NR_ia32_execve; current_thread_info()->status |= TS_COMPAT; #endif } void set_personality_ia32(bool x32) { /* Make sure to be in 32bit mode */ set_thread_flag(TIF_ADDR32); if (x32) __set_personality_x32(); else __set_personality_ia32(); } EXPORT_SYMBOL_GPL(set_personality_ia32); #ifdef CONFIG_CHECKPOINT_RESTORE static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr) { int ret; ret = map_vdso_once(image, addr); if (ret) return ret; return (long)image->size; } #endif #ifdef CONFIG_ADDRESS_MASKING #define LAM_U57_BITS 6 static int prctl_enable_tagged_addr(struct mm_struct *mm, unsigned long nr_bits) { if (!cpu_feature_enabled(X86_FEATURE_LAM)) return -ENODEV; /* PTRACE_ARCH_PRCTL */ if (current->mm != mm) return -EINVAL; if (mm_valid_pasid(mm) && !test_bit(MM_CONTEXT_FORCE_TAGGED_SVA, &mm->context.flags)) return -EINVAL; if (mmap_write_lock_killable(mm)) return -EINTR; if (test_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags)) { mmap_write_unlock(mm); return -EBUSY; } if (!nr_bits) { mmap_write_unlock(mm); return -EINVAL; } else if (nr_bits <= LAM_U57_BITS) { mm->context.lam_cr3_mask = X86_CR3_LAM_U57; mm->context.untag_mask = ~GENMASK(62, 57); } else { mmap_write_unlock(mm); return -EINVAL; } write_cr3(__read_cr3() | mm->context.lam_cr3_mask); set_tlbstate_lam_mode(mm); set_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags); mmap_write_unlock(mm); return 0; } #endif long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2) { int ret = 0; switch (option) { case ARCH_SET_GS: { if (unlikely(arg2 >= TASK_SIZE_MAX)) return -EPERM; preempt_disable(); /* * ARCH_SET_GS has always overwritten the index * and the base. Zero is the most sensible value * to put in the index, and is the only value that * makes any sense if FSGSBASE is unavailable. */ if (task == current) { loadseg(GS, 0); x86_gsbase_write_cpu_inactive(arg2); /* * On non-FSGSBASE systems, save_base_legacy() expects * that we also fill in thread.gsbase. */ task->thread.gsbase = arg2; } else { task->thread.gsindex = 0; x86_gsbase_write_task(task, arg2); } preempt_enable(); break; } case ARCH_SET_FS: { /* * Not strictly needed for %fs, but do it for symmetry * with %gs */ if (unlikely(arg2 >= TASK_SIZE_MAX)) return -EPERM; preempt_disable(); /* * Set the selector to 0 for the same reason * as %gs above. 
*/ if (task == current) { loadseg(FS, 0); x86_fsbase_write_cpu(arg2); /* * On non-FSGSBASE systems, save_base_legacy() expects * that we also fill in thread.fsbase. */ task->thread.fsbase = arg2; } else { task->thread.fsindex = 0; x86_fsbase_write_task(task, arg2); } preempt_enable(); break; } case ARCH_GET_FS: { unsigned long base = x86_fsbase_read_task(task); ret = put_user(base, (unsigned long __user *)arg2); break; } case ARCH_GET_GS: { unsigned long base = x86_gsbase_read_task(task); ret = put_user(base, (unsigned long __user *)arg2); break; } #ifdef CONFIG_CHECKPOINT_RESTORE # ifdef CONFIG_X86_X32_ABI case ARCH_MAP_VDSO_X32: return prctl_map_vdso(&vdso_image_x32, arg2); # endif # if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION case ARCH_MAP_VDSO_32: return prctl_map_vdso(&vdso_image_32, arg2); # endif case ARCH_MAP_VDSO_64: return prctl_map_vdso(&vdso_image_64, arg2); #endif #ifdef CONFIG_ADDRESS_MASKING case ARCH_GET_UNTAG_MASK: return put_user(task->mm->context.untag_mask, (unsigned long __user *)arg2); case ARCH_ENABLE_TAGGED_ADDR: return prctl_enable_tagged_addr(task->mm, arg2); case ARCH_FORCE_TAGGED_SVA: if (current != task) return -EINVAL; set_bit(MM_CONTEXT_FORCE_TAGGED_SVA, &task->mm->context.flags); return 0; case ARCH_GET_MAX_TAG_BITS: if (!cpu_feature_enabled(X86_FEATURE_LAM)) return put_user(0, (unsigned long __user *)arg2); else return put_user(LAM_U57_BITS, (unsigned long __user *)arg2); #endif case ARCH_SHSTK_ENABLE: case ARCH_SHSTK_DISABLE: case ARCH_SHSTK_LOCK: case ARCH_SHSTK_UNLOCK: case ARCH_SHSTK_STATUS: return shstk_prctl(task, option, arg2); default: ret = -EINVAL; break; } return ret; } SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2) { long ret; ret = do_arch_prctl_64(current, option, arg2); if (ret == -EINVAL) ret = do_arch_prctl_common(option, arg2); return ret; } #ifdef CONFIG_IA32_EMULATION COMPAT_SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2) { return do_arch_prctl_common(option, arg2); } #endif unsigned long KSTK_ESP(struct task_struct *task) { return task_pt_regs(task)->sp; } |
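/*
 * Standalone userspace sketch, not part of the kernel file above: it shows the
 * two ways a task can observe its FS base that the code above serves, the
 * arch_prctl(ARCH_GET_FS) path handled by do_arch_prctl_64() and, where the
 * CPU and kernel enable FSGSBASE, a direct RDFSBASE via the compiler
 * intrinsic. Assumes x86-64 glibc, GCC/Clang with -mfsgsbase; the intrinsic
 * raises SIGILL if CR4.FSGSBASE is not set (older kernels or "nofsgsbase").
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/prctl.h>
#include <x86intrin.h>

int main(void)
{
	unsigned long base = 0;

	/* ARCH_GET_FS ends up in x86_fsbase_read_task() for the current task. */
	if (syscall(SYS_arch_prctl, ARCH_GET_FS, &base))
		perror("arch_prctl(ARCH_GET_FS)");
	else
		printf("FS base via arch_prctl: %#lx\n", base);

	/* Direct read with the FSGSBASE instruction (may SIGILL, see above). */
	printf("FS base via RDFSBASE:   %#llx\n",
	       (unsigned long long)_readfsbase_u64());
	return 0;
}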
2 1 1 2 2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 | // SPDX-License-Identifier: GPL-2.0-or-later /* * * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/slab.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <net/sock.h> #include <net/tcp_states.h> #include <linux/uaccess.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <net/netrom.h> /* * This routine purges all of the queues of frames. */ void nr_clear_queues(struct sock *sk) { struct nr_sock *nr = nr_sk(sk); skb_queue_purge(&sk->sk_write_queue); skb_queue_purge(&nr->ack_queue); skb_queue_purge(&nr->reseq_queue); skb_queue_purge(&nr->frag_queue); } /* * This routine purges the input queue of those frames that have been * acknowledged. This replaces the boxes labelled "V(a) <- N(r)" on the * SDL diagram. */ void nr_frames_acked(struct sock *sk, unsigned short nr) { struct nr_sock *nrom = nr_sk(sk); struct sk_buff *skb; /* * Remove all the ack-ed frames from the ack queue. */ if (nrom->va != nr) { while (skb_peek(&nrom->ack_queue) != NULL && nrom->va != nr) { skb = skb_dequeue(&nrom->ack_queue); kfree_skb(skb); nrom->va = (nrom->va + 1) % NR_MODULUS; } } } /* * Requeue all the un-ack-ed frames on the output queue to be picked * up by nr_kick called from the timer. This arrangement handles the * possibility of an empty output queue. */ void nr_requeue_frames(struct sock *sk) { struct sk_buff *skb, *skb_prev = NULL; while ((skb = skb_dequeue(&nr_sk(sk)->ack_queue)) != NULL) { if (skb_prev == NULL) skb_queue_head(&sk->sk_write_queue, skb); else skb_append(skb_prev, skb, &sk->sk_write_queue); skb_prev = skb; } } /* * Validate that the value of nr is between va and vs. Return true or * false for testing. */ int nr_validate_nr(struct sock *sk, unsigned short nr) { struct nr_sock *nrom = nr_sk(sk); unsigned short vc = nrom->va; while (vc != nrom->vs) { if (nr == vc) return 1; vc = (vc + 1) % NR_MODULUS; } return nr == nrom->vs; } /* * Check that ns is within the receive window. 
*/ int nr_in_rx_window(struct sock *sk, unsigned short ns) { struct nr_sock *nr = nr_sk(sk); unsigned short vc = nr->vr; unsigned short vt = (nr->vl + nr->window) % NR_MODULUS; while (vc != vt) { if (ns == vc) return 1; vc = (vc + 1) % NR_MODULUS; } return 0; } /* * This routine is called when the HDLC layer internally generates a * control frame. */ void nr_write_internal(struct sock *sk, int frametype) { struct nr_sock *nr = nr_sk(sk); struct sk_buff *skb; unsigned char *dptr; int len, timeout; len = NR_TRANSPORT_LEN; switch (frametype & 0x0F) { case NR_CONNREQ: len += 17; break; case NR_CONNACK: len += (nr->bpqext) ? 2 : 1; break; case NR_DISCREQ: case NR_DISCACK: case NR_INFOACK: break; default: printk(KERN_ERR "NET/ROM: nr_write_internal - invalid frame type %d\n", frametype); return; } skb = alloc_skb(NR_NETWORK_LEN + len, GFP_ATOMIC); if (!skb) return; /* * Space for AX.25 and NET/ROM network header */ skb_reserve(skb, NR_NETWORK_LEN); dptr = skb_put(skb, len); switch (frametype & 0x0F) { case NR_CONNREQ: timeout = nr->t1 / HZ; *dptr++ = nr->my_index; *dptr++ = nr->my_id; *dptr++ = 0; *dptr++ = 0; *dptr++ = frametype; *dptr++ = nr->window; memcpy(dptr, &nr->user_addr, AX25_ADDR_LEN); dptr[6] &= ~AX25_CBIT; dptr[6] &= ~AX25_EBIT; dptr[6] |= AX25_SSSID_SPARE; dptr += AX25_ADDR_LEN; memcpy(dptr, &nr->source_addr, AX25_ADDR_LEN); dptr[6] &= ~AX25_CBIT; dptr[6] &= ~AX25_EBIT; dptr[6] |= AX25_SSSID_SPARE; dptr += AX25_ADDR_LEN; *dptr++ = timeout % 256; *dptr++ = timeout / 256; break; case NR_CONNACK: *dptr++ = nr->your_index; *dptr++ = nr->your_id; *dptr++ = nr->my_index; *dptr++ = nr->my_id; *dptr++ = frametype; *dptr++ = nr->window; if (nr->bpqext) *dptr++ = sysctl_netrom_network_ttl_initialiser; break; case NR_DISCREQ: case NR_DISCACK: *dptr++ = nr->your_index; *dptr++ = nr->your_id; *dptr++ = 0; *dptr++ = 0; *dptr++ = frametype; break; case NR_INFOACK: *dptr++ = nr->your_index; *dptr++ = nr->your_id; *dptr++ = 0; *dptr++ = nr->vr; *dptr++ = frametype; break; } nr_transmit_buffer(sk, skb); } /* * This routine is called to send an error reply. */ void __nr_transmit_reply(struct sk_buff *skb, int mine, unsigned char cmdflags) { struct sk_buff *skbn; unsigned char *dptr; int len; len = NR_NETWORK_LEN + NR_TRANSPORT_LEN + 1; if ((skbn = alloc_skb(len, GFP_ATOMIC)) == NULL) return; skb_reserve(skbn, 0); dptr = skb_put(skbn, NR_NETWORK_LEN + NR_TRANSPORT_LEN); skb_copy_from_linear_data_offset(skb, 7, dptr, AX25_ADDR_LEN); dptr[6] &= ~AX25_CBIT; dptr[6] &= ~AX25_EBIT; dptr[6] |= AX25_SSSID_SPARE; dptr += AX25_ADDR_LEN; skb_copy_from_linear_data(skb, dptr, AX25_ADDR_LEN); dptr[6] &= ~AX25_CBIT; dptr[6] |= AX25_EBIT; dptr[6] |= AX25_SSSID_SPARE; dptr += AX25_ADDR_LEN; *dptr++ = sysctl_netrom_network_ttl_initialiser; if (mine) { *dptr++ = 0; *dptr++ = 0; *dptr++ = skb->data[15]; *dptr++ = skb->data[16]; } else { *dptr++ = skb->data[15]; *dptr++ = skb->data[16]; *dptr++ = 0; *dptr++ = 0; } *dptr++ = cmdflags; *dptr++ = 0; if (!nr_route_frame(skbn, NULL)) kfree_skb(skbn); } void nr_disconnect(struct sock *sk, int reason) { nr_stop_t1timer(sk); nr_stop_t2timer(sk); nr_stop_t4timer(sk); nr_stop_idletimer(sk); nr_clear_queues(sk); nr_sk(sk)->state = NR_STATE_0; sk->sk_state = TCP_CLOSE; sk->sk_err = reason; sk->sk_shutdown |= SEND_SHUTDOWN; if (!sock_flag(sk, SOCK_DEAD)) { sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); } } |
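/*
 * Standalone sketch, not kernel code: it isolates the modulo sequence-number
 * window walk that nr_validate_nr() and nr_in_rx_window() above perform, i.e.
 * step forward from the lower edge of the window to the upper edge, wrapping
 * at the modulus, and report whether a value lies inside. The modulus of 256
 * mirrors NR_MODULUS from the NET/ROM headers and is taken here as an
 * assumption; in_window() is a hypothetical helper for illustration only.
 */
#include <assert.h>
#include <stdio.h>

#define MODULUS 256

/* Return 1 if seq lies in [lo, hi) walking forward modulo MODULUS. */
static int in_window(unsigned short seq, unsigned short lo, unsigned short hi)
{
	unsigned short vc = lo;

	while (vc != hi) {
		if (seq == vc)
			return 1;
		vc = (vc + 1) % MODULUS;
	}
	return 0;
}

int main(void)
{
	/* A window that wraps around the modulus: [250, 4). */
	assert(in_window(250, 250, 4));
	assert(in_window(3, 250, 4));
	assert(!in_window(4, 250, 4));
	assert(!in_window(100, 250, 4));
	printf("modulo window checks passed\n");
	return 0;
}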
7 3 3 2 8 2 1 4 1 2 2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 | // SPDX-License-Identifier: GPL-2.0-or-later /* ----------------------------------------------------------------------- * * * Copyright 2000-2008 H. Peter Anvin - All Rights Reserved * Copyright 2009 Intel Corporation; author: H. Peter Anvin * * ----------------------------------------------------------------------- */ /* * x86 MSR access device * * This device is accessed by lseek() to the appropriate register number * and then read/write in chunks of 8 bytes. A larger size means multiple * reads or writes of the same register. * * This driver uses /dev/cpu/%d/msr where %d is the minor number, and on * an SMP box will direct the access to CPU %d. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/fcntl.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/smp.h> #include <linux/major.h> #include <linux/fs.h> #include <linux/device.h> #include <linux/cpu.h> #include <linux/notifier.h> #include <linux/uaccess.h> #include <linux/gfp.h> #include <linux/security.h> #include <asm/cpufeature.h> #include <asm/msr.h> static enum cpuhp_state cpuhp_msr_state; enum allow_write_msrs { MSR_WRITES_ON, MSR_WRITES_OFF, MSR_WRITES_DEFAULT, }; static enum allow_write_msrs allow_writes = MSR_WRITES_DEFAULT; static ssize_t msr_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { u32 __user *tmp = (u32 __user *) buf; u32 data[2]; u32 reg = *ppos; int cpu = iminor(file_inode(file)); int err = 0; ssize_t bytes = 0; if (count % 8) return -EINVAL; /* Invalid chunk size */ for (; count; count -= 8) { err = rdmsr_safe_on_cpu(cpu, reg, &data[0], &data[1]); if (err) break; if (copy_to_user(tmp, &data, 8)) { err = -EFAULT; break; } tmp += 2; bytes += 8; } return bytes ? bytes : err; } static int filter_write(u32 reg) { /* * MSRs writes usually happen all at once, and can easily saturate kmsg. * Only allow one message every 30 seconds. * * It's possible to be smarter here and do it (for example) per-MSR, but * it would certainly be more complex, and this is enough at least to * avoid saturating the ring buffer. 
*/ static DEFINE_RATELIMIT_STATE(fw_rs, 30 * HZ, 1); switch (allow_writes) { case MSR_WRITES_ON: return 0; case MSR_WRITES_OFF: return -EPERM; default: break; } if (!__ratelimit(&fw_rs)) return 0; pr_warn("Write to unrecognized MSR 0x%x by %s (pid: %d).\n", reg, current->comm, current->pid); pr_warn("See https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git/about for details.\n"); return 0; } static ssize_t msr_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { const u32 __user *tmp = (const u32 __user *)buf; u32 data[2]; u32 reg = *ppos; int cpu = iminor(file_inode(file)); int err = 0; ssize_t bytes = 0; err = security_locked_down(LOCKDOWN_MSR); if (err) return err; err = filter_write(reg); if (err) return err; if (count % 8) return -EINVAL; /* Invalid chunk size */ for (; count; count -= 8) { if (copy_from_user(&data, tmp, 8)) { err = -EFAULT; break; } add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); err = wrmsr_safe_on_cpu(cpu, reg, data[0], data[1]); if (err) break; tmp += 2; bytes += 8; } return bytes ? bytes : err; } static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg) { u32 __user *uregs = (u32 __user *)arg; u32 regs[8]; int cpu = iminor(file_inode(file)); int err; switch (ioc) { case X86_IOC_RDMSR_REGS: if (!(file->f_mode & FMODE_READ)) { err = -EBADF; break; } if (copy_from_user(®s, uregs, sizeof(regs))) { err = -EFAULT; break; } err = rdmsr_safe_regs_on_cpu(cpu, regs); if (err) break; if (copy_to_user(uregs, ®s, sizeof(regs))) err = -EFAULT; break; case X86_IOC_WRMSR_REGS: if (!(file->f_mode & FMODE_WRITE)) { err = -EBADF; break; } if (copy_from_user(®s, uregs, sizeof(regs))) { err = -EFAULT; break; } err = security_locked_down(LOCKDOWN_MSR); if (err) break; err = filter_write(regs[1]); if (err) return err; add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); err = wrmsr_safe_regs_on_cpu(cpu, regs); if (err) break; if (copy_to_user(uregs, ®s, sizeof(regs))) err = -EFAULT; break; default: err = -ENOTTY; break; } return err; } static int msr_open(struct inode *inode, struct file *file) { unsigned int cpu = iminor(file_inode(file)); struct cpuinfo_x86 *c; if (!capable(CAP_SYS_RAWIO)) return -EPERM; if (cpu >= nr_cpu_ids || !cpu_online(cpu)) return -ENXIO; /* No such CPU */ c = &cpu_data(cpu); if (!cpu_has(c, X86_FEATURE_MSR)) return -EIO; /* MSR not supported */ return 0; } /* * File operations we support */ static const struct file_operations msr_fops = { .owner = THIS_MODULE, .llseek = no_seek_end_llseek, .read = msr_read, .write = msr_write, .open = msr_open, .unlocked_ioctl = msr_ioctl, .compat_ioctl = msr_ioctl, }; static char *msr_devnode(const struct device *dev, umode_t *mode) { return kasprintf(GFP_KERNEL, "cpu/%u/msr", MINOR(dev->devt)); } static const struct class msr_class = { .name = "msr", .devnode = msr_devnode, }; static int msr_device_create(unsigned int cpu) { struct device *dev; dev = device_create(&msr_class, NULL, MKDEV(MSR_MAJOR, cpu), NULL, "msr%d", cpu); return PTR_ERR_OR_ZERO(dev); } static int msr_device_destroy(unsigned int cpu) { device_destroy(&msr_class, MKDEV(MSR_MAJOR, cpu)); return 0; } static int __init msr_init(void) { int err; if (__register_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr", &msr_fops)) { pr_err("unable to get major %d for msr\n", MSR_MAJOR); return -EBUSY; } err = class_register(&msr_class); if (err) goto out_chrdev; err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/msr:online", msr_device_create, msr_device_destroy); if (err < 0) goto out_class; cpuhp_msr_state = err; return 0; 
out_class: class_unregister(&msr_class); out_chrdev: __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr"); return err; } module_init(msr_init); static void __exit msr_exit(void) { cpuhp_remove_state(cpuhp_msr_state); class_unregister(&msr_class); __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr"); } module_exit(msr_exit) static int set_allow_writes(const char *val, const struct kernel_param *cp) { /* val is NUL-terminated, see kernfs_fop_write() */ char *s = strstrip((char *)val); if (!strcmp(s, "on")) allow_writes = MSR_WRITES_ON; else if (!strcmp(s, "off")) allow_writes = MSR_WRITES_OFF; else allow_writes = MSR_WRITES_DEFAULT; return 0; } static int get_allow_writes(char *buf, const struct kernel_param *kp) { const char *res; switch (allow_writes) { case MSR_WRITES_ON: res = "on"; break; case MSR_WRITES_OFF: res = "off"; break; default: res = "default"; break; } return sprintf(buf, "%s\n", res); } static const struct kernel_param_ops allow_writes_ops = { .set = set_allow_writes, .get = get_allow_writes }; module_param_cb(allow_writes, &allow_writes_ops, NULL, 0600); MODULE_AUTHOR("H. Peter Anvin <hpa@zytor.com>"); MODULE_DESCRIPTION("x86 generic MSR driver"); MODULE_LICENSE("GPL"); |
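/*
 * Standalone userspace sketch of the access pattern this driver implements:
 * open /dev/cpu/N/msr, seek to the MSR number and transfer 8 bytes, as
 * described in the header comment above. Reading requires CAP_SYS_RAWIO (see
 * msr_open()) and the msr module to be loaded. The register 0x10
 * (IA32_TIME_STAMP_COUNTER) is just an example choice.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const unsigned int msr = 0x10;	/* IA32_TIME_STAMP_COUNTER */
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}
	/* pread() combines the lseek() to the register number with the read. */
	if (pread(fd, &val, sizeof(val), msr) != sizeof(val)) {
		perror("pread");
		close(fd);
		return 1;
	}
	printf("MSR 0x%x on CPU 0: 0x%016llx\n", msr, (unsigned long long)val);
	close(fd);
	return 0;
}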
915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 | // SPDX-License-Identifier: GPL-2.0 #include <linux/err.h> #include <linux/slab.h> #include <linux/spinlock.h> #include "messages.h" #include "ctree.h" #include "volumes.h" #include "extent_map.h" #include "compression.h" #include "btrfs_inode.h" static struct kmem_cache *extent_map_cache; int __init extent_map_init(void) { extent_map_cache = kmem_cache_create("btrfs_extent_map", sizeof(struct extent_map), 0, SLAB_MEM_SPREAD, NULL); if (!extent_map_cache) return -ENOMEM; return 0; } void __cold extent_map_exit(void) { kmem_cache_destroy(extent_map_cache); } /* * Initialize the extent tree @tree. Should be called for each new inode or * other user of the extent_map interface. */ void extent_map_tree_init(struct extent_map_tree *tree) { tree->map = RB_ROOT_CACHED; INIT_LIST_HEAD(&tree->modified_extents); rwlock_init(&tree->lock); } /* * Allocate a new extent_map structure. The new structure is returned with a * reference count of one and needs to be freed using free_extent_map() */ struct extent_map *alloc_extent_map(void) { struct extent_map *em; em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS); if (!em) return NULL; RB_CLEAR_NODE(&em->rb_node); refcount_set(&em->refs, 1); INIT_LIST_HEAD(&em->list); return em; } /* * Drop the reference out on @em by one and free the structure if the reference * count hits zero. */ void free_extent_map(struct extent_map *em) { if (!em) return; if (refcount_dec_and_test(&em->refs)) { WARN_ON(extent_map_in_tree(em)); WARN_ON(!list_empty(&em->list)); kmem_cache_free(extent_map_cache, em); } } /* Do the math around the end of an extent, handling wrapping. */ static u64 range_end(u64 start, u64 len) { if (start + len < start) return (u64)-1; return start + len; } static int tree_insert(struct rb_root_cached *root, struct extent_map *em) { struct rb_node **p = &root->rb_root.rb_node; struct rb_node *parent = NULL; struct extent_map *entry = NULL; struct rb_node *orig_parent = NULL; u64 end = range_end(em->start, em->len); bool leftmost = true; while (*p) { parent = *p; entry = rb_entry(parent, struct extent_map, rb_node); if (em->start < entry->start) { p = &(*p)->rb_left; } else if (em->start >= extent_map_end(entry)) { p = &(*p)->rb_right; leftmost = false; } else { return -EEXIST; } } orig_parent = parent; while (parent && em->start >= extent_map_end(entry)) { parent = rb_next(parent); entry = rb_entry(parent, struct extent_map, rb_node); } if (parent) if (end > entry->start && em->start < extent_map_end(entry)) return -EEXIST; parent = orig_parent; entry = rb_entry(parent, struct extent_map, rb_node); while (parent && em->start < entry->start) { parent = rb_prev(parent); entry = rb_entry(parent, struct extent_map, rb_node); } if (parent) if (end > entry->start && em->start < extent_map_end(entry)) return -EEXIST; rb_link_node(&em->rb_node, orig_parent, p); rb_insert_color_cached(&em->rb_node, root, leftmost); return 0; } /* * Search through the tree for an extent_map with a given offset. 
If it can't * be found, try to find some neighboring extents */ static struct rb_node *__tree_search(struct rb_root *root, u64 offset, struct rb_node **prev_or_next_ret) { struct rb_node *n = root->rb_node; struct rb_node *prev = NULL; struct rb_node *orig_prev = NULL; struct extent_map *entry; struct extent_map *prev_entry = NULL; ASSERT(prev_or_next_ret); while (n) { entry = rb_entry(n, struct extent_map, rb_node); prev = n; prev_entry = entry; if (offset < entry->start) n = n->rb_left; else if (offset >= extent_map_end(entry)) n = n->rb_right; else return n; } orig_prev = prev; while (prev && offset >= extent_map_end(prev_entry)) { prev = rb_next(prev); prev_entry = rb_entry(prev, struct extent_map, rb_node); } /* * Previous extent map found, return as in this case the caller does not * care about the next one. */ if (prev) { *prev_or_next_ret = prev; return NULL; } prev = orig_prev; prev_entry = rb_entry(prev, struct extent_map, rb_node); while (prev && offset < prev_entry->start) { prev = rb_prev(prev); prev_entry = rb_entry(prev, struct extent_map, rb_node); } *prev_or_next_ret = prev; return NULL; } static inline u64 extent_map_block_end(const struct extent_map *em) { if (em->block_start + em->block_len < em->block_start) return (u64)-1; return em->block_start + em->block_len; } static bool can_merge_extent_map(const struct extent_map *em) { if (em->flags & EXTENT_FLAG_PINNED) return false; /* Don't merge compressed extents, we need to know their actual size. */ if (extent_map_is_compressed(em)) return false; if (em->flags & EXTENT_FLAG_LOGGING) return false; /* * We don't want to merge stuff that hasn't been written to the log yet * since it may not reflect exactly what is on disk, and that would be * bad. */ if (!list_empty(&em->list)) return false; return true; } /* Check to see if two extent_map structs are adjacent and safe to merge. */ static bool mergeable_maps(const struct extent_map *prev, const struct extent_map *next) { if (extent_map_end(prev) != next->start) return false; if (prev->flags != next->flags) return false; if (next->block_start < EXTENT_MAP_LAST_BYTE - 1) return next->block_start == extent_map_block_end(prev); /* HOLES and INLINE extents. */ return next->block_start == prev->block_start; } static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em) { struct extent_map *merge = NULL; struct rb_node *rb; /* * We can't modify an extent map that is in the tree and that is being * used by another task, as it can cause that other task to see it in * inconsistent state during the merging. We always have 1 reference for * the tree and 1 for this task (which is unpinning the extent map or * clearing the logging flag), so anything > 2 means it's being used by * other tasks too. 
*/ if (refcount_read(&em->refs) > 2) return; if (!can_merge_extent_map(em)) return; if (em->start != 0) { rb = rb_prev(&em->rb_node); if (rb) merge = rb_entry(rb, struct extent_map, rb_node); if (rb && can_merge_extent_map(merge) && mergeable_maps(merge, em)) { em->start = merge->start; em->orig_start = merge->orig_start; em->len += merge->len; em->block_len += merge->block_len; em->block_start = merge->block_start; em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start; em->mod_start = merge->mod_start; em->generation = max(em->generation, merge->generation); em->flags |= EXTENT_FLAG_MERGED; rb_erase_cached(&merge->rb_node, &tree->map); RB_CLEAR_NODE(&merge->rb_node); free_extent_map(merge); } } rb = rb_next(&em->rb_node); if (rb) merge = rb_entry(rb, struct extent_map, rb_node); if (rb && can_merge_extent_map(merge) && mergeable_maps(em, merge)) { em->len += merge->len; em->block_len += merge->block_len; rb_erase_cached(&merge->rb_node, &tree->map); RB_CLEAR_NODE(&merge->rb_node); em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start; em->generation = max(em->generation, merge->generation); em->flags |= EXTENT_FLAG_MERGED; free_extent_map(merge); } } /* * Unpin an extent from the cache. * * @inode: the inode from which we are unpinning an extent range * @start: logical offset in the file * @len: length of the extent * @gen: generation that this extent has been modified in * * Called after an extent has been written to disk properly. Set the generation * to the generation that actually added the file item to the inode so we know * we need to sync this extent when we call fsync(). */ int unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen) { struct btrfs_fs_info *fs_info = inode->root->fs_info; struct extent_map_tree *tree = &inode->extent_tree; int ret = 0; struct extent_map *em; bool prealloc = false; write_lock(&tree->lock); em = lookup_extent_mapping(tree, start, len); if (WARN_ON(!em)) { btrfs_warn(fs_info, "no extent map found for inode %llu (root %lld) when unpinning extent range [%llu, %llu), generation %llu", btrfs_ino(inode), btrfs_root_id(inode->root), start, len, gen); goto out; } if (WARN_ON(em->start != start)) btrfs_warn(fs_info, "found extent map for inode %llu (root %lld) with unexpected start offset %llu when unpinning extent range [%llu, %llu), generation %llu", btrfs_ino(inode), btrfs_root_id(inode->root), em->start, start, len, gen); em->generation = gen; em->flags &= ~EXTENT_FLAG_PINNED; em->mod_start = em->start; em->mod_len = em->len; if (em->flags & EXTENT_FLAG_FILLING) { prealloc = true; em->flags &= ~EXTENT_FLAG_FILLING; } try_merge_map(tree, em); if (prealloc) { em->mod_start = em->start; em->mod_len = em->len; } free_extent_map(em); out: write_unlock(&tree->lock); return ret; } void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em) { lockdep_assert_held_write(&tree->lock); em->flags &= ~EXTENT_FLAG_LOGGING; if (extent_map_in_tree(em)) try_merge_map(tree, em); } static inline void setup_extent_mapping(struct extent_map_tree *tree, struct extent_map *em, int modified) { refcount_inc(&em->refs); em->mod_start = em->start; em->mod_len = em->len; ASSERT(list_empty(&em->list)); if (modified) list_add(&em->list, &tree->modified_extents); else try_merge_map(tree, em); } /* * Add new extent map to the extent tree * * @tree: tree to insert new map in * @em: map to insert * @modified: indicate whether the given @em should be added to the * modified list, which indicates the extent needs to be logged * * Insert 
@em into @tree or perform a simple forward/backward merge with * existing mappings. The extent_map struct passed in will be inserted * into the tree directly, with an additional reference taken, or a * reference dropped if the merge attempt was successful. */ static int add_extent_mapping(struct extent_map_tree *tree, struct extent_map *em, int modified) { int ret = 0; lockdep_assert_held_write(&tree->lock); ret = tree_insert(&tree->map, em); if (ret) goto out; setup_extent_mapping(tree, em, modified); out: return ret; } static struct extent_map * __lookup_extent_mapping(struct extent_map_tree *tree, u64 start, u64 len, int strict) { struct extent_map *em; struct rb_node *rb_node; struct rb_node *prev_or_next = NULL; u64 end = range_end(start, len); rb_node = __tree_search(&tree->map.rb_root, start, &prev_or_next); if (!rb_node) { if (prev_or_next) rb_node = prev_or_next; else return NULL; } em = rb_entry(rb_node, struct extent_map, rb_node); if (strict && !(end > em->start && start < extent_map_end(em))) return NULL; refcount_inc(&em->refs); return em; } /* * Lookup extent_map that intersects @start + @len range. * * @tree: tree to lookup in * @start: byte offset to start the search * @len: length of the lookup range * * Find and return the first extent_map struct in @tree that intersects the * [start, len] range. There may be additional objects in the tree that * intersect, so check the object returned carefully to make sure that no * additional lookups are needed. */ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree, u64 start, u64 len) { return __lookup_extent_mapping(tree, start, len, 1); } /* * Find a nearby extent map intersecting @start + @len (not an exact search). * * @tree: tree to lookup in * @start: byte offset to start the search * @len: length of the lookup range * * Find and return the first extent_map struct in @tree that intersects the * [start, len] range. * * If one can't be found, any nearby extent may be returned */ struct extent_map *search_extent_mapping(struct extent_map_tree *tree, u64 start, u64 len) { return __lookup_extent_mapping(tree, start, len, 0); } /* * Remove an extent_map from the extent tree. * * @tree: extent tree to remove from * @em: extent map being removed * * Remove @em from @tree. No reference counts are dropped, and no checks * are done to see if the range is in use. 
*/ void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em) { lockdep_assert_held_write(&tree->lock); WARN_ON(em->flags & EXTENT_FLAG_PINNED); rb_erase_cached(&em->rb_node, &tree->map); if (!(em->flags & EXTENT_FLAG_LOGGING)) list_del_init(&em->list); RB_CLEAR_NODE(&em->rb_node); } static void replace_extent_mapping(struct extent_map_tree *tree, struct extent_map *cur, struct extent_map *new, int modified) { lockdep_assert_held_write(&tree->lock); WARN_ON(cur->flags & EXTENT_FLAG_PINNED); ASSERT(extent_map_in_tree(cur)); if (!(cur->flags & EXTENT_FLAG_LOGGING)) list_del_init(&cur->list); rb_replace_node_cached(&cur->rb_node, &new->rb_node, &tree->map); RB_CLEAR_NODE(&cur->rb_node); setup_extent_mapping(tree, new, modified); } static struct extent_map *next_extent_map(const struct extent_map *em) { struct rb_node *next; next = rb_next(&em->rb_node); if (!next) return NULL; return container_of(next, struct extent_map, rb_node); } static struct extent_map *prev_extent_map(struct extent_map *em) { struct rb_node *prev; prev = rb_prev(&em->rb_node); if (!prev) return NULL; return container_of(prev, struct extent_map, rb_node); } /* * Helper for btrfs_get_extent. Given an existing extent in the tree, * the existing extent is the nearest extent to map_start, * and an extent that you want to insert, deal with overlap and insert * the best fitted new extent into the tree. */ static noinline int merge_extent_mapping(struct extent_map_tree *em_tree, struct extent_map *existing, struct extent_map *em, u64 map_start) { struct extent_map *prev; struct extent_map *next; u64 start; u64 end; u64 start_diff; BUG_ON(map_start < em->start || map_start >= extent_map_end(em)); if (existing->start > map_start) { next = existing; prev = prev_extent_map(next); } else { prev = existing; next = next_extent_map(prev); } start = prev ? extent_map_end(prev) : em->start; start = max_t(u64, start, em->start); end = next ? next->start : extent_map_end(em); end = min_t(u64, end, extent_map_end(em)); start_diff = start - em->start; em->start = start; em->len = end - start; if (em->block_start < EXTENT_MAP_LAST_BYTE && !extent_map_is_compressed(em)) { em->block_start += start_diff; em->block_len = em->len; } return add_extent_mapping(em_tree, em, 0); } /* * Add extent mapping into em_tree. * * @fs_info: the filesystem * @em_tree: extent tree into which we want to insert the extent mapping * @em_in: extent we are inserting * @start: start of the logical range btrfs_get_extent() is requesting * @len: length of the logical range btrfs_get_extent() is requesting * * Note that @em_in's range may be different from [start, start+len), * but they must be overlapped. * * Insert @em_in into @em_tree. In case there is an overlapping range, handle * the -EEXIST by either: * a) Returning the existing extent in @em_in if @start is within the * existing em. * b) Merge the existing extent with @em_in passed in. * * Return 0 on success, otherwise -EEXIST. * */ int btrfs_add_extent_mapping(struct btrfs_fs_info *fs_info, struct extent_map_tree *em_tree, struct extent_map **em_in, u64 start, u64 len) { int ret; struct extent_map *em = *em_in; /* * Tree-checker should have rejected any inline extent with non-zero * file offset. Here just do a sanity check. */ if (em->block_start == EXTENT_MAP_INLINE) ASSERT(em->start == 0); ret = add_extent_mapping(em_tree, em, 0); /* it is possible that someone inserted the extent into the tree * while we had the lock dropped. 
It is also possible that * an overlapping map exists in the tree */ if (ret == -EEXIST) { struct extent_map *existing; existing = search_extent_mapping(em_tree, start, len); trace_btrfs_handle_em_exist(fs_info, existing, em, start, len); /* * existing will always be non-NULL, since there must be * extent causing the -EEXIST. */ if (start >= existing->start && start < extent_map_end(existing)) { free_extent_map(em); *em_in = existing; ret = 0; } else { u64 orig_start = em->start; u64 orig_len = em->len; /* * The existing extent map is the one nearest to * the [start, start + len) range which overlaps */ ret = merge_extent_mapping(em_tree, existing, em, start); if (ret) { free_extent_map(em); *em_in = NULL; WARN_ONCE(ret, "unexpected error %d: merge existing(start %llu len %llu) with em(start %llu len %llu)\n", ret, existing->start, existing->len, orig_start, orig_len); } free_extent_map(existing); } } ASSERT(ret == 0 || ret == -EEXIST); return ret; } /* * Drop all extent maps from a tree in the fastest possible way, rescheduling * if needed. This avoids searching the tree, from the root down to the first * extent map, before each deletion. */ static void drop_all_extent_maps_fast(struct extent_map_tree *tree) { write_lock(&tree->lock); while (!RB_EMPTY_ROOT(&tree->map.rb_root)) { struct extent_map *em; struct rb_node *node; node = rb_first_cached(&tree->map); em = rb_entry(node, struct extent_map, rb_node); em->flags &= ~(EXTENT_FLAG_PINNED | EXTENT_FLAG_LOGGING); remove_extent_mapping(tree, em); free_extent_map(em); cond_resched_rwlock_write(&tree->lock); } write_unlock(&tree->lock); } /* * Drop all extent maps in a given range. * * @inode: The target inode. * @start: Start offset of the range. * @end: End offset of the range (inclusive value). * @skip_pinned: Indicate if pinned extent maps should be ignored or not. * * This drops all the extent maps that intersect the given range [@start, @end]. * Extent maps that partially overlap the range and extend behind or beyond it, * are split. * The caller should have locked an appropriate file range in the inode's io * tree before calling this function. */ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end, bool skip_pinned) { struct extent_map *split; struct extent_map *split2; struct extent_map *em; struct extent_map_tree *em_tree = &inode->extent_tree; u64 len = end - start + 1; WARN_ON(end < start); if (end == (u64)-1) { if (start == 0 && !skip_pinned) { drop_all_extent_maps_fast(em_tree); return; } len = (u64)-1; } else { /* Make end offset exclusive for use in the loop below. */ end++; } /* * It's ok if we fail to allocate the extent maps, see the comment near * the bottom of the loop below. We only need two spare extent maps in * the worst case, where the first extent map that intersects our range * starts before the range and the last extent map that intersects our * range ends after our range (and they might be the same extent map), * because we need to split those two extent maps at the boundaries. */ split = alloc_extent_map(); split2 = alloc_extent_map(); write_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, start, len); while (em) { /* extent_map_end() returns exclusive value (last byte + 1). 
*/ const u64 em_end = extent_map_end(em); struct extent_map *next_em = NULL; u64 gen; unsigned long flags; bool modified; bool compressed; if (em_end < end) { next_em = next_extent_map(em); if (next_em) { if (next_em->start < end) refcount_inc(&next_em->refs); else next_em = NULL; } } if (skip_pinned && (em->flags & EXTENT_FLAG_PINNED)) { start = em_end; goto next; } flags = em->flags; /* * In case we split the extent map, we want to preserve the * EXTENT_FLAG_LOGGING flag on our extent map, but we don't want * it on the new extent maps. */ em->flags &= ~(EXTENT_FLAG_PINNED | EXTENT_FLAG_LOGGING); modified = !list_empty(&em->list); /* * The extent map does not cross our target range, so no need to * split it, we can remove it directly. */ if (em->start >= start && em_end <= end) goto remove_em; gen = em->generation; compressed = extent_map_is_compressed(em); if (em->start < start) { if (!split) { split = split2; split2 = NULL; if (!split) goto remove_em; } split->start = em->start; split->len = start - em->start; if (em->block_start < EXTENT_MAP_LAST_BYTE) { split->orig_start = em->orig_start; split->block_start = em->block_start; if (compressed) split->block_len = em->block_len; else split->block_len = split->len; split->orig_block_len = max(split->block_len, em->orig_block_len); split->ram_bytes = em->ram_bytes; } else { split->orig_start = split->start; split->block_len = 0; split->block_start = em->block_start; split->orig_block_len = 0; split->ram_bytes = split->len; } split->generation = gen; split->flags = flags; replace_extent_mapping(em_tree, em, split, modified); free_extent_map(split); split = split2; split2 = NULL; } if (em_end > end) { if (!split) { split = split2; split2 = NULL; if (!split) goto remove_em; } split->start = end; split->len = em_end - end; split->block_start = em->block_start; split->flags = flags; split->generation = gen; if (em->block_start < EXTENT_MAP_LAST_BYTE) { split->orig_block_len = max(em->block_len, em->orig_block_len); split->ram_bytes = em->ram_bytes; if (compressed) { split->block_len = em->block_len; split->orig_start = em->orig_start; } else { const u64 diff = start + len - em->start; split->block_len = split->len; split->block_start += diff; split->orig_start = em->orig_start; } } else { split->ram_bytes = split->len; split->orig_start = split->start; split->block_len = 0; split->orig_block_len = 0; } if (extent_map_in_tree(em)) { replace_extent_mapping(em_tree, em, split, modified); } else { int ret; ret = add_extent_mapping(em_tree, split, modified); /* Logic error, shouldn't happen. */ ASSERT(ret == 0); if (WARN_ON(ret != 0) && modified) btrfs_set_inode_full_sync(inode); } free_extent_map(split); split = NULL; } remove_em: if (extent_map_in_tree(em)) { /* * If the extent map is still in the tree it means that * either of the following is true: * * 1) It fits entirely in our range (doesn't end beyond * it or starts before it); * * 2) It starts before our range and/or ends after our * range, and we were not able to allocate the extent * maps for split operations, @split and @split2. * * If we are at case 2) then we just remove the entire * extent map - this is fine since if anyone needs it to * access the subranges outside our range, will just * load it again from the subvolume tree's file extent * item. However if the extent map was in the list of * modified extents, then we must mark the inode for a * full fsync, otherwise a fast fsync will miss this * extent if it's new and needs to be logged. 
*/ if ((em->start < start || em_end > end) && modified) { ASSERT(!split); btrfs_set_inode_full_sync(inode); } remove_extent_mapping(em_tree, em); } /* * Once for the tree reference (we replaced or removed the * extent map from the tree). */ free_extent_map(em); next: /* Once for us (for our lookup reference). */ free_extent_map(em); em = next_em; } write_unlock(&em_tree->lock); free_extent_map(split); free_extent_map(split2); } /* * Replace a range in the inode's extent map tree with a new extent map. * * @inode: The target inode. * @new_em: The new extent map to add to the inode's extent map tree. * @modified: Indicate if the new extent map should be added to the list of * modified extents (for fast fsync tracking). * * Drops all the extent maps in the inode's extent map tree that intersect the * range of the new extent map and adds the new extent map to the tree. * The caller should have locked an appropriate file range in the inode's io * tree before calling this function. */ int btrfs_replace_extent_map_range(struct btrfs_inode *inode, struct extent_map *new_em, bool modified) { const u64 end = new_em->start + new_em->len - 1; struct extent_map_tree *tree = &inode->extent_tree; int ret; ASSERT(!extent_map_in_tree(new_em)); /* * The caller has locked an appropriate file range in the inode's io * tree, but getting -EEXIST when adding the new extent map can still * happen in case there are extents that partially cover the range, and * this is due to two tasks operating on different parts of the extent. * See commit 18e83ac75bfe67 ("Btrfs: fix unexpected EEXIST from * btrfs_get_extent") for an example and details. */ do { btrfs_drop_extent_map_range(inode, new_em->start, end, false); write_lock(&tree->lock); ret = add_extent_mapping(tree, new_em, modified); write_unlock(&tree->lock); } while (ret == -EEXIST); return ret; } /* * Split off the first pre bytes from the extent_map at [start, start + len], * and set the block_start for it to new_logical. * * This function is used when an ordered_extent needs to be split. */ int split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre, u64 new_logical) { struct extent_map_tree *em_tree = &inode->extent_tree; struct extent_map *em; struct extent_map *split_pre = NULL; struct extent_map *split_mid = NULL; int ret = 0; unsigned long flags; ASSERT(pre != 0); ASSERT(pre < len); split_pre = alloc_extent_map(); if (!split_pre) return -ENOMEM; split_mid = alloc_extent_map(); if (!split_mid) { ret = -ENOMEM; goto out_free_pre; } lock_extent(&inode->io_tree, start, start + len - 1, NULL); write_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, start, len); if (!em) { ret = -EIO; goto out_unlock; } ASSERT(em->len == len); ASSERT(!extent_map_is_compressed(em)); ASSERT(em->block_start < EXTENT_MAP_LAST_BYTE); ASSERT(em->flags & EXTENT_FLAG_PINNED); ASSERT(!(em->flags & EXTENT_FLAG_LOGGING)); ASSERT(!list_empty(&em->list)); flags = em->flags; em->flags &= ~EXTENT_FLAG_PINNED; /* First, replace the em with a new extent_map starting from * em->start */ split_pre->start = em->start; split_pre->len = pre; split_pre->orig_start = split_pre->start; split_pre->block_start = new_logical; split_pre->block_len = split_pre->len; split_pre->orig_block_len = split_pre->block_len; split_pre->ram_bytes = split_pre->len; split_pre->flags = flags; split_pre->generation = em->generation; replace_extent_mapping(em_tree, em, split_pre, 1); /* * Now we only have an extent_map at: * [em->start, em->start + pre] */ /* Insert the middle extent_map. 
*/ split_mid->start = em->start + pre; split_mid->len = em->len - pre; split_mid->orig_start = split_mid->start; split_mid->block_start = em->block_start + pre; split_mid->block_len = split_mid->len; split_mid->orig_block_len = split_mid->block_len; split_mid->ram_bytes = split_mid->len; split_mid->flags = flags; split_mid->generation = em->generation; add_extent_mapping(em_tree, split_mid, 1); /* Once for us */ free_extent_map(em); /* Once for the tree */ free_extent_map(em); out_unlock: write_unlock(&em_tree->lock); unlock_extent(&inode->io_tree, start, start + len - 1, NULL); free_extent_map(split_mid); out_free_pre: free_extent_map(split_pre); return ret; } |
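/*
 * Standalone sketch, not btrfs code: it demonstrates the adjacency test that
 * mergeable_maps() above applies before try_merge_map() coalesces two extent
 * maps, namely that the file ranges touch exactly and, for real (non-hole,
 * non-inline) extents, that the on-disk block ranges are contiguous as well.
 * struct toy_em and toy_mergeable() are simplified stand-ins invented for
 * illustration; they are not the real struct extent_map or kernel helpers.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct toy_em {
	uint64_t start;		/* file offset */
	uint64_t len;		/* length in bytes */
	uint64_t block_start;	/* on-disk start */
	uint64_t block_len;	/* on-disk length */
};

static int toy_mergeable(const struct toy_em *prev, const struct toy_em *next)
{
	/* File ranges must be exactly adjacent ... */
	if (prev->start + prev->len != next->start)
		return 0;
	/* ... and the disk ranges contiguous, as in mergeable_maps(). */
	return prev->block_start + prev->block_len == next->block_start;
}

int main(void)
{
	struct toy_em a = { .start = 0,    .len = 4096,
			    .block_start = 1 << 20, .block_len = 4096 };
	struct toy_em b = { .start = 4096, .len = 4096,
			    .block_start = (1 << 20) + 4096, .block_len = 4096 };
	struct toy_em c = { .start = 8192, .len = 4096,
			    .block_start = 5 << 20, .block_len = 4096 };

	assert(toy_mergeable(&a, &b));	/* adjacent in file and on disk */
	assert(!toy_mergeable(&b, &c));	/* adjacent in file, not on disk */
	printf("toy merge checks passed\n");
	return 0;
}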
889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 
// SPDX-License-Identifier: GPL-2.0 /* * IPVS An implementation of the IP virtual server support for the * LINUX operating system. IPVS is now implemented as a module * over the NetFilter framework. IPVS can be used to build a * high-performance and highly available server based on a * cluster of servers. * * Version 1 is capable of handling both version 0 and version 1 messages. * Version 0 is the plain old format. * Note: Version 0 receivers will just drop Version 1 messages. * Version 1 is capable of handling IPv6, Persistence data, * time-outs, and firewall marks. * In ver. 1, "ip_vs_sync_conn_options" is sent in network order. * Ver. 0 can be turned on by sysctl -w net.ipv4.vs.sync_version=0 * * Definitions: Message: a complete datagram * Sync_conn: a part of a Message * Param Data: an option to a Sync_conn. * * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> * * ip_vs_sync: sync connection info from master load balancer to backups * through multicast * * Changes: * Alexandre Cassen : Added master & backup support at the same time. * Alexandre Cassen : Added SyncID support for incoming sync * messages filtering. * Justin Ossevoort : Fix endian problem on sync message size. * Hans Schillstrom : Added Version 1: i.e. IPv6, * Persistence support, fwmark and time-out.
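 * The currently active version can be read back via sysctl as
 * net.ipv4.vs.sync_version (/proc/sys/net/ipv4/vs/sync_version).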
*/ #define KMSG_COMPONENT "IPVS" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/slab.h> #include <linux/inetdevice.h> #include <linux/net.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/skbuff.h> #include <linux/in.h> #include <linux/igmp.h> /* for ip_mc_join_group */ #include <linux/udp.h> #include <linux/err.h> #include <linux/kthread.h> #include <linux/wait.h> #include <linux/kernel.h> #include <linux/sched/signal.h> #include <asm/unaligned.h> /* Used for ntoh_seq and hton_seq */ #include <net/ip.h> #include <net/sock.h> #include <net/ip_vs.h> #define IP_VS_SYNC_GROUP 0xe0000051 /* multicast addr - 224.0.0.81 */ #define IP_VS_SYNC_PORT 8848 /* multicast port */ #define SYNC_PROTO_VER 1 /* Protocol version in header */ static struct lock_class_key __ipvs_sync_key; /* * IPVS sync connection entry * Version 0, i.e. original version. */ struct ip_vs_sync_conn_v0 { __u8 reserved; /* Protocol, addresses and port numbers */ __u8 protocol; /* Which protocol (TCP/UDP) */ __be16 cport; __be16 vport; __be16 dport; __be32 caddr; /* client address */ __be32 vaddr; /* virtual address */ __be32 daddr; /* destination address */ /* Flags and state transition */ __be16 flags; /* status flags */ __be16 state; /* state info */ /* The sequence options start here */ }; struct ip_vs_sync_conn_options { struct ip_vs_seq in_seq; /* incoming seq. struct */ struct ip_vs_seq out_seq; /* outgoing seq. struct */ }; /* Sync Connection format (sync_conn) 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Type | Protocol | Ver. | Size | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Flags | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | State | cport | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | vport | dport | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | fwmark | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | timeout (in sec.) | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ... | | IP-Addresses (v4 or v6) | | ... | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Optional Parameters. +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Param. Type | Param. Length | Param. data | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | ... | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | Param Type | Param. Length | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Param data | | Last Param data should be padded for 32 bit alignment | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ /* * Type 0, IPv4 sync connection format */ struct ip_vs_sync_v4 { __u8 type; __u8 protocol; /* Which protocol (TCP/UDP) */ __be16 ver_size; /* Version msb 4 bits */ /* Flags and state transition */ __be32 flags; /* status flags */ __be16 state; /* state info */ /* Protocol, addresses and port numbers */ __be16 cport; __be16 vport; __be16 dport; __be32 fwmark; /* Firewall mark from skb */ __be32 timeout; /* cp timeout */ __be32 caddr; /* client address */ __be32 vaddr; /* virtual address */ __be32 daddr; /* destination address */ /* The sequence options start here */ /* PE data padded to 32bit alignment after seq. 
options */ }; /* * Type 2 messages IPv6 */ struct ip_vs_sync_v6 { __u8 type; __u8 protocol; /* Which protocol (TCP/UDP) */ __be16 ver_size; /* Version msb 4 bits */ /* Flags and state transition */ __be32 flags; /* status flags */ __be16 state; /* state info */ /* Protocol, addresses and port numbers */ __be16 cport; __be16 vport; __be16 dport; __be32 fwmark; /* Firewall mark from skb */ __be32 timeout; /* cp timeout */ struct in6_addr caddr; /* client address */ struct in6_addr vaddr; /* virtual address */ struct in6_addr daddr; /* destination address */ /* The sequence options start here */ /* PE data padded to 32bit alignment after seq. options */ }; union ip_vs_sync_conn { struct ip_vs_sync_v4 v4; struct ip_vs_sync_v6 v6; }; /* Bits in Type field in above */ #define STYPE_INET6 0 #define STYPE_F_INET6 (1 << STYPE_INET6) #define SVER_SHIFT 12 /* Shift to get version */ #define SVER_MASK 0x0fff /* Mask to strip version */ #define IPVS_OPT_SEQ_DATA 1 #define IPVS_OPT_PE_DATA 2 #define IPVS_OPT_PE_NAME 3 #define IPVS_OPT_PARAM 7 #define IPVS_OPT_F_SEQ_DATA (1 << (IPVS_OPT_SEQ_DATA-1)) #define IPVS_OPT_F_PE_DATA (1 << (IPVS_OPT_PE_DATA-1)) #define IPVS_OPT_F_PE_NAME (1 << (IPVS_OPT_PE_NAME-1)) #define IPVS_OPT_F_PARAM (1 << (IPVS_OPT_PARAM-1)) struct ip_vs_sync_thread_data { struct task_struct *task; struct netns_ipvs *ipvs; struct socket *sock; char *buf; int id; }; /* Version 0 definition of packet sizes */ #define SIMPLE_CONN_SIZE (sizeof(struct ip_vs_sync_conn_v0)) #define FULL_CONN_SIZE \ (sizeof(struct ip_vs_sync_conn_v0) + sizeof(struct ip_vs_sync_conn_options)) /* The master mulitcasts messages (Datagrams) to the backup load balancers in the following format. Version 1: Note, first byte should be Zero, so ver 0 receivers will drop the packet. 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 0 | SyncID | Size | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Count Conns | Version | Reserved, set to Zero | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | | IPVS Sync Connection (1) | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | . | ~ . ~ | . 
| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | | IPVS Sync Connection (n) | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Version 0 Header 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Count Conns | SyncID | Size | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | IPVS Sync Connection (1) | */ /* Version 0 header */ struct ip_vs_sync_mesg_v0 { __u8 nr_conns; __u8 syncid; __be16 size; /* ip_vs_sync_conn entries start here */ }; /* Version 1 header */ struct ip_vs_sync_mesg { __u8 reserved; /* must be zero */ __u8 syncid; __be16 size; __u8 nr_conns; __s8 version; /* SYNC_PROTO_VER */ __u16 spare; /* ip_vs_sync_conn entries start here */ }; union ipvs_sockaddr { struct sockaddr_in in; struct sockaddr_in6 in6; }; struct ip_vs_sync_buff { struct list_head list; unsigned long firstuse; /* pointers for the message data */ struct ip_vs_sync_mesg *mesg; unsigned char *head; unsigned char *end; }; /* * Copy of struct ip_vs_seq * From unaligned network order to aligned host order */ static void ntoh_seq(struct ip_vs_seq *no, struct ip_vs_seq *ho) { memset(ho, 0, sizeof(*ho)); ho->init_seq = get_unaligned_be32(&no->init_seq); ho->delta = get_unaligned_be32(&no->delta); ho->previous_delta = get_unaligned_be32(&no->previous_delta); } /* * Copy of struct ip_vs_seq * From Aligned host order to unaligned network order */ static void hton_seq(struct ip_vs_seq *ho, struct ip_vs_seq *no) { put_unaligned_be32(ho->init_seq, &no->init_seq); put_unaligned_be32(ho->delta, &no->delta); put_unaligned_be32(ho->previous_delta, &no->previous_delta); } static inline struct ip_vs_sync_buff * sb_dequeue(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms) { struct ip_vs_sync_buff *sb; spin_lock_bh(&ipvs->sync_lock); if (list_empty(&ms->sync_queue)) { sb = NULL; __set_current_state(TASK_INTERRUPTIBLE); } else { sb = list_entry(ms->sync_queue.next, struct ip_vs_sync_buff, list); list_del(&sb->list); ms->sync_queue_len--; if (!ms->sync_queue_len) ms->sync_queue_delay = 0; } spin_unlock_bh(&ipvs->sync_lock); return sb; } /* * Create a new sync buffer for Version 1 proto. */ static inline struct ip_vs_sync_buff * ip_vs_sync_buff_create(struct netns_ipvs *ipvs, unsigned int len) { struct ip_vs_sync_buff *sb; if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC))) return NULL; len = max_t(unsigned int, len + sizeof(struct ip_vs_sync_mesg), ipvs->mcfg.sync_maxlen); sb->mesg = kmalloc(len, GFP_ATOMIC); if (!sb->mesg) { kfree(sb); return NULL; } sb->mesg->reserved = 0; /* old nr_conns i.e. 
must be zero now */ sb->mesg->version = SYNC_PROTO_VER; sb->mesg->syncid = ipvs->mcfg.syncid; sb->mesg->size = htons(sizeof(struct ip_vs_sync_mesg)); sb->mesg->nr_conns = 0; sb->mesg->spare = 0; sb->head = (unsigned char *)sb->mesg + sizeof(struct ip_vs_sync_mesg); sb->end = (unsigned char *)sb->mesg + len; sb->firstuse = jiffies; return sb; } static inline void ip_vs_sync_buff_release(struct ip_vs_sync_buff *sb) { kfree(sb->mesg); kfree(sb); } static inline void sb_queue_tail(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms) { struct ip_vs_sync_buff *sb = ms->sync_buff; spin_lock(&ipvs->sync_lock); if (ipvs->sync_state & IP_VS_STATE_MASTER && ms->sync_queue_len < sysctl_sync_qlen_max(ipvs)) { if (!ms->sync_queue_len) schedule_delayed_work(&ms->master_wakeup_work, max(IPVS_SYNC_SEND_DELAY, 1)); ms->sync_queue_len++; list_add_tail(&sb->list, &ms->sync_queue); if ((++ms->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE) { int id = (int)(ms - ipvs->ms); wake_up_process(ipvs->master_tinfo[id].task); } } else ip_vs_sync_buff_release(sb); spin_unlock(&ipvs->sync_lock); } /* * Get the current sync buffer if it has been created for more * than the specified time or the specified time is zero. */ static inline struct ip_vs_sync_buff * get_curr_sync_buff(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms, unsigned long time) { struct ip_vs_sync_buff *sb; spin_lock_bh(&ipvs->sync_buff_lock); sb = ms->sync_buff; if (sb && time_after_eq(jiffies - sb->firstuse, time)) { ms->sync_buff = NULL; __set_current_state(TASK_RUNNING); } else sb = NULL; spin_unlock_bh(&ipvs->sync_buff_lock); return sb; } static inline int select_master_thread_id(struct netns_ipvs *ipvs, struct ip_vs_conn *cp) { return ((long) cp >> (1 + ilog2(sizeof(*cp)))) & ipvs->threads_mask; } /* * Create a new sync buffer for Version 0 proto. */ static inline struct ip_vs_sync_buff * ip_vs_sync_buff_create_v0(struct netns_ipvs *ipvs, unsigned int len) { struct ip_vs_sync_buff *sb; struct ip_vs_sync_mesg_v0 *mesg; if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC))) return NULL; len = max_t(unsigned int, len + sizeof(struct ip_vs_sync_mesg_v0), ipvs->mcfg.sync_maxlen); sb->mesg = kmalloc(len, GFP_ATOMIC); if (!sb->mesg) { kfree(sb); return NULL; } mesg = (struct ip_vs_sync_mesg_v0 *)sb->mesg; mesg->nr_conns = 0; mesg->syncid = ipvs->mcfg.syncid; mesg->size = htons(sizeof(struct ip_vs_sync_mesg_v0)); sb->head = (unsigned char *)mesg + sizeof(struct ip_vs_sync_mesg_v0); sb->end = (unsigned char *)mesg + len; sb->firstuse = jiffies; return sb; } /* Check if connection is controlled by persistence */ static inline bool in_persistence(struct ip_vs_conn *cp) { for (cp = cp->control; cp; cp = cp->control) { if (cp->flags & IP_VS_CONN_F_TEMPLATE) return true; } return false; } /* Check if conn should be synced. * pkts: conn packets, use sysctl_sync_threshold to avoid packet check * - (1) sync_refresh_period: reduce sync rate. 
Additionally, retry * sync_retries times with period of sync_refresh_period/8 * - (2) if both sync_refresh_period and sync_period are 0 send sync only * for state changes or only once when pkts matches sync_threshold * - (3) templates: rate can be reduced only with sync_refresh_period or * with (2) */ static int ip_vs_sync_conn_needed(struct netns_ipvs *ipvs, struct ip_vs_conn *cp, int pkts) { unsigned long orig = READ_ONCE(cp->sync_endtime); unsigned long now = jiffies; unsigned long n = (now + cp->timeout) & ~3UL; unsigned int sync_refresh_period; int sync_period; int force; /* Check if we sync in current state */ if (unlikely(cp->flags & IP_VS_CONN_F_TEMPLATE)) force = 0; else if (unlikely(sysctl_sync_persist_mode(ipvs) && in_persistence(cp))) return 0; else if (likely(cp->protocol == IPPROTO_TCP)) { if (!((1 << cp->state) & ((1 << IP_VS_TCP_S_ESTABLISHED) | (1 << IP_VS_TCP_S_FIN_WAIT) | (1 << IP_VS_TCP_S_CLOSE) | (1 << IP_VS_TCP_S_CLOSE_WAIT) | (1 << IP_VS_TCP_S_TIME_WAIT)))) return 0; force = cp->state != cp->old_state; if (force && cp->state != IP_VS_TCP_S_ESTABLISHED) goto set; } else if (unlikely(cp->protocol == IPPROTO_SCTP)) { if (!((1 << cp->state) & ((1 << IP_VS_SCTP_S_ESTABLISHED) | (1 << IP_VS_SCTP_S_SHUTDOWN_SENT) | (1 << IP_VS_SCTP_S_SHUTDOWN_RECEIVED) | (1 << IP_VS_SCTP_S_SHUTDOWN_ACK_SENT) | (1 << IP_VS_SCTP_S_CLOSED)))) return 0; force = cp->state != cp->old_state; if (force && cp->state != IP_VS_SCTP_S_ESTABLISHED) goto set; } else { /* UDP or another protocol with single state */ force = 0; } sync_refresh_period = sysctl_sync_refresh_period(ipvs); if (sync_refresh_period > 0) { long diff = n - orig; long min_diff = max(cp->timeout >> 1, 10UL * HZ); /* Avoid sync if difference is below sync_refresh_period * and below the half timeout. */ if (abs(diff) < min_t(long, sync_refresh_period, min_diff)) { int retries = orig & 3; if (retries >= sysctl_sync_retries(ipvs)) return 0; if (time_before(now, orig - cp->timeout + (sync_refresh_period >> 3))) return 0; n |= retries + 1; } } sync_period = sysctl_sync_period(ipvs); if (sync_period > 0) { if (!(cp->flags & IP_VS_CONN_F_TEMPLATE) && pkts % sync_period != sysctl_sync_threshold(ipvs)) return 0; } else if (!sync_refresh_period && pkts != sysctl_sync_threshold(ipvs)) return 0; set: cp->old_state = cp->state; n = cmpxchg(&cp->sync_endtime, orig, n); return n == orig || force; } /* * Version 0 , could be switched in by sys_ctl. * Add an ip_vs_conn information into the current sync_buff. */ static void ip_vs_sync_conn_v0(struct netns_ipvs *ipvs, struct ip_vs_conn *cp, int pkts) { struct ip_vs_sync_mesg_v0 *m; struct ip_vs_sync_conn_v0 *s; struct ip_vs_sync_buff *buff; struct ipvs_master_sync_state *ms; int id; unsigned int len; if (unlikely(cp->af != AF_INET)) return; /* Do not sync ONE PACKET */ if (cp->flags & IP_VS_CONN_F_ONE_PACKET) return; if (!ip_vs_sync_conn_needed(ipvs, cp, pkts)) return; spin_lock_bh(&ipvs->sync_buff_lock); if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { spin_unlock_bh(&ipvs->sync_buff_lock); return; } id = select_master_thread_id(ipvs, cp); ms = &ipvs->ms[id]; buff = ms->sync_buff; len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ? 
FULL_CONN_SIZE : SIMPLE_CONN_SIZE; if (buff) { m = (struct ip_vs_sync_mesg_v0 *) buff->mesg; /* Send buffer if it is for v1 */ if (buff->head + len > buff->end || !m->nr_conns) { sb_queue_tail(ipvs, ms); ms->sync_buff = NULL; buff = NULL; } } if (!buff) { buff = ip_vs_sync_buff_create_v0(ipvs, len); if (!buff) { spin_unlock_bh(&ipvs->sync_buff_lock); pr_err("ip_vs_sync_buff_create failed.\n"); return; } ms->sync_buff = buff; } m = (struct ip_vs_sync_mesg_v0 *) buff->mesg; s = (struct ip_vs_sync_conn_v0 *) buff->head; /* copy members */ s->reserved = 0; s->protocol = cp->protocol; s->cport = cp->cport; s->vport = cp->vport; s->dport = cp->dport; s->caddr = cp->caddr.ip; s->vaddr = cp->vaddr.ip; s->daddr = cp->daddr.ip; s->flags = htons(cp->flags & ~IP_VS_CONN_F_HASHED); s->state = htons(cp->state); if (cp->flags & IP_VS_CONN_F_SEQ_MASK) { struct ip_vs_sync_conn_options *opt = (struct ip_vs_sync_conn_options *)&s[1]; memcpy(opt, &cp->sync_conn_opt, sizeof(*opt)); } m->nr_conns++; m->size = htons(ntohs(m->size) + len); buff->head += len; spin_unlock_bh(&ipvs->sync_buff_lock); /* synchronize its controller if it has */ cp = cp->control; if (cp) { if (cp->flags & IP_VS_CONN_F_TEMPLATE) pkts = atomic_inc_return(&cp->in_pkts); else pkts = sysctl_sync_threshold(ipvs); ip_vs_sync_conn(ipvs, cp, pkts); } } /* * Add an ip_vs_conn information into the current sync_buff. * Called by ip_vs_in. * Sending Version 1 messages */ void ip_vs_sync_conn(struct netns_ipvs *ipvs, struct ip_vs_conn *cp, int pkts) { struct ip_vs_sync_mesg *m; union ip_vs_sync_conn *s; struct ip_vs_sync_buff *buff; struct ipvs_master_sync_state *ms; int id; __u8 *p; unsigned int len, pe_name_len, pad; /* Handle old version of the protocol */ if (sysctl_sync_ver(ipvs) == 0) { ip_vs_sync_conn_v0(ipvs, cp, pkts); return; } /* Do not sync ONE PACKET */ if (cp->flags & IP_VS_CONN_F_ONE_PACKET) goto control; sloop: if (!ip_vs_sync_conn_needed(ipvs, cp, pkts)) goto control; /* Sanity checks */ pe_name_len = 0; if (cp->pe_data_len) { if (!cp->pe_data || !cp->dest) { IP_VS_ERR_RL("SYNC, connection pe_data invalid\n"); return; } pe_name_len = strnlen(cp->pe->name, IP_VS_PENAME_MAXLEN); } spin_lock_bh(&ipvs->sync_buff_lock); if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { spin_unlock_bh(&ipvs->sync_buff_lock); return; } id = select_master_thread_id(ipvs, cp); ms = &ipvs->ms[id]; #ifdef CONFIG_IP_VS_IPV6 if (cp->af == AF_INET6) len = sizeof(struct ip_vs_sync_v6); else #endif len = sizeof(struct ip_vs_sync_v4); if (cp->flags & IP_VS_CONN_F_SEQ_MASK) len += sizeof(struct ip_vs_sync_conn_options) + 2; if (cp->pe_data_len) len += cp->pe_data_len + 2; /* + Param hdr field */ if (pe_name_len) len += pe_name_len + 2; /* check if there is a space for this one */ pad = 0; buff = ms->sync_buff; if (buff) { m = buff->mesg; pad = (4 - (size_t) buff->head) & 3; /* Send buffer if it is for v0 */ if (buff->head + len + pad > buff->end || m->reserved) { sb_queue_tail(ipvs, ms); ms->sync_buff = NULL; buff = NULL; pad = 0; } } if (!buff) { buff = ip_vs_sync_buff_create(ipvs, len); if (!buff) { spin_unlock_bh(&ipvs->sync_buff_lock); pr_err("ip_vs_sync_buff_create failed.\n"); return; } ms->sync_buff = buff; m = buff->mesg; } p = buff->head; buff->head += pad + len; m->size = htons(ntohs(m->size) + pad + len); /* Add ev. padding from prev. sync_conn */ while (pad--) *(p++) = 0; s = (union ip_vs_sync_conn *)p; /* Set message type & copy members */ s->v4.type = (cp->af == AF_INET6 ? 
STYPE_F_INET6 : 0); s->v4.ver_size = htons(len & SVER_MASK); /* Version 0 */ s->v4.flags = htonl(cp->flags & ~IP_VS_CONN_F_HASHED); s->v4.state = htons(cp->state); s->v4.protocol = cp->protocol; s->v4.cport = cp->cport; s->v4.vport = cp->vport; s->v4.dport = cp->dport; s->v4.fwmark = htonl(cp->fwmark); s->v4.timeout = htonl(cp->timeout / HZ); m->nr_conns++; #ifdef CONFIG_IP_VS_IPV6 if (cp->af == AF_INET6) { p += sizeof(struct ip_vs_sync_v6); s->v6.caddr = cp->caddr.in6; s->v6.vaddr = cp->vaddr.in6; s->v6.daddr = cp->daddr.in6; } else #endif { p += sizeof(struct ip_vs_sync_v4); /* options ptr */ s->v4.caddr = cp->caddr.ip; s->v4.vaddr = cp->vaddr.ip; s->v4.daddr = cp->daddr.ip; } if (cp->flags & IP_VS_CONN_F_SEQ_MASK) { *(p++) = IPVS_OPT_SEQ_DATA; *(p++) = sizeof(struct ip_vs_sync_conn_options); hton_seq((struct ip_vs_seq *)p, &cp->in_seq); p += sizeof(struct ip_vs_seq); hton_seq((struct ip_vs_seq *)p, &cp->out_seq); p += sizeof(struct ip_vs_seq); } /* Handle pe data */ if (cp->pe_data_len && cp->pe_data) { *(p++) = IPVS_OPT_PE_DATA; *(p++) = cp->pe_data_len; memcpy(p, cp->pe_data, cp->pe_data_len); p += cp->pe_data_len; if (pe_name_len) { /* Add PE_NAME */ *(p++) = IPVS_OPT_PE_NAME; *(p++) = pe_name_len; memcpy(p, cp->pe->name, pe_name_len); p += pe_name_len; } } spin_unlock_bh(&ipvs->sync_buff_lock); control: /* synchronize its controller if it has */ cp = cp->control; if (!cp) return; if (cp->flags & IP_VS_CONN_F_TEMPLATE) pkts = atomic_inc_return(&cp->in_pkts); else pkts = sysctl_sync_threshold(ipvs); goto sloop; } /* * fill_param used by version 1 */ static inline int ip_vs_conn_fill_param_sync(struct netns_ipvs *ipvs, int af, union ip_vs_sync_conn *sc, struct ip_vs_conn_param *p, __u8 *pe_data, unsigned int pe_data_len, __u8 *pe_name, unsigned int pe_name_len) { #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) ip_vs_conn_fill_param(ipvs, af, sc->v6.protocol, (const union nf_inet_addr *)&sc->v6.caddr, sc->v6.cport, (const union nf_inet_addr *)&sc->v6.vaddr, sc->v6.vport, p); else #endif ip_vs_conn_fill_param(ipvs, af, sc->v4.protocol, (const union nf_inet_addr *)&sc->v4.caddr, sc->v4.cport, (const union nf_inet_addr *)&sc->v4.vaddr, sc->v4.vport, p); /* Handle pe data */ if (pe_data_len) { if (pe_name_len) { char buff[IP_VS_PENAME_MAXLEN+1]; memcpy(buff, pe_name, pe_name_len); buff[pe_name_len]=0; p->pe = __ip_vs_pe_getbyname(buff); if (!p->pe) { IP_VS_DBG(3, "BACKUP, no %s engine found/loaded\n", buff); return 1; } } else { IP_VS_ERR_RL("BACKUP, Invalid PE parameters\n"); return 1; } p->pe_data = kmemdup(pe_data, pe_data_len, GFP_ATOMIC); if (!p->pe_data) { module_put(p->pe->module); return -ENOMEM; } p->pe_data_len = pe_data_len; } return 0; } /* * Connection Add / Update. * Common for version 0 and 1 reception of backup sync_conns. * Param: ... * timeout is in sec. */ static void ip_vs_proc_conn(struct netns_ipvs *ipvs, struct ip_vs_conn_param *param, unsigned int flags, unsigned int state, unsigned int protocol, unsigned int type, const union nf_inet_addr *daddr, __be16 dport, unsigned long timeout, __u32 fwmark, struct ip_vs_sync_conn_options *opt) { struct ip_vs_dest *dest; struct ip_vs_conn *cp; if (!(flags & IP_VS_CONN_F_TEMPLATE)) { cp = ip_vs_conn_in_get(param); if (cp && ((cp->dport != dport) || !ip_vs_addr_equal(cp->daf, &cp->daddr, daddr))) { if (!(flags & IP_VS_CONN_F_INACTIVE)) { ip_vs_conn_expire_now(cp); __ip_vs_conn_put(cp); cp = NULL; } else { /* This is the expiration message for the * connection that was already replaced, so we * just ignore it. 
*/ __ip_vs_conn_put(cp); kfree(param->pe_data); return; } } } else { cp = ip_vs_ct_in_get(param); } if (cp) { /* Free pe_data */ kfree(param->pe_data); dest = cp->dest; spin_lock_bh(&cp->lock); if ((cp->flags ^ flags) & IP_VS_CONN_F_INACTIVE && !(flags & IP_VS_CONN_F_TEMPLATE) && dest) { if (flags & IP_VS_CONN_F_INACTIVE) { atomic_dec(&dest->activeconns); atomic_inc(&dest->inactconns); } else { atomic_inc(&dest->activeconns); atomic_dec(&dest->inactconns); } } flags &= IP_VS_CONN_F_BACKUP_UPD_MASK; flags |= cp->flags & ~IP_VS_CONN_F_BACKUP_UPD_MASK; cp->flags = flags; spin_unlock_bh(&cp->lock); if (!dest) ip_vs_try_bind_dest(cp); } else { /* * Find the appropriate destination for the connection. * If it is not found the connection will remain unbound * but still handled. */ rcu_read_lock(); /* This function is only invoked by the synchronization * code. We do not currently support heterogeneous pools * with synchronization, so we can make the assumption that * the svc_af is the same as the dest_af */ dest = ip_vs_find_dest(ipvs, type, type, daddr, dport, param->vaddr, param->vport, protocol, fwmark, flags); cp = ip_vs_conn_new(param, type, daddr, dport, flags, dest, fwmark); rcu_read_unlock(); if (!cp) { kfree(param->pe_data); IP_VS_DBG(2, "BACKUP, add new conn. failed\n"); return; } if (!(flags & IP_VS_CONN_F_TEMPLATE)) kfree(param->pe_data); } if (opt) { cp->in_seq = opt->in_seq; cp->out_seq = opt->out_seq; } atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs)); cp->state = state; cp->old_state = cp->state; /* * For Ver 0 messages style * - Not possible to recover the right timeout for templates * - can not find the right fwmark * virtual service. If needed, we can do it for * non-fwmark persistent services. * Ver 1 messages style. * - No problem. 
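 * (Version 1 carries the timeout in seconds on the wire, so it is simply
 *  restored below as timeout * HZ.)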
*/ if (timeout) { if (timeout > MAX_SCHEDULE_TIMEOUT / HZ) timeout = MAX_SCHEDULE_TIMEOUT / HZ; cp->timeout = timeout*HZ; } else { struct ip_vs_proto_data *pd; pd = ip_vs_proto_data_get(ipvs, protocol); if (!(flags & IP_VS_CONN_F_TEMPLATE) && pd && pd->timeout_table) cp->timeout = pd->timeout_table[state]; else cp->timeout = (3*60*HZ); } ip_vs_conn_put(cp); } /* * Process received multicast message for Version 0 */ static void ip_vs_process_message_v0(struct netns_ipvs *ipvs, const char *buffer, const size_t buflen) { struct ip_vs_sync_mesg_v0 *m = (struct ip_vs_sync_mesg_v0 *)buffer; struct ip_vs_sync_conn_v0 *s; struct ip_vs_sync_conn_options *opt; struct ip_vs_protocol *pp; struct ip_vs_conn_param param; char *p; int i; p = (char *)buffer + sizeof(struct ip_vs_sync_mesg_v0); for (i=0; i<m->nr_conns; i++) { unsigned int flags, state; if (p + SIMPLE_CONN_SIZE > buffer+buflen) { IP_VS_ERR_RL("BACKUP v0, bogus conn\n"); return; } s = (struct ip_vs_sync_conn_v0 *) p; flags = ntohs(s->flags) | IP_VS_CONN_F_SYNC; flags &= ~IP_VS_CONN_F_HASHED; if (flags & IP_VS_CONN_F_SEQ_MASK) { opt = (struct ip_vs_sync_conn_options *)&s[1]; p += FULL_CONN_SIZE; if (p > buffer+buflen) { IP_VS_ERR_RL("BACKUP v0, Dropping buffer bogus conn options\n"); return; } } else { opt = NULL; p += SIMPLE_CONN_SIZE; } state = ntohs(s->state); if (!(flags & IP_VS_CONN_F_TEMPLATE)) { pp = ip_vs_proto_get(s->protocol); if (!pp) { IP_VS_DBG(2, "BACKUP v0, Unsupported protocol %u\n", s->protocol); continue; } if (state >= pp->num_states) { IP_VS_DBG(2, "BACKUP v0, Invalid %s state %u\n", pp->name, state); continue; } } else { if (state >= IP_VS_CTPL_S_LAST) IP_VS_DBG(7, "BACKUP v0, Invalid tpl state %u\n", state); } ip_vs_conn_fill_param(ipvs, AF_INET, s->protocol, (const union nf_inet_addr *)&s->caddr, s->cport, (const union nf_inet_addr *)&s->vaddr, s->vport, ¶m); /* Send timeout as Zero */ ip_vs_proc_conn(ipvs, ¶m, flags, state, s->protocol, AF_INET, (union nf_inet_addr *)&s->daddr, s->dport, 0, 0, opt); } } /* * Handle options */ static inline int ip_vs_proc_seqopt(__u8 *p, unsigned int plen, __u32 *opt_flags, struct ip_vs_sync_conn_options *opt) { struct ip_vs_sync_conn_options *topt; topt = (struct ip_vs_sync_conn_options *)p; if (plen != sizeof(struct ip_vs_sync_conn_options)) { IP_VS_DBG(2, "BACKUP, bogus conn options length\n"); return -EINVAL; } if (*opt_flags & IPVS_OPT_F_SEQ_DATA) { IP_VS_DBG(2, "BACKUP, conn options found twice\n"); return -EINVAL; } ntoh_seq(&topt->in_seq, &opt->in_seq); ntoh_seq(&topt->out_seq, &opt->out_seq); *opt_flags |= IPVS_OPT_F_SEQ_DATA; return 0; } static int ip_vs_proc_str(__u8 *p, unsigned int plen, unsigned int *data_len, __u8 **data, unsigned int maxlen, __u32 *opt_flags, __u32 flag) { if (plen > maxlen) { IP_VS_DBG(2, "BACKUP, bogus par.data len > %d\n", maxlen); return -EINVAL; } if (*opt_flags & flag) { IP_VS_DBG(2, "BACKUP, Par.data found twice 0x%x\n", flag); return -EINVAL; } *data_len = plen; *data = p; *opt_flags |= flag; return 0; } /* * Process a Version 1 sync. 
connection */ static inline int ip_vs_proc_sync_conn(struct netns_ipvs *ipvs, __u8 *p, __u8 *msg_end) { struct ip_vs_sync_conn_options opt; union ip_vs_sync_conn *s; struct ip_vs_protocol *pp; struct ip_vs_conn_param param; __u32 flags; unsigned int af, state, pe_data_len=0, pe_name_len=0; __u8 *pe_data=NULL, *pe_name=NULL; __u32 opt_flags=0; int retc=0; s = (union ip_vs_sync_conn *) p; if (s->v6.type & STYPE_F_INET6) { #ifdef CONFIG_IP_VS_IPV6 af = AF_INET6; p += sizeof(struct ip_vs_sync_v6); #else IP_VS_DBG(3,"BACKUP, IPv6 msg received, and IPVS is not compiled for IPv6\n"); retc = 10; goto out; #endif } else if (!s->v4.type) { af = AF_INET; p += sizeof(struct ip_vs_sync_v4); } else { return -10; } if (p > msg_end) return -20; /* Process optional params check Type & Len. */ while (p < msg_end) { int ptype; int plen; if (p+2 > msg_end) return -30; ptype = *(p++); plen = *(p++); if (!plen || ((p + plen) > msg_end)) return -40; /* Handle seq option p = param data */ switch (ptype & ~IPVS_OPT_F_PARAM) { case IPVS_OPT_SEQ_DATA: if (ip_vs_proc_seqopt(p, plen, &opt_flags, &opt)) return -50; break; case IPVS_OPT_PE_DATA: if (ip_vs_proc_str(p, plen, &pe_data_len, &pe_data, IP_VS_PEDATA_MAXLEN, &opt_flags, IPVS_OPT_F_PE_DATA)) return -60; break; case IPVS_OPT_PE_NAME: if (ip_vs_proc_str(p, plen,&pe_name_len, &pe_name, IP_VS_PENAME_MAXLEN, &opt_flags, IPVS_OPT_F_PE_NAME)) return -70; break; default: /* Param data mandatory ? */ if (!(ptype & IPVS_OPT_F_PARAM)) { IP_VS_DBG(3, "BACKUP, Unknown mandatory param %d found\n", ptype & ~IPVS_OPT_F_PARAM); retc = 20; goto out; } } p += plen; /* Next option */ } /* Get flags and Mask off unsupported */ flags = ntohl(s->v4.flags) & IP_VS_CONN_F_BACKUP_MASK; flags |= IP_VS_CONN_F_SYNC; state = ntohs(s->v4.state); if (!(flags & IP_VS_CONN_F_TEMPLATE)) { pp = ip_vs_proto_get(s->v4.protocol); if (!pp) { IP_VS_DBG(3,"BACKUP, Unsupported protocol %u\n", s->v4.protocol); retc = 30; goto out; } if (state >= pp->num_states) { IP_VS_DBG(3, "BACKUP, Invalid %s state %u\n", pp->name, state); retc = 40; goto out; } } else { if (state >= IP_VS_CTPL_S_LAST) IP_VS_DBG(7, "BACKUP, Invalid tpl state %u\n", state); } if (ip_vs_conn_fill_param_sync(ipvs, af, s, ¶m, pe_data, pe_data_len, pe_name, pe_name_len)) { retc = 50; goto out; } /* If only IPv4, just silent skip IPv6 */ if (af == AF_INET) ip_vs_proc_conn(ipvs, ¶m, flags, state, s->v4.protocol, af, (union nf_inet_addr *)&s->v4.daddr, s->v4.dport, ntohl(s->v4.timeout), ntohl(s->v4.fwmark), (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL) ); #ifdef CONFIG_IP_VS_IPV6 else ip_vs_proc_conn(ipvs, ¶m, flags, state, s->v6.protocol, af, (union nf_inet_addr *)&s->v6.daddr, s->v6.dport, ntohl(s->v6.timeout), ntohl(s->v6.fwmark), (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL) ); #endif ip_vs_pe_put(param.pe); return 0; /* Error exit */ out: IP_VS_DBG(2, "BACKUP, Single msg dropped err:%d\n", retc); return retc; } /* * Process received multicast message and create the corresponding * ip_vs_conn entries. 
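 * (A datagram is parsed as Version 1 only when its header has reserved == 0,
 *  version == SYNC_PROTO_VER and spare == 0; anything else is handed to the
 *  Version 0 parser.)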
* Handles Version 0 & 1 */ static void ip_vs_process_message(struct netns_ipvs *ipvs, __u8 *buffer, const size_t buflen) { struct ip_vs_sync_mesg *m2 = (struct ip_vs_sync_mesg *)buffer; __u8 *p, *msg_end; int i, nr_conns; if (buflen < sizeof(struct ip_vs_sync_mesg_v0)) { IP_VS_DBG(2, "BACKUP, message header too short\n"); return; } if (buflen != ntohs(m2->size)) { IP_VS_DBG(2, "BACKUP, bogus message size\n"); return; } /* SyncID sanity check */ if (ipvs->bcfg.syncid != 0 && m2->syncid != ipvs->bcfg.syncid) { IP_VS_DBG(7, "BACKUP, Ignoring syncid = %d\n", m2->syncid); return; } /* Handle version 1 message */ if ((m2->version == SYNC_PROTO_VER) && (m2->reserved == 0) && (m2->spare == 0)) { msg_end = buffer + sizeof(struct ip_vs_sync_mesg); nr_conns = m2->nr_conns; for (i=0; i<nr_conns; i++) { union ip_vs_sync_conn *s; unsigned int size; int retc; p = msg_end; if (p + sizeof(s->v4) > buffer+buflen) { IP_VS_ERR_RL("BACKUP, Dropping buffer, too small\n"); return; } s = (union ip_vs_sync_conn *)p; size = ntohs(s->v4.ver_size) & SVER_MASK; msg_end = p + size; /* Basic sanity checks */ if (msg_end > buffer+buflen) { IP_VS_ERR_RL("BACKUP, Dropping buffer, msg > buffer\n"); return; } if (ntohs(s->v4.ver_size) >> SVER_SHIFT) { IP_VS_ERR_RL("BACKUP, Dropping buffer, Unknown version %d\n", ntohs(s->v4.ver_size) >> SVER_SHIFT); return; } /* Process a single sync_conn */ retc = ip_vs_proc_sync_conn(ipvs, p, msg_end); if (retc < 0) { IP_VS_ERR_RL("BACKUP, Dropping buffer, Err: %d in decoding\n", retc); return; } /* Make sure we have 32 bit alignment */ msg_end = p + ((size + 3) & ~3); } } else { /* Old type of message */ ip_vs_process_message_v0(ipvs, buffer, buflen); return; } } /* * Setup sndbuf (mode=1) or rcvbuf (mode=0) */ static void set_sock_size(struct sock *sk, int mode, int val) { /* setsockopt(sock, SOL_SOCKET, SO_SNDBUF, &val, sizeof(val)); */ /* setsockopt(sock, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val)); */ lock_sock(sk); if (mode) { val = clamp_t(int, val, (SOCK_MIN_SNDBUF + 1) / 2, READ_ONCE(sysctl_wmem_max)); sk->sk_sndbuf = val * 2; sk->sk_userlocks |= SOCK_SNDBUF_LOCK; } else { val = clamp_t(int, val, (SOCK_MIN_RCVBUF + 1) / 2, READ_ONCE(sysctl_rmem_max)); sk->sk_rcvbuf = val * 2; sk->sk_userlocks |= SOCK_RCVBUF_LOCK; } release_sock(sk); } /* * Setup loopback of outgoing multicasts on a sending socket */ static void set_mcast_loop(struct sock *sk, u_char loop) { /* setsockopt(sock, SOL_IP, IP_MULTICAST_LOOP, &loop, sizeof(loop)); */ inet_assign_bit(MC_LOOP, sk, loop); #ifdef CONFIG_IP_VS_IPV6 if (READ_ONCE(sk->sk_family) == AF_INET6) { /* IPV6_MULTICAST_LOOP */ inet6_assign_bit(MC6_LOOP, sk, loop); } #endif } /* * Specify TTL for outgoing multicasts on a sending socket */ static void set_mcast_ttl(struct sock *sk, u_char ttl) { struct inet_sock *inet = inet_sk(sk); /* setsockopt(sock, SOL_IP, IP_MULTICAST_TTL, &ttl, sizeof(ttl)); */ lock_sock(sk); WRITE_ONCE(inet->mc_ttl, ttl); #ifdef CONFIG_IP_VS_IPV6 if (sk->sk_family == AF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); /* IPV6_MULTICAST_HOPS */ WRITE_ONCE(np->mcast_hops, ttl); } #endif release_sock(sk); } /* Control fragmentation of messages */ static void set_mcast_pmtudisc(struct sock *sk, int val) { struct inet_sock *inet = inet_sk(sk); /* setsockopt(sock, SOL_IP, IP_MTU_DISCOVER, &val, sizeof(val)); */ lock_sock(sk); WRITE_ONCE(inet->pmtudisc, val); #ifdef CONFIG_IP_VS_IPV6 if (sk->sk_family == AF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); /* IPV6_MTU_DISCOVER */ WRITE_ONCE(np->pmtudisc, val); } #endif release_sock(sk); } /* * 
Specifiy default interface for outgoing multicasts */ static int set_mcast_if(struct sock *sk, struct net_device *dev) { struct inet_sock *inet = inet_sk(sk); if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if) return -EINVAL; lock_sock(sk); inet->mc_index = dev->ifindex; /* inet->mc_addr = 0; */ #ifdef CONFIG_IP_VS_IPV6 if (sk->sk_family == AF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); /* IPV6_MULTICAST_IF */ np->mcast_oif = dev->ifindex; } #endif release_sock(sk); return 0; } /* * Join a multicast group. * the group is specified by a class D multicast address 224.0.0.0/8 * in the in_addr structure passed in as a parameter. */ static int join_mcast_group(struct sock *sk, struct in_addr *addr, struct net_device *dev) { struct ip_mreqn mreq; int ret; memset(&mreq, 0, sizeof(mreq)); memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr)); if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if) return -EINVAL; mreq.imr_ifindex = dev->ifindex; lock_sock(sk); ret = ip_mc_join_group(sk, &mreq); release_sock(sk); return ret; } #ifdef CONFIG_IP_VS_IPV6 static int join_mcast_group6(struct sock *sk, struct in6_addr *addr, struct net_device *dev) { int ret; if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if) return -EINVAL; lock_sock(sk); ret = ipv6_sock_mc_join(sk, dev->ifindex, addr); release_sock(sk); return ret; } #endif static int bind_mcastif_addr(struct socket *sock, struct net_device *dev) { __be32 addr; struct sockaddr_in sin; addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE); if (!addr) pr_err("You probably need to specify IP address on " "multicast interface.\n"); IP_VS_DBG(7, "binding socket with (%s) %pI4\n", dev->name, &addr); /* Now bind the socket with the address of multicast interface */ sin.sin_family = AF_INET; sin.sin_addr.s_addr = addr; sin.sin_port = 0; return kernel_bind(sock, (struct sockaddr *)&sin, sizeof(sin)); } static void get_mcast_sockaddr(union ipvs_sockaddr *sa, int *salen, struct ipvs_sync_daemon_cfg *c, int id) { if (AF_INET6 == c->mcast_af) { sa->in6 = (struct sockaddr_in6) { .sin6_family = AF_INET6, .sin6_port = htons(c->mcast_port + id), }; sa->in6.sin6_addr = c->mcast_group.in6; *salen = sizeof(sa->in6); } else { sa->in = (struct sockaddr_in) { .sin_family = AF_INET, .sin_port = htons(c->mcast_port + id), }; sa->in.sin_addr = c->mcast_group.in; *salen = sizeof(sa->in); } } /* * Set up sending multicast socket over UDP */ static int make_send_sock(struct netns_ipvs *ipvs, int id, struct net_device *dev, struct socket **sock_ret) { /* multicast addr */ union ipvs_sockaddr mcast_addr; struct socket *sock; int result, salen; /* First create a socket */ result = sock_create_kern(ipvs->net, ipvs->mcfg.mcast_af, SOCK_DGRAM, IPPROTO_UDP, &sock); if (result < 0) { pr_err("Error during creation of socket; terminating\n"); goto error; } *sock_ret = sock; result = set_mcast_if(sock->sk, dev); if (result < 0) { pr_err("Error setting outbound mcast interface\n"); goto error; } set_mcast_loop(sock->sk, 0); set_mcast_ttl(sock->sk, ipvs->mcfg.mcast_ttl); /* Allow fragmentation if MTU changes */ set_mcast_pmtudisc(sock->sk, IP_PMTUDISC_DONT); result = sysctl_sync_sock_size(ipvs); if (result > 0) set_sock_size(sock->sk, 1, result); if (AF_INET == ipvs->mcfg.mcast_af) result = bind_mcastif_addr(sock, dev); else result = 0; if (result < 0) { pr_err("Error binding address of the mcast interface\n"); goto error; } get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->mcfg, id); result = kernel_connect(sock, (struct sockaddr *)&mcast_addr, salen, 0); if 
(result < 0) { pr_err("Error connecting to the multicast addr\n"); goto error; } return 0; error: return result; } /* * Set up receiving multicast socket over UDP */ static int make_receive_sock(struct netns_ipvs *ipvs, int id, struct net_device *dev, struct socket **sock_ret) { /* multicast addr */ union ipvs_sockaddr mcast_addr; struct socket *sock; int result, salen; /* First create a socket */ result = sock_create_kern(ipvs->net, ipvs->bcfg.mcast_af, SOCK_DGRAM, IPPROTO_UDP, &sock); if (result < 0) { pr_err("Error during creation of socket; terminating\n"); goto error; } *sock_ret = sock; /* it is equivalent to the REUSEADDR option in user-space */ sock->sk->sk_reuse = SK_CAN_REUSE; result = sysctl_sync_sock_size(ipvs); if (result > 0) set_sock_size(sock->sk, 0, result); get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id); sock->sk->sk_bound_dev_if = dev->ifindex; result = kernel_bind(sock, (struct sockaddr *)&mcast_addr, salen); if (result < 0) { pr_err("Error binding to the multicast addr\n"); goto error; } /* join the multicast group */ #ifdef CONFIG_IP_VS_IPV6 if (ipvs->bcfg.mcast_af == AF_INET6) result = join_mcast_group6(sock->sk, &mcast_addr.in6.sin6_addr, dev); else #endif result = join_mcast_group(sock->sk, &mcast_addr.in.sin_addr, dev); if (result < 0) { pr_err("Error joining to the multicast group\n"); goto error; } return 0; error: return result; } static int ip_vs_send_async(struct socket *sock, const char *buffer, const size_t length) { struct msghdr msg = {.msg_flags = MSG_DONTWAIT|MSG_NOSIGNAL}; struct kvec iov; int len; iov.iov_base = (void *)buffer; iov.iov_len = length; len = kernel_sendmsg(sock, &msg, &iov, 1, (size_t)(length)); return len; } static int ip_vs_send_sync_msg(struct socket *sock, struct ip_vs_sync_mesg *msg) { int msize; int ret; msize = ntohs(msg->size); ret = ip_vs_send_async(sock, (char *)msg, msize); if (ret >= 0 || ret == -EAGAIN) return ret; pr_err("ip_vs_send_async error %d\n", ret); return 0; } static int ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen) { struct msghdr msg = {NULL,}; struct kvec iov = {buffer, buflen}; int len; /* Receive a packet */ iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, buflen); len = sock_recvmsg(sock, &msg, MSG_DONTWAIT); if (len < 0) return len; return len; } /* Wakeup the master thread for sending */ static void master_wakeup_work_handler(struct work_struct *work) { struct ipvs_master_sync_state *ms = container_of(work, struct ipvs_master_sync_state, master_wakeup_work.work); struct netns_ipvs *ipvs = ms->ipvs; spin_lock_bh(&ipvs->sync_lock); if (ms->sync_queue_len && ms->sync_queue_delay < IPVS_SYNC_WAKEUP_RATE) { int id = (int)(ms - ipvs->ms); ms->sync_queue_delay = IPVS_SYNC_WAKEUP_RATE; wake_up_process(ipvs->master_tinfo[id].task); } spin_unlock_bh(&ipvs->sync_lock); } /* Get next buffer to send */ static inline struct ip_vs_sync_buff * next_sync_buff(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms) { struct ip_vs_sync_buff *sb; sb = sb_dequeue(ipvs, ms); if (sb) return sb; /* Do not delay entries in buffer for more than 2 seconds */ return get_curr_sync_buff(ipvs, ms, IPVS_SYNC_FLUSH_TIME); } static int sync_thread_master(void *data) { struct ip_vs_sync_thread_data *tinfo = data; struct netns_ipvs *ipvs = tinfo->ipvs; struct ipvs_master_sync_state *ms = &ipvs->ms[tinfo->id]; struct sock *sk = tinfo->sock->sk; struct ip_vs_sync_buff *sb; pr_info("sync thread started: state = MASTER, mcast_ifn = %s, " "syncid = %d, id = %d\n", ipvs->mcfg.mcast_ifn, ipvs->mcfg.syncid, 
tinfo->id); for (;;) { sb = next_sync_buff(ipvs, ms); if (unlikely(kthread_should_stop())) break; if (!sb) { schedule_timeout(IPVS_SYNC_CHECK_PERIOD); continue; } while (ip_vs_send_sync_msg(tinfo->sock, sb->mesg) < 0) { /* (Ab)use interruptible sleep to avoid increasing * the load avg. */ __wait_event_interruptible(*sk_sleep(sk), sock_writeable(sk) || kthread_should_stop()); if (unlikely(kthread_should_stop())) goto done; } ip_vs_sync_buff_release(sb); } done: __set_current_state(TASK_RUNNING); if (sb) ip_vs_sync_buff_release(sb); /* clean up the sync_buff queue */ while ((sb = sb_dequeue(ipvs, ms))) ip_vs_sync_buff_release(sb); __set_current_state(TASK_RUNNING); /* clean up the current sync_buff */ sb = get_curr_sync_buff(ipvs, ms, 0); if (sb) ip_vs_sync_buff_release(sb); return 0; } static int sync_thread_backup(void *data) { struct ip_vs_sync_thread_data *tinfo = data; struct netns_ipvs *ipvs = tinfo->ipvs; struct sock *sk = tinfo->sock->sk; struct udp_sock *up = udp_sk(sk); int len; pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, " "syncid = %d, id = %d\n", ipvs->bcfg.mcast_ifn, ipvs->bcfg.syncid, tinfo->id); while (!kthread_should_stop()) { wait_event_interruptible(*sk_sleep(sk), !skb_queue_empty_lockless(&sk->sk_receive_queue) || !skb_queue_empty_lockless(&up->reader_queue) || kthread_should_stop()); /* do we have data now? */ while (!skb_queue_empty_lockless(&sk->sk_receive_queue) || !skb_queue_empty_lockless(&up->reader_queue)) { len = ip_vs_receive(tinfo->sock, tinfo->buf, ipvs->bcfg.sync_maxlen); if (len <= 0) { if (len != -EAGAIN) pr_err("receiving message error\n"); break; } ip_vs_process_message(ipvs, tinfo->buf, len); } } return 0; } int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c, int state) { struct ip_vs_sync_thread_data *ti = NULL, *tinfo; struct task_struct *task; struct net_device *dev; char *name; int (*threadfn)(void *data); int id = 0, count, hlen; int result = -ENOMEM; u16 mtu, min_mtu; IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %zd bytes\n", sizeof(struct ip_vs_sync_conn_v0)); /* increase the module use count */ if (!ip_vs_use_count_inc()) return -ENOPROTOOPT; /* Do not hold one mutex and then to block on another */ for (;;) { rtnl_lock(); if (mutex_trylock(&ipvs->sync_mutex)) break; rtnl_unlock(); mutex_lock(&ipvs->sync_mutex); if (rtnl_trylock()) break; mutex_unlock(&ipvs->sync_mutex); } if (!ipvs->sync_state) { count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX); ipvs->threads_mask = count - 1; } else count = ipvs->threads_mask + 1; if (c->mcast_af == AF_UNSPEC) { c->mcast_af = AF_INET; c->mcast_group.ip = cpu_to_be32(IP_VS_SYNC_GROUP); } if (!c->mcast_port) c->mcast_port = IP_VS_SYNC_PORT; if (!c->mcast_ttl) c->mcast_ttl = 1; dev = __dev_get_by_name(ipvs->net, c->mcast_ifn); if (!dev) { pr_err("Unknown mcast interface: %s\n", c->mcast_ifn); result = -ENODEV; goto out_early; } hlen = (AF_INET6 == c->mcast_af) ? sizeof(struct ipv6hdr) + sizeof(struct udphdr) : sizeof(struct iphdr) + sizeof(struct udphdr); mtu = (state == IP_VS_STATE_BACKUP) ? clamp(dev->mtu, 1500U, 65535U) : 1500U; min_mtu = (state == IP_VS_STATE_BACKUP) ? 
1024 : 1; if (c->sync_maxlen) c->sync_maxlen = clamp_t(unsigned int, c->sync_maxlen, min_mtu, 65535 - hlen); else c->sync_maxlen = mtu - hlen; if (state == IP_VS_STATE_MASTER) { result = -EEXIST; if (ipvs->ms) goto out_early; ipvs->mcfg = *c; name = "ipvs-m:%d:%d"; threadfn = sync_thread_master; } else if (state == IP_VS_STATE_BACKUP) { result = -EEXIST; if (ipvs->backup_tinfo) goto out_early; ipvs->bcfg = *c; name = "ipvs-b:%d:%d"; threadfn = sync_thread_backup; } else { result = -EINVAL; goto out_early; } if (state == IP_VS_STATE_MASTER) { struct ipvs_master_sync_state *ms; result = -ENOMEM; ipvs->ms = kcalloc(count, sizeof(ipvs->ms[0]), GFP_KERNEL); if (!ipvs->ms) goto out; ms = ipvs->ms; for (id = 0; id < count; id++, ms++) { INIT_LIST_HEAD(&ms->sync_queue); ms->sync_queue_len = 0; ms->sync_queue_delay = 0; INIT_DELAYED_WORK(&ms->master_wakeup_work, master_wakeup_work_handler); ms->ipvs = ipvs; } } result = -ENOMEM; ti = kcalloc(count, sizeof(struct ip_vs_sync_thread_data), GFP_KERNEL); if (!ti) goto out; for (id = 0; id < count; id++) { tinfo = &ti[id]; tinfo->ipvs = ipvs; if (state == IP_VS_STATE_BACKUP) { result = -ENOMEM; tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen, GFP_KERNEL); if (!tinfo->buf) goto out; } tinfo->id = id; if (state == IP_VS_STATE_MASTER) result = make_send_sock(ipvs, id, dev, &tinfo->sock); else result = make_receive_sock(ipvs, id, dev, &tinfo->sock); if (result < 0) goto out; task = kthread_run(threadfn, tinfo, name, ipvs->gen, id); if (IS_ERR(task)) { result = PTR_ERR(task); goto out; } tinfo->task = task; } /* mark as active */ if (state == IP_VS_STATE_MASTER) ipvs->master_tinfo = ti; else ipvs->backup_tinfo = ti; spin_lock_bh(&ipvs->sync_buff_lock); ipvs->sync_state |= state; spin_unlock_bh(&ipvs->sync_buff_lock); mutex_unlock(&ipvs->sync_mutex); rtnl_unlock(); return 0; out: /* We do not need RTNL lock anymore, release it here so that * sock_release below can use rtnl_lock to leave the mcast group. */ rtnl_unlock(); id = min(id, count - 1); if (ti) { for (tinfo = ti + id; tinfo >= ti; tinfo--) { if (tinfo->task) kthread_stop(tinfo->task); } } if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { kfree(ipvs->ms); ipvs->ms = NULL; } mutex_unlock(&ipvs->sync_mutex); /* No more mutexes, release socks */ if (ti) { for (tinfo = ti + id; tinfo >= ti; tinfo--) { if (tinfo->sock) sock_release(tinfo->sock); kfree(tinfo->buf); } kfree(ti); } /* decrease the module use count */ ip_vs_use_count_dec(); return result; out_early: mutex_unlock(&ipvs->sync_mutex); rtnl_unlock(); /* decrease the module use count */ ip_vs_use_count_dec(); return result; } int stop_sync_thread(struct netns_ipvs *ipvs, int state) { struct ip_vs_sync_thread_data *ti, *tinfo; int id; int retc = -EINVAL; IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); mutex_lock(&ipvs->sync_mutex); if (state == IP_VS_STATE_MASTER) { retc = -ESRCH; if (!ipvs->ms) goto err; ti = ipvs->master_tinfo; /* * The lock synchronizes with sb_queue_tail(), so that we don't * add sync buffers to the queue, when we are already in * progress of stopping the master sync daemon. 
*/ spin_lock_bh(&ipvs->sync_buff_lock); spin_lock(&ipvs->sync_lock); ipvs->sync_state &= ~IP_VS_STATE_MASTER; spin_unlock(&ipvs->sync_lock); spin_unlock_bh(&ipvs->sync_buff_lock); retc = 0; for (id = ipvs->threads_mask; id >= 0; id--) { struct ipvs_master_sync_state *ms = &ipvs->ms[id]; int ret; tinfo = &ti[id]; pr_info("stopping master sync thread %d ...\n", task_pid_nr(tinfo->task)); cancel_delayed_work_sync(&ms->master_wakeup_work); ret = kthread_stop(tinfo->task); if (retc >= 0) retc = ret; } kfree(ipvs->ms); ipvs->ms = NULL; ipvs->master_tinfo = NULL; } else if (state == IP_VS_STATE_BACKUP) { retc = -ESRCH; if (!ipvs->backup_tinfo) goto err; ti = ipvs->backup_tinfo; ipvs->sync_state &= ~IP_VS_STATE_BACKUP; retc = 0; for (id = ipvs->threads_mask; id >= 0; id--) { int ret; tinfo = &ti[id]; pr_info("stopping backup sync thread %d ...\n", task_pid_nr(tinfo->task)); ret = kthread_stop(tinfo->task); if (retc >= 0) retc = ret; } ipvs->backup_tinfo = NULL; } else { goto err; } id = ipvs->threads_mask; mutex_unlock(&ipvs->sync_mutex); /* No more mutexes, release socks */ for (tinfo = ti + id; tinfo >= ti; tinfo--) { if (tinfo->sock) sock_release(tinfo->sock); kfree(tinfo->buf); } kfree(ti); /* decrease the module use count */ ip_vs_use_count_dec(); return retc; err: mutex_unlock(&ipvs->sync_mutex); return retc; } /* * Initialize data struct for each netns */ int __net_init ip_vs_sync_net_init(struct netns_ipvs *ipvs) { __mutex_init(&ipvs->sync_mutex, "ipvs->sync_mutex", &__ipvs_sync_key); spin_lock_init(&ipvs->sync_lock); spin_lock_init(&ipvs->sync_buff_lock); return 0; } void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs) { int retc; retc = stop_sync_thread(ipvs, IP_VS_STATE_MASTER); if (retc && retc != -ESRCH) pr_err("Failed to stop Master Daemon\n"); retc = stop_sync_thread(ipvs, IP_VS_STATE_BACKUP); if (retc && retc != -ESRCH) pr_err("Failed to stop Backup Daemon\n"); } |
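
/*
 * Illustrative sketch (not part of the original file above): how a receiver
 * can split the Version-1 per-connection ver_size field, which packs the
 * entry version in the upper 4 bits (SVER_SHIFT) and the entry size in the
 * lower 12 bits (SVER_MASK), exactly as ip_vs_process_message() does when
 * validating each sync_conn. The helper name is made up for illustration.
 */
static inline void ip_vs_sync_conn_ver_size_unpack(__be16 ver_size,
						   unsigned int *ver,
						   unsigned int *size)
{
	unsigned int v = ntohs(ver_size);

	*ver  = v >> SVER_SHIFT;	/* must be 0, or the buffer is dropped */
	*size = v & SVER_MASK;		/* length of this sync_conn entry */
}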
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/slab.h>

#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"

/* Use a wide upper bound for the maximum contexts. */
#define VMCI_MAX_CONTEXTS 2000

/*
 * List of current VMCI contexts.  Contexts can be added by
 * vmci_ctx_create() and removed via vmci_ctx_destroy().
 * These, along with context lookup, are protected by the
 * list structure's lock.
 */
static struct {
	struct list_head head;
	spinlock_t lock; /* Spinlock for context list operations */
} ctx_list = {
	.head = LIST_HEAD_INIT(ctx_list.head),
	.lock = __SPIN_LOCK_UNLOCKED(ctx_list.lock),
};

/* Used by contexts that did not set up notify flag pointers */
static bool ctx_dummy_notify;

static void ctx_signal_notify(struct vmci_ctx *context)
{
	*context->notify = true;
}

static void ctx_clear_notify(struct vmci_ctx *context)
{
	*context->notify = false;
}

/*
 * If nothing requires the attention of the guest, clears both
 * notify flag and call.
 */
static void ctx_clear_notify_call(struct vmci_ctx *context)
{
	if (context->pending_datagrams == 0 &&
	    vmci_handle_arr_get_size(context->pending_doorbell_array) == 0)
		ctx_clear_notify(context);
}

/*
 * Sets the context's notify flag iff datagrams are pending for this
 * context.  Called from vmci_setup_notify().
 */
void vmci_ctx_check_signal_notify(struct vmci_ctx *context)
{
	spin_lock(&context->lock);
	if (context->pending_datagrams)
		ctx_signal_notify(context);
	spin_unlock(&context->lock);
}

/*
 * Allocates and initializes a VMCI context.
 */
struct vmci_ctx *vmci_ctx_create(u32 cid, u32 priv_flags,
				 uintptr_t event_hnd,
				 int user_version,
				 const struct cred *cred)
{
	struct vmci_ctx *context;
	int error;

	if (cid == VMCI_INVALID_ID) {
		pr_devel("Invalid context ID for VMCI context\n");
		error = -EINVAL;
		goto err_out;
	}

	if (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS) {
		pr_devel("Invalid flag (flags=0x%x) for VMCI context\n",
			 priv_flags);
		error = -EINVAL;
		goto err_out;
	}

	if (user_version == 0) {
		pr_devel("Invalid user_version %d\n", user_version);
		error = -EINVAL;
		goto err_out;
	}

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context) {
		pr_warn("Failed to allocate memory for VMCI context\n");
		error = -ENOMEM;
		goto err_out;
	}

	kref_init(&context->kref);
	spin_lock_init(&context->lock);
	INIT_LIST_HEAD(&context->list_item);
	INIT_LIST_HEAD(&context->datagram_queue);
	INIT_LIST_HEAD(&context->notifier_list);

	/* Initialize host-specific VMCI context. */
	init_waitqueue_head(&context->host_context.wait_queue);

	context->queue_pair_array =
		vmci_handle_arr_create(0, VMCI_MAX_GUEST_QP_COUNT);
	if (!context->queue_pair_array) {
		error = -ENOMEM;
		goto err_free_ctx;
	}

	context->doorbell_array =
		vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT);
	if (!context->doorbell_array) {
		error = -ENOMEM;
		goto err_free_qp_array;
	}

	context->pending_doorbell_array =
		vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT);
	if (!context->pending_doorbell_array) {
		error = -ENOMEM;
		goto err_free_db_array;
	}

	context->user_version = user_version;

	context->priv_flags = priv_flags;

	if (cred)
		context->cred = get_cred(cred);

	context->notify = &ctx_dummy_notify;
	context->notify_page = NULL;

	/*
	 * If we collide with an existing context we generate a new one
	 * and use it instead.  The VMX will determine if regeneration
	 * is okay.  Since there aren't 4B - 16 VMs running on a given
	 * host, the loop below will terminate.
	 */
	spin_lock(&ctx_list.lock);
	while (vmci_ctx_exists(cid)) {
		/* We reserve the lowest 16 ids for fixed contexts. */
		cid = max(cid, VMCI_RESERVED_CID_LIMIT - 1) + 1;
		if (cid == VMCI_INVALID_ID)
			cid = VMCI_RESERVED_CID_LIMIT;
	}
	context->cid = cid;

	list_add_tail_rcu(&context->list_item, &ctx_list.head);
	spin_unlock(&ctx_list.lock);

	return context;

 err_free_db_array:
	vmci_handle_arr_destroy(context->doorbell_array);
 err_free_qp_array:
	vmci_handle_arr_destroy(context->queue_pair_array);
 err_free_ctx:
	kfree(context);
 err_out:
	return ERR_PTR(error);
}

/*
 * Destroy VMCI context.
 */
void vmci_ctx_destroy(struct vmci_ctx *context)
{
	spin_lock(&ctx_list.lock);
	list_del_rcu(&context->list_item);
	spin_unlock(&ctx_list.lock);
	synchronize_rcu();

	vmci_ctx_put(context);
}

/*
 * Fire notification for all contexts interested in given cid.
 */
static int ctx_fire_notification(u32 context_id, u32 priv_flags)
{
	u32 i, array_size;
	struct vmci_ctx *sub_ctx;
	struct vmci_handle_arr *subscriber_array;
	struct vmci_handle context_handle =
		vmci_make_handle(context_id, VMCI_EVENT_HANDLER);

	/*
	 * We create an array to hold the subscribers we find when
	 * scanning through all contexts.
	 */
	subscriber_array = vmci_handle_arr_create(0, VMCI_MAX_CONTEXTS);
	if (subscriber_array == NULL)
		return VMCI_ERROR_NO_MEM;

	/*
	 * Scan all contexts to find who is interested in being
	 * notified about given contextID.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(sub_ctx, &ctx_list.head, list_item) {
		struct vmci_handle_list *node;

		/*
		 * We only deliver notifications of the removal of
		 * contexts if the two contexts are allowed to
		 * interact.
*/ if (vmci_deny_interaction(priv_flags, sub_ctx->priv_flags)) continue; list_for_each_entry_rcu(node, &sub_ctx->notifier_list, node) { if (!vmci_handle_is_equal(node->handle, context_handle)) continue; vmci_handle_arr_append_entry(&subscriber_array, vmci_make_handle(sub_ctx->cid, VMCI_EVENT_HANDLER)); } } rcu_read_unlock(); /* Fire event to all subscribers. */ array_size = vmci_handle_arr_get_size(subscriber_array); for (i = 0; i < array_size; i++) { int result; struct vmci_event_ctx ev; ev.msg.hdr.dst = vmci_handle_arr_get_entry(subscriber_array, i); ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, VMCI_CONTEXT_RESOURCE_ID); ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr); ev.msg.event_data.event = VMCI_EVENT_CTX_REMOVED; ev.payload.context_id = context_id; result = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID, &ev.msg.hdr, false); if (result < VMCI_SUCCESS) { pr_devel("Failed to enqueue event datagram (type=%d) for context (ID=0x%x)\n", ev.msg.event_data.event, ev.msg.hdr.dst.context); /* We continue to enqueue on next subscriber. */ } } vmci_handle_arr_destroy(subscriber_array); return VMCI_SUCCESS; } /* * Returns the current number of pending datagrams. The call may * also serve as a synchronization point for the datagram queue, * as no enqueue operations can occur concurrently. */ int vmci_ctx_pending_datagrams(u32 cid, u32 *pending) { struct vmci_ctx *context; context = vmci_ctx_get(cid); if (context == NULL) return VMCI_ERROR_INVALID_ARGS; spin_lock(&context->lock); if (pending) *pending = context->pending_datagrams; spin_unlock(&context->lock); vmci_ctx_put(context); return VMCI_SUCCESS; } /* * Queues a VMCI datagram for the appropriate target VM context. */ int vmci_ctx_enqueue_datagram(u32 cid, struct vmci_datagram *dg) { struct vmci_datagram_queue_entry *dq_entry; struct vmci_ctx *context; struct vmci_handle dg_src; size_t vmci_dg_size; vmci_dg_size = VMCI_DG_SIZE(dg); if (vmci_dg_size > VMCI_MAX_DG_SIZE) { pr_devel("Datagram too large (bytes=%zu)\n", vmci_dg_size); return VMCI_ERROR_INVALID_ARGS; } /* Get the target VM's VMCI context. */ context = vmci_ctx_get(cid); if (!context) { pr_devel("Invalid context (ID=0x%x)\n", cid); return VMCI_ERROR_INVALID_ARGS; } /* Allocate guest call entry and add it to the target VM's queue. */ dq_entry = kmalloc(sizeof(*dq_entry), GFP_KERNEL); if (dq_entry == NULL) { pr_warn("Failed to allocate memory for datagram\n"); vmci_ctx_put(context); return VMCI_ERROR_NO_MEM; } dq_entry->dg = dg; dq_entry->dg_size = vmci_dg_size; dg_src = dg->src; INIT_LIST_HEAD(&dq_entry->list_item); spin_lock(&context->lock); /* * We put a higher limit on datagrams from the hypervisor. If * the pending datagram is not from hypervisor, then we check * if enqueueing it would exceed the * VMCI_MAX_DATAGRAM_QUEUE_SIZE limit on the destination. If * the pending datagram is from hypervisor, we allow it to be * queued at the destination side provided we don't reach the * VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE limit. 
*/ if (context->datagram_queue_size + vmci_dg_size >= VMCI_MAX_DATAGRAM_QUEUE_SIZE && (!vmci_handle_is_equal(dg_src, vmci_make_handle (VMCI_HYPERVISOR_CONTEXT_ID, VMCI_CONTEXT_RESOURCE_ID)) || context->datagram_queue_size + vmci_dg_size >= VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE)) { spin_unlock(&context->lock); vmci_ctx_put(context); kfree(dq_entry); pr_devel("Context (ID=0x%x) receive queue is full\n", cid); return VMCI_ERROR_NO_RESOURCES; } list_add(&dq_entry->list_item, &context->datagram_queue); context->pending_datagrams++; context->datagram_queue_size += vmci_dg_size; ctx_signal_notify(context); wake_up(&context->host_context.wait_queue); spin_unlock(&context->lock); vmci_ctx_put(context); return vmci_dg_size; } /* * Verifies whether a context with the specified context ID exists. * FIXME: utility is dubious as no decisions can be reliably made * using this data as context can appear and disappear at any time. */ bool vmci_ctx_exists(u32 cid) { struct vmci_ctx *context; bool exists = false; rcu_read_lock(); list_for_each_entry_rcu(context, &ctx_list.head, list_item) { if (context->cid == cid) { exists = true; break; } } rcu_read_unlock(); return exists; } /* * Retrieves VMCI context corresponding to the given cid. */ struct vmci_ctx *vmci_ctx_get(u32 cid) { struct vmci_ctx *c, *context = NULL; if (cid == VMCI_INVALID_ID) return NULL; rcu_read_lock(); list_for_each_entry_rcu(c, &ctx_list.head, list_item) { if (c->cid == cid) { /* * The context owner drops its own reference to the * context only after removing it from the list and * waiting for RCU grace period to expire. This * means that we are not about to increase the * reference count of something that is in the * process of being destroyed. */ context = c; kref_get(&context->kref); break; } } rcu_read_unlock(); return context; } /* * Deallocates all parts of a context data structure. This * function doesn't lock the context, because it assumes that * the caller was holding the last reference to context. */ static void ctx_free_ctx(struct kref *kref) { struct vmci_ctx *context = container_of(kref, struct vmci_ctx, kref); struct vmci_datagram_queue_entry *dq_entry, *dq_entry_tmp; struct vmci_handle temp_handle; struct vmci_handle_list *notifier, *tmp; /* * Fire event to all contexts interested in knowing this * context is dying. */ ctx_fire_notification(context->cid, context->priv_flags); /* * Cleanup all queue pair resources attached to context. If * the VM dies without cleaning up, this code will make sure * that no resources are leaked. */ temp_handle = vmci_handle_arr_get_entry(context->queue_pair_array, 0); while (!vmci_handle_is_equal(temp_handle, VMCI_INVALID_HANDLE)) { if (vmci_qp_broker_detach(temp_handle, context) < VMCI_SUCCESS) { /* * When vmci_qp_broker_detach() succeeds it * removes the handle from the array. If * detach fails, we must remove the handle * ourselves. */ vmci_handle_arr_remove_entry(context->queue_pair_array, temp_handle); } temp_handle = vmci_handle_arr_get_entry(context->queue_pair_array, 0); } /* * It is fine to destroy this without locking the callQueue, as * this is the only thread having a reference to the context. 
 */
	list_for_each_entry_safe(dq_entry, dq_entry_tmp,
				 &context->datagram_queue, list_item) {
		WARN_ON(dq_entry->dg_size != VMCI_DG_SIZE(dq_entry->dg));
		list_del(&dq_entry->list_item);
		kfree(dq_entry->dg);
		kfree(dq_entry);
	}

	list_for_each_entry_safe(notifier, tmp,
				 &context->notifier_list, node) {
		list_del(&notifier->node);
		kfree(notifier);
	}

	vmci_handle_arr_destroy(context->queue_pair_array);
	vmci_handle_arr_destroy(context->doorbell_array);
	vmci_handle_arr_destroy(context->pending_doorbell_array);
	vmci_ctx_unset_notify(context);
	if (context->cred)
		put_cred(context->cred);
	kfree(context);
}

/*
 * Drops reference to VMCI context.  If this is the last reference to
 * the context it will be deallocated.  A context is created with
 * a reference count of one, and on destroy, it is removed from
 * the context list before its reference count is decremented.  Thus,
 * if we reach zero, we are sure that nobody else is about to increment
 * it (they need the entry in the context list for that), and so there
 * is no need for locking.
 */
void vmci_ctx_put(struct vmci_ctx *context)
{
	kref_put(&context->kref, ctx_free_ctx);
}

/*
 * Dequeues the next datagram and returns it to caller.
 * The caller passes in a pointer to the max size datagram
 * it can handle and the datagram is only unqueued if the
 * size is less than max_size.  If it is larger, max_size is set to
 * the size of the datagram to give the caller a chance to
 * set up a larger buffer for the guestcall.
 */
int vmci_ctx_dequeue_datagram(struct vmci_ctx *context,
			      size_t *max_size,
			      struct vmci_datagram **dg)
{
	struct vmci_datagram_queue_entry *dq_entry;
	struct list_head *list_item;
	int rv;

	/* Dequeue the next datagram entry. */
	spin_lock(&context->lock);
	if (context->pending_datagrams == 0) {
		ctx_clear_notify_call(context);
		spin_unlock(&context->lock);
		pr_devel("No datagrams pending\n");
		return VMCI_ERROR_NO_MORE_DATAGRAMS;
	}

	list_item = context->datagram_queue.next;

	dq_entry =
	    list_entry(list_item, struct vmci_datagram_queue_entry, list_item);

	/* Check size of caller's buffer. */
	if (*max_size < dq_entry->dg_size) {
		*max_size = dq_entry->dg_size;
		spin_unlock(&context->lock);
		pr_devel("Caller's buffer should be at least (size=%u bytes)\n",
			 (u32) *max_size);
		return VMCI_ERROR_NO_MEM;
	}

	list_del(list_item);
	context->pending_datagrams--;
	context->datagram_queue_size -= dq_entry->dg_size;
	if (context->pending_datagrams == 0) {
		ctx_clear_notify_call(context);
		rv = VMCI_SUCCESS;
	} else {
		/*
		 * Return the size of the next datagram.
		 */
		struct vmci_datagram_queue_entry *next_entry;

		list_item = context->datagram_queue.next;
		next_entry =
		    list_entry(list_item, struct vmci_datagram_queue_entry,
			       list_item);

		/*
		 * The following size_t -> int truncation is fine as
		 * the maximum size of a (routable) datagram is 68KB.
		 */
		rv = (int)next_entry->dg_size;
	}
	spin_unlock(&context->lock);

	/* Caller must free datagram. */
	*dg = dq_entry->dg;
	dq_entry->dg = NULL;
	kfree(dq_entry);

	return rv;
}

/*
 * Reverts actions set up by vmci_setup_notify().  Unmaps and unlocks the
 * page mapped/locked by vmci_setup_notify().
 */
void vmci_ctx_unset_notify(struct vmci_ctx *context)
{
	struct page *notify_page;

	spin_lock(&context->lock);

	notify_page = context->notify_page;
	context->notify = &ctx_dummy_notify;
	context->notify_page = NULL;

	spin_unlock(&context->lock);

	if (notify_page) {
		kunmap(notify_page);
		put_page(notify_page);
	}
}

/*
 * Add remote_cid to the list of contexts the current context wants
 * notifications from/about.
 */
int vmci_ctx_add_notification(u32 context_id, u32 remote_cid)
{
	struct vmci_ctx *context;
	struct vmci_handle_list *notifier, *n;
	int result;
	bool exists = false;

	context = vmci_ctx_get(context_id);
	if (!context)
		return VMCI_ERROR_NOT_FOUND;

	if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(remote_cid)) {
		pr_devel("Context removed notifications for other VMs not supported (src=0x%x, remote=0x%x)\n",
			 context_id, remote_cid);
		result = VMCI_ERROR_DST_UNREACHABLE;
		goto out;
	}

	if (context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) {
		result = VMCI_ERROR_NO_ACCESS;
		goto out;
	}

	notifier = kmalloc(sizeof(struct vmci_handle_list), GFP_KERNEL);
	if (!notifier) {
		result = VMCI_ERROR_NO_MEM;
		goto out;
	}

	INIT_LIST_HEAD(&notifier->node);
	notifier->handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER);

	spin_lock(&context->lock);

	if (context->n_notifiers < VMCI_MAX_CONTEXTS) {
		list_for_each_entry(n, &context->notifier_list, node) {
			if (vmci_handle_is_equal(n->handle, notifier->handle)) {
				exists = true;
				break;
			}
		}

		if (exists) {
			kfree(notifier);
			result = VMCI_ERROR_ALREADY_EXISTS;
		} else {
			list_add_tail_rcu(&notifier->node,
					  &context->notifier_list);
			context->n_notifiers++;
			result = VMCI_SUCCESS;
		}
	} else {
		kfree(notifier);
		result = VMCI_ERROR_NO_MEM;
	}

	spin_unlock(&context->lock);

 out:
	vmci_ctx_put(context);
	return result;
}

/*
 * Remove remote_cid from current context's list of contexts it is
 * interested in getting notifications from/about.
 */
int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid)
{
	struct vmci_ctx *context;
	struct vmci_handle_list *notifier = NULL, *iter, *tmp;
	struct vmci_handle handle;

	context = vmci_ctx_get(context_id);
	if (!context)
		return VMCI_ERROR_NOT_FOUND;

	handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER);

	spin_lock(&context->lock);
	list_for_each_entry_safe(iter, tmp,
				 &context->notifier_list, node) {
		if (vmci_handle_is_equal(iter->handle, handle)) {
			list_del_rcu(&iter->node);
			context->n_notifiers--;
			notifier = iter;
			break;
		}
	}
	spin_unlock(&context->lock);

	if (notifier)
		kvfree_rcu_mightsleep(notifier);

	vmci_ctx_put(context);

	return notifier ? VMCI_SUCCESS : VMCI_ERROR_NOT_FOUND;
}

static int vmci_ctx_get_chkpt_notifiers(struct vmci_ctx *context,
					u32 *buf_size, void **pbuf)
{
	u32 *notifiers;
	size_t data_size;
	struct vmci_handle_list *entry;
	int i = 0;

	if (context->n_notifiers == 0) {
		*buf_size = 0;
		*pbuf = NULL;
		return VMCI_SUCCESS;
	}

	data_size = context->n_notifiers * sizeof(*notifiers);
	if (*buf_size < data_size) {
		*buf_size = data_size;
		return VMCI_ERROR_MORE_DATA;
	}

	notifiers = kmalloc(data_size, GFP_ATOMIC); /* FIXME: want GFP_KERNEL */
	if (!notifiers)
		return VMCI_ERROR_NO_MEM;

	list_for_each_entry(entry, &context->notifier_list, node)
		notifiers[i++] = entry->handle.context;

	*buf_size = data_size;
	*pbuf = notifiers;
	return VMCI_SUCCESS;
}

static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context,
					u32 *buf_size, void **pbuf)
{
	struct dbell_cpt_state *dbells;
	u32 i, n_doorbells;

	n_doorbells = vmci_handle_arr_get_size(context->doorbell_array);
	if (n_doorbells > 0) {
		size_t data_size = n_doorbells * sizeof(*dbells);

		if (*buf_size < data_size) {
			*buf_size = data_size;
			return VMCI_ERROR_MORE_DATA;
		}

		dbells = kzalloc(data_size, GFP_ATOMIC);
		if (!dbells)
			return VMCI_ERROR_NO_MEM;

		for (i = 0; i < n_doorbells; i++)
			dbells[i].handle = vmci_handle_arr_get_entry(
						context->doorbell_array, i);

		*buf_size = data_size;
		*pbuf = dbells;
	} else {
		*buf_size = 0;
		*pbuf = NULL;
	}

	return VMCI_SUCCESS;
}

/*
 * Get current context's checkpoint state of given type.
*/ int vmci_ctx_get_chkpt_state(u32 context_id, u32 cpt_type, u32 *buf_size, void **pbuf) { struct vmci_ctx *context; int result; context = vmci_ctx_get(context_id); if (!context) return VMCI_ERROR_NOT_FOUND; spin_lock(&context->lock); switch (cpt_type) { case VMCI_NOTIFICATION_CPT_STATE: result = vmci_ctx_get_chkpt_notifiers(context, buf_size, pbuf); break; case VMCI_WELLKNOWN_CPT_STATE: /* * For compatibility with VMX'en with VM to VM communication, we * always return zero wellknown handles. */ *buf_size = 0; *pbuf = NULL; result = VMCI_SUCCESS; break; case VMCI_DOORBELL_CPT_STATE: result = vmci_ctx_get_chkpt_doorbells(context, buf_size, pbuf); break; default: pr_devel("Invalid cpt state (type=%d)\n", cpt_type); result = VMCI_ERROR_INVALID_ARGS; break; } spin_unlock(&context->lock); vmci_ctx_put(context); return result; } /* * Set current context's checkpoint state of given type. */ int vmci_ctx_set_chkpt_state(u32 context_id, u32 cpt_type, u32 buf_size, void *cpt_buf) { u32 i; u32 current_id; int result = VMCI_SUCCESS; u32 num_ids = buf_size / sizeof(u32); if (cpt_type == VMCI_WELLKNOWN_CPT_STATE && num_ids > 0) { /* * We would end up here if VMX with VM to VM communication * attempts to restore a checkpoint with wellknown handles. */ pr_warn("Attempt to restore checkpoint with obsolete wellknown handles\n"); return VMCI_ERROR_OBSOLETE; } if (cpt_type != VMCI_NOTIFICATION_CPT_STATE) { pr_devel("Invalid cpt state (type=%d)\n", cpt_type); return VMCI_ERROR_INVALID_ARGS; } for (i = 0; i < num_ids && result == VMCI_SUCCESS; i++) { current_id = ((u32 *)cpt_buf)[i]; result = vmci_ctx_add_notification(context_id, current_id); if (result != VMCI_SUCCESS) break; } if (result != VMCI_SUCCESS) pr_devel("Failed to set cpt state (type=%d) (error=%d)\n", cpt_type, result); return result; } /* * Retrieves the specified context's pending notifications in the * form of a handle array. The handle arrays returned are the * actual data - not a copy and should not be modified by the * caller. They must be released using * vmci_ctx_rcv_notifications_release. */ int vmci_ctx_rcv_notifications_get(u32 context_id, struct vmci_handle_arr **db_handle_array, struct vmci_handle_arr **qp_handle_array) { struct vmci_ctx *context; int result = VMCI_SUCCESS; context = vmci_ctx_get(context_id); if (context == NULL) return VMCI_ERROR_NOT_FOUND; spin_lock(&context->lock); *db_handle_array = context->pending_doorbell_array; context->pending_doorbell_array = vmci_handle_arr_create(0, VMCI_MAX_GUEST_DOORBELL_COUNT); if (!context->pending_doorbell_array) { context->pending_doorbell_array = *db_handle_array; *db_handle_array = NULL; result = VMCI_ERROR_NO_MEM; } *qp_handle_array = NULL; spin_unlock(&context->lock); vmci_ctx_put(context); return result; } /* * Releases handle arrays with pending notifications previously * retrieved using vmci_ctx_rcv_notifications_get. If the * notifications were not successfully handed over to the guest, * success must be false. */ void vmci_ctx_rcv_notifications_release(u32 context_id, struct vmci_handle_arr *db_handle_array, struct vmci_handle_arr *qp_handle_array, bool success) { struct vmci_ctx *context = vmci_ctx_get(context_id); spin_lock(&context->lock); if (!success) { struct vmci_handle handle; /* * New notifications may have been added while we were not * holding the context lock, so we transfer any new pending * doorbell notifications to the old array, and reinstate the * old array. 
*/ handle = vmci_handle_arr_remove_tail( context->pending_doorbell_array); while (!vmci_handle_is_invalid(handle)) { if (!vmci_handle_arr_has_entry(db_handle_array, handle)) { vmci_handle_arr_append_entry( &db_handle_array, handle); } handle = vmci_handle_arr_remove_tail( context->pending_doorbell_array); } vmci_handle_arr_destroy(context->pending_doorbell_array); context->pending_doorbell_array = db_handle_array; db_handle_array = NULL; } else { ctx_clear_notify_call(context); } spin_unlock(&context->lock); vmci_ctx_put(context); if (db_handle_array) vmci_handle_arr_destroy(db_handle_array); if (qp_handle_array) vmci_handle_arr_destroy(qp_handle_array); } /* * Registers that a new doorbell handle has been allocated by the * context. Only doorbell handles registered can be notified. */ int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle) { struct vmci_ctx *context; int result; if (context_id == VMCI_INVALID_ID || vmci_handle_is_invalid(handle)) return VMCI_ERROR_INVALID_ARGS; context = vmci_ctx_get(context_id); if (context == NULL) return VMCI_ERROR_NOT_FOUND; spin_lock(&context->lock); if (!vmci_handle_arr_has_entry(context->doorbell_array, handle)) result = vmci_handle_arr_append_entry(&context->doorbell_array, handle); else result = VMCI_ERROR_DUPLICATE_ENTRY; spin_unlock(&context->lock); vmci_ctx_put(context); return result; } /* * Unregisters a doorbell handle that was previously registered * with vmci_ctx_dbell_create. */ int vmci_ctx_dbell_destroy(u32 context_id, struct vmci_handle handle) { struct vmci_ctx *context; struct vmci_handle removed_handle; if (context_id == VMCI_INVALID_ID || vmci_handle_is_invalid(handle)) return VMCI_ERROR_INVALID_ARGS; context = vmci_ctx_get(context_id); if (context == NULL) return VMCI_ERROR_NOT_FOUND; spin_lock(&context->lock); removed_handle = vmci_handle_arr_remove_entry(context->doorbell_array, handle); vmci_handle_arr_remove_entry(context->pending_doorbell_array, handle); spin_unlock(&context->lock); vmci_ctx_put(context); return vmci_handle_is_invalid(removed_handle) ? VMCI_ERROR_NOT_FOUND : VMCI_SUCCESS; } /* * Unregisters all doorbell handles that were previously * registered with vmci_ctx_dbell_create. */ int vmci_ctx_dbell_destroy_all(u32 context_id) { struct vmci_ctx *context; struct vmci_handle handle; if (context_id == VMCI_INVALID_ID) return VMCI_ERROR_INVALID_ARGS; context = vmci_ctx_get(context_id); if (context == NULL) return VMCI_ERROR_NOT_FOUND; spin_lock(&context->lock); do { struct vmci_handle_arr *arr = context->doorbell_array; handle = vmci_handle_arr_remove_tail(arr); } while (!vmci_handle_is_invalid(handle)); do { struct vmci_handle_arr *arr = context->pending_doorbell_array; handle = vmci_handle_arr_remove_tail(arr); } while (!vmci_handle_is_invalid(handle)); spin_unlock(&context->lock); vmci_ctx_put(context); return VMCI_SUCCESS; } /* * Registers a notification of a doorbell handle initiated by the * specified source context. The notification of doorbells are * subject to the same isolation rules as datagram delivery. To * allow host side senders of notifications a finer granularity * of sender rights than those assigned to the sending context * itself, the host context is required to specify a different * set of privilege flags that will override the privileges of * the source context. 
*/ int vmci_ctx_notify_dbell(u32 src_cid, struct vmci_handle handle, u32 src_priv_flags) { struct vmci_ctx *dst_context; int result; if (vmci_handle_is_invalid(handle)) return VMCI_ERROR_INVALID_ARGS; /* Get the target VM's VMCI context. */ dst_context = vmci_ctx_get(handle.context); if (!dst_context) { pr_devel("Invalid context (ID=0x%x)\n", handle.context); return VMCI_ERROR_NOT_FOUND; } if (src_cid != handle.context) { u32 dst_priv_flags; if (VMCI_CONTEXT_IS_VM(src_cid) && VMCI_CONTEXT_IS_VM(handle.context)) { pr_devel("Doorbell notification from VM to VM not supported (src=0x%x, dst=0x%x)\n", src_cid, handle.context); result = VMCI_ERROR_DST_UNREACHABLE; goto out; } result = vmci_dbell_get_priv_flags(handle, &dst_priv_flags); if (result < VMCI_SUCCESS) { pr_warn("Failed to get privilege flags for destination (handle=0x%x:0x%x)\n", handle.context, handle.resource); goto out; } if (src_cid != VMCI_HOST_CONTEXT_ID || src_priv_flags == VMCI_NO_PRIVILEGE_FLAGS) { src_priv_flags = vmci_context_get_priv_flags(src_cid); } if (vmci_deny_interaction(src_priv_flags, dst_priv_flags)) { result = VMCI_ERROR_NO_ACCESS; goto out; } } if (handle.context == VMCI_HOST_CONTEXT_ID) { result = vmci_dbell_host_context_notify(src_cid, handle); } else { spin_lock(&dst_context->lock); if (!vmci_handle_arr_has_entry(dst_context->doorbell_array, handle)) { result = VMCI_ERROR_NOT_FOUND; } else { if (!vmci_handle_arr_has_entry( dst_context->pending_doorbell_array, handle)) { result = vmci_handle_arr_append_entry( &dst_context->pending_doorbell_array, handle); if (result == VMCI_SUCCESS) { ctx_signal_notify(dst_context); wake_up(&dst_context->host_context.wait_queue); } } else { result = VMCI_SUCCESS; } } spin_unlock(&dst_context->lock); } out: vmci_ctx_put(dst_context); return result; } bool vmci_ctx_supports_host_qp(struct vmci_ctx *context) { return context && context->user_version >= VMCI_VERSION_HOSTQP; } /* * Registers that a new queue pair handle has been allocated by * the context. */ int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle) { int result; if (context == NULL || vmci_handle_is_invalid(handle)) return VMCI_ERROR_INVALID_ARGS; if (!vmci_handle_arr_has_entry(context->queue_pair_array, handle)) result = vmci_handle_arr_append_entry( &context->queue_pair_array, handle); else result = VMCI_ERROR_DUPLICATE_ENTRY; return result; } /* * Unregisters a queue pair handle that was previously registered * with vmci_ctx_qp_create. */ int vmci_ctx_qp_destroy(struct vmci_ctx *context, struct vmci_handle handle) { struct vmci_handle hndl; if (context == NULL || vmci_handle_is_invalid(handle)) return VMCI_ERROR_INVALID_ARGS; hndl = vmci_handle_arr_remove_entry(context->queue_pair_array, handle); return vmci_handle_is_invalid(hndl) ? VMCI_ERROR_NOT_FOUND : VMCI_SUCCESS; } /* * Determines whether a given queue pair handle is registered * with the given context. */ bool vmci_ctx_qp_exists(struct vmci_ctx *context, struct vmci_handle handle) { if (context == NULL || vmci_handle_is_invalid(handle)) return false; return vmci_handle_arr_has_entry(context->queue_pair_array, handle); } /* * vmci_context_get_priv_flags() - Retrieve privilege flags. * @context_id: The context ID of the VMCI context. * * Retrieves privilege flags of the given VMCI context ID. 
 */
u32 vmci_context_get_priv_flags(u32 context_id)
{
	if (vmci_host_code_active()) {
		u32 flags;
		struct vmci_ctx *context;

		context = vmci_ctx_get(context_id);
		if (!context)
			return VMCI_LEAST_PRIVILEGE_FLAGS;

		flags = context->priv_flags;
		vmci_ctx_put(context);
		return flags;
	}
	return VMCI_NO_PRIVILEGE_FLAGS;
}
EXPORT_SYMBOL_GPL(vmci_context_get_priv_flags);

/*
 * vmci_is_context_owner() - Determines if user is the context owner
 * @context_id: The context ID of the VMCI context.
 * @uid: The host user id (real kernel value).
 *
 * Determines whether a given UID is the owner of given VMCI context.
 */
bool vmci_is_context_owner(u32 context_id, kuid_t uid)
{
	bool is_owner = false;

	if (vmci_host_code_active()) {
		struct vmci_ctx *context = vmci_ctx_get(context_id);

		if (context) {
			if (context->cred)
				is_owner = uid_eq(context->cred->uid, uid);
			vmci_ctx_put(context);
		}
	}

	return is_owner;
}
EXPORT_SYMBOL_GPL(vmci_is_context_owner);
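/*
 * Illustrative sketch only (not part of the original driver): one way a
 * host-side consumer could drain a context's datagram queue using the
 * helpers above.  The function name vmci_ctx_drain_example is made up
 * for this example; real callers live in the VMCI host device code.
 * Kept under #if 0 so it is never built.
 */
#if 0
static void vmci_ctx_drain_example(u32 cid)
{
	struct vmci_ctx *context = vmci_ctx_get(cid);

	if (!context)
		return;

	for (;;) {
		struct vmci_datagram *dg;
		size_t size = VMCI_MAX_DG_SIZE;
		int rv;

		rv = vmci_ctx_dequeue_datagram(context, &size, &dg);
		if (rv < VMCI_SUCCESS)
			break;	/* queue empty or error */

		/* The caller owns the dequeued datagram and must free it. */
		kfree(dg);

		if (rv == VMCI_SUCCESS)
			break;	/* that was the last pending datagram */
	}

	vmci_ctx_put(context);
}
#endif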
// SPDX-License-Identifier: GPL-2.0
/*
 * USB Serial Console driver
 *
 * Copyright (C) 2001 - 2002 Greg Kroah-Hartman (greg@kroah.com)
 *
 * Thanks to Randy Dunlap for the original version of this code.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/console.h>
#include <linux/serial.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

struct usbcons_info {
	int			magic;
	int			break_flag;
	struct usb_serial_port	*port;
};

static struct usbcons_info usbcons_info;
static struct console usbcons;

/*
 * ------------------------------------------------------------
 * USB Serial console driver
 *
 * Much of the code here is copied from drivers/char/serial.c
 * and implements a phony serial console in the same way that
 * serial.c does so that in case some software queries it,
 * it will get the same results.
 *
 * What differs from the way the serial port code does things
 * is that we call the lower level usb-serial driver code to
 * initialize the device, and we set the initial console speeds
 * based on the command line arguments.
 * ------------------------------------------------------------
 */

static const struct tty_operations usb_console_fake_tty_ops = {
};

/*
 * The parsing of the command line works exactly like the
 * serial.c code, except that the specifier is "ttyUSB" instead
 * of "ttyS".
*/ static int usb_console_setup(struct console *co, char *options) { struct usbcons_info *info = &usbcons_info; int baud = 9600; int bits = 8; int parity = 'n'; int doflow = 0; int cflag = CREAD | HUPCL | CLOCAL; char *s; struct usb_serial *serial; struct usb_serial_port *port; int retval; struct tty_struct *tty = NULL; struct ktermios dummy; if (options) { baud = simple_strtoul(options, NULL, 10); s = options; while (*s >= '0' && *s <= '9') s++; if (*s) parity = *s++; if (*s) bits = *s++ - '0'; if (*s) doflow = (*s++ == 'r'); } /* Sane default */ if (baud == 0) baud = 9600; switch (bits) { case 7: cflag |= CS7; break; default: case 8: cflag |= CS8; break; } switch (parity) { case 'o': case 'O': cflag |= PARODD; break; case 'e': case 'E': cflag |= PARENB; break; } if (doflow) cflag |= CRTSCTS; /* * no need to check the index here: if the index is wrong, console * code won't call us */ port = usb_serial_port_get_by_minor(co->index); if (port == NULL) { /* no device is connected yet, sorry :( */ pr_err("No USB device connected to ttyUSB%i\n", co->index); return -ENODEV; } serial = port->serial; retval = usb_autopm_get_interface(serial->interface); if (retval) goto error_get_interface; tty_port_tty_set(&port->port, NULL); info->port = port; ++port->port.count; if (!tty_port_initialized(&port->port)) { if (serial->type->set_termios) { /* * allocate a fake tty so the driver can initialize * the termios structure, then later call set_termios to * configure according to command line arguments */ tty = kzalloc(sizeof(*tty), GFP_KERNEL); if (!tty) { retval = -ENOMEM; goto reset_open_count; } kref_init(&tty->kref); tty->driver = usb_serial_tty_driver; tty->index = co->index; init_ldsem(&tty->ldisc_sem); spin_lock_init(&tty->files_lock); INIT_LIST_HEAD(&tty->tty_files); kref_get(&tty->driver->kref); __module_get(tty->driver->owner); tty->ops = &usb_console_fake_tty_ops; tty_init_termios(tty); tty_port_tty_set(&port->port, tty); } /* only call the device specific open if this * is the first time the port is opened */ retval = serial->type->open(NULL, port); if (retval) { dev_err(&port->dev, "could not open USB console port\n"); goto fail; } if (serial->type->set_termios) { tty->termios.c_cflag = cflag; tty_termios_encode_baud_rate(&tty->termios, baud, baud); memset(&dummy, 0, sizeof(struct ktermios)); serial->type->set_termios(tty, port, &dummy); tty_port_tty_set(&port->port, NULL); tty_save_termios(tty); tty_kref_put(tty); } tty_port_set_initialized(&port->port, true); } /* Now that any required fake tty operations are completed restore * the tty port count */ --port->port.count; /* The console is special in terms of closing the device so * indicate this port is now acting as a system console. 
*/ port->port.console = 1; mutex_unlock(&serial->disc_mutex); return retval; fail: tty_port_tty_set(&port->port, NULL); tty_kref_put(tty); reset_open_count: port->port.count = 0; info->port = NULL; usb_autopm_put_interface(serial->interface); error_get_interface: mutex_unlock(&serial->disc_mutex); usb_serial_put(serial); return retval; } static void usb_console_write(struct console *co, const char *buf, unsigned count) { static struct usbcons_info *info = &usbcons_info; struct usb_serial_port *port = info->port; struct usb_serial *serial; int retval = -ENODEV; if (!port || port->serial->dev->state == USB_STATE_NOTATTACHED) return; serial = port->serial; if (count == 0) return; dev_dbg(&port->dev, "%s - %d byte(s)\n", __func__, count); if (!port->port.console) { dev_dbg(&port->dev, "%s - port not opened\n", __func__); return; } while (count) { unsigned int i; unsigned int lf; /* search for LF so we can insert CR if necessary */ for (i = 0, lf = 0 ; i < count ; i++) { if (*(buf + i) == 10) { lf = 1; i++; break; } } /* pass on to the driver specific version of this function if it is available */ retval = serial->type->write(NULL, port, buf, i); dev_dbg(&port->dev, "%s - write: %d\n", __func__, retval); if (lf) { /* append CR after LF */ unsigned char cr = 13; retval = serial->type->write(NULL, port, &cr, 1); dev_dbg(&port->dev, "%s - write cr: %d\n", __func__, retval); } buf += i; count -= i; } } static struct tty_driver *usb_console_device(struct console *co, int *index) { struct tty_driver **p = (struct tty_driver **)co->data; if (!*p) return NULL; *index = co->index; return *p; } static struct console usbcons = { .name = "ttyUSB", .write = usb_console_write, .device = usb_console_device, .setup = usb_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &usb_serial_tty_driver, }; void usb_serial_console_disconnect(struct usb_serial *serial) { if (serial->port[0] && serial->port[0] == usbcons_info.port) { usb_serial_console_exit(); usb_serial_put(serial); } } void usb_serial_console_init(int minor) { if (minor == 0) { /* * Call register_console() if this is the first device plugged * in. If we call it earlier, then the callback to * console_setup() will fail, as there is not a device seen by * the USB subsystem yet. */ /* * Register console. * NOTES: * console_setup() is called (back) immediately (from * register_console). console_write() is called immediately * from register_console iff CON_PRINTBUFFER is set in flags. */ pr_debug("registering the USB serial console.\n"); register_console(&usbcons); } } void usb_serial_console_exit(void) { if (usbcons_info.port) { unregister_console(&usbcons); usbcons_info.port->port.console = 0; usbcons_info.port = NULL; } } |
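/*
 * Usage note (added for illustration, not from the original source):
 * the console is selected on the kernel command line just like a
 * regular serial console, e.g.
 *
 *	console=ttyUSB0,115200n8
 *
 * The option string after the comma is what usb_console_setup() parses
 * above: baud rate, parity character, word size, and an optional 'r'
 * to enable RTS/CTS flow control.
 */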
22 23 22 22 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 | // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2013 * Phillip Lougher <phillip@squashfs.org.uk> */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/pagemap.h> #include "squashfs_fs_sb.h" #include "decompressor.h" #include "page_actor.h" /* * This file contains implementations of page_actor for decompressing into * an intermediate buffer, and for decompressing directly into the * page cache. * * Calling code should avoid sleeping between calls to squashfs_first_page() * and squashfs_finish_page(). */ /* Implementation of page_actor for decompressing into intermediate buffer */ static void *cache_first_page(struct squashfs_page_actor *actor) { actor->next_page = 1; return actor->buffer[0]; } static void *cache_next_page(struct squashfs_page_actor *actor) { if (actor->next_page == actor->pages) return NULL; return actor->buffer[actor->next_page++]; } static void cache_finish_page(struct squashfs_page_actor *actor) { /* empty */ } struct squashfs_page_actor *squashfs_page_actor_init(void **buffer, int pages, int length) { struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL); if (actor == NULL) return NULL; actor->length = length ? : pages * PAGE_SIZE; actor->buffer = buffer; actor->pages = pages; actor->next_page = 0; actor->tmp_buffer = NULL; actor->squashfs_first_page = cache_first_page; actor->squashfs_next_page = cache_next_page; actor->squashfs_finish_page = cache_finish_page; return actor; } /* Implementation of page_actor for decompressing directly into page cache. */ static void *handle_next_page(struct squashfs_page_actor *actor) { int max_pages = (actor->length + PAGE_SIZE - 1) >> PAGE_SHIFT; if (actor->returned_pages == max_pages) return NULL; if ((actor->next_page == actor->pages) || (actor->next_index != actor->page[actor->next_page]->index)) { actor->next_index++; actor->returned_pages++; actor->last_page = NULL; return actor->alloc_buffer ? actor->tmp_buffer : ERR_PTR(-ENOMEM); } actor->next_index++; actor->returned_pages++; actor->last_page = actor->page[actor->next_page]; return actor->pageaddr = kmap_local_page(actor->page[actor->next_page++]); } static void *direct_first_page(struct squashfs_page_actor *actor) { return handle_next_page(actor); } static void *direct_next_page(struct squashfs_page_actor *actor) { if (actor->pageaddr) { kunmap_local(actor->pageaddr); actor->pageaddr = NULL; } return handle_next_page(actor); } static void direct_finish_page(struct squashfs_page_actor *actor) { if (actor->pageaddr) kunmap_local(actor->pageaddr); } struct squashfs_page_actor *squashfs_page_actor_init_special(struct squashfs_sb_info *msblk, struct page **page, int pages, int length) { struct squashfs_page_actor *actor = kmalloc(sizeof(*actor), GFP_KERNEL); if (actor == NULL) return NULL; if (msblk->decompressor->alloc_buffer) { actor->tmp_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL); if (actor->tmp_buffer == NULL) { kfree(actor); return NULL; } } else actor->tmp_buffer = NULL; actor->length = length ? 
: pages * PAGE_SIZE; actor->page = page; actor->pages = pages; actor->next_page = 0; actor->returned_pages = 0; actor->next_index = page[0]->index & ~((1 << (msblk->block_log - PAGE_SHIFT)) - 1); actor->pageaddr = NULL; actor->last_page = NULL; actor->alloc_buffer = msblk->decompressor->alloc_buffer; actor->squashfs_first_page = direct_first_page; actor->squashfs_next_page = direct_next_page; actor->squashfs_finish_page = direct_finish_page; return actor; } |
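/*
 * Illustrative sketch only (not part of this file): how a decompressor
 * typically walks an actor, copying decompressed data page by page.
 * The squashfs_first_page()/squashfs_next_page()/squashfs_finish_page()
 * wrappers are assumed to be the ones declared in page_actor.h, and the
 * helper name copy_block_example is made up for this example; kept
 * under #if 0 so it is never built.
 */
#if 0
static int copy_block_example(struct squashfs_page_actor *actor,
			      const void *src, int length)
{
	int bytes = length;
	void *dst = squashfs_first_page(actor);

	while (bytes > 0 && dst != NULL) {
		int avail = min_t(int, bytes, PAGE_SIZE);

		/* A missing page shows up as an ERR_PTR; skip the copy. */
		if (!IS_ERR(dst))
			memcpy(dst, src, avail);

		src += avail;
		bytes -= avail;
		dst = squashfs_next_page(actor);
	}
	squashfs_finish_page(actor);

	return length - bytes;
}
#endif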
82 1 82 82 82 13 13 7 82 18 5 82 64 18 82 57 4 45 1 45 31 82 31 82 57 45 45 45 22 8 30 8 22 30 30 57 57 45 45 30 3 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 | // SPDX-License-Identifier: GPL-2.0-only /* * unicode.c * * PURPOSE * Routines for converting between UTF-8 and OSTA Compressed Unicode. * Also handles filename mangling * * DESCRIPTION * OSTA Compressed Unicode is explained in the OSTA UDF specification. * http://www.osta.org/ * UTF-8 is explained in the IETF RFC XXXX. * ftp://ftp.internic.net/rfc/rfcxxxx.txt * */ #include "udfdecl.h" #include <linux/kernel.h> #include <linux/string.h> /* for memset */ #include <linux/nls.h> #include <linux/crc-itu-t.h> #include <linux/slab.h> #include "udf_sb.h" #define PLANE_SIZE 0x10000 #define UNICODE_MAX 0x10ffff #define SURROGATE_MASK 0xfffff800 #define SURROGATE_PAIR 0x0000d800 #define SURROGATE_LOW 0x00000400 #define SURROGATE_CHAR_BITS 10 #define SURROGATE_CHAR_MASK ((1 << SURROGATE_CHAR_BITS) - 1) #define ILLEGAL_CHAR_MARK '_' #define EXT_MARK '.' #define CRC_MARK '#' #define EXT_SIZE 5 /* Number of chars we need to store generated CRC to make filename unique */ #define CRC_LEN 5 static unicode_t get_utf16_char(const uint8_t *str_i, int str_i_max_len, int str_i_idx, int u_ch, unicode_t *ret) { unicode_t c; int start_idx = str_i_idx; /* Expand OSTA compressed Unicode to Unicode */ c = str_i[str_i_idx++]; if (u_ch > 1) c = (c << 8) | str_i[str_i_idx++]; if ((c & SURROGATE_MASK) == SURROGATE_PAIR) { unicode_t next; /* Trailing surrogate char */ if (str_i_idx >= str_i_max_len) { c = UNICODE_MAX + 1; goto out; } /* Low surrogate must follow the high one... 
*/ if (c & SURROGATE_LOW) { c = UNICODE_MAX + 1; goto out; } WARN_ON_ONCE(u_ch != 2); next = str_i[str_i_idx++] << 8; next |= str_i[str_i_idx++]; if ((next & SURROGATE_MASK) != SURROGATE_PAIR || !(next & SURROGATE_LOW)) { c = UNICODE_MAX + 1; goto out; } c = PLANE_SIZE + ((c & SURROGATE_CHAR_MASK) << SURROGATE_CHAR_BITS) + (next & SURROGATE_CHAR_MASK); } out: *ret = c; return str_i_idx - start_idx; } static int udf_name_conv_char(uint8_t *str_o, int str_o_max_len, int *str_o_idx, const uint8_t *str_i, int str_i_max_len, int *str_i_idx, int u_ch, int *needsCRC, int (*conv_f)(wchar_t, unsigned char *, int), int translate) { unicode_t c; int illChar = 0; int len, gotch = 0; while (!gotch && *str_i_idx < str_i_max_len) { if (*str_o_idx >= str_o_max_len) { *needsCRC = 1; return gotch; } len = get_utf16_char(str_i, str_i_max_len, *str_i_idx, u_ch, &c); /* These chars cannot be converted. Replace them. */ if (c == 0 || c > UNICODE_MAX || (conv_f && c > MAX_WCHAR_T) || (translate && c == '/')) { illChar = 1; if (!translate) gotch = 1; } else if (illChar) break; else gotch = 1; *str_i_idx += len; } if (illChar) { *needsCRC = 1; c = ILLEGAL_CHAR_MARK; gotch = 1; } if (gotch) { if (conv_f) { len = conv_f(c, &str_o[*str_o_idx], str_o_max_len - *str_o_idx); } else { len = utf32_to_utf8(c, &str_o[*str_o_idx], str_o_max_len - *str_o_idx); if (len < 0) len = -ENAMETOOLONG; } /* Valid character? */ if (len >= 0) *str_o_idx += len; else if (len == -ENAMETOOLONG) { *needsCRC = 1; gotch = 0; } else { str_o[(*str_o_idx)++] = ILLEGAL_CHAR_MARK; *needsCRC = 1; } } return gotch; } static int udf_name_from_CS0(struct super_block *sb, uint8_t *str_o, int str_max_len, const uint8_t *ocu, int ocu_len, int translate) { uint32_t c; uint8_t cmp_id; int idx, len; int u_ch; int needsCRC = 0; int ext_i_len, ext_max_len; int str_o_len = 0; /* Length of resulting output */ int ext_o_len = 0; /* Extension output length */ int ext_crc_len = 0; /* Extension output length if used with CRC */ int i_ext = -1; /* Extension position in input buffer */ int o_crc = 0; /* Rightmost possible output pos for CRC+ext */ unsigned short valueCRC; uint8_t ext[EXT_SIZE * NLS_MAX_CHARSET_SIZE + 1]; uint8_t crc[CRC_LEN]; int (*conv_f)(wchar_t, unsigned char *, int); if (str_max_len <= 0) return 0; if (ocu_len == 0) { memset(str_o, 0, str_max_len); return 0; } if (UDF_SB(sb)->s_nls_map) conv_f = UDF_SB(sb)->s_nls_map->uni2char; else conv_f = NULL; cmp_id = ocu[0]; if (cmp_id != 8 && cmp_id != 16) { memset(str_o, 0, str_max_len); pr_err("unknown compression code (%u)\n", cmp_id); return -EINVAL; } u_ch = cmp_id >> 3; ocu++; ocu_len--; if (ocu_len % u_ch) { pr_err("incorrect filename length (%d)\n", ocu_len + 1); return -EINVAL; } if (translate) { /* Look for extension */ for (idx = ocu_len - u_ch, ext_i_len = 0; (idx >= 0) && (ext_i_len < EXT_SIZE); idx -= u_ch, ext_i_len++) { c = ocu[idx]; if (u_ch > 1) c = (c << 8) | ocu[idx + 1]; if (c == EXT_MARK) { if (ext_i_len) i_ext = idx; break; } } if (i_ext >= 0) { /* Convert extension */ ext_max_len = min_t(int, sizeof(ext), str_max_len); ext[ext_o_len++] = EXT_MARK; idx = i_ext + u_ch; while (udf_name_conv_char(ext, ext_max_len, &ext_o_len, ocu, ocu_len, &idx, u_ch, &needsCRC, conv_f, translate)) { if ((ext_o_len + CRC_LEN) < str_max_len) ext_crc_len = ext_o_len; } } } idx = 0; while (1) { if (translate && (idx == i_ext)) { if (str_o_len > (str_max_len - ext_o_len)) needsCRC = 1; break; } if (!udf_name_conv_char(str_o, str_max_len, &str_o_len, ocu, ocu_len, &idx, u_ch, &needsCRC, conv_f, translate)) 
break; if (translate && (str_o_len <= (str_max_len - ext_o_len - CRC_LEN))) o_crc = str_o_len; } if (translate) { if (str_o_len > 0 && str_o_len <= 2 && str_o[0] == '.' && (str_o_len == 1 || str_o[1] == '.')) needsCRC = 1; if (needsCRC) { str_o_len = o_crc; valueCRC = crc_itu_t(0, ocu, ocu_len); crc[0] = CRC_MARK; crc[1] = hex_asc_upper_hi(valueCRC >> 8); crc[2] = hex_asc_upper_lo(valueCRC >> 8); crc[3] = hex_asc_upper_hi(valueCRC); crc[4] = hex_asc_upper_lo(valueCRC); len = min_t(int, CRC_LEN, str_max_len - str_o_len); memcpy(&str_o[str_o_len], crc, len); str_o_len += len; ext_o_len = ext_crc_len; } if (ext_o_len > 0) { memcpy(&str_o[str_o_len], ext, ext_o_len); str_o_len += ext_o_len; } } return str_o_len; } static int udf_name_to_CS0(struct super_block *sb, uint8_t *ocu, int ocu_max_len, const uint8_t *str_i, int str_len) { int i, len; unsigned int max_val; int u_len, u_ch; unicode_t uni_char; int (*conv_f)(const unsigned char *, int, wchar_t *); if (ocu_max_len <= 0) return 0; if (UDF_SB(sb)->s_nls_map) conv_f = UDF_SB(sb)->s_nls_map->char2uni; else conv_f = NULL; memset(ocu, 0, ocu_max_len); ocu[0] = 8; max_val = 0xff; u_ch = 1; try_again: u_len = 1; for (i = 0; i < str_len; i += len) { /* Name didn't fit? */ if (u_len + u_ch > ocu_max_len) return 0; if (conv_f) { wchar_t wchar; len = conv_f(&str_i[i], str_len - i, &wchar); if (len > 0) uni_char = wchar; } else { len = utf8_to_utf32(&str_i[i], str_len - i, &uni_char); } /* Invalid character, deal with it */ if (len <= 0 || uni_char > UNICODE_MAX) { len = 1; uni_char = '?'; } if (uni_char > max_val) { unicode_t c; if (max_val == 0xff) { max_val = 0xffff; ocu[0] = 0x10; u_ch = 2; goto try_again; } /* * Use UTF-16 encoding for chars outside we * cannot encode directly. */ if (u_len + 2 * u_ch > ocu_max_len) return 0; uni_char -= PLANE_SIZE; c = SURROGATE_PAIR | ((uni_char >> SURROGATE_CHAR_BITS) & SURROGATE_CHAR_MASK); ocu[u_len++] = (uint8_t)(c >> 8); ocu[u_len++] = (uint8_t)(c & 0xff); uni_char = SURROGATE_PAIR | SURROGATE_LOW | (uni_char & SURROGATE_CHAR_MASK); } if (max_val == 0xffff) ocu[u_len++] = (uint8_t)(uni_char >> 8); ocu[u_len++] = (uint8_t)(uni_char & 0xff); } return u_len; } /* * Convert CS0 dstring to output charset. Warning: This function may truncate * input string if it is too long as it is used for informational strings only * and it is better to truncate the string than to refuse mounting a media. */ int udf_dstrCS0toChar(struct super_block *sb, uint8_t *utf_o, int o_len, const uint8_t *ocu_i, int i_len) { int s_len = 0; if (i_len > 0) { s_len = ocu_i[i_len - 1]; if (s_len >= i_len) { pr_warn("incorrect dstring lengths (%d/%d)," " truncating\n", s_len, i_len); s_len = i_len - 1; /* 2-byte encoding? Need to round properly... */ if (ocu_i[0] == 16) s_len -= (s_len - 1) & 2; } } return udf_name_from_CS0(sb, utf_o, o_len, ocu_i, s_len, 0); } int udf_get_filename(struct super_block *sb, const uint8_t *sname, int slen, uint8_t *dname, int dlen) { int ret; if (!slen) return -EIO; if (dlen <= 0) return 0; ret = udf_name_from_CS0(sb, dname, dlen, sname, slen, 1); /* Zero length filename isn't valid... */ if (ret == 0) ret = -EINVAL; return ret; } int udf_put_filename(struct super_block *sb, const uint8_t *sname, int slen, uint8_t *dname, int dlen) { return udf_name_to_CS0(sb, dname, dlen, sname, slen); } |
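/*
 * Worked example (added for illustration): encoding U+1F600 in the
 * 16-bit OSTA CS0 form goes through the surrogate logic above:
 *
 *   c         = 0x1F600 - PLANE_SIZE                        = 0xF600
 *   high word = SURROGATE_PAIR | (c >> SURROGATE_CHAR_BITS) = 0xD83D
 *   low word  = SURROGATE_PAIR | SURROGATE_LOW
 *               | (c & SURROGATE_CHAR_MASK)                 = 0xDE00
 *
 * i.e. the 0xD83D 0xDE00 pair, which get_utf16_char() reassembles back
 * into 0x1F600 on the decode path.
 */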
2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060 
3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594 3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 
3772 3773 3774 3775 3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786 3787 3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799 3800 3801 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839 3840 3841 3842 3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884 3885 3886 3887 3888 3889 3890 3891 3892 3893 3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970 3971 3972 3973 3974 3975 3976 3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065 4066 4067 4068 4069 4070 4071 4072 4073 4074 4075 4076 4077 4078 4079 4080 4081 4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100 4101 4102 4103 4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125 4126 4127 4128 4129 4130 4131 4132 4133 4134 4135 4136 4137 4138 4139 4140 4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158 4159 4160 4161 4162 4163 4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 4264 4265 4266 4267 4268 4269 4270 4271 4272 4273 4274 4275 4276 4277 4278 4279 4280 4281 4282 4283 4284 4285 4286 4287 4288 4289 4290 4291 4292 4293 4294 4295 4296 4297 4298 4299 4300 4301 4302 4303 4304 4305 4306 4307 4308 4309 4310 4311 4312 4313 4314 4315 4316 4317 4318 4319 4320 4321 4322 4323 4324 4325 4326 4327 4328 4329 4330 4331 4332 4333 4334 4335 4336 4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353 4354 4355 4356 4357 4358 4359 4360 4361 4362 4363 4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4378 4379 4380 4381 4382 4383 4384 4385 4386 4387 4388 4389 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420 4421 4422 4423 4424 4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449 4450 4451 4452 4453 4454 4455 4456 4457 4458 4459 4460 4461 4462 4463 4464 4465 4466 4467 4468 4469 4470 4471 4472 4473 4474 4475 4476 4477 4478 4479 4480 4481 4482 
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/sched/mm.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/part_stat.h>
#include <crypto/hash.h>

#include <linux/fscrypt.h>
#include <linux/fsverity.h>

struct pagevec;

#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition) BUG_ON(condition)
#else
#define f2fs_bug_on(sbi, condition) \
        do { \
                if (WARN_ON(condition)) \
                        set_sbi_flag(sbi, SBI_NEED_FSCK); \
        } while (0)
#endif

enum {
        FAULT_KMALLOC,
        FAULT_KVMALLOC,
        FAULT_PAGE_ALLOC,
        FAULT_PAGE_GET,
        FAULT_ALLOC_BIO, /* it's obsolete since bio_alloc() will never fail */
        FAULT_ALLOC_NID,
        FAULT_ORPHAN,
        FAULT_BLOCK,
        FAULT_DIR_DEPTH,
        FAULT_EVICT_INODE,
        FAULT_TRUNCATE,
        FAULT_READ_IO,
        FAULT_CHECKPOINT,
        FAULT_DISCARD,
        FAULT_WRITE_IO,
        FAULT_SLAB_ALLOC,
        FAULT_DQUOT_INIT,
        FAULT_LOCK_OP,
        FAULT_BLKADDR,
        FAULT_MAX,
};

#ifdef CONFIG_F2FS_FAULT_INJECTION
#define F2FS_ALL_FAULT_TYPE (GENMASK(FAULT_MAX - 1, 0))

struct f2fs_fault_info {
        atomic_t inject_ops;
        unsigned int inject_rate;
        unsigned int inject_type;
};

extern const char *f2fs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type) ((fi)->inject_type & BIT(type))
#endif

/*
 * For mount options
 */
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD 0x00000001
#define F2FS_MOUNT_DISCARD 0x00000002
#define F2FS_MOUNT_NOHEAP 0x00000004
#define F2FS_MOUNT_XATTR_USER 0x00000008
#define F2FS_MOUNT_POSIX_ACL 0x00000010
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY 0x00000020
#define F2FS_MOUNT_INLINE_XATTR 0x00000040
#define F2FS_MOUNT_INLINE_DATA 0x00000080
#define F2FS_MOUNT_INLINE_DENTRY 0x00000100
#define F2FS_MOUNT_FLUSH_MERGE 0x00000200
#define F2FS_MOUNT_NOBARRIER 0x00000400
#define F2FS_MOUNT_FASTBOOT 0x00000800
#define F2FS_MOUNT_READ_EXTENT_CACHE 0x00001000
#define F2FS_MOUNT_DATA_FLUSH 0x00002000
#define F2FS_MOUNT_FAULT_INJECTION 0x00004000
#define F2FS_MOUNT_USRQUOTA 0x00008000
#define F2FS_MOUNT_GRPQUOTA 0x00010000
#define F2FS_MOUNT_PRJQUOTA 0x00020000
#define F2FS_MOUNT_QUOTA 0x00040000
#define F2FS_MOUNT_INLINE_XATTR_SIZE 0x00080000
#define F2FS_MOUNT_RESERVE_ROOT 0x00100000
#define F2FS_MOUNT_DISABLE_CHECKPOINT 0x00200000
#define F2FS_MOUNT_NORECOVERY 0x00400000
#define F2FS_MOUNT_ATGC 0x00800000
#define F2FS_MOUNT_MERGE_CHECKPOINT 0x01000000
#define F2FS_MOUNT_GC_MERGE 0x02000000
#define F2FS_MOUNT_COMPRESS_CACHE 0x04000000
#define F2FS_MOUNT_AGE_EXTENT_CACHE 0x08000000

#define F2FS_OPTION(sbi) ((sbi)->mount_opt)
#define clear_opt(sbi, option)
(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option) #define set_opt(sbi, option) (F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option) #define test_opt(sbi, option) (F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option) #define ver_after(a, b) (typecheck(unsigned long long, a) && \ typecheck(unsigned long long, b) && \ ((long long)((a) - (b)) > 0)) typedef u32 block_t; /* * should not change u32, since it is the on-disk block * address format, __le32. */ typedef u32 nid_t; #define COMPRESS_EXT_NUM 16 /* * An implementation of an rwsem that is explicitly unfair to readers. This * prevents priority inversion when a low-priority reader acquires the read lock * while sleeping on the write lock but the write lock is needed by * higher-priority clients. */ struct f2fs_rwsem { struct rw_semaphore internal_rwsem; #ifdef CONFIG_F2FS_UNFAIR_RWSEM wait_queue_head_t read_waiters; #endif }; struct f2fs_mount_info { unsigned int opt; int write_io_size_bits; /* Write IO size bits */ block_t root_reserved_blocks; /* root reserved blocks */ kuid_t s_resuid; /* reserved blocks for uid */ kgid_t s_resgid; /* reserved blocks for gid */ int active_logs; /* # of active logs */ int inline_xattr_size; /* inline xattr size */ #ifdef CONFIG_F2FS_FAULT_INJECTION struct f2fs_fault_info fault_info; /* For fault injection */ #endif #ifdef CONFIG_QUOTA /* Names of quota files with journalled quota */ char *s_qf_names[MAXQUOTAS]; int s_jquota_fmt; /* Format of quota to use */ #endif /* For which write hints are passed down to block layer */ int alloc_mode; /* segment allocation policy */ int fsync_mode; /* fsync policy */ int fs_mode; /* fs mode: LFS or ADAPTIVE */ int bggc_mode; /* bggc mode: off, on or sync */ int memory_mode; /* memory mode */ int errors; /* errors parameter */ int discard_unit; /* * discard command's offset/size should * be aligned to this unit: block, * segment or section */ struct fscrypt_dummy_policy dummy_enc_policy; /* test dummy encryption */ block_t unusable_cap_perc; /* percentage for cap */ block_t unusable_cap; /* Amount of space allowed to be * unusable when disabling checkpoint */ /* For compression */ unsigned char compress_algorithm; /* algorithm type */ unsigned char compress_log_size; /* cluster log size */ unsigned char compress_level; /* compress level */ bool compress_chksum; /* compressed data chksum */ unsigned char compress_ext_cnt; /* extension count */ unsigned char nocompress_ext_cnt; /* nocompress extension count */ int compress_mode; /* compression mode */ unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* extensions */ unsigned char noextensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN]; /* extensions */ }; #define F2FS_FEATURE_ENCRYPT 0x00000001 #define F2FS_FEATURE_BLKZONED 0x00000002 #define F2FS_FEATURE_ATOMIC_WRITE 0x00000004 #define F2FS_FEATURE_EXTRA_ATTR 0x00000008 #define F2FS_FEATURE_PRJQUOTA 0x00000010 #define F2FS_FEATURE_INODE_CHKSUM 0x00000020 #define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR 0x00000040 #define F2FS_FEATURE_QUOTA_INO 0x00000080 #define F2FS_FEATURE_INODE_CRTIME 0x00000100 #define F2FS_FEATURE_LOST_FOUND 0x00000200 #define F2FS_FEATURE_VERITY 0x00000400 #define F2FS_FEATURE_SB_CHKSUM 0x00000800 #define F2FS_FEATURE_CASEFOLD 0x00001000 #define F2FS_FEATURE_COMPRESSION 0x00002000 #define F2FS_FEATURE_RO 0x00004000 #define __F2FS_HAS_FEATURE(raw_super, mask) \ ((raw_super->feature & cpu_to_le32(mask)) != 0) #define F2FS_HAS_FEATURE(sbi, mask) __F2FS_HAS_FEATURE(sbi->raw_super, mask) /* * Default values for user and/or group using reserved blocks */ #define 
F2FS_DEF_RESUID 0 #define F2FS_DEF_RESGID 0 /* * For checkpoint manager */ enum { NAT_BITMAP, SIT_BITMAP }; #define CP_UMOUNT 0x00000001 #define CP_FASTBOOT 0x00000002 #define CP_SYNC 0x00000004 #define CP_RECOVERY 0x00000008 #define CP_DISCARD 0x00000010 #define CP_TRIMMED 0x00000020 #define CP_PAUSE 0x00000040 #define CP_RESIZE 0x00000080 #define DEF_MAX_DISCARD_REQUEST 8 /* issue 8 discards per round */ #define DEF_MIN_DISCARD_ISSUE_TIME 50 /* 50 ms, if exists */ #define DEF_MID_DISCARD_ISSUE_TIME 500 /* 500 ms, if device busy */ #define DEF_MAX_DISCARD_ISSUE_TIME 60000 /* 60 s, if no candidates */ #define DEF_DISCARD_URGENT_UTIL 80 /* do more discard over 80% */ #define DEF_CP_INTERVAL 60 /* 60 secs */ #define DEF_IDLE_INTERVAL 5 /* 5 secs */ #define DEF_DISABLE_INTERVAL 5 /* 5 secs */ #define DEF_DISABLE_QUICK_INTERVAL 1 /* 1 secs */ #define DEF_UMOUNT_DISCARD_TIMEOUT 5 /* 5 secs */ struct cp_control { int reason; __u64 trim_start; __u64 trim_end; __u64 trim_minlen; }; /* * indicate meta/data type */ enum { META_CP, META_NAT, META_SIT, META_SSA, META_MAX, META_POR, DATA_GENERIC, /* check range only */ DATA_GENERIC_ENHANCE, /* strong check on range and segment bitmap */ DATA_GENERIC_ENHANCE_READ, /* * strong check on range and segment * bitmap but no warning due to race * condition of read on truncated area * by extent_cache */ DATA_GENERIC_ENHANCE_UPDATE, /* * strong check on range and segment * bitmap for update case */ META_GENERIC, }; /* for the list of ino */ enum { ORPHAN_INO, /* for orphan ino list */ APPEND_INO, /* for append ino list */ UPDATE_INO, /* for update ino list */ TRANS_DIR_INO, /* for transactions dir ino list */ FLUSH_INO, /* for multiple device flushing */ MAX_INO_ENTRY, /* max. list */ }; struct ino_entry { struct list_head list; /* list head */ nid_t ino; /* inode number */ unsigned int dirty_device; /* dirty device bitmap */ }; /* for the list of inodes to be GCed */ struct inode_entry { struct list_head list; /* list head */ struct inode *inode; /* vfs inode pointer */ }; struct fsync_node_entry { struct list_head list; /* list head */ struct page *page; /* warm node page pointer */ unsigned int seq_id; /* sequence id */ }; struct ckpt_req { struct completion wait; /* completion for checkpoint done */ struct llist_node llnode; /* llist_node to be linked in wait queue */ int ret; /* return code of checkpoint */ ktime_t queue_time; /* request queued time */ }; struct ckpt_req_control { struct task_struct *f2fs_issue_ckpt; /* checkpoint task */ int ckpt_thread_ioprio; /* checkpoint merge thread ioprio */ wait_queue_head_t ckpt_wait_queue; /* waiting queue for wake-up */ atomic_t issued_ckpt; /* # of actually issued ckpts */ atomic_t total_ckpt; /* # of total ckpts */ atomic_t queued_ckpt; /* # of queued ckpts */ struct llist_head issue_list; /* list for command issue */ spinlock_t stat_lock; /* lock for below checkpoint time stats */ unsigned int cur_time; /* cur wait time in msec for currently issued checkpoint */ unsigned int peak_time; /* peak wait time in msec until now */ }; /* for the bitmap indicate blocks to be discarded */ struct discard_entry { struct list_head list; /* list head */ block_t start_blkaddr; /* start blockaddr of current segment */ unsigned char discard_map[SIT_VBLOCK_MAP_SIZE]; /* segment discard bitmap */ }; /* minimum discard granularity, unit: block count */ #define MIN_DISCARD_GRANULARITY 1 /* default discard granularity of inner discard thread, unit: block count */ #define DEFAULT_DISCARD_GRANULARITY 16 /* default maximum discard 
granularity of ordered discard, unit: block count */ #define DEFAULT_MAX_ORDERED_DISCARD_GRANULARITY 16 /* max discard pend list number */ #define MAX_PLIST_NUM 512 #define plist_idx(blk_num) ((blk_num) >= MAX_PLIST_NUM ? \ (MAX_PLIST_NUM - 1) : ((blk_num) - 1)) enum { D_PREP, /* initial */ D_PARTIAL, /* partially submitted */ D_SUBMIT, /* all submitted */ D_DONE, /* finished */ }; struct discard_info { block_t lstart; /* logical start address */ block_t len; /* length */ block_t start; /* actual start address in dev */ }; struct discard_cmd { struct rb_node rb_node; /* rb node located in rb-tree */ struct discard_info di; /* discard info */ struct list_head list; /* command list */ struct completion wait; /* compleation */ struct block_device *bdev; /* bdev */ unsigned short ref; /* reference count */ unsigned char state; /* state */ unsigned char queued; /* queued discard */ int error; /* bio error */ spinlock_t lock; /* for state/bio_ref updating */ unsigned short bio_ref; /* bio reference count */ }; enum { DPOLICY_BG, DPOLICY_FORCE, DPOLICY_FSTRIM, DPOLICY_UMOUNT, MAX_DPOLICY, }; struct discard_policy { int type; /* type of discard */ unsigned int min_interval; /* used for candidates exist */ unsigned int mid_interval; /* used for device busy */ unsigned int max_interval; /* used for candidates not exist */ unsigned int max_requests; /* # of discards issued per round */ unsigned int io_aware_gran; /* minimum granularity discard not be aware of I/O */ bool io_aware; /* issue discard in idle time */ bool sync; /* submit discard with REQ_SYNC flag */ bool ordered; /* issue discard by lba order */ bool timeout; /* discard timeout for put_super */ unsigned int granularity; /* discard granularity */ }; struct discard_cmd_control { struct task_struct *f2fs_issue_discard; /* discard thread */ struct list_head entry_list; /* 4KB discard entry list */ struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */ struct list_head wait_list; /* store on-flushing entries */ struct list_head fstrim_list; /* in-flight discard from fstrim */ wait_queue_head_t discard_wait_queue; /* waiting queue for wake-up */ struct mutex cmd_lock; unsigned int nr_discards; /* # of discards in the list */ unsigned int max_discards; /* max. discards to be issued */ unsigned int max_discard_request; /* max. discard request per round */ unsigned int min_discard_issue_time; /* min. interval between discard issue */ unsigned int mid_discard_issue_time; /* mid. interval between discard issue */ unsigned int max_discard_issue_time; /* max. 
interval between discard issue */ unsigned int discard_io_aware_gran; /* minimum discard granularity not be aware of I/O */ unsigned int discard_urgent_util; /* utilization which issue discard proactively */ unsigned int discard_granularity; /* discard granularity */ unsigned int max_ordered_discard; /* maximum discard granularity issued by lba order */ unsigned int undiscard_blks; /* # of undiscard blocks */ unsigned int next_pos; /* next discard position */ atomic_t issued_discard; /* # of issued discard */ atomic_t queued_discard; /* # of queued discard */ atomic_t discard_cmd_cnt; /* # of cached cmd count */ struct rb_root_cached root; /* root of discard rb-tree */ bool rbtree_check; /* config for consistence check */ bool discard_wake; /* to wake up discard thread */ }; /* for the list of fsync inodes, used only during recovery */ struct fsync_inode_entry { struct list_head list; /* list head */ struct inode *inode; /* vfs inode pointer */ block_t blkaddr; /* block address locating the last fsync */ block_t last_dentry; /* block address locating the last dentry */ }; #define nats_in_cursum(jnl) (le16_to_cpu((jnl)->n_nats)) #define sits_in_cursum(jnl) (le16_to_cpu((jnl)->n_sits)) #define nat_in_journal(jnl, i) ((jnl)->nat_j.entries[i].ne) #define nid_in_journal(jnl, i) ((jnl)->nat_j.entries[i].nid) #define sit_in_journal(jnl, i) ((jnl)->sit_j.entries[i].se) #define segno_in_journal(jnl, i) ((jnl)->sit_j.entries[i].segno) #define MAX_NAT_JENTRIES(jnl) (NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl)) #define MAX_SIT_JENTRIES(jnl) (SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl)) static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i) { int before = nats_in_cursum(journal); journal->n_nats = cpu_to_le16(before + i); return before; } static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i) { int before = sits_in_cursum(journal); journal->n_sits = cpu_to_le16(before + i); return before; } static inline bool __has_cursum_space(struct f2fs_journal *journal, int size, int type) { if (type == NAT_JOURNAL) return size <= MAX_NAT_JENTRIES(journal); return size <= MAX_SIT_JENTRIES(journal); } /* for inline stuff */ #define DEF_INLINE_RESERVED_SIZE 1 static inline int get_extra_isize(struct inode *inode); static inline int get_inline_xattr_addrs(struct inode *inode); #define MAX_INLINE_DATA(inode) (sizeof(__le32) * \ (CUR_ADDRS_PER_INODE(inode) - \ get_inline_xattr_addrs(inode) - \ DEF_INLINE_RESERVED_SIZE)) /* for inline dir */ #define NR_INLINE_DENTRY(inode) (MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \ ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \ BITS_PER_BYTE + 1)) #define INLINE_DENTRY_BITMAP_SIZE(inode) \ DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE) #define INLINE_RESERVED_SIZE(inode) (MAX_INLINE_DATA(inode) - \ ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \ NR_INLINE_DENTRY(inode) + \ INLINE_DENTRY_BITMAP_SIZE(inode))) /* * For INODE and NODE manager */ /* for directory operations */ struct f2fs_filename { /* * The filename the user specified. This is NULL for some * filesystem-internal operations, e.g. converting an inline directory * to a non-inline one, or roll-forward recovering an encrypted dentry. */ const struct qstr *usr_fname; /* * The on-disk filename. For encrypted directories, this is encrypted. * This may be NULL for lookups in an encrypted dir without the key. 
*/ struct fscrypt_str disk_name; /* The dirhash of this filename */ f2fs_hash_t hash; #ifdef CONFIG_FS_ENCRYPTION /* * For lookups in encrypted directories: either the buffer backing * disk_name, or a buffer that holds the decoded no-key name. */ struct fscrypt_str crypto_buf; #endif #if IS_ENABLED(CONFIG_UNICODE) /* * For casefolded directories: the casefolded name, but it's left NULL * if the original name is not valid Unicode, if the original name is * "." or "..", if the directory is both casefolded and encrypted and * its encryption key is unavailable, or if the filesystem is doing an * internal operation where usr_fname is also NULL. In all these cases * we fall back to treating the name as an opaque byte sequence. */ struct fscrypt_str cf_name; #endif }; struct f2fs_dentry_ptr { struct inode *inode; void *bitmap; struct f2fs_dir_entry *dentry; __u8 (*filename)[F2FS_SLOT_LEN]; int max; int nr_bitmap; }; static inline void make_dentry_ptr_block(struct inode *inode, struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t) { d->inode = inode; d->max = NR_DENTRY_IN_BLOCK; d->nr_bitmap = SIZE_OF_DENTRY_BITMAP; d->bitmap = t->dentry_bitmap; d->dentry = t->dentry; d->filename = t->filename; } static inline void make_dentry_ptr_inline(struct inode *inode, struct f2fs_dentry_ptr *d, void *t) { int entry_cnt = NR_INLINE_DENTRY(inode); int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode); int reserved_size = INLINE_RESERVED_SIZE(inode); d->inode = inode; d->max = entry_cnt; d->nr_bitmap = bitmap_size; d->bitmap = t; d->dentry = t + bitmap_size + reserved_size; d->filename = t + bitmap_size + reserved_size + SIZE_OF_DIR_ENTRY * entry_cnt; } /* * XATTR_NODE_OFFSET stores xattrs to one node block per file keeping -1 * as its node offset to distinguish from index node blocks. * But some bits are used to mark the node block. */ #define XATTR_NODE_OFFSET ((((unsigned int)-1) << OFFSET_BIT_SHIFT) \ >> OFFSET_BIT_SHIFT) enum { ALLOC_NODE, /* allocate a new node page if needed */ LOOKUP_NODE, /* look up a node without readahead */ LOOKUP_NODE_RA, /* * look up a node with readahead called * by get_data_block. 
*/ }; #define DEFAULT_RETRY_IO_COUNT 8 /* maximum retry read IO or flush count */ /* congestion wait timeout value, default: 20ms */ #define DEFAULT_IO_TIMEOUT (msecs_to_jiffies(20)) /* maximum retry quota flush count */ #define DEFAULT_RETRY_QUOTA_FLUSH_COUNT 8 /* maximum retry of EIO'ed page */ #define MAX_RETRY_PAGE_EIO 100 #define F2FS_LINK_MAX 0xffffffff /* maximum link count per file */ #define MAX_DIR_RA_PAGES 4 /* maximum ra pages of dir */ /* dirty segments threshold for triggering CP */ #define DEFAULT_DIRTY_THRESHOLD 4 #define RECOVERY_MAX_RA_BLOCKS BIO_MAX_VECS #define RECOVERY_MIN_RA_BLOCKS 1 #define F2FS_ONSTACK_PAGES 16 /* nr of onstack pages */ /* for in-memory extent cache entry */ #define F2FS_MIN_EXTENT_LEN 64 /* minimum extent length */ /* number of extent info in extent cache we try to shrink */ #define READ_EXTENT_CACHE_SHRINK_NUMBER 128 /* number of age extent info in extent cache we try to shrink */ #define AGE_EXTENT_CACHE_SHRINK_NUMBER 128 #define LAST_AGE_WEIGHT 30 #define SAME_AGE_REGION 1024 /* * Define data block with age less than 1GB as hot data * define data block with age less than 10GB but more than 1GB as warm data */ #define DEF_HOT_DATA_AGE_THRESHOLD 262144 #define DEF_WARM_DATA_AGE_THRESHOLD 2621440 /* extent cache type */ enum extent_type { EX_READ, EX_BLOCK_AGE, NR_EXTENT_CACHES, }; struct extent_info { unsigned int fofs; /* start offset in a file */ unsigned int len; /* length of the extent */ union { /* read extent_cache */ struct { /* start block address of the extent */ block_t blk; #ifdef CONFIG_F2FS_FS_COMPRESSION /* physical extent length of compressed blocks */ unsigned int c_len; #endif }; /* block age extent_cache */ struct { /* block age of the extent */ unsigned long long age; /* last total blocks allocated */ unsigned long long last_blocks; }; }; }; struct extent_node { struct rb_node rb_node; /* rb node located in rb-tree */ struct extent_info ei; /* extent info */ struct list_head list; /* node in global extent list of sbi */ struct extent_tree *et; /* extent tree pointer */ }; struct extent_tree { nid_t ino; /* inode number */ enum extent_type type; /* keep the extent tree type */ struct rb_root_cached root; /* root of extent info rb-tree */ struct extent_node *cached_en; /* recently accessed extent node */ struct list_head list; /* to be used by sbi->zombie_list */ rwlock_t lock; /* protect extent info rb-tree */ atomic_t node_cnt; /* # of extent node in rb-tree*/ bool largest_updated; /* largest extent updated */ struct extent_info largest; /* largest cached extent for EX_READ */ }; struct extent_tree_info { struct radix_tree_root extent_tree_root;/* cache extent cache entries */ struct mutex extent_tree_lock; /* locking extent radix tree */ struct list_head extent_list; /* lru list for shrinker */ spinlock_t extent_lock; /* locking extent lru list */ atomic_t total_ext_tree; /* extent tree count */ struct list_head zombie_list; /* extent zombie tree list */ atomic_t total_zombie_tree; /* extent zombie tree count */ atomic_t total_ext_node; /* extent info count */ }; /* * State of block returned by f2fs_map_blocks. 
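 *
 * Illustrative sketch only (not taken from the original header): once
 * f2fs_map_blocks() has filled in a struct f2fs_map_blocks, a caller
 * typically tests these bits before using the mapping, e.g.
 *
 *	if (map.m_flags & F2FS_MAP_MAPPED)
 *		blkaddr = map.m_pblk + (pgofs - map.m_lblk);
 *
 * where 'map', 'pgofs' and 'blkaddr' are assumed caller-local variables
 * (a file offset in blocks and the resulting on-disk block address).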
*/ #define F2FS_MAP_NEW (1U << 0) #define F2FS_MAP_MAPPED (1U << 1) #define F2FS_MAP_DELALLOC (1U << 2) #define F2FS_MAP_FLAGS (F2FS_MAP_NEW | F2FS_MAP_MAPPED |\ F2FS_MAP_DELALLOC) struct f2fs_map_blocks { struct block_device *m_bdev; /* for multi-device dio */ block_t m_pblk; block_t m_lblk; unsigned int m_len; unsigned int m_flags; pgoff_t *m_next_pgofs; /* point next possible non-hole pgofs */ pgoff_t *m_next_extent; /* point to next possible extent */ int m_seg_type; bool m_may_create; /* indicate it is from write path */ bool m_multidev_dio; /* indicate it allows multi-device dio */ }; /* for flag in get_data_block */ enum { F2FS_GET_BLOCK_DEFAULT, F2FS_GET_BLOCK_FIEMAP, F2FS_GET_BLOCK_BMAP, F2FS_GET_BLOCK_DIO, F2FS_GET_BLOCK_PRE_DIO, F2FS_GET_BLOCK_PRE_AIO, F2FS_GET_BLOCK_PRECACHE, }; /* * i_advise uses FADVISE_XXX_BIT. We can add additional hints later. */ #define FADVISE_COLD_BIT 0x01 #define FADVISE_LOST_PINO_BIT 0x02 #define FADVISE_ENCRYPT_BIT 0x04 #define FADVISE_ENC_NAME_BIT 0x08 #define FADVISE_KEEP_SIZE_BIT 0x10 #define FADVISE_HOT_BIT 0x20 #define FADVISE_VERITY_BIT 0x40 #define FADVISE_TRUNC_BIT 0x80 #define FADVISE_MODIFIABLE_BITS (FADVISE_COLD_BIT | FADVISE_HOT_BIT) #define file_is_cold(inode) is_file(inode, FADVISE_COLD_BIT) #define file_set_cold(inode) set_file(inode, FADVISE_COLD_BIT) #define file_clear_cold(inode) clear_file(inode, FADVISE_COLD_BIT) #define file_wrong_pino(inode) is_file(inode, FADVISE_LOST_PINO_BIT) #define file_lost_pino(inode) set_file(inode, FADVISE_LOST_PINO_BIT) #define file_got_pino(inode) clear_file(inode, FADVISE_LOST_PINO_BIT) #define file_is_encrypt(inode) is_file(inode, FADVISE_ENCRYPT_BIT) #define file_set_encrypt(inode) set_file(inode, FADVISE_ENCRYPT_BIT) #define file_enc_name(inode) is_file(inode, FADVISE_ENC_NAME_BIT) #define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT) #define file_keep_isize(inode) is_file(inode, FADVISE_KEEP_SIZE_BIT) #define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT) #define file_is_hot(inode) is_file(inode, FADVISE_HOT_BIT) #define file_set_hot(inode) set_file(inode, FADVISE_HOT_BIT) #define file_clear_hot(inode) clear_file(inode, FADVISE_HOT_BIT) #define file_is_verity(inode) is_file(inode, FADVISE_VERITY_BIT) #define file_set_verity(inode) set_file(inode, FADVISE_VERITY_BIT) #define file_should_truncate(inode) is_file(inode, FADVISE_TRUNC_BIT) #define file_need_truncate(inode) set_file(inode, FADVISE_TRUNC_BIT) #define file_dont_truncate(inode) clear_file(inode, FADVISE_TRUNC_BIT) #define DEF_DIR_LEVEL 0 enum { GC_FAILURE_PIN, MAX_GC_FAILURE }; /* used for f2fs_inode_info->flags */ enum { FI_NEW_INODE, /* indicate newly allocated inode */ FI_DIRTY_INODE, /* indicate inode is dirty or not */ FI_AUTO_RECOVER, /* indicate inode is recoverable */ FI_DIRTY_DIR, /* indicate directory has dirty pages */ FI_INC_LINK, /* need to increment i_nlink */ FI_ACL_MODE, /* indicate acl mode */ FI_NO_ALLOC, /* should not allocate any blocks */ FI_FREE_NID, /* free allocated nide */ FI_NO_EXTENT, /* not to use the extent cache */ FI_INLINE_XATTR, /* used for inline xattr */ FI_INLINE_DATA, /* used for inline data*/ FI_INLINE_DENTRY, /* used for inline dentry */ FI_APPEND_WRITE, /* inode has appended data */ FI_UPDATE_WRITE, /* inode has in-place-update data */ FI_NEED_IPU, /* used for ipu per file */ FI_ATOMIC_FILE, /* indicate atomic file */ FI_FIRST_BLOCK_WRITTEN, /* indicate #0 data block was written */ FI_DROP_CACHE, /* drop dirty page cache */ FI_DATA_EXIST, /* indicate data exists 
*/ FI_INLINE_DOTS, /* indicate inline dot dentries */ FI_SKIP_WRITES, /* should skip data page writeback */ FI_OPU_WRITE, /* used for opu per file */ FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */ FI_PREALLOCATED_ALL, /* all blocks for write were preallocated */ FI_HOT_DATA, /* indicate file is hot */ FI_EXTRA_ATTR, /* indicate file has extra attribute */ FI_PROJ_INHERIT, /* indicate file inherits projectid */ FI_PIN_FILE, /* indicate file should not be gced */ FI_VERITY_IN_PROGRESS, /* building fs-verity Merkle tree */ FI_COMPRESSED_FILE, /* indicate file's data can be compressed */ FI_COMPRESS_CORRUPT, /* indicate compressed cluster is corrupted */ FI_MMAP_FILE, /* indicate file was mmapped */ FI_ENABLE_COMPRESS, /* enable compression in "user" compression mode */ FI_COMPRESS_RELEASED, /* compressed blocks were released */ FI_ALIGNED_WRITE, /* enable aligned write */ FI_COW_FILE, /* indicate COW file */ FI_ATOMIC_COMMITTED, /* indicate atomic commit completed except disk sync */ FI_ATOMIC_REPLACE, /* indicate atomic replace */ FI_MAX, /* max flag, never be used */ }; struct f2fs_inode_info { struct inode vfs_inode; /* serve a vfs inode */ unsigned long i_flags; /* keep an inode flags for ioctl */ unsigned char i_advise; /* use to give file attribute hints */ unsigned char i_dir_level; /* use for dentry level for large dir */ unsigned int i_current_depth; /* only for directory depth */ /* for gc failure statistic */ unsigned int i_gc_failures[MAX_GC_FAILURE]; unsigned int i_pino; /* parent inode number */ umode_t i_acl_mode; /* keep file acl mode temporarily */ /* Use below internally in f2fs*/ unsigned long flags[BITS_TO_LONGS(FI_MAX)]; /* use to pass per-file flags */ struct f2fs_rwsem i_sem; /* protect fi info */ atomic_t dirty_pages; /* # of dirty pages */ f2fs_hash_t chash; /* hash value of given file name */ unsigned int clevel; /* maximum level of given file name */ struct task_struct *task; /* lookup and create consistency */ struct task_struct *cp_task; /* separate cp/wb IO stats*/ struct task_struct *wb_task; /* indicate inode is in context of writeback */ nid_t i_xattr_nid; /* node id that contains xattrs */ loff_t last_disk_size; /* lastly written file size */ spinlock_t i_size_lock; /* protect last_disk_size */ #ifdef CONFIG_QUOTA struct dquot *i_dquot[MAXQUOTAS]; /* quota space reservation, managed internally by quota code */ qsize_t i_reserved_quota; #endif struct list_head dirty_list; /* dirty list for dirs and files */ struct list_head gdirty_list; /* linked in global dirty list */ struct task_struct *atomic_write_task; /* store atomic write task */ struct extent_tree *extent_tree[NR_EXTENT_CACHES]; /* cached extent_tree entry */ struct inode *cow_inode; /* copy-on-write inode for atomic write */ /* avoid racing between foreground op and gc */ struct f2fs_rwsem i_gc_rwsem[2]; struct f2fs_rwsem i_xattr_sem; /* avoid racing between reading and changing EAs */ int i_extra_isize; /* size of extra space located in i_addr */ kprojid_t i_projid; /* id for project quota */ int i_inline_xattr_size; /* inline xattr size */ struct timespec64 i_crtime; /* inode creation time */ struct timespec64 i_disk_time[3];/* inode disk times */ /* for file compress */ atomic_t i_compr_blocks; /* # of compressed blocks */ unsigned char i_compress_algorithm; /* algorithm type */ unsigned char i_log_cluster_size; /* log of cluster size */ unsigned char i_compress_level; /* compress level (lz4hc,zstd) */ unsigned char i_compress_flag; /* compress flag */ unsigned int i_cluster_size; 
/* cluster size */ unsigned int atomic_write_cnt; loff_t original_i_size; /* original i_size before atomic write */ }; static inline void get_read_extent_info(struct extent_info *ext, struct f2fs_extent *i_ext) { ext->fofs = le32_to_cpu(i_ext->fofs); ext->blk = le32_to_cpu(i_ext->blk); ext->len = le32_to_cpu(i_ext->len); } static inline void set_raw_read_extent(struct extent_info *ext, struct f2fs_extent *i_ext) { i_ext->fofs = cpu_to_le32(ext->fofs); i_ext->blk = cpu_to_le32(ext->blk); i_ext->len = cpu_to_le32(ext->len); } static inline bool __is_discard_mergeable(struct discard_info *back, struct discard_info *front, unsigned int max_len) { return (back->lstart + back->len == front->lstart) && (back->len + front->len <= max_len); } static inline bool __is_discard_back_mergeable(struct discard_info *cur, struct discard_info *back, unsigned int max_len) { return __is_discard_mergeable(back, cur, max_len); } static inline bool __is_discard_front_mergeable(struct discard_info *cur, struct discard_info *front, unsigned int max_len) { return __is_discard_mergeable(cur, front, max_len); } /* * For free nid management */ enum nid_state { FREE_NID, /* newly added to free nid list */ PREALLOC_NID, /* it is preallocated */ MAX_NID_STATE, }; enum nat_state { TOTAL_NAT, DIRTY_NAT, RECLAIMABLE_NAT, MAX_NAT_STATE, }; struct f2fs_nm_info { block_t nat_blkaddr; /* base disk address of NAT */ nid_t max_nid; /* maximum possible node ids */ nid_t available_nids; /* # of available node ids */ nid_t next_scan_nid; /* the next nid to be scanned */ nid_t max_rf_node_blocks; /* max # of nodes for recovery */ unsigned int ram_thresh; /* control the memory footprint */ unsigned int ra_nid_pages; /* # of nid pages to be readaheaded */ unsigned int dirty_nats_ratio; /* control dirty nats ratio threshold */ /* NAT cache management */ struct radix_tree_root nat_root;/* root of the nat entry cache */ struct radix_tree_root nat_set_root;/* root of the nat set cache */ struct f2fs_rwsem nat_tree_lock; /* protect nat entry tree */ struct list_head nat_entries; /* cached nat entry list (clean) */ spinlock_t nat_list_lock; /* protect clean nat entry list */ unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */ unsigned int nat_blocks; /* # of nat blocks */ /* free node ids management */ struct radix_tree_root free_nid_root;/* root of the free_nid cache */ struct list_head free_nid_list; /* list for free nids excluding preallocated nids */ unsigned int nid_cnt[MAX_NID_STATE]; /* the number of free node id */ spinlock_t nid_list_lock; /* protect nid lists ops */ struct mutex build_lock; /* lock for build free nids */ unsigned char **free_nid_bitmap; unsigned char *nat_block_bitmap; unsigned short *free_nid_count; /* free nid count of NAT block */ /* for checkpoint */ char *nat_bitmap; /* NAT bitmap pointer */ unsigned int nat_bits_blocks; /* # of nat bits blocks */ unsigned char *nat_bits; /* NAT bits blocks */ unsigned char *full_nat_bits; /* full NAT pages */ unsigned char *empty_nat_bits; /* empty NAT pages */ #ifdef CONFIG_F2FS_CHECK_FS char *nat_bitmap_mir; /* NAT bitmap mirror */ #endif int bitmap_size; /* bitmap size */ }; /* * this structure is used as one of function parameters. * all the information are dedicated to a given direct node block determined * by the data offset in a file. 
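 *
 * Illustrative sketch only, assuming the usual f2fs helpers declared
 * elsewhere in this header (f2fs_get_dnode_of_data(), f2fs_data_blkaddr()
 * and f2fs_put_dnode()) and caller-provided 'inode' and 'index':
 *
 *	struct dnode_of_data dn;
 *	block_t blkaddr;
 *	int err;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (!err) {
 *		blkaddr = f2fs_data_blkaddr(&dn);
 *		f2fs_put_dnode(&dn);
 *	}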
*/ struct dnode_of_data { struct inode *inode; /* vfs inode pointer */ struct page *inode_page; /* its inode page, NULL is possible */ struct page *node_page; /* cached direct node page */ nid_t nid; /* node id of the direct node block */ unsigned int ofs_in_node; /* data offset in the node page */ bool inode_page_locked; /* inode page is locked or not */ bool node_changed; /* is node block changed */ char cur_level; /* level of hole node page */ char max_level; /* level of current page located */ block_t data_blkaddr; /* block address of the node block */ }; static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode, struct page *ipage, struct page *npage, nid_t nid) { memset(dn, 0, sizeof(*dn)); dn->inode = inode; dn->inode_page = ipage; dn->node_page = npage; dn->nid = nid; } /* * For SIT manager * * By default, there are 6 active log areas across the whole main area. * When considering hot and cold data separation to reduce cleaning overhead, * we split 3 for data logs and 3 for node logs as hot, warm, and cold types, * respectively. * In the current design, you should not change the numbers intentionally. * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6 * logs individually according to the underlying devices. (default: 6) * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for * data and 8 for node logs. */ #define NR_CURSEG_DATA_TYPE (3) #define NR_CURSEG_NODE_TYPE (3) #define NR_CURSEG_INMEM_TYPE (2) #define NR_CURSEG_RO_TYPE (2) #define NR_CURSEG_PERSIST_TYPE (NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE) #define NR_CURSEG_TYPE (NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE) enum { CURSEG_HOT_DATA = 0, /* directory entry blocks */ CURSEG_WARM_DATA, /* data blocks */ CURSEG_COLD_DATA, /* multimedia or GCed data blocks */ CURSEG_HOT_NODE, /* direct node blocks of directory files */ CURSEG_WARM_NODE, /* direct node blocks of normal files */ CURSEG_COLD_NODE, /* indirect node blocks */ NR_PERSISTENT_LOG, /* number of persistent log */ CURSEG_COLD_DATA_PINNED = NR_PERSISTENT_LOG, /* pinned file that needs consecutive block address */ CURSEG_ALL_DATA_ATGC, /* SSR alloctor in hot/warm/cold data area */ NO_CHECK_TYPE, /* number of persistent & inmem log */ }; struct flush_cmd { struct completion wait; struct llist_node llnode; nid_t ino; int ret; }; struct flush_cmd_control { struct task_struct *f2fs_issue_flush; /* flush thread */ wait_queue_head_t flush_wait_queue; /* waiting queue for wake-up */ atomic_t issued_flush; /* # of issued flushes */ atomic_t queued_flush; /* # of queued flushes */ struct llist_head issue_list; /* list for command issue */ struct llist_node *dispatch_list; /* list for command dispatch */ }; struct f2fs_sm_info { struct sit_info *sit_info; /* whole segment information */ struct free_segmap_info *free_info; /* free segment information */ struct dirty_seglist_info *dirty_info; /* dirty segment information */ struct curseg_info *curseg_array; /* active segment information */ struct f2fs_rwsem curseg_lock; /* for preventing curseg change */ block_t seg0_blkaddr; /* block address of 0'th segment */ block_t main_blkaddr; /* start block address of main area */ block_t ssa_blkaddr; /* start block address of SSA area */ unsigned int segment_count; /* total # of segments */ unsigned int main_segments; /* # of segments in main area */ unsigned int reserved_segments; /* # of reserved segments */ unsigned int additional_reserved_segments;/* reserved segs for IO align feature */ unsigned int ovp_segments; /* # 
of overprovision segments */ /* a threshold to reclaim prefree segments */ unsigned int rec_prefree_segments; struct list_head sit_entry_set; /* sit entry set list */ unsigned int ipu_policy; /* in-place-update policy */ unsigned int min_ipu_util; /* in-place-update threshold */ unsigned int min_fsync_blocks; /* threshold for fsync */ unsigned int min_seq_blocks; /* threshold for sequential blocks */ unsigned int min_hot_blocks; /* threshold for hot block allocation */ unsigned int min_ssr_sections; /* threshold to trigger SSR allocation */ /* for flush command control */ struct flush_cmd_control *fcc_info; /* for discard command control */ struct discard_cmd_control *dcc_info; }; /* * For superblock */ /* * COUNT_TYPE for monitoring * * f2fs monitors the number of several block types such as on-writeback, * dirty dentry blocks, dirty node blocks, and dirty meta blocks. */ #define WB_DATA_TYPE(p) (__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA) enum count_type { F2FS_DIRTY_DENTS, F2FS_DIRTY_DATA, F2FS_DIRTY_QDATA, F2FS_DIRTY_NODES, F2FS_DIRTY_META, F2FS_DIRTY_IMETA, F2FS_WB_CP_DATA, F2FS_WB_DATA, F2FS_RD_DATA, F2FS_RD_NODE, F2FS_RD_META, F2FS_DIO_WRITE, F2FS_DIO_READ, NR_COUNT_TYPE, }; /* * The below are the page types of bios used in submit_bio(). * The available types are: * DATA User data pages. It operates as async mode. * NODE Node pages. It operates as async mode. * META FS metadata pages such as SIT, NAT, CP. * NR_PAGE_TYPE The number of page types. * META_FLUSH Make sure the previous pages are written * with waiting the bio's completion * ... Only can be used with META. */ #define PAGE_TYPE_OF_BIO(type) ((type) > META ? META : (type)) enum page_type { DATA = 0, NODE = 1, /* should not change this */ META, NR_PAGE_TYPE, META_FLUSH, IPU, /* the below types are used by tracepoints only. 
*/ OPU, }; enum temp_type { HOT = 0, /* must be zero for meta bio */ WARM, COLD, NR_TEMP_TYPE, }; enum need_lock_type { LOCK_REQ = 0, LOCK_DONE, LOCK_RETRY, }; enum cp_reason_type { CP_NO_NEEDED, CP_NON_REGULAR, CP_COMPRESSED, CP_HARDLINK, CP_SB_NEED_CP, CP_WRONG_PINO, CP_NO_SPC_ROLL, CP_NODE_NEED_CP, CP_FASTBOOT_MODE, CP_SPEC_LOG_NUM, CP_RECOVER_DIR, }; enum iostat_type { /* WRITE IO */ APP_DIRECT_IO, /* app direct write IOs */ APP_BUFFERED_IO, /* app buffered write IOs */ APP_WRITE_IO, /* app write IOs */ APP_MAPPED_IO, /* app mapped IOs */ APP_BUFFERED_CDATA_IO, /* app buffered write IOs on compressed file */ APP_MAPPED_CDATA_IO, /* app mapped write IOs on compressed file */ FS_DATA_IO, /* data IOs from kworker/fsync/reclaimer */ FS_CDATA_IO, /* data IOs from kworker/fsync/reclaimer on compressed file */ FS_NODE_IO, /* node IOs from kworker/fsync/reclaimer */ FS_META_IO, /* meta IOs from kworker/reclaimer */ FS_GC_DATA_IO, /* data IOs from forground gc */ FS_GC_NODE_IO, /* node IOs from forground gc */ FS_CP_DATA_IO, /* data IOs from checkpoint */ FS_CP_NODE_IO, /* node IOs from checkpoint */ FS_CP_META_IO, /* meta IOs from checkpoint */ /* READ IO */ APP_DIRECT_READ_IO, /* app direct read IOs */ APP_BUFFERED_READ_IO, /* app buffered read IOs */ APP_READ_IO, /* app read IOs */ APP_MAPPED_READ_IO, /* app mapped read IOs */ APP_BUFFERED_CDATA_READ_IO, /* app buffered read IOs on compressed file */ APP_MAPPED_CDATA_READ_IO, /* app mapped read IOs on compressed file */ FS_DATA_READ_IO, /* data read IOs */ FS_GDATA_READ_IO, /* data read IOs from background gc */ FS_CDATA_READ_IO, /* compressed data read IOs */ FS_NODE_READ_IO, /* node read IOs */ FS_META_READ_IO, /* meta read IOs */ /* other */ FS_DISCARD_IO, /* discard */ FS_FLUSH_IO, /* flush */ FS_ZONE_RESET_IO, /* zone reset */ NR_IO_TYPE, }; struct f2fs_io_info { struct f2fs_sb_info *sbi; /* f2fs_sb_info pointer */ nid_t ino; /* inode number */ enum page_type type; /* contains DATA/NODE/META/META_FLUSH */ enum temp_type temp; /* contains HOT/WARM/COLD */ enum req_op op; /* contains REQ_OP_ */ blk_opf_t op_flags; /* req_flag_bits */ block_t new_blkaddr; /* new block address to be written */ block_t old_blkaddr; /* old block address before Cow */ struct page *page; /* page to be written */ struct page *encrypted_page; /* encrypted page */ struct page *compressed_page; /* compressed page */ struct list_head list; /* serialize IOs */ unsigned int compr_blocks; /* # of compressed block addresses */ unsigned int need_lock:8; /* indicate we need to lock cp_rwsem */ unsigned int version:8; /* version of the node */ unsigned int submitted:1; /* indicate IO submission */ unsigned int in_list:1; /* indicate fio is in io_list */ unsigned int is_por:1; /* indicate IO is from recovery or not */ unsigned int retry:1; /* need to reallocate block address */ unsigned int encrypted:1; /* indicate file is encrypted */ unsigned int post_read:1; /* require post read */ enum iostat_type io_type; /* io type */ struct writeback_control *io_wbc; /* writeback control */ struct bio **bio; /* bio for ipu */ sector_t *last_block; /* last block number in bio */ }; struct bio_entry { struct bio *bio; struct list_head list; }; #define is_read_io(rw) ((rw) == READ) struct f2fs_bio_info { struct f2fs_sb_info *sbi; /* f2fs superblock */ struct bio *bio; /* bios to merge */ sector_t last_block_in_bio; /* last block number */ struct f2fs_io_info fio; /* store buffered io info. 
*/ #ifdef CONFIG_BLK_DEV_ZONED struct completion zone_wait; /* condition value for the previous open zone to close */ struct bio *zone_pending_bio; /* pending bio for the previous zone */ void *bi_private; /* previous bi_private for pending bio */ #endif struct f2fs_rwsem io_rwsem; /* blocking op for bio */ spinlock_t io_lock; /* serialize DATA/NODE IOs */ struct list_head io_list; /* track fios */ struct list_head bio_list; /* bio entry list head */ struct f2fs_rwsem bio_list_lock; /* lock to protect bio entry list */ }; #define FDEV(i) (sbi->devs[i]) #define RDEV(i) (raw_super->devs[i]) struct f2fs_dev_info { struct bdev_handle *bdev_handle; struct block_device *bdev; char path[MAX_PATH_LEN]; unsigned int total_segments; block_t start_blk; block_t end_blk; #ifdef CONFIG_BLK_DEV_ZONED unsigned int nr_blkz; /* Total number of zones */ unsigned long *blkz_seq; /* Bitmap indicating sequential zones */ #endif }; enum inode_type { DIR_INODE, /* for dirty dir inode */ FILE_INODE, /* for dirty regular/symlink inode */ DIRTY_META, /* for all dirtied inode metadata */ NR_INODE_TYPE, }; /* for inner inode cache management */ struct inode_management { struct radix_tree_root ino_root; /* ino entry array */ spinlock_t ino_lock; /* for ino entry lock */ struct list_head ino_list; /* inode list head */ unsigned long ino_num; /* number of entries */ }; /* for GC_AT */ struct atgc_management { bool atgc_enabled; /* ATGC is enabled or not */ struct rb_root_cached root; /* root of victim rb-tree */ struct list_head victim_list; /* linked with all victim entries */ unsigned int victim_count; /* victim count in rb-tree */ unsigned int candidate_ratio; /* candidate ratio */ unsigned int max_candidate_count; /* max candidate count */ unsigned int age_weight; /* age weight, vblock_weight = 100 - age_weight */ unsigned long long age_threshold; /* age threshold */ }; struct f2fs_gc_control { unsigned int victim_segno; /* target victim segment number */ int init_gc_type; /* FG_GC or BG_GC */ bool no_bg_gc; /* check the space and stop bg_gc */ bool should_migrate_blocks; /* should migrate blocks */ bool err_gc_skipped; /* return EAGAIN if GC skipped */ unsigned int nr_free_secs; /* # of free sections to do GC */ }; /* * For s_flag in struct f2fs_sb_info * Modification on enum should be synchronized with s_flag array */ enum { SBI_IS_DIRTY, /* dirty flag for checkpoint */ SBI_IS_CLOSE, /* specify unmounting */ SBI_NEED_FSCK, /* need fsck.f2fs to fix */ SBI_POR_DOING, /* recovery is doing or not */ SBI_NEED_SB_WRITE, /* need to recover superblock */ SBI_NEED_CP, /* need to checkpoint */ SBI_IS_SHUTDOWN, /* shutdown by ioctl */ SBI_IS_RECOVERED, /* recovered orphan/data */ SBI_CP_DISABLED, /* CP was disabled last mount */ SBI_CP_DISABLED_QUICK, /* CP was disabled quickly */ SBI_QUOTA_NEED_FLUSH, /* need to flush quota info in CP */ SBI_QUOTA_SKIP_FLUSH, /* skip flushing quota in current CP */ SBI_QUOTA_NEED_REPAIR, /* quota file may be corrupted */ SBI_IS_RESIZEFS, /* resizefs is in process */ SBI_IS_FREEZING, /* freezefs is in process */ SBI_IS_WRITABLE, /* remove ro mountoption transiently */ MAX_SBI_FLAG, }; enum { CP_TIME, REQ_TIME, DISCARD_TIME, GC_TIME, DISABLE_TIME, UMOUNT_DISCARD_TIMEOUT, MAX_TIME, }; /* Note that you need to keep synchronization with this gc_mode_names array */ enum { GC_NORMAL, GC_IDLE_CB, GC_IDLE_GREEDY, GC_IDLE_AT, GC_URGENT_HIGH, GC_URGENT_LOW, GC_URGENT_MID, MAX_GC_MODE, }; enum { BGGC_MODE_ON, /* background gc is on */ BGGC_MODE_OFF, /* background gc is off */ BGGC_MODE_SYNC, /* * 
background gc is on, migrating blocks * like foreground gc */ }; enum { FS_MODE_ADAPTIVE, /* use both lfs/ssr allocation */ FS_MODE_LFS, /* use lfs allocation only */ FS_MODE_FRAGMENT_SEG, /* segment fragmentation mode */ FS_MODE_FRAGMENT_BLK, /* block fragmentation mode */ }; enum { ALLOC_MODE_DEFAULT, /* stay default */ ALLOC_MODE_REUSE, /* reuse segments as much as possible */ }; enum fsync_mode { FSYNC_MODE_POSIX, /* fsync follows posix semantics */ FSYNC_MODE_STRICT, /* fsync behaves in line with ext4 */ FSYNC_MODE_NOBARRIER, /* fsync follows posix semantics, but without issuing a barrier/flush */ }; enum { COMPR_MODE_FS, /* * automatically compress files with * compression enabled */ COMPR_MODE_USER, /* * automatic compression is disabled. * user can control the file compression * using ioctls */ }; enum { DISCARD_UNIT_BLOCK, /* basic discard unit is block */ DISCARD_UNIT_SEGMENT, /* basic discard unit is segment */ DISCARD_UNIT_SECTION, /* basic discard unit is section */ }; enum { MEMORY_MODE_NORMAL, /* memory mode for normal devices */ MEMORY_MODE_LOW, /* memory mode for low memory devices */ }; enum errors_option { MOUNT_ERRORS_READONLY, /* remount fs ro on errors */ MOUNT_ERRORS_CONTINUE, /* continue on errors */ MOUNT_ERRORS_PANIC, /* panic on errors */ }; enum { BACKGROUND, FOREGROUND, MAX_CALL_TYPE, TOTAL_CALL = FOREGROUND, }; static inline int f2fs_test_bit(unsigned int nr, char *addr); static inline void f2fs_set_bit(unsigned int nr, char *addr); static inline void f2fs_clear_bit(unsigned int nr, char *addr); /* * Layout of f2fs page.private: * * Layout A: lowest bit should be 1 * | bit0 = 1 | bit1 | bit2 | ... | bit MAX | private data .... | * bit 0 PAGE_PRIVATE_NOT_POINTER * bit 1 PAGE_PRIVATE_DUMMY_WRITE * bit 2 PAGE_PRIVATE_ONGOING_MIGRATION * bit 3 PAGE_PRIVATE_INLINE_INODE * bit 4 PAGE_PRIVATE_REF_RESOURCE * bit 5- f2fs private data * * Layout B: lowest bit should be 0 * page.private is a wrapped pointer.
*/ enum { PAGE_PRIVATE_NOT_POINTER, /* private contains non-pointer data */ PAGE_PRIVATE_DUMMY_WRITE, /* data page for padding aligned IO */ PAGE_PRIVATE_ONGOING_MIGRATION, /* data page which is on-going migrating */ PAGE_PRIVATE_INLINE_INODE, /* inode page contains inline data */ PAGE_PRIVATE_REF_RESOURCE, /* dirty page has referenced resources */ PAGE_PRIVATE_MAX }; /* For compression */ enum compress_algorithm_type { COMPRESS_LZO, COMPRESS_LZ4, COMPRESS_ZSTD, COMPRESS_LZORLE, COMPRESS_MAX, }; enum compress_flag { COMPRESS_CHKSUM, COMPRESS_MAX_FLAG, }; #define COMPRESS_WATERMARK 20 #define COMPRESS_PERCENT 20 #define COMPRESS_DATA_RESERVED_SIZE 4 struct compress_data { __le32 clen; /* compressed data size */ __le32 chksum; /* compressed data chksum */ __le32 reserved[COMPRESS_DATA_RESERVED_SIZE]; /* reserved */ u8 cdata[]; /* compressed data */ }; #define COMPRESS_HEADER_SIZE (sizeof(struct compress_data)) #define F2FS_COMPRESSED_PAGE_MAGIC 0xF5F2C000 #define F2FS_ZSTD_DEFAULT_CLEVEL 1 #define COMPRESS_LEVEL_OFFSET 8 /* compress context */ struct compress_ctx { struct inode *inode; /* inode the context belong to */ pgoff_t cluster_idx; /* cluster index number */ unsigned int cluster_size; /* page count in cluster */ unsigned int log_cluster_size; /* log of cluster size */ struct page **rpages; /* pages store raw data in cluster */ unsigned int nr_rpages; /* total page number in rpages */ struct page **cpages; /* pages store compressed data in cluster */ unsigned int nr_cpages; /* total page number in cpages */ unsigned int valid_nr_cpages; /* valid page number in cpages */ void *rbuf; /* virtual mapped address on rpages */ struct compress_data *cbuf; /* virtual mapped address on cpages */ size_t rlen; /* valid data length in rbuf */ size_t clen; /* valid data length in cbuf */ void *private; /* payload buffer for specified compression algorithm */ void *private2; /* extra payload buffer */ }; /* compress context for write IO path */ struct compress_io_ctx { u32 magic; /* magic number to indicate page is compressed */ struct inode *inode; /* inode the context belong to */ struct page **rpages; /* pages store raw data in cluster */ unsigned int nr_rpages; /* total page number in rpages */ atomic_t pending_pages; /* in-flight compressed page count */ }; /* Context for decompressing one cluster on the read IO path */ struct decompress_io_ctx { u32 magic; /* magic number to indicate page is compressed */ struct inode *inode; /* inode the context belong to */ pgoff_t cluster_idx; /* cluster index number */ unsigned int cluster_size; /* page count in cluster */ unsigned int log_cluster_size; /* log of cluster size */ struct page **rpages; /* pages store raw data in cluster */ unsigned int nr_rpages; /* total page number in rpages */ struct page **cpages; /* pages store compressed data in cluster */ unsigned int nr_cpages; /* total page number in cpages */ struct page **tpages; /* temp pages to pad holes in cluster */ void *rbuf; /* virtual mapped address on rpages */ struct compress_data *cbuf; /* virtual mapped address on cpages */ size_t rlen; /* valid data length in rbuf */ size_t clen; /* valid data length in cbuf */ /* * The number of compressed pages remaining to be read in this cluster. * This is initially nr_cpages. It is decremented by 1 each time a page * has been read (or failed to be read). When it reaches 0, the cluster * is decompressed (or an error is reported). * * If an error occurs before all the pages have been submitted for I/O, * then this will never reach 0. 
In this case the I/O submitter is * responsible for calling f2fs_decompress_end_io() instead. */ atomic_t remaining_pages; /* * Number of references to this decompress_io_ctx. * * One reference is held for I/O completion. This reference is dropped * after the pagecache pages are updated and unlocked -- either after * decompression (and verity if enabled), or after an error. * * In addition, each compressed page holds a reference while it is in a * bio. These references are necessary to prevent compressed pages from * being freed while they are still in a bio. */ refcount_t refcnt; bool failed; /* IO error occurred before decompression? */ bool need_verity; /* need fs-verity verification after decompression? */ void *private; /* payload buffer for specified decompression algorithm */ void *private2; /* extra payload buffer */ struct work_struct verity_work; /* work to verify the decompressed pages */ struct work_struct free_work; /* work to free this structure later */ }; #define NULL_CLUSTER ((unsigned int)(~0)) #define MIN_COMPRESS_LOG_SIZE 2 #define MAX_COMPRESS_LOG_SIZE 8 #define MAX_COMPRESS_WINDOW_SIZE(log_size) ((PAGE_SIZE) << (log_size)) struct f2fs_sb_info { struct super_block *sb; /* pointer to VFS super block */ struct proc_dir_entry *s_proc; /* proc entry */ struct f2fs_super_block *raw_super; /* raw super block pointer */ struct f2fs_rwsem sb_lock; /* lock for raw super block */ int valid_super_block; /* valid super block no */ unsigned long s_flag; /* flags for sbi */ struct mutex writepages; /* mutex for writepages() */ #ifdef CONFIG_BLK_DEV_ZONED unsigned int blocks_per_blkz; /* F2FS blocks per zone */ #endif /* for node-related operations */ struct f2fs_nm_info *nm_info; /* node manager */ struct inode *node_inode; /* cache node blocks */ /* for segment-related operations */ struct f2fs_sm_info *sm_info; /* segment manager */ /* for bio operations */ struct f2fs_bio_info *write_io[NR_PAGE_TYPE]; /* for write bios */ /* keep migration IO order for LFS mode */ struct f2fs_rwsem io_order_lock; mempool_t *write_io_dummy; /* Dummy pages */ pgoff_t page_eio_ofs[NR_PAGE_TYPE]; /* EIO page offset */ int page_eio_cnt[NR_PAGE_TYPE]; /* EIO count */ /* for checkpoint */ struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */ int cur_cp_pack; /* remain current cp pack */ spinlock_t cp_lock; /* for flag in ckpt */ struct inode *meta_inode; /* cache meta blocks */ struct f2fs_rwsem cp_global_sem; /* checkpoint procedure lock */ struct f2fs_rwsem cp_rwsem; /* blocking FS operations */ struct f2fs_rwsem node_write; /* locking node writes */ struct f2fs_rwsem node_change; /* locking node change */ wait_queue_head_t cp_wait; unsigned long last_time[MAX_TIME]; /* to store time in jiffies */ long interval_time[MAX_TIME]; /* to store thresholds */ struct ckpt_req_control cprc_info; /* for checkpoint request control */ struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */ spinlock_t fsync_node_lock; /* for node entry lock */ struct list_head fsync_node_list; /* node list head */ unsigned int fsync_seg_id; /* sequence id */ unsigned int fsync_node_num; /* number of node entries */ /* for orphan inode, use 0'th array */ unsigned int max_orphans; /* max orphan inodes */ /* for inode management */ struct list_head inode_list[NR_INODE_TYPE]; /* dirty inode list */ spinlock_t inode_lock[NR_INODE_TYPE]; /* for dirty inode list lock */ struct mutex flush_lock; /* for flush exclusion */ /* for extent tree cache */ struct extent_tree_info extent_tree[NR_EXTENT_CACHES]; atomic64_t
allocated_data_blocks; /* for block age extent_cache */ /* The threshold used for hot and warm data separation */ unsigned int hot_data_age_threshold; unsigned int warm_data_age_threshold; unsigned int last_age_weight; /* basic filesystem units */ unsigned int log_sectors_per_block; /* log2 sectors per block */ unsigned int log_blocksize; /* log2 block size */ unsigned int blocksize; /* block size */ unsigned int root_ino_num; /* root inode number */ unsigned int node_ino_num; /* node inode number */ unsigned int meta_ino_num; /* meta inode number */ unsigned int log_blocks_per_seg; /* log2 blocks per segment */ unsigned int blocks_per_seg; /* blocks per segment */ unsigned int unusable_blocks_per_sec; /* unusable blocks per section */ unsigned int segs_per_sec; /* segments per section */ unsigned int secs_per_zone; /* sections per zone */ unsigned int total_sections; /* total section count */ unsigned int total_node_count; /* total node block count */ unsigned int total_valid_node_count; /* valid node block count */ int dir_level; /* directory level */ bool readdir_ra; /* readahead inode in readdir */ u64 max_io_bytes; /* max io bytes to merge IOs */ block_t user_block_count; /* # of user blocks */ block_t total_valid_block_count; /* # of valid blocks */ block_t discard_blks; /* discard command candidates */ block_t last_valid_block_count; /* for recovery */ block_t reserved_blocks; /* configurable reserved blocks */ block_t current_reserved_blocks; /* current reserved blocks */ /* Additional tracking for no checkpoint mode */ block_t unusable_block_count; /* # of blocks saved by last cp */ unsigned int nquota_files; /* # of quota sysfile */ struct f2fs_rwsem quota_sem; /* blocking cp for flags */ /* # of pages, see count_type */ atomic_t nr_pages[NR_COUNT_TYPE]; /* # of allocated blocks */ struct percpu_counter alloc_valid_block_count; /* # of node block writes for roll-forward recovery */ struct percpu_counter rf_node_block_count; /* writeback control */ atomic_t wb_sync_req[META]; /* count # of WB_SYNC threads */ /* valid inode count */ struct percpu_counter total_valid_inode_count; struct f2fs_mount_info mount_opt; /* mount options */ /* for cleaning operations */ struct f2fs_rwsem gc_lock; /* * semaphore for GC, avoid * race between GC and GC or CP */ struct f2fs_gc_kthread *gc_thread; /* GC thread */ struct atgc_management am; /* atgc management */ unsigned int cur_victim_sec; /* current victim section num */ unsigned int gc_mode; /* current GC state */ unsigned int next_victim_seg[2]; /* next segment in victim section */ spinlock_t gc_remaining_trials_lock; /* remaining trial count for GC_URGENT_* and GC_IDLE_* */ unsigned int gc_remaining_trials; /* for skip statistic */ unsigned long long skipped_gc_rwsem; /* FG_GC only */ /* threshold for gc trials on pinned files */ u64 gc_pin_file_threshold; struct f2fs_rwsem pin_sem; /* maximum # of trials to find a victim segment for SSR and GC */ unsigned int max_victim_search; /* migration granularity of garbage collection, unit: segment */ unsigned int migration_granularity; /* * for stat information. * one is for the LFS mode, and the other is for the SSR mode.
*/ #ifdef CONFIG_F2FS_STAT_FS struct f2fs_stat_info *stat_info; /* FS status information */ atomic_t meta_count[META_MAX]; /* # of meta blocks */ unsigned int segment_count[2]; /* # of allocated segments */ unsigned int block_count[2]; /* # of allocated blocks */ atomic_t inplace_count; /* # of inplace update */ /* # of lookup extent cache */ atomic64_t total_hit_ext[NR_EXTENT_CACHES]; /* # of hit rbtree extent node */ atomic64_t read_hit_rbtree[NR_EXTENT_CACHES]; /* # of hit cached extent node */ atomic64_t read_hit_cached[NR_EXTENT_CACHES]; /* # of hit largest extent node in read extent cache */ atomic64_t read_hit_largest; atomic_t inline_xattr; /* # of inline_xattr inodes */ atomic_t inline_inode; /* # of inline_data inodes */ atomic_t inline_dir; /* # of inline_dentry inodes */ atomic_t compr_inode; /* # of compressed inodes */ atomic64_t compr_blocks; /* # of compressed blocks */ atomic_t swapfile_inode; /* # of swapfile inodes */ atomic_t atomic_files; /* # of opened atomic file */ atomic_t max_aw_cnt; /* max # of atomic writes */ unsigned int io_skip_bggc; /* skip background gc for in-flight IO */ unsigned int other_skip_bggc; /* skip background gc for other reasons */ unsigned int ndirty_inode[NR_INODE_TYPE]; /* # of dirty inodes */ atomic_t cp_call_count[MAX_CALL_TYPE]; /* # of cp call */ #endif spinlock_t stat_lock; /* lock for stat operations */ /* to attach REQ_META|REQ_FUA flags */ unsigned int data_io_flag; unsigned int node_io_flag; /* For sysfs support */ struct kobject s_kobj; /* /sys/fs/f2fs/<devname> */ struct completion s_kobj_unregister; struct kobject s_stat_kobj; /* /sys/fs/f2fs/<devname>/stat */ struct completion s_stat_kobj_unregister; struct kobject s_feature_list_kobj; /* /sys/fs/f2fs/<devname>/feature_list */ struct completion s_feature_list_kobj_unregister; /* For shrinker support */ struct list_head s_list; struct mutex umount_mutex; unsigned int shrinker_run_no; /* For multi devices */ int s_ndevs; /* number of devices */ struct f2fs_dev_info *devs; /* for device list */ unsigned int dirty_device; /* for checkpoint data flush */ spinlock_t dev_lock; /* protect dirty_device */ bool aligned_blksize; /* all devices have the same logical blksize */ /* For write statistics */ u64 sectors_written_start; u64 kbytes_written; /* Reference to checksum algorithm driver via cryptoapi */ struct crypto_shash *s_chksum_driver; /* Precomputed FS UUID checksum for seeding other checksums */ __u32 s_chksum_seed; struct workqueue_struct *post_read_wq; /* post read workqueue */ /* * If we are in irq context, let's update error information into * on-disk superblock in the work. */ struct work_struct s_error_work; unsigned char errors[MAX_F2FS_ERRORS]; /* error flags */ unsigned char stop_reason[MAX_STOP_REASON]; /* stop reason */ spinlock_t error_lock; /* protect errors/stop_reason array */ bool error_dirty; /* error information of sb is dirty */ struct kmem_cache *inline_xattr_slab; /* inline xattr entry */ unsigned int inline_xattr_slab_size; /* default inline xattr slab size */ /* For reclaimed segs statistics per each GC mode */ unsigned int gc_segment_mode; /* GC state for reclaimed segments */ unsigned int gc_reclaimed_segs[MAX_GC_MODE]; /* Reclaimed segs for each mode */ unsigned long seq_file_ra_mul; /* multiplier for ra_pages of seq.
files in fadvise */ int max_fragment_chunk; /* max chunk size for block fragmentation mode */ int max_fragment_hole; /* max hole size for block fragmentation mode */ /* For atomic write statistics */ atomic64_t current_atomic_write; s64 peak_atomic_write; u64 committed_atomic_block; u64 revoked_atomic_block; #ifdef CONFIG_F2FS_FS_COMPRESSION struct kmem_cache *page_array_slab; /* page array entry */ unsigned int page_array_slab_size; /* default page array slab size */ /* For runtime compression statistics */ u64 compr_written_block; u64 compr_saved_block; u32 compr_new_inode; /* For compressed block cache */ struct inode *compress_inode; /* cache compressed blocks */ unsigned int compress_percent; /* cache page percentage */ unsigned int compress_watermark; /* cache page watermark */ atomic_t compress_page_hit; /* cache hit count */ #endif #ifdef CONFIG_F2FS_IOSTAT /* For app/fs IO statistics */ spinlock_t iostat_lock; unsigned long long iostat_count[NR_IO_TYPE]; unsigned long long iostat_bytes[NR_IO_TYPE]; unsigned long long prev_iostat_bytes[NR_IO_TYPE]; bool iostat_enable; unsigned long iostat_next_period; unsigned int iostat_period_ms; /* For io latency related statistics info in one iostat period */ spinlock_t iostat_lat_lock; struct iostat_lat_info *iostat_io_lat; #endif }; #ifdef CONFIG_F2FS_FAULT_INJECTION #define time_to_inject(sbi, type) __time_to_inject(sbi, type, __func__, \ __builtin_return_address(0)) static inline bool __time_to_inject(struct f2fs_sb_info *sbi, int type, const char *func, const char *parent_func) { struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info; if (!ffi->inject_rate) return false; if (!IS_FAULT_SET(ffi, type)) return false; atomic_inc(&ffi->inject_ops); if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) { atomic_set(&ffi->inject_ops, 0); printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n", KERN_INFO, sbi->sb->s_id, f2fs_fault_name[type], func, parent_func); return true; } return false; } #else static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type) { return false; } #endif /* * Test if the mounted volume is a multi-device volume. * - For a single regular disk volume, sbi->s_ndevs is 0. * - For a single zoned disk volume, sbi->s_ndevs is 1. * - For a multi-device volume, sbi->s_ndevs is always 2 or more. 
*/ static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi) { return sbi->s_ndevs > 1; } static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type) { unsigned long now = jiffies; sbi->last_time[type] = now; /* DISCARD_TIME and GC_TIME are based on REQ_TIME */ if (type == REQ_TIME) { sbi->last_time[DISCARD_TIME] = now; sbi->last_time[GC_TIME] = now; } } static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type) { unsigned long interval = sbi->interval_time[type] * HZ; return time_after(jiffies, sbi->last_time[type] + interval); } static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi, int type) { unsigned long interval = sbi->interval_time[type] * HZ; unsigned int wait_ms = 0; long delta; delta = (sbi->last_time[type] + interval) - jiffies; if (delta > 0) wait_ms = jiffies_to_msecs(delta); return wait_ms; } /* * Inline functions */ static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc, const void *address, unsigned int length) { struct { struct shash_desc shash; char ctx[4]; } desc; int err; BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx)); desc.shash.tfm = sbi->s_chksum_driver; *(u32 *)desc.ctx = crc; err = crypto_shash_update(&desc.shash, address, length); BUG_ON(err); return *(u32 *)desc.ctx; } static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address, unsigned int length) { return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length); } static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc, void *buf, size_t buf_size) { return f2fs_crc32(sbi, buf, buf_size) == blk_crc; } static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc, const void *address, unsigned int length) { return __f2fs_crc32(sbi, crc, address, length); } static inline struct f2fs_inode_info *F2FS_I(struct inode *inode) { return container_of(inode, struct f2fs_inode_info, vfs_inode); } static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb) { return sb->s_fs_info; } static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode) { return F2FS_SB(inode->i_sb); } static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping) { return F2FS_I_SB(mapping->host); } static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page) { return F2FS_M_SB(page_file_mapping(page)); } static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi) { return (struct f2fs_super_block *)(sbi->raw_super); } static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi) { return (struct f2fs_checkpoint *)(sbi->ckpt); } static inline struct f2fs_node *F2FS_NODE(struct page *page) { return (struct f2fs_node *)page_address(page); } static inline struct f2fs_inode *F2FS_INODE(struct page *page) { return &((struct f2fs_node *)page_address(page))->i; } static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi) { return (struct f2fs_nm_info *)(sbi->nm_info); } static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi) { return (struct f2fs_sm_info *)(sbi->sm_info); } static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi) { return (struct sit_info *)(SM_I(sbi)->sit_info); } static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi) { return (struct free_segmap_info *)(SM_I(sbi)->free_info); } static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi) { return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info); } static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi) { return sbi->meta_inode->i_mapping; } 
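/*
 * Editor's illustrative sketch (not part of the original header): how a
 * caller might typically combine the interval helpers above, i.e.
 * f2fs_time_over() and f2fs_time_to_wait(), to decide when to run periodic
 * work such as GC. The function name below is hypothetical and for
 * illustration only.
 */
static inline unsigned int f2fs_example_gc_wait_ms(struct f2fs_sb_info *sbi)
{
	/* If the configured GC interval has already elapsed, run now. */
	if (f2fs_time_over(sbi, GC_TIME))
		return 0;
	/* Otherwise report how many milliseconds the caller should sleep. */
	return f2fs_time_to_wait(sbi, GC_TIME);
}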
static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi) { return sbi->node_inode->i_mapping; } static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type) { return test_bit(type, &sbi->s_flag); } static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type) { set_bit(type, &sbi->s_flag); } static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type) { clear_bit(type, &sbi->s_flag); } static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp) { return le64_to_cpu(cp->checkpoint_ver); } static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type) { if (type < F2FS_MAX_QUOTAS) return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]); return 0; } static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp) { size_t crc_offset = le32_to_cpu(cp->checksum_offset); return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset))); } static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) { unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags); return ckpt_flags & f; } static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) { return __is_set_ckpt_flags(F2FS_CKPT(sbi), f); } static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) { unsigned int ckpt_flags; ckpt_flags = le32_to_cpu(cp->ckpt_flags); ckpt_flags |= f; cp->ckpt_flags = cpu_to_le32(ckpt_flags); } static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) { unsigned long flags; spin_lock_irqsave(&sbi->cp_lock, flags); __set_ckpt_flags(F2FS_CKPT(sbi), f); spin_unlock_irqrestore(&sbi->cp_lock, flags); } static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f) { unsigned int ckpt_flags; ckpt_flags = le32_to_cpu(cp->ckpt_flags); ckpt_flags &= (~f); cp->ckpt_flags = cpu_to_le32(ckpt_flags); } static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f) { unsigned long flags; spin_lock_irqsave(&sbi->cp_lock, flags); __clear_ckpt_flags(F2FS_CKPT(sbi), f); spin_unlock_irqrestore(&sbi->cp_lock, flags); } #define init_f2fs_rwsem(sem) \ do { \ static struct lock_class_key __key; \ \ __init_f2fs_rwsem((sem), #sem, &__key); \ } while (0) static inline void __init_f2fs_rwsem(struct f2fs_rwsem *sem, const char *sem_name, struct lock_class_key *key) { __init_rwsem(&sem->internal_rwsem, sem_name, key); #ifdef CONFIG_F2FS_UNFAIR_RWSEM init_waitqueue_head(&sem->read_waiters); #endif } static inline int f2fs_rwsem_is_locked(struct f2fs_rwsem *sem) { return rwsem_is_locked(&sem->internal_rwsem); } static inline int f2fs_rwsem_is_contended(struct f2fs_rwsem *sem) { return rwsem_is_contended(&sem->internal_rwsem); } static inline void f2fs_down_read(struct f2fs_rwsem *sem) { #ifdef CONFIG_F2FS_UNFAIR_RWSEM wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem)); #else down_read(&sem->internal_rwsem); #endif } static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem) { return down_read_trylock(&sem->internal_rwsem); } static inline void f2fs_up_read(struct f2fs_rwsem *sem) { up_read(&sem->internal_rwsem); } static inline void f2fs_down_write(struct f2fs_rwsem *sem) { down_write(&sem->internal_rwsem); } #ifdef CONFIG_DEBUG_LOCK_ALLOC static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass) { down_read_nested(&sem->internal_rwsem, subclass); } static inline void f2fs_down_write_nested(struct f2fs_rwsem *sem, int subclass) { down_write_nested(&sem->internal_rwsem, subclass); } #else #define 
f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem) #define f2fs_down_write_nested(sem, subclass) f2fs_down_write(sem) #endif static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem) { return down_write_trylock(&sem->internal_rwsem); } static inline void f2fs_up_write(struct f2fs_rwsem *sem) { up_write(&sem->internal_rwsem); #ifdef CONFIG_F2FS_UNFAIR_RWSEM wake_up_all(&sem->read_waiters); #endif } static inline void f2fs_lock_op(struct f2fs_sb_info *sbi) { f2fs_down_read(&sbi->cp_rwsem); } static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi) { if (time_to_inject(sbi, FAULT_LOCK_OP)) return 0; return f2fs_down_read_trylock(&sbi->cp_rwsem); } static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi) { f2fs_up_read(&sbi->cp_rwsem); } static inline void f2fs_lock_all(struct f2fs_sb_info *sbi) { f2fs_down_write(&sbi->cp_rwsem); } static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi) { f2fs_up_write(&sbi->cp_rwsem); } static inline int __get_cp_reason(struct f2fs_sb_info *sbi) { int reason = CP_SYNC; if (test_opt(sbi, FASTBOOT)) reason = CP_FASTBOOT; if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) reason = CP_UMOUNT; return reason; } static inline bool __remain_node_summaries(int reason) { return (reason & (CP_UMOUNT | CP_FASTBOOT)); } static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi) { return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) || is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG)); } /* * Check whether the inode has blocks or not */ static inline int F2FS_HAS_BLOCKS(struct inode *inode) { block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0; return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block; } static inline bool f2fs_has_xattr_block(unsigned int ofs) { return ofs == XATTR_NODE_OFFSET; } static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi, struct inode *inode, bool cap) { if (!inode) return true; if (!test_opt(sbi, RESERVE_ROOT)) return false; if (IS_NOQUOTA(inode)) return true; if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid())) return true; if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) && in_group_p(F2FS_OPTION(sbi).s_resgid)) return true; if (cap && capable(CAP_SYS_RESOURCE)) return true; return false; } static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool); static inline int inc_valid_block_count(struct f2fs_sb_info *sbi, struct inode *inode, blkcnt_t *count) { blkcnt_t diff = 0, release = 0; block_t avail_user_block_count; int ret; ret = dquot_reserve_block(inode, *count); if (ret) return ret; if (time_to_inject(sbi, FAULT_BLOCK)) { release = *count; goto release_quota; } /* * let's increase this in prior to actual block count change in order * for f2fs_sync_file to avoid data races when deciding checkpoint. 
*/ percpu_counter_add(&sbi->alloc_valid_block_count, (*count)); spin_lock(&sbi->stat_lock); sbi->total_valid_block_count += (block_t)(*count); avail_user_block_count = sbi->user_block_count - sbi->current_reserved_blocks; if (!__allow_reserved_blocks(sbi, inode, true)) avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks; if (F2FS_IO_ALIGNED(sbi)) avail_user_block_count -= sbi->blocks_per_seg * SM_I(sbi)->additional_reserved_segments; if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { if (avail_user_block_count > sbi->unusable_block_count) avail_user_block_count -= sbi->unusable_block_count; else avail_user_block_count = 0; } if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) { diff = sbi->total_valid_block_count - avail_user_block_count; if (diff > *count) diff = *count; *count -= diff; release = diff; sbi->total_valid_block_count -= diff; if (!*count) { spin_unlock(&sbi->stat_lock); goto enospc; } } spin_unlock(&sbi->stat_lock); if (unlikely(release)) { percpu_counter_sub(&sbi->alloc_valid_block_count, release); dquot_release_reservation_block(inode, release); } f2fs_i_blocks_write(inode, *count, true, true); return 0; enospc: percpu_counter_sub(&sbi->alloc_valid_block_count, release); release_quota: dquot_release_reservation_block(inode, release); return -ENOSPC; } __printf(2, 3) void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...); #define f2fs_err(sbi, fmt, ...) \ f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__) #define f2fs_warn(sbi, fmt, ...) \ f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__) #define f2fs_notice(sbi, fmt, ...) \ f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__) #define f2fs_info(sbi, fmt, ...) \ f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__) #define f2fs_debug(sbi, fmt, ...) \ f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__) #define PAGE_PRIVATE_GET_FUNC(name, flagname) \ static inline bool page_private_##name(struct page *page) \ { \ return PagePrivate(page) && \ test_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)) && \ test_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \ } #define PAGE_PRIVATE_SET_FUNC(name, flagname) \ static inline void set_page_private_##name(struct page *page) \ { \ if (!PagePrivate(page)) \ attach_page_private(page, (void *)0); \ set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); \ set_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \ } #define PAGE_PRIVATE_CLEAR_FUNC(name, flagname) \ static inline void clear_page_private_##name(struct page *page) \ { \ clear_bit(PAGE_PRIVATE_##flagname, &page_private(page)); \ if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER)) \ detach_page_private(page); \ } PAGE_PRIVATE_GET_FUNC(nonpointer, NOT_POINTER); PAGE_PRIVATE_GET_FUNC(inline, INLINE_INODE); PAGE_PRIVATE_GET_FUNC(gcing, ONGOING_MIGRATION); PAGE_PRIVATE_GET_FUNC(dummy, DUMMY_WRITE); PAGE_PRIVATE_SET_FUNC(reference, REF_RESOURCE); PAGE_PRIVATE_SET_FUNC(inline, INLINE_INODE); PAGE_PRIVATE_SET_FUNC(gcing, ONGOING_MIGRATION); PAGE_PRIVATE_SET_FUNC(dummy, DUMMY_WRITE); PAGE_PRIVATE_CLEAR_FUNC(reference, REF_RESOURCE); PAGE_PRIVATE_CLEAR_FUNC(inline, INLINE_INODE); PAGE_PRIVATE_CLEAR_FUNC(gcing, ONGOING_MIGRATION); PAGE_PRIVATE_CLEAR_FUNC(dummy, DUMMY_WRITE); static inline unsigned long get_page_private_data(struct page *page) { unsigned long data = page_private(page); if (!test_bit(PAGE_PRIVATE_NOT_POINTER, &data)) return 0; return data >> PAGE_PRIVATE_MAX; } static inline void set_page_private_data(struct page *page, unsigned long data) { if (!PagePrivate(page)) attach_page_private(page, 
(void *)0); set_bit(PAGE_PRIVATE_NOT_POINTER, &page_private(page)); page_private(page) |= data << PAGE_PRIVATE_MAX; } static inline void clear_page_private_data(struct page *page) { page_private(page) &= GENMASK(PAGE_PRIVATE_MAX - 1, 0); if (page_private(page) == BIT(PAGE_PRIVATE_NOT_POINTER)) detach_page_private(page); } static inline void clear_page_private_all(struct page *page) { clear_page_private_data(page); clear_page_private_reference(page); clear_page_private_gcing(page); clear_page_private_inline(page); f2fs_bug_on(F2FS_P_SB(page), page_private(page)); } static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, struct inode *inode, block_t count) { blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK; spin_lock(&sbi->stat_lock); f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count); sbi->total_valid_block_count -= (block_t)count; if (sbi->reserved_blocks && sbi->current_reserved_blocks < sbi->reserved_blocks) sbi->current_reserved_blocks = min(sbi->reserved_blocks, sbi->current_reserved_blocks + count); spin_unlock(&sbi->stat_lock); if (unlikely(inode->i_blocks < sectors)) { f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu", inode->i_ino, (unsigned long long)inode->i_blocks, (unsigned long long)sectors); set_sbi_flag(sbi, SBI_NEED_FSCK); return; } f2fs_i_blocks_write(inode, count, false, true); } static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type) { atomic_inc(&sbi->nr_pages[count_type]); if (count_type == F2FS_DIRTY_DENTS || count_type == F2FS_DIRTY_NODES || count_type == F2FS_DIRTY_META || count_type == F2FS_DIRTY_QDATA || count_type == F2FS_DIRTY_IMETA) set_sbi_flag(sbi, SBI_IS_DIRTY); } static inline void inode_inc_dirty_pages(struct inode *inode) { atomic_inc(&F2FS_I(inode)->dirty_pages); inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); if (IS_NOQUOTA(inode)) inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA); } static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type) { atomic_dec(&sbi->nr_pages[count_type]); } static inline void inode_dec_dirty_pages(struct inode *inode) { if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode)) return; atomic_dec(&F2FS_I(inode)->dirty_pages); dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? 
F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); if (IS_NOQUOTA(inode)) dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA); } static inline void inc_atomic_write_cnt(struct inode *inode) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_inode_info *fi = F2FS_I(inode); u64 current_write; fi->atomic_write_cnt++; atomic64_inc(&sbi->current_atomic_write); current_write = atomic64_read(&sbi->current_atomic_write); if (current_write > sbi->peak_atomic_write) sbi->peak_atomic_write = current_write; } static inline void release_atomic_write_cnt(struct inode *inode) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_inode_info *fi = F2FS_I(inode); atomic64_sub(fi->atomic_write_cnt, &sbi->current_atomic_write); fi->atomic_write_cnt = 0; } static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type) { return atomic_read(&sbi->nr_pages[count_type]); } static inline int get_dirty_pages(struct inode *inode) { return atomic_read(&F2FS_I(inode)->dirty_pages); } static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type) { unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg; unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >> sbi->log_blocks_per_seg; return segs / sbi->segs_per_sec; } static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi) { return sbi->total_valid_block_count; } static inline block_t discard_blocks(struct f2fs_sb_info *sbi) { return sbi->discard_blks; } static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag) { struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); /* return NAT or SIT bitmap */ if (flag == NAT_BITMAP) return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize); else if (flag == SIT_BITMAP) return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize); return 0; } static inline block_t __cp_payload(struct f2fs_sb_info *sbi) { return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload); } static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag) { struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); void *tmp_ptr = &ckpt->sit_nat_version_bitmap; int offset; if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) { offset = (flag == SIT_BITMAP) ? le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0; /* * if large_nat_bitmap feature is enabled, leave checksum * protection for all nat/sit bitmaps. */ return tmp_ptr + offset + sizeof(__le32); } if (__cp_payload(sbi) > 0) { if (flag == NAT_BITMAP) return tmp_ptr; else return (unsigned char *)ckpt + F2FS_BLKSIZE; } else { offset = (flag == NAT_BITMAP) ? le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0; return tmp_ptr + offset; } } static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi) { block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); if (sbi->cur_cp_pack == 2) start_addr += sbi->blocks_per_seg; return start_addr; } static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi) { block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr); if (sbi->cur_cp_pack == 1) start_addr += sbi->blocks_per_seg; return start_addr; } static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi) { sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 
2 : 1; } static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi) { return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum); } extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync); static inline int inc_valid_node_count(struct f2fs_sb_info *sbi, struct inode *inode, bool is_inode) { block_t valid_block_count; unsigned int valid_node_count, user_block_count; int err; if (is_inode) { if (inode) { err = dquot_alloc_inode(inode); if (err) return err; } } else { err = dquot_reserve_block(inode, 1); if (err) return err; } if (time_to_inject(sbi, FAULT_BLOCK)) goto enospc; spin_lock(&sbi->stat_lock); valid_block_count = sbi->total_valid_block_count + sbi->current_reserved_blocks + 1; if (!__allow_reserved_blocks(sbi, inode, false)) valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks; if (F2FS_IO_ALIGNED(sbi)) valid_block_count += sbi->blocks_per_seg * SM_I(sbi)->additional_reserved_segments; user_block_count = sbi->user_block_count; if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) user_block_count -= sbi->unusable_block_count; if (unlikely(valid_block_count > user_block_count)) { spin_unlock(&sbi->stat_lock); goto enospc; } valid_node_count = sbi->total_valid_node_count + 1; if (unlikely(valid_node_count > sbi->total_node_count)) { spin_unlock(&sbi->stat_lock); goto enospc; } sbi->total_valid_node_count++; sbi->total_valid_block_count++; spin_unlock(&sbi->stat_lock); if (inode) { if (is_inode) f2fs_mark_inode_dirty_sync(inode, true); else f2fs_i_blocks_write(inode, 1, true, true); } percpu_counter_inc(&sbi->alloc_valid_block_count); return 0; enospc: if (is_inode) { if (inode) dquot_free_inode(inode); } else { dquot_release_reservation_block(inode, 1); } return -ENOSPC; } static inline void dec_valid_node_count(struct f2fs_sb_info *sbi, struct inode *inode, bool is_inode) { spin_lock(&sbi->stat_lock); if (unlikely(!sbi->total_valid_block_count || !sbi->total_valid_node_count)) { f2fs_warn(sbi, "dec_valid_node_count: inconsistent block counts, total_valid_block:%u, total_valid_node:%u", sbi->total_valid_block_count, sbi->total_valid_node_count); set_sbi_flag(sbi, SBI_NEED_FSCK); } else { sbi->total_valid_block_count--; sbi->total_valid_node_count--; } if (sbi->reserved_blocks && sbi->current_reserved_blocks < sbi->reserved_blocks) sbi->current_reserved_blocks++; spin_unlock(&sbi->stat_lock); if (is_inode) { dquot_free_inode(inode); } else { if (unlikely(inode->i_blocks == 0)) { f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu", inode->i_ino, (unsigned long long)inode->i_blocks); set_sbi_flag(sbi, SBI_NEED_FSCK); return; } f2fs_i_blocks_write(inode, 1, false, true); } } static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi) { return sbi->total_valid_node_count; } static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi) { percpu_counter_inc(&sbi->total_valid_inode_count); } static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi) { percpu_counter_dec(&sbi->total_valid_inode_count); } static inline s64 valid_inode_count(struct f2fs_sb_info *sbi) { return percpu_counter_sum_positive(&sbi->total_valid_inode_count); } static inline struct page *f2fs_grab_cache_page(struct address_space *mapping, pgoff_t index, bool for_write) { struct page *page; unsigned int flags; if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) { if (!for_write) page = find_get_page_flags(mapping, index, FGP_LOCK | FGP_ACCESSED); else page = find_lock_page(mapping, index); if (page) return page; if (time_to_inject(F2FS_M_SB(mapping), 
FAULT_PAGE_ALLOC)) return NULL; } if (!for_write) return grab_cache_page(mapping, index); flags = memalloc_nofs_save(); page = grab_cache_page_write_begin(mapping, index); memalloc_nofs_restore(flags); return page; } static inline struct page *f2fs_pagecache_get_page( struct address_space *mapping, pgoff_t index, fgf_t fgp_flags, gfp_t gfp_mask) { if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) return NULL; return pagecache_get_page(mapping, index, fgp_flags, gfp_mask); } static inline void f2fs_put_page(struct page *page, int unlock) { if (!page) return; if (unlock) { f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page)); unlock_page(page); } put_page(page); } static inline void f2fs_put_dnode(struct dnode_of_data *dn) { if (dn->node_page) f2fs_put_page(dn->node_page, 1); if (dn->inode_page && dn->node_page != dn->inode_page) f2fs_put_page(dn->inode_page, 0); dn->node_page = NULL; dn->inode_page = NULL; } static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name, size_t size) { return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL); } static inline void *f2fs_kmem_cache_alloc_nofail(struct kmem_cache *cachep, gfp_t flags) { void *entry; entry = kmem_cache_alloc(cachep, flags); if (!entry) entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL); return entry; } static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags, bool nofail, struct f2fs_sb_info *sbi) { if (nofail) return f2fs_kmem_cache_alloc_nofail(cachep, flags); if (time_to_inject(sbi, FAULT_SLAB_ALLOC)) return NULL; return kmem_cache_alloc(cachep, flags); } static inline bool is_inflight_io(struct f2fs_sb_info *sbi, int type) { if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) || get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) || get_pages(sbi, F2FS_WB_CP_DATA) || get_pages(sbi, F2FS_DIO_READ) || get_pages(sbi, F2FS_DIO_WRITE)) return true; if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info && atomic_read(&SM_I(sbi)->dcc_info->queued_discard)) return true; if (SM_I(sbi) && SM_I(sbi)->fcc_info && atomic_read(&SM_I(sbi)->fcc_info->queued_flush)) return true; return false; } static inline bool is_idle(struct f2fs_sb_info *sbi, int type) { if (sbi->gc_mode == GC_URGENT_HIGH) return true; if (is_inflight_io(sbi, type)) return false; if (sbi->gc_mode == GC_URGENT_MID) return true; if (sbi->gc_mode == GC_URGENT_LOW && (type == DISCARD_TIME || type == GC_TIME)) return true; return f2fs_time_over(sbi, type); } static inline void f2fs_radix_tree_insert(struct radix_tree_root *root, unsigned long index, void *item) { while (radix_tree_insert(root, index, item)) cond_resched(); } #define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino) static inline bool IS_INODE(struct page *page) { struct f2fs_node *p = F2FS_NODE(page); return RAW_IS_INODE(p); } static inline int offset_in_addr(struct f2fs_inode *i) { return (i->i_inline & F2FS_EXTRA_ATTR) ? (le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0; } static inline __le32 *blkaddr_in_node(struct f2fs_node *node) { return RAW_IS_INODE(node) ? 
node->i.i_addr : node->dn.addr; } static inline int f2fs_has_extra_attr(struct inode *inode); static inline block_t data_blkaddr(struct inode *inode, struct page *node_page, unsigned int offset) { struct f2fs_node *raw_node; __le32 *addr_array; int base = 0; bool is_inode = IS_INODE(node_page); raw_node = F2FS_NODE(node_page); if (is_inode) { if (!inode) /* from GC path only */ base = offset_in_addr(&raw_node->i); else if (f2fs_has_extra_attr(inode)) base = get_extra_isize(inode); } addr_array = blkaddr_in_node(raw_node); return le32_to_cpu(addr_array[base + offset]); } static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn) { return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node); } static inline int f2fs_test_bit(unsigned int nr, char *addr) { int mask; addr += (nr >> 3); mask = BIT(7 - (nr & 0x07)); return mask & *addr; } static inline void f2fs_set_bit(unsigned int nr, char *addr) { int mask; addr += (nr >> 3); mask = BIT(7 - (nr & 0x07)); *addr |= mask; } static inline void f2fs_clear_bit(unsigned int nr, char *addr) { int mask; addr += (nr >> 3); mask = BIT(7 - (nr & 0x07)); *addr &= ~mask; } static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr) { int mask; int ret; addr += (nr >> 3); mask = BIT(7 - (nr & 0x07)); ret = mask & *addr; *addr |= mask; return ret; } static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr) { int mask; int ret; addr += (nr >> 3); mask = BIT(7 - (nr & 0x07)); ret = mask & *addr; *addr &= ~mask; return ret; } static inline void f2fs_change_bit(unsigned int nr, char *addr) { int mask; addr += (nr >> 3); mask = BIT(7 - (nr & 0x07)); *addr ^= mask; } /* * On-disk inode flags (f2fs_inode::i_flags) */ #define F2FS_COMPR_FL 0x00000004 /* Compress file */ #define F2FS_SYNC_FL 0x00000008 /* Synchronous updates */ #define F2FS_IMMUTABLE_FL 0x00000010 /* Immutable file */ #define F2FS_APPEND_FL 0x00000020 /* writes to file may only append */ #define F2FS_NODUMP_FL 0x00000040 /* do not dump file */ #define F2FS_NOATIME_FL 0x00000080 /* do not update atime */ #define F2FS_NOCOMP_FL 0x00000400 /* Don't compress */ #define F2FS_INDEX_FL 0x00001000 /* hash-indexed directory */ #define F2FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */ #define F2FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ #define F2FS_CASEFOLD_FL 0x40000000 /* Casefolded file */ #define F2FS_QUOTA_DEFAULT_FL (F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL) /* Flags that should be inherited by new inodes from their parent. */ #define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \ F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \ F2FS_CASEFOLD_FL) /* Flags that are appropriate for regular files (all but dir-specific ones). */ #define F2FS_REG_FLMASK (~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \ F2FS_CASEFOLD_FL)) /* Flags that are appropriate for non-directories/regular files. 
*/ #define F2FS_OTHER_FLMASK (F2FS_NODUMP_FL | F2FS_NOATIME_FL) static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags) { if (S_ISDIR(mode)) return flags; else if (S_ISREG(mode)) return flags & F2FS_REG_FLMASK; else return flags & F2FS_OTHER_FLMASK; } static inline void __mark_inode_dirty_flag(struct inode *inode, int flag, bool set) { switch (flag) { case FI_INLINE_XATTR: case FI_INLINE_DATA: case FI_INLINE_DENTRY: case FI_NEW_INODE: if (set) return; fallthrough; case FI_DATA_EXIST: case FI_INLINE_DOTS: case FI_PIN_FILE: case FI_COMPRESS_RELEASED: f2fs_mark_inode_dirty_sync(inode, true); } } static inline void set_inode_flag(struct inode *inode, int flag) { set_bit(flag, F2FS_I(inode)->flags); __mark_inode_dirty_flag(inode, flag, true); } static inline int is_inode_flag_set(struct inode *inode, int flag) { return test_bit(flag, F2FS_I(inode)->flags); } static inline void clear_inode_flag(struct inode *inode, int flag) { clear_bit(flag, F2FS_I(inode)->flags); __mark_inode_dirty_flag(inode, flag, false); } static inline bool f2fs_verity_in_progress(struct inode *inode) { return IS_ENABLED(CONFIG_FS_VERITY) && is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS); } static inline void set_acl_inode(struct inode *inode, umode_t mode) { F2FS_I(inode)->i_acl_mode = mode; set_inode_flag(inode, FI_ACL_MODE); f2fs_mark_inode_dirty_sync(inode, false); } static inline void f2fs_i_links_write(struct inode *inode, bool inc) { if (inc) inc_nlink(inode); else drop_nlink(inode); f2fs_mark_inode_dirty_sync(inode, true); } static inline void f2fs_i_blocks_write(struct inode *inode, block_t diff, bool add, bool claim) { bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE); bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER); /* add = 1, claim = 1 should be dquot_reserve_block in pair */ if (add) { if (claim) dquot_claim_block(inode, diff); else dquot_alloc_block_nofail(inode, diff); } else { dquot_free_block(inode, diff); } f2fs_mark_inode_dirty_sync(inode, true); if (clean || recover) set_inode_flag(inode, FI_AUTO_RECOVER); } static inline bool f2fs_is_atomic_file(struct inode *inode); static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size) { bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE); bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER); if (i_size_read(inode) == i_size) return; i_size_write(inode, i_size); if (f2fs_is_atomic_file(inode)) return; f2fs_mark_inode_dirty_sync(inode, true); if (clean || recover) set_inode_flag(inode, FI_AUTO_RECOVER); } static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth) { F2FS_I(inode)->i_current_depth = depth; f2fs_mark_inode_dirty_sync(inode, true); } static inline void f2fs_i_gc_failures_write(struct inode *inode, unsigned int count) { F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count; f2fs_mark_inode_dirty_sync(inode, true); } static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid) { F2FS_I(inode)->i_xattr_nid = xnid; f2fs_mark_inode_dirty_sync(inode, true); } static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino) { F2FS_I(inode)->i_pino = pino; f2fs_mark_inode_dirty_sync(inode, true); } static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri) { struct f2fs_inode_info *fi = F2FS_I(inode); if (ri->i_inline & F2FS_INLINE_XATTR) set_bit(FI_INLINE_XATTR, fi->flags); if (ri->i_inline & F2FS_INLINE_DATA) set_bit(FI_INLINE_DATA, fi->flags); if (ri->i_inline & F2FS_INLINE_DENTRY) set_bit(FI_INLINE_DENTRY, fi->flags); if (ri->i_inline & F2FS_DATA_EXIST) 
set_bit(FI_DATA_EXIST, fi->flags); if (ri->i_inline & F2FS_INLINE_DOTS) set_bit(FI_INLINE_DOTS, fi->flags); if (ri->i_inline & F2FS_EXTRA_ATTR) set_bit(FI_EXTRA_ATTR, fi->flags); if (ri->i_inline & F2FS_PIN_FILE) set_bit(FI_PIN_FILE, fi->flags); if (ri->i_inline & F2FS_COMPRESS_RELEASED) set_bit(FI_COMPRESS_RELEASED, fi->flags); } static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri) { ri->i_inline = 0; if (is_inode_flag_set(inode, FI_INLINE_XATTR)) ri->i_inline |= F2FS_INLINE_XATTR; if (is_inode_flag_set(inode, FI_INLINE_DATA)) ri->i_inline |= F2FS_INLINE_DATA; if (is_inode_flag_set(inode, FI_INLINE_DENTRY)) ri->i_inline |= F2FS_INLINE_DENTRY; if (is_inode_flag_set(inode, FI_DATA_EXIST)) ri->i_inline |= F2FS_DATA_EXIST; if (is_inode_flag_set(inode, FI_INLINE_DOTS)) ri->i_inline |= F2FS_INLINE_DOTS; if (is_inode_flag_set(inode, FI_EXTRA_ATTR)) ri->i_inline |= F2FS_EXTRA_ATTR; if (is_inode_flag_set(inode, FI_PIN_FILE)) ri->i_inline |= F2FS_PIN_FILE; if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) ri->i_inline |= F2FS_COMPRESS_RELEASED; } static inline int f2fs_has_extra_attr(struct inode *inode) { return is_inode_flag_set(inode, FI_EXTRA_ATTR); } static inline int f2fs_has_inline_xattr(struct inode *inode) { return is_inode_flag_set(inode, FI_INLINE_XATTR); } static inline int f2fs_compressed_file(struct inode *inode) { return S_ISREG(inode->i_mode) && is_inode_flag_set(inode, FI_COMPRESSED_FILE); } static inline bool f2fs_need_compress_data(struct inode *inode) { int compress_mode = F2FS_OPTION(F2FS_I_SB(inode)).compress_mode; if (!f2fs_compressed_file(inode)) return false; if (compress_mode == COMPR_MODE_FS) return true; else if (compress_mode == COMPR_MODE_USER && is_inode_flag_set(inode, FI_ENABLE_COMPRESS)) return true; return false; } static inline unsigned int addrs_per_inode(struct inode *inode) { unsigned int addrs = CUR_ADDRS_PER_INODE(inode) - get_inline_xattr_addrs(inode); if (!f2fs_compressed_file(inode)) return addrs; return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size); } static inline unsigned int addrs_per_block(struct inode *inode) { if (!f2fs_compressed_file(inode)) return DEF_ADDRS_PER_BLOCK; return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size); } static inline void *inline_xattr_addr(struct inode *inode, struct page *page) { struct f2fs_inode *ri = F2FS_INODE(page); return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE - get_inline_xattr_addrs(inode)]); } static inline int inline_xattr_size(struct inode *inode) { if (f2fs_has_inline_xattr(inode)) return get_inline_xattr_addrs(inode) * sizeof(__le32); return 0; } /* * Notice: check inline_data flag without inode page lock is unsafe. * It could change at any time by f2fs_convert_inline_page(). 
*/ static inline int f2fs_has_inline_data(struct inode *inode) { return is_inode_flag_set(inode, FI_INLINE_DATA); } static inline int f2fs_exist_data(struct inode *inode) { return is_inode_flag_set(inode, FI_DATA_EXIST); } static inline int f2fs_has_inline_dots(struct inode *inode) { return is_inode_flag_set(inode, FI_INLINE_DOTS); } static inline int f2fs_is_mmap_file(struct inode *inode) { return is_inode_flag_set(inode, FI_MMAP_FILE); } static inline bool f2fs_is_pinned_file(struct inode *inode) { return is_inode_flag_set(inode, FI_PIN_FILE); } static inline bool f2fs_is_atomic_file(struct inode *inode) { return is_inode_flag_set(inode, FI_ATOMIC_FILE); } static inline bool f2fs_is_cow_file(struct inode *inode) { return is_inode_flag_set(inode, FI_COW_FILE); } static inline bool f2fs_is_first_block_written(struct inode *inode) { return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN); } static inline bool f2fs_is_drop_cache(struct inode *inode) { return is_inode_flag_set(inode, FI_DROP_CACHE); } static inline void *inline_data_addr(struct inode *inode, struct page *page) { struct f2fs_inode *ri = F2FS_INODE(page); int extra_size = get_extra_isize(inode); return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]); } static inline int f2fs_has_inline_dentry(struct inode *inode) { return is_inode_flag_set(inode, FI_INLINE_DENTRY); } static inline int is_file(struct inode *inode, int type) { return F2FS_I(inode)->i_advise & type; } static inline void set_file(struct inode *inode, int type) { if (is_file(inode, type)) return; F2FS_I(inode)->i_advise |= type; f2fs_mark_inode_dirty_sync(inode, true); } static inline void clear_file(struct inode *inode, int type) { if (!is_file(inode, type)) return; F2FS_I(inode)->i_advise &= ~type; f2fs_mark_inode_dirty_sync(inode, true); } static inline bool f2fs_is_time_consistent(struct inode *inode) { struct timespec64 ts = inode_get_atime(inode); if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &ts)) return false; ts = inode_get_ctime(inode); if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &ts)) return false; ts = inode_get_mtime(inode); if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &ts)) return false; return true; } static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync) { bool ret; if (dsync) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); spin_lock(&sbi->inode_lock[DIRTY_META]); ret = list_empty(&F2FS_I(inode)->gdirty_list); spin_unlock(&sbi->inode_lock[DIRTY_META]); return ret; } if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) || file_keep_isize(inode) || i_size_read(inode) & ~PAGE_MASK) return false; if (!f2fs_is_time_consistent(inode)) return false; spin_lock(&F2FS_I(inode)->i_size_lock); ret = F2FS_I(inode)->last_disk_size == i_size_read(inode); spin_unlock(&F2FS_I(inode)->i_size_lock); return ret; } static inline bool f2fs_readonly(struct super_block *sb) { return sb_rdonly(sb); } static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi) { return is_set_ckpt_flags(sbi, CP_ERROR_FLAG); } static inline bool is_dot_dotdot(const u8 *name, size_t len) { if (len == 1 && name[0] == '.') return true; if (len == 2 && name[0] == '.' 
&& name[1] == '.') return true; return false; } static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi, size_t size, gfp_t flags) { if (time_to_inject(sbi, FAULT_KMALLOC)) return NULL; return kmalloc(size, flags); } static inline void *f2fs_getname(struct f2fs_sb_info *sbi) { if (time_to_inject(sbi, FAULT_KMALLOC)) return NULL; return __getname(); } static inline void f2fs_putname(char *buf) { __putname(buf); } static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi, size_t size, gfp_t flags) { return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO); } static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi, size_t size, gfp_t flags) { if (time_to_inject(sbi, FAULT_KVMALLOC)) return NULL; return kvmalloc(size, flags); } static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi, size_t size, gfp_t flags) { return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO); } static inline int get_extra_isize(struct inode *inode) { return F2FS_I(inode)->i_extra_isize / sizeof(__le32); } static inline int get_inline_xattr_addrs(struct inode *inode) { return F2FS_I(inode)->i_inline_xattr_size; } #define f2fs_get_inode_mode(i) \ ((is_inode_flag_set(i, FI_ACL_MODE)) ? \ (F2FS_I(i)->i_acl_mode) : ((i)->i_mode)) #define F2FS_MIN_EXTRA_ATTR_SIZE (sizeof(__le32)) #define F2FS_TOTAL_EXTRA_ATTR_SIZE \ (offsetof(struct f2fs_inode, i_extra_end) - \ offsetof(struct f2fs_inode, i_extra_isize)) \ #define F2FS_OLD_ATTRIBUTE_SIZE (offsetof(struct f2fs_inode, i_addr)) #define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field) \ ((offsetof(typeof(*(f2fs_inode)), field) + \ sizeof((f2fs_inode)->field)) \ <= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize))) \ #define __is_large_section(sbi) ((sbi)->segs_per_sec > 1) #define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META) bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type); static inline void verify_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type) { if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) { f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.", blkaddr, type); f2fs_bug_on(sbi, 1); } } static inline bool __is_valid_data_blkaddr(block_t blkaddr) { if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR || blkaddr == COMPRESS_ADDR) return false; return true; } /* * file.c */ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock); int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock); int f2fs_truncate(struct inode *inode); int f2fs_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int flags); int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr); int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end); void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count); int f2fs_precache_extents(struct inode *inode); int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa); int f2fs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry, struct fileattr *fa); long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid); int f2fs_pin_file_control(struct inode *inode, bool inc); /* * inode.c */ void f2fs_set_inode_flags(struct inode *inode); bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page); void 
f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page); struct inode *f2fs_iget(struct super_block *sb, unsigned long ino); struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino); int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink); void f2fs_update_inode(struct inode *inode, struct page *node_page); void f2fs_update_inode_page(struct inode *inode); int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc); void f2fs_evict_inode(struct inode *inode); void f2fs_handle_failed_inode(struct inode *inode); /* * namei.c */ int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name, bool hot, bool set); struct dentry *f2fs_get_parent(struct dentry *child); int f2fs_get_tmpfile(struct mnt_idmap *idmap, struct inode *dir, struct inode **new_inode); /* * dir.c */ int f2fs_init_casefolded_name(const struct inode *dir, struct f2fs_filename *fname); int f2fs_setup_filename(struct inode *dir, const struct qstr *iname, int lookup, struct f2fs_filename *fname); int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry, struct f2fs_filename *fname); void f2fs_free_filename(struct f2fs_filename *fname); struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d, const struct f2fs_filename *fname, int *max_slots); int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d, unsigned int start_pos, struct fscrypt_str *fstr); void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent, struct f2fs_dentry_ptr *d); struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir, const struct f2fs_filename *fname, struct page *dpage); void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode, unsigned int current_depth); int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots); void f2fs_drop_nlink(struct inode *dir, struct inode *inode); struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, const struct f2fs_filename *fname, struct page **res_page); struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir, const struct qstr *child, struct page **res_page); struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p); ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr, struct page **page); void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de, struct page *page, struct inode *inode); bool f2fs_has_enough_room(struct inode *dir, struct page *ipage, const struct f2fs_filename *fname); void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d, const struct fscrypt_str *name, f2fs_hash_t name_hash, unsigned int bit_pos); int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname, struct inode *inode, nid_t ino, umode_t mode); int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname, struct inode *inode, nid_t ino, umode_t mode); int f2fs_do_add_link(struct inode *dir, const struct qstr *name, struct inode *inode, nid_t ino, umode_t mode); void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, struct inode *dir, struct inode *inode); int f2fs_do_tmpfile(struct inode *inode, struct inode *dir); bool f2fs_empty_dir(struct inode *dir); static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode) { if (fscrypt_is_nokey_name(dentry)) return -ENOKEY; return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name, inode, inode->i_ino, inode->i_mode); } /* * super.c */ int f2fs_inode_dirtied(struct inode *inode, bool sync); 
void f2fs_inode_synced(struct inode *inode); int f2fs_dquot_initialize(struct inode *inode); int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly); int f2fs_quota_sync(struct super_block *sb, int type); loff_t max_file_blocks(struct inode *inode); void f2fs_quota_off_umount(struct super_block *sb); void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag); void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason, bool irq_context); void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error); void f2fs_handle_error_async(struct f2fs_sb_info *sbi, unsigned char error); int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover); int f2fs_sync_fs(struct super_block *sb, int sync); int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi); /* * hash.c */ void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname); /* * node.c */ struct node_info; int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid); bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type); bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page); void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi); void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page); void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi); int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid); bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid); bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino); int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni, bool checkpoint_context); pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs); int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode); int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from); int f2fs_truncate_xattr_node(struct inode *inode); int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, unsigned int seq_id); bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi); int f2fs_remove_inode_page(struct inode *inode); struct page *f2fs_new_inode_page(struct inode *inode); struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs); void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid); struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid); struct page *f2fs_get_node_page_ra(struct page *parent, int start); int f2fs_move_node_page(struct page *node_page, int gc_type); void f2fs_flush_inline_data(struct f2fs_sb_info *sbi); int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode, struct writeback_control *wbc, bool atomic, unsigned int *seq_id); int f2fs_sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc, bool do_balance, enum iostat_type io_type); int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount); bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid); void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid); void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid); int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink); int f2fs_recover_inline_xattr(struct inode *inode, struct page *page); int f2fs_recover_xattr_data(struct inode *inode, struct page *page); int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page); int f2fs_restore_node_summary(struct f2fs_sb_info *sbi, unsigned int segno, struct f2fs_summary_block *sum); void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi); int 
f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc); int f2fs_build_node_manager(struct f2fs_sb_info *sbi); void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi); int __init f2fs_create_node_manager_caches(void); void f2fs_destroy_node_manager_caches(void); /* * segment.c */ bool f2fs_need_SSR(struct f2fs_sb_info *sbi); int f2fs_commit_atomic_write(struct inode *inode); void f2fs_abort_atomic_write(struct inode *inode, bool clean); void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need); void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg); int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino); int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi); int f2fs_flush_device_cache(struct f2fs_sb_info *sbi); void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free); void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr); bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr); int f2fs_start_discard_thread(struct f2fs_sb_info *sbi); void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi); void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi); bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi); void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc); void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi); block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi); int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable); void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi); int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra); bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno); void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi); void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi); void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi); void f2fs_get_new_segment(struct f2fs_sb_info *sbi, unsigned int *newseg, bool new_sec, int dir); void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, unsigned int start, unsigned int end); void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force); void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi); int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range); bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc); struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno); void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr); void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page, enum iostat_type io_type); void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio); void f2fs_outplace_write_data(struct dnode_of_data *dn, struct f2fs_io_info *fio); int f2fs_inplace_write_data(struct f2fs_io_info *fio); void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, block_t old_blkaddr, block_t new_blkaddr, bool recover_curseg, bool recover_newaddr, bool from_gc); void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn, block_t old_addr, block_t new_addr, unsigned char version, bool recover_curseg, bool recover_newaddr); void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, block_t old_blkaddr, block_t *new_blkaddr, struct f2fs_summary *sum, int type, struct f2fs_io_info *fio); void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino, block_t blkaddr, unsigned int blkcnt); void f2fs_wait_on_page_writeback(struct page *page, enum page_type type, bool ordered, 
bool locked); void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr); void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr, block_t len); void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk); void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk); int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type, unsigned int val, int alloc); void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc); int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi); int f2fs_check_write_pointer(struct f2fs_sb_info *sbi); int f2fs_build_segment_manager(struct f2fs_sb_info *sbi); void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi); int __init f2fs_create_segment_manager_caches(void); void f2fs_destroy_segment_manager_caches(void); int f2fs_rw_hint_to_seg_type(enum rw_hint hint); unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi, unsigned int segno); unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi, unsigned int segno); #define DEF_FRAGMENT_SIZE 4 #define MIN_FRAGMENT_SIZE 1 #define MAX_FRAGMENT_SIZE 512 static inline bool f2fs_need_rand_seg(struct f2fs_sb_info *sbi) { return F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG || F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK; } /* * checkpoint.c */ void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io, unsigned char reason); void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi); struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index); struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index); struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index); bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type); int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, int type, bool sync); void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index, unsigned int ra_blocks); long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type, long nr_to_write, enum iostat_type io_type); void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type); void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type); void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all); bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode); void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, unsigned int devidx, int type); bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino, unsigned int devidx, int type); int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi); void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi); void f2fs_add_orphan_inode(struct inode *inode); void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino); int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi); int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi); void f2fs_update_dirty_folio(struct inode *inode, struct folio *folio); void f2fs_remove_dirty_inode(struct inode *inode); int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type, bool from_cp); void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type); u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi); int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc); void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi); int __init f2fs_create_checkpoint_caches(void); void 
f2fs_destroy_checkpoint_caches(void); int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi); int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi); void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi); void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi); /* * data.c */ int __init f2fs_init_bioset(void); void f2fs_destroy_bioset(void); int f2fs_init_bio_entry_cache(void); void f2fs_destroy_bio_entry_cache(void); void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio, enum page_type type); int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi); void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type); void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi, struct inode *inode, struct page *page, nid_t ino, enum page_type type); void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi, struct bio **bio, struct page *page); void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi); int f2fs_submit_page_bio(struct f2fs_io_info *fio); int f2fs_merge_page_bio(struct f2fs_io_info *fio); void f2fs_submit_page_write(struct f2fs_io_info *fio); struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi, block_t blk_addr, sector_t *sector); int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr); void f2fs_set_data_blkaddr(struct dnode_of_data *dn); void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr); int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count); int f2fs_reserve_new_block(struct dnode_of_data *dn); int f2fs_get_block_locked(struct dnode_of_data *dn, pgoff_t index); int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index); struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index, blk_opf_t op_flags, bool for_write, pgoff_t *next_pgofs); struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index, pgoff_t *next_pgofs); struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index, bool for_write); struct page *f2fs_get_new_data_page(struct inode *inode, struct page *ipage, pgoff_t index, bool new_i_size); int f2fs_do_write_data_page(struct f2fs_io_info *fio); int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag); int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len); int f2fs_encrypt_one_page(struct f2fs_io_info *fio); bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio); bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio); int f2fs_write_single_data_page(struct page *page, int *submitted, struct bio **bio, sector_t *last_block, struct writeback_control *wbc, enum iostat_type io_type, int compr_blocks, bool allow_balance); void f2fs_write_failed(struct inode *inode, loff_t to); void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length); bool f2fs_release_folio(struct folio *folio, gfp_t wait); bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len); void f2fs_clear_page_cache_dirty_tag(struct page *page); int f2fs_init_post_read_processing(void); void f2fs_destroy_post_read_processing(void); int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi); void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi); extern const struct iomap_ops f2fs_iomap_ops; /* * gc.c */ int f2fs_start_gc_thread(struct f2fs_sb_info *sbi); void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi); block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode); int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control); 
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi); int f2fs_resize_fs(struct file *filp, __u64 block_count); int __init f2fs_create_garbage_collection_cache(void); void f2fs_destroy_garbage_collection_cache(void); /* victim selection function for cleaning and SSR */ int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result, int gc_type, int type, char alloc_mode, unsigned long long age); /* * recovery.c */ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only); bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi); int __init f2fs_create_recovery_cache(void); void f2fs_destroy_recovery_cache(void); /* * debug.c */ #ifdef CONFIG_F2FS_STAT_FS struct f2fs_stat_info { struct list_head stat_list; struct f2fs_sb_info *sbi; int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs; int main_area_segs, main_area_sections, main_area_zones; unsigned long long hit_cached[NR_EXTENT_CACHES]; unsigned long long hit_rbtree[NR_EXTENT_CACHES]; unsigned long long total_ext[NR_EXTENT_CACHES]; unsigned long long hit_total[NR_EXTENT_CACHES]; int ext_tree[NR_EXTENT_CACHES]; int zombie_tree[NR_EXTENT_CACHES]; int ext_node[NR_EXTENT_CACHES]; /* to count memory footprint */ unsigned long long ext_mem[NR_EXTENT_CACHES]; /* for read extent cache */ unsigned long long hit_largest; /* for block age extent cache */ unsigned long long allocated_data_blocks; int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta; int ndirty_data, ndirty_qdata; unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all; int nats, dirty_nats, sits, dirty_sits; int free_nids, avail_nids, alloc_nids; int total_count, utilization; int nr_wb_cp_data, nr_wb_data; int nr_rd_data, nr_rd_node, nr_rd_meta; int nr_dio_read, nr_dio_write; unsigned int io_skip_bggc, other_skip_bggc; int nr_flushing, nr_flushed, flush_list_empty; int nr_discarding, nr_discarded; int nr_discard_cmd; unsigned int undiscard_blks; int nr_issued_ckpt, nr_total_ckpt, nr_queued_ckpt; unsigned int cur_ckpt_time, peak_ckpt_time; int inline_xattr, inline_inode, inline_dir, append, update, orphans; int compr_inode, swapfile_inode; unsigned long long compr_blocks; int aw_cnt, max_aw_cnt; unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks; unsigned int bimodal, avg_vblocks; int util_free, util_valid, util_invalid; int rsvd_segs, overp_segs; int dirty_count, node_pages, meta_pages, compress_pages; int compress_page_hit; int prefree_count, free_segs, free_secs; int cp_call_count[MAX_CALL_TYPE], cp_count; int gc_call_count[MAX_CALL_TYPE]; int gc_segs[2][2]; int gc_secs[2][2]; int tot_blks, data_blks, node_blks; int bg_data_blks, bg_node_blks; int curseg[NR_CURSEG_TYPE]; int cursec[NR_CURSEG_TYPE]; int curzone[NR_CURSEG_TYPE]; unsigned int dirty_seg[NR_CURSEG_TYPE]; unsigned int full_seg[NR_CURSEG_TYPE]; unsigned int valid_blks[NR_CURSEG_TYPE]; unsigned int meta_count[META_MAX]; unsigned int segment_count[2]; unsigned int block_count[2]; unsigned int inplace_count; unsigned long long base_mem, cache_mem, page_mem; }; static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi) { return (struct f2fs_stat_info *)sbi->stat_info; } #define stat_inc_cp_call_count(sbi, foreground) \ atomic_inc(&sbi->cp_call_count[(foreground)]) #define stat_inc_cp_count(si) (F2FS_STAT(sbi)->cp_count++) #define stat_io_skip_bggc_count(sbi) ((sbi)->io_skip_bggc++) #define stat_other_skip_bggc_count(sbi) ((sbi)->other_skip_bggc++) #define stat_inc_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]++) #define 
stat_dec_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]--) #define stat_inc_total_hit(sbi, type) (atomic64_inc(&(sbi)->total_hit_ext[type])) #define stat_inc_rbtree_node_hit(sbi, type) (atomic64_inc(&(sbi)->read_hit_rbtree[type])) #define stat_inc_largest_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_largest)) #define stat_inc_cached_node_hit(sbi, type) (atomic64_inc(&(sbi)->read_hit_cached[type])) #define stat_inc_inline_xattr(inode) \ do { \ if (f2fs_has_inline_xattr(inode)) \ (atomic_inc(&F2FS_I_SB(inode)->inline_xattr)); \ } while (0) #define stat_dec_inline_xattr(inode) \ do { \ if (f2fs_has_inline_xattr(inode)) \ (atomic_dec(&F2FS_I_SB(inode)->inline_xattr)); \ } while (0) #define stat_inc_inline_inode(inode) \ do { \ if (f2fs_has_inline_data(inode)) \ (atomic_inc(&F2FS_I_SB(inode)->inline_inode)); \ } while (0) #define stat_dec_inline_inode(inode) \ do { \ if (f2fs_has_inline_data(inode)) \ (atomic_dec(&F2FS_I_SB(inode)->inline_inode)); \ } while (0) #define stat_inc_inline_dir(inode) \ do { \ if (f2fs_has_inline_dentry(inode)) \ (atomic_inc(&F2FS_I_SB(inode)->inline_dir)); \ } while (0) #define stat_dec_inline_dir(inode) \ do { \ if (f2fs_has_inline_dentry(inode)) \ (atomic_dec(&F2FS_I_SB(inode)->inline_dir)); \ } while (0) #define stat_inc_compr_inode(inode) \ do { \ if (f2fs_compressed_file(inode)) \ (atomic_inc(&F2FS_I_SB(inode)->compr_inode)); \ } while (0) #define stat_dec_compr_inode(inode) \ do { \ if (f2fs_compressed_file(inode)) \ (atomic_dec(&F2FS_I_SB(inode)->compr_inode)); \ } while (0) #define stat_add_compr_blocks(inode, blocks) \ (atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks)) #define stat_sub_compr_blocks(inode, blocks) \ (atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks)) #define stat_inc_swapfile_inode(inode) \ (atomic_inc(&F2FS_I_SB(inode)->swapfile_inode)) #define stat_dec_swapfile_inode(inode) \ (atomic_dec(&F2FS_I_SB(inode)->swapfile_inode)) #define stat_inc_atomic_inode(inode) \ (atomic_inc(&F2FS_I_SB(inode)->atomic_files)) #define stat_dec_atomic_inode(inode) \ (atomic_dec(&F2FS_I_SB(inode)->atomic_files)) #define stat_inc_meta_count(sbi, blkaddr) \ do { \ if (blkaddr < SIT_I(sbi)->sit_base_addr) \ atomic_inc(&(sbi)->meta_count[META_CP]); \ else if (blkaddr < NM_I(sbi)->nat_blkaddr) \ atomic_inc(&(sbi)->meta_count[META_SIT]); \ else if (blkaddr < SM_I(sbi)->ssa_blkaddr) \ atomic_inc(&(sbi)->meta_count[META_NAT]); \ else if (blkaddr < SM_I(sbi)->main_blkaddr) \ atomic_inc(&(sbi)->meta_count[META_SSA]); \ } while (0) #define stat_inc_seg_type(sbi, curseg) \ ((sbi)->segment_count[(curseg)->alloc_type]++) #define stat_inc_block_count(sbi, curseg) \ ((sbi)->block_count[(curseg)->alloc_type]++) #define stat_inc_inplace_blocks(sbi) \ (atomic_inc(&(sbi)->inplace_count)) #define stat_update_max_atomic_write(inode) \ do { \ int cur = atomic_read(&F2FS_I_SB(inode)->atomic_files); \ int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt); \ if (cur > max) \ atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur); \ } while (0) #define stat_inc_gc_call_count(sbi, foreground) \ (F2FS_STAT(sbi)->gc_call_count[(foreground)]++) #define stat_inc_gc_sec_count(sbi, type, gc_type) \ (F2FS_STAT(sbi)->gc_secs[(type)][(gc_type)]++) #define stat_inc_gc_seg_count(sbi, type, gc_type) \ (F2FS_STAT(sbi)->gc_segs[(type)][(gc_type)]++) #define stat_inc_tot_blk_count(si, blks) \ ((si)->tot_blks += (blks)) #define stat_inc_data_blk_count(sbi, blks, gc_type) \ do { \ struct f2fs_stat_info *si = F2FS_STAT(sbi); \ stat_inc_tot_blk_count(si, blks); \ si->data_blks += (blks); \ 
si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0; \ } while (0) #define stat_inc_node_blk_count(sbi, blks, gc_type) \ do { \ struct f2fs_stat_info *si = F2FS_STAT(sbi); \ stat_inc_tot_blk_count(si, blks); \ si->node_blks += (blks); \ si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0; \ } while (0) int f2fs_build_stats(struct f2fs_sb_info *sbi); void f2fs_destroy_stats(struct f2fs_sb_info *sbi); void __init f2fs_create_root_stats(void); void f2fs_destroy_root_stats(void); void f2fs_update_sit_info(struct f2fs_sb_info *sbi); #else #define stat_inc_cp_call_count(sbi, foreground) do { } while (0) #define stat_inc_cp_count(sbi) do { } while (0) #define stat_io_skip_bggc_count(sbi) do { } while (0) #define stat_other_skip_bggc_count(sbi) do { } while (0) #define stat_inc_dirty_inode(sbi, type) do { } while (0) #define stat_dec_dirty_inode(sbi, type) do { } while (0) #define stat_inc_total_hit(sbi, type) do { } while (0) #define stat_inc_rbtree_node_hit(sbi, type) do { } while (0) #define stat_inc_largest_node_hit(sbi) do { } while (0) #define stat_inc_cached_node_hit(sbi, type) do { } while (0) #define stat_inc_inline_xattr(inode) do { } while (0) #define stat_dec_inline_xattr(inode) do { } while (0) #define stat_inc_inline_inode(inode) do { } while (0) #define stat_dec_inline_inode(inode) do { } while (0) #define stat_inc_inline_dir(inode) do { } while (0) #define stat_dec_inline_dir(inode) do { } while (0) #define stat_inc_compr_inode(inode) do { } while (0) #define stat_dec_compr_inode(inode) do { } while (0) #define stat_add_compr_blocks(inode, blocks) do { } while (0) #define stat_sub_compr_blocks(inode, blocks) do { } while (0) #define stat_inc_swapfile_inode(inode) do { } while (0) #define stat_dec_swapfile_inode(inode) do { } while (0) #define stat_inc_atomic_inode(inode) do { } while (0) #define stat_dec_atomic_inode(inode) do { } while (0) #define stat_update_max_atomic_write(inode) do { } while (0) #define stat_inc_meta_count(sbi, blkaddr) do { } while (0) #define stat_inc_seg_type(sbi, curseg) do { } while (0) #define stat_inc_block_count(sbi, curseg) do { } while (0) #define stat_inc_inplace_blocks(sbi) do { } while (0) #define stat_inc_gc_call_count(sbi, foreground) do { } while (0) #define stat_inc_gc_sec_count(sbi, type, gc_type) do { } while (0) #define stat_inc_gc_seg_count(sbi, type, gc_type) do { } while (0) #define stat_inc_tot_blk_count(si, blks) do { } while (0) #define stat_inc_data_blk_count(sbi, blks, gc_type) do { } while (0) #define stat_inc_node_blk_count(sbi, blks, gc_type) do { } while (0) static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; } static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { } static inline void __init f2fs_create_root_stats(void) { } static inline void f2fs_destroy_root_stats(void) { } static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) {} #endif extern const struct file_operations f2fs_dir_operations; extern const struct file_operations f2fs_file_operations; extern const struct inode_operations f2fs_file_inode_operations; extern const struct address_space_operations f2fs_dblock_aops; extern const struct address_space_operations f2fs_node_aops; extern const struct address_space_operations f2fs_meta_aops; extern const struct inode_operations f2fs_dir_inode_operations; extern const struct inode_operations f2fs_symlink_inode_operations; extern const struct inode_operations f2fs_encrypted_symlink_inode_operations; extern const struct inode_operations f2fs_special_inode_operations; extern 
struct kmem_cache *f2fs_inode_entry_slab; /* * inline.c */ bool f2fs_may_inline_data(struct inode *inode); bool f2fs_sanity_check_inline_data(struct inode *inode); bool f2fs_may_inline_dentry(struct inode *inode); void f2fs_do_read_inline_data(struct page *page, struct page *ipage); void f2fs_truncate_inline_inode(struct inode *inode, struct page *ipage, u64 from); int f2fs_read_inline_data(struct inode *inode, struct page *page); int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page); int f2fs_convert_inline_inode(struct inode *inode); int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry); int f2fs_write_inline_data(struct inode *inode, struct page *page); int f2fs_recover_inline_data(struct inode *inode, struct page *npage); struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, const struct f2fs_filename *fname, struct page **res_page); int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent, struct page *ipage); int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname, struct inode *inode, nid_t ino, umode_t mode); void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page, struct inode *dir, struct inode *inode); bool f2fs_empty_inline_dir(struct inode *dir); int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx, struct fscrypt_str *fstr); int f2fs_inline_data_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, __u64 start, __u64 len); /* * shrinker.c */ unsigned long f2fs_shrink_count(struct shrinker *shrink, struct shrink_control *sc); unsigned long f2fs_shrink_scan(struct shrinker *shrink, struct shrink_control *sc); void f2fs_join_shrinker(struct f2fs_sb_info *sbi); void f2fs_leave_shrinker(struct f2fs_sb_info *sbi); /* * extent_cache.c */ bool sanity_check_extent_cache(struct inode *inode); void f2fs_init_extent_tree(struct inode *inode); void f2fs_drop_extent_tree(struct inode *inode); void f2fs_destroy_extent_node(struct inode *inode); void f2fs_destroy_extent_tree(struct inode *inode); void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi); int __init f2fs_create_extent_cache(void); void f2fs_destroy_extent_cache(void); /* read extent cache ops */ void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage); bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs, struct extent_info *ei); bool f2fs_lookup_read_extent_cache_block(struct inode *inode, pgoff_t index, block_t *blkaddr); void f2fs_update_read_extent_cache(struct dnode_of_data *dn); void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn, pgoff_t fofs, block_t blkaddr, unsigned int len); unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink); /* block age extent cache ops */ void f2fs_init_age_extent_tree(struct inode *inode); bool f2fs_lookup_age_extent_cache(struct inode *inode, pgoff_t pgofs, struct extent_info *ei); void f2fs_update_age_extent_cache(struct dnode_of_data *dn); void f2fs_update_age_extent_cache_range(struct dnode_of_data *dn, pgoff_t fofs, unsigned int len); unsigned int f2fs_shrink_age_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink); /* * sysfs.c */ #define MIN_RA_MUL 2 #define MAX_RA_MUL 256 int __init f2fs_init_sysfs(void); void f2fs_exit_sysfs(void); int f2fs_register_sysfs(struct f2fs_sb_info *sbi); void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi); /* verity.c */ extern const struct fsverity_operations f2fs_verityops; /* * crypto support */ static inline bool 
f2fs_encrypted_file(struct inode *inode) { return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode); } static inline void f2fs_set_encrypted_inode(struct inode *inode) { #ifdef CONFIG_FS_ENCRYPTION file_set_encrypt(inode); f2fs_set_inode_flags(inode); #endif } /* * Returns true if the reads of the inode's data need to undergo some * postprocessing step, like decryption or authenticity verification. */ static inline bool f2fs_post_read_required(struct inode *inode) { return f2fs_encrypted_file(inode) || fsverity_active(inode) || f2fs_compressed_file(inode); } /* * compress.c */ #ifdef CONFIG_F2FS_FS_COMPRESSION bool f2fs_is_compressed_page(struct page *page); struct page *f2fs_compress_control_page(struct page *page); int f2fs_prepare_compress_overwrite(struct inode *inode, struct page **pagep, pgoff_t index, void **fsdata); bool f2fs_compress_write_end(struct inode *inode, void *fsdata, pgoff_t index, unsigned copied); int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock); void f2fs_compress_write_end_io(struct bio *bio, struct page *page); bool f2fs_is_compress_backend_ready(struct inode *inode); bool f2fs_is_compress_level_valid(int alg, int lvl); int __init f2fs_init_compress_mempool(void); void f2fs_destroy_compress_mempool(void); void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task); void f2fs_end_read_compressed_page(struct page *page, bool failed, block_t blkaddr, bool in_task); bool f2fs_cluster_is_empty(struct compress_ctx *cc); bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index); bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages, int index, int nr_pages, bool uptodate); bool f2fs_sanity_check_cluster(struct dnode_of_data *dn); void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page); int f2fs_write_multi_pages(struct compress_ctx *cc, int *submitted, struct writeback_control *wbc, enum iostat_type io_type); int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index); void f2fs_update_read_extent_tree_range_compressed(struct inode *inode, pgoff_t fofs, block_t blkaddr, unsigned int llen, unsigned int c_len); int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, unsigned nr_pages, sector_t *last_block_in_bio, bool is_readahead, bool for_write); struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc); void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed, bool in_task); void f2fs_put_page_dic(struct page *page, bool in_task); unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn); int f2fs_init_compress_ctx(struct compress_ctx *cc); void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse); void f2fs_init_compress_info(struct f2fs_sb_info *sbi); int f2fs_init_compress_inode(struct f2fs_sb_info *sbi); void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi); int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi); void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi); int __init f2fs_init_compress_cache(void); void f2fs_destroy_compress_cache(void); struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi); void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr); void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page, nid_t ino, block_t blkaddr); bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page, block_t blkaddr); void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino); #define 
inc_compr_inode_stat(inode) \ do { \ struct f2fs_sb_info *sbi = F2FS_I_SB(inode); \ sbi->compr_new_inode++; \ } while (0) #define add_compr_block_stat(inode, blocks) \ do { \ struct f2fs_sb_info *sbi = F2FS_I_SB(inode); \ int diff = F2FS_I(inode)->i_cluster_size - blocks; \ sbi->compr_written_block += blocks; \ sbi->compr_saved_block += diff; \ } while (0) #else static inline bool f2fs_is_compressed_page(struct page *page) { return false; } static inline bool f2fs_is_compress_backend_ready(struct inode *inode) { if (!f2fs_compressed_file(inode)) return true; /* not support compression */ return false; } static inline bool f2fs_is_compress_level_valid(int alg, int lvl) { return false; } static inline struct page *f2fs_compress_control_page(struct page *page) { WARN_ON_ONCE(1); return ERR_PTR(-EINVAL); } static inline int __init f2fs_init_compress_mempool(void) { return 0; } static inline void f2fs_destroy_compress_mempool(void) { } static inline void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task) { } static inline void f2fs_end_read_compressed_page(struct page *page, bool failed, block_t blkaddr, bool in_task) { WARN_ON_ONCE(1); } static inline void f2fs_put_page_dic(struct page *page, bool in_task) { WARN_ON_ONCE(1); } static inline unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn) { return 0; } static inline bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) { return false; } static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; } static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { } static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; } static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { } static inline int __init f2fs_init_compress_cache(void) { return 0; } static inline void f2fs_destroy_compress_cache(void) { } static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr) { } static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page, nid_t ino, block_t blkaddr) { } static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page, block_t blkaddr) { return false; } static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino) { } #define inc_compr_inode_stat(inode) do { } while (0) static inline void f2fs_update_read_extent_tree_range_compressed( struct inode *inode, pgoff_t fofs, block_t blkaddr, unsigned int llen, unsigned int c_len) { } #endif static inline int set_compress_context(struct inode *inode) { #ifdef CONFIG_F2FS_FS_COMPRESSION struct f2fs_sb_info *sbi = F2FS_I_SB(inode); F2FS_I(inode)->i_compress_algorithm = F2FS_OPTION(sbi).compress_algorithm; F2FS_I(inode)->i_log_cluster_size = F2FS_OPTION(sbi).compress_log_size; F2FS_I(inode)->i_compress_flag = F2FS_OPTION(sbi).compress_chksum ? 
BIT(COMPRESS_CHKSUM) : 0; F2FS_I(inode)->i_cluster_size = BIT(F2FS_I(inode)->i_log_cluster_size); if ((F2FS_I(inode)->i_compress_algorithm == COMPRESS_LZ4 || F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD) && F2FS_OPTION(sbi).compress_level) F2FS_I(inode)->i_compress_level = F2FS_OPTION(sbi).compress_level; F2FS_I(inode)->i_flags |= F2FS_COMPR_FL; set_inode_flag(inode, FI_COMPRESSED_FILE); stat_inc_compr_inode(inode); inc_compr_inode_stat(inode); f2fs_mark_inode_dirty_sync(inode, true); return 0; #else return -EOPNOTSUPP; #endif } static inline bool f2fs_disable_compressed_file(struct inode *inode) { struct f2fs_inode_info *fi = F2FS_I(inode); if (!f2fs_compressed_file(inode)) return true; if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode)) return false; fi->i_flags &= ~F2FS_COMPR_FL; stat_dec_compr_inode(inode); clear_inode_flag(inode, FI_COMPRESSED_FILE); f2fs_mark_inode_dirty_sync(inode, true); return true; } #define F2FS_FEATURE_FUNCS(name, flagname) \ static inline bool f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \ { \ return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \ } F2FS_FEATURE_FUNCS(encrypt, ENCRYPT); F2FS_FEATURE_FUNCS(blkzoned, BLKZONED); F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR); F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA); F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM); F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR); F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO); F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME); F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND); F2FS_FEATURE_FUNCS(verity, VERITY); F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM); F2FS_FEATURE_FUNCS(casefold, CASEFOLD); F2FS_FEATURE_FUNCS(compression, COMPRESSION); F2FS_FEATURE_FUNCS(readonly, RO); #ifdef CONFIG_BLK_DEV_ZONED static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi, block_t blkaddr) { unsigned int zno = blkaddr / sbi->blocks_per_blkz; return test_bit(zno, FDEV(devi).blkz_seq); } #endif static inline int f2fs_bdev_index(struct f2fs_sb_info *sbi, struct block_device *bdev) { int i; if (!f2fs_is_multi_device(sbi)) return 0; for (i = 0; i < sbi->s_ndevs; i++) if (FDEV(i).bdev == bdev) return i; WARN_ON(1); return -1; } static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi) { return f2fs_sb_has_blkzoned(sbi); } static inline bool f2fs_bdev_support_discard(struct block_device *bdev) { return bdev_max_discard_sectors(bdev) || bdev_is_zoned(bdev); } static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi) { int i; if (!f2fs_is_multi_device(sbi)) return f2fs_bdev_support_discard(sbi->sb->s_bdev); for (i = 0; i < sbi->s_ndevs; i++) if (f2fs_bdev_support_discard(FDEV(i).bdev)) return true; return false; } static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi) { return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) || f2fs_hw_should_discard(sbi); } static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi) { int i; if (!f2fs_is_multi_device(sbi)) return bdev_read_only(sbi->sb->s_bdev); for (i = 0; i < sbi->s_ndevs; i++) if (bdev_read_only(FDEV(i).bdev)) return true; return false; } static inline bool f2fs_dev_is_readonly(struct f2fs_sb_info *sbi) { return f2fs_sb_has_readonly(sbi) || f2fs_hw_is_readonly(sbi); } static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi) { return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS; } static inline bool f2fs_low_mem_mode(struct f2fs_sb_info *sbi) { return F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW; } static inline bool f2fs_may_compress(struct inode *inode) { if 
(IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) || f2fs_is_atomic_file(inode) || f2fs_has_inline_data(inode) || f2fs_is_mmap_file(inode)) return false; return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode); } static inline void f2fs_i_compr_blocks_update(struct inode *inode, u64 blocks, bool add) { struct f2fs_inode_info *fi = F2FS_I(inode); int diff = fi->i_cluster_size - blocks; /* don't update i_compr_blocks if saved blocks were released */ if (!add && !atomic_read(&fi->i_compr_blocks)) return; if (add) { atomic_add(diff, &fi->i_compr_blocks); stat_add_compr_blocks(inode, diff); } else { atomic_sub(diff, &fi->i_compr_blocks); stat_sub_compr_blocks(inode, diff); } f2fs_mark_inode_dirty_sync(inode, true); } static inline bool f2fs_allow_multi_device_dio(struct f2fs_sb_info *sbi, int flag) { if (!f2fs_is_multi_device(sbi)) return false; if (flag != F2FS_GET_BLOCK_DIO) return false; return sbi->aligned_blksize; } static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx) { return fsverity_active(inode) && idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE); } #ifdef CONFIG_F2FS_FAULT_INJECTION extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate, unsigned int type); #else #define f2fs_build_fault_attr(sbi, rate, type) do { } while (0) #endif static inline bool is_journalled_quota(struct f2fs_sb_info *sbi) { #ifdef CONFIG_QUOTA if (f2fs_sb_has_quota_ino(sbi)) return true; if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] || F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] || F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) return true; #endif return false; } static inline bool f2fs_block_unit_discard(struct f2fs_sb_info *sbi) { return F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK; } static inline void f2fs_io_schedule_timeout(long timeout) { set_current_state(TASK_UNINTERRUPTIBLE); io_schedule_timeout(timeout); } static inline void f2fs_handle_page_eio(struct f2fs_sb_info *sbi, pgoff_t ofs, enum page_type type) { if (unlikely(f2fs_cp_error(sbi))) return; if (ofs == sbi->page_eio_ofs[type]) { if (sbi->page_eio_cnt[type]++ == MAX_RETRY_PAGE_EIO) set_ckpt_flags(sbi, CP_ERROR_FLAG); } else { sbi->page_eio_ofs[type] = ofs; sbi->page_eio_cnt[type] = 0; } } static inline bool f2fs_is_readonly(struct f2fs_sb_info *sbi) { return f2fs_sb_has_readonly(sbi) || f2fs_readonly(sbi->sb); } #define EFSBADCRC EBADMSG /* Bad CRC detected */ #define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */ #endif /* _LINUX_F2FS_H */ |
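/*
 * For reference, a minimal sketch (not part of the header above) of what a
 * single F2FS_FEATURE_FUNCS() line expands to. Assuming the macro as defined
 * above, F2FS_FEATURE_FUNCS(encrypt, ENCRYPT) generates:
 *
 *	static inline bool f2fs_sb_has_encrypt(struct f2fs_sb_info *sbi)
 *	{
 *		return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_ENCRYPT);
 *	}
 *
 * Each of the other F2FS_FEATURE_FUNCS() instantiations produces the matching
 * f2fs_sb_has_<name>() predicate in the same way, which callers such as
 * f2fs_hw_should_discard() and f2fs_dev_is_readonly() above rely on.
 */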
// SPDX-License-Identifier: GPL-2.0
/*
 * udc.c - Core UDC Framework
 *
 * Copyright (C) 2016 Intel Corporation
 * Author: Felipe Balbi <felipe.balbi@linux.intel.com>
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM gadget

#if !defined(__UDC_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define __UDC_TRACE_H

#include <linux/types.h>
#include <linux/tracepoint.h>
#include <asm/byteorder.h>
#include <linux/usb/gadget.h>

DECLARE_EVENT_CLASS(udc_log_gadget,
	TP_PROTO(struct usb_gadget *g, int ret),
	TP_ARGS(g, ret),
	TP_STRUCT__entry(
		__field(enum usb_device_speed, speed)
		__field(enum usb_device_speed, max_speed)
		__field(enum usb_device_state, state)
		__field(unsigned, mA)
		__field(unsigned, sg_supported)
		__field(unsigned, is_otg)
		__field(unsigned, is_a_peripheral)
		__field(unsigned, b_hnp_enable)
		__field(unsigned, a_hnp_support)
		__field(unsigned, hnp_polling_support)
		__field(unsigned, host_request_flag)
		__field(unsigned, quirk_ep_out_aligned_size)
		__field(unsigned, quirk_altset_not_supp)
		__field(unsigned, quirk_stall_not_supp)
		__field(unsigned, quirk_zlp_not_supp)
		__field(unsigned, is_selfpowered)
		__field(unsigned, deactivated)
		__field(unsigned, connected)
		__field(int, ret)
	),
	TP_fast_assign(
		__entry->speed = g->speed;
		__entry->max_speed = g->max_speed;
		__entry->state = g->state;
		__entry->mA = g->mA;
		__entry->sg_supported = g->sg_supported;
		__entry->is_otg = g->is_otg;
		__entry->is_a_peripheral = g->is_a_peripheral;
		__entry->b_hnp_enable = g->b_hnp_enable;
		__entry->a_hnp_support = g->a_hnp_support;
		__entry->hnp_polling_support = g->hnp_polling_support;
		__entry->host_request_flag = g->host_request_flag;
		__entry->quirk_ep_out_aligned_size = g->quirk_ep_out_aligned_size;
		__entry->quirk_altset_not_supp = g->quirk_altset_not_supp;
		__entry->quirk_stall_not_supp = g->quirk_stall_not_supp;
		__entry->quirk_zlp_not_supp = g->quirk_zlp_not_supp;
		__entry->is_selfpowered = g->is_selfpowered;
		__entry->deactivated = g->deactivated;
		__entry->connected = g->connected;
		__entry->ret = ret;
	),
	TP_printk("speed %d/%d state %d %dmA [%s%s%s%s%s%s%s%s%s%s%s%s%s%s] --> %d",
		__entry->speed, __entry->max_speed, __entry->state, __entry->mA,
		__entry->sg_supported ? "sg:" : "",
		__entry->is_otg ? "OTG:" : "",
		__entry->is_a_peripheral ? "a_peripheral:" : "",
		__entry->b_hnp_enable ? "b_hnp:" : "",
		__entry->a_hnp_support ? "a_hnp:" : "",
		__entry->hnp_polling_support ? "hnp_poll:" : "",
		__entry->host_request_flag ? "hostreq:" : "",
		__entry->quirk_ep_out_aligned_size ?
"out_aligned:" : "", __entry->quirk_altset_not_supp ? "no_altset:" : "", __entry->quirk_stall_not_supp ? "no_stall:" : "", __entry->quirk_zlp_not_supp ? "no_zlp" : "", __entry->is_selfpowered ? "self-powered:" : "bus-powered:", __entry->deactivated ? "deactivated:" : "activated:", __entry->connected ? "connected" : "disconnected", __entry->ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_frame_number, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_wakeup, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_set_remote_wakeup, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_set_selfpowered, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_clear_selfpowered, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_vbus_connect, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_vbus_draw, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_vbus_disconnect, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_connect, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_disconnect, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_deactivate, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_activate, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DECLARE_EVENT_CLASS(udc_log_ep, TP_PROTO(struct usb_ep *ep, int ret), TP_ARGS(ep, ret), TP_STRUCT__entry( __string(name, ep->name) __field(unsigned, maxpacket) __field(unsigned, maxpacket_limit) __field(unsigned, max_streams) __field(unsigned, mult) __field(unsigned, maxburst) __field(u8, address) __field(bool, claimed) __field(bool, enabled) __field(int, ret) ), TP_fast_assign( __assign_str(name, ep->name); __entry->maxpacket = ep->maxpacket; __entry->maxpacket_limit = ep->maxpacket_limit; __entry->max_streams = ep->max_streams; __entry->mult = ep->mult; __entry->maxburst = ep->maxburst; __entry->address = ep->address, __entry->claimed = ep->claimed; __entry->enabled = ep->enabled; __entry->ret = ret; ), TP_printk("%s: mps %d/%d streams %d mult %d burst %d addr %02x %s%s --> %d", __get_str(name), __entry->maxpacket, __entry->maxpacket_limit, __entry->max_streams, __entry->mult, __entry->maxburst, __entry->address, __entry->claimed ? "claimed:" : "released:", __entry->enabled ? 
"enabled" : "disabled", ret) ); DEFINE_EVENT(udc_log_ep, usb_ep_set_maxpacket_limit, TP_PROTO(struct usb_ep *ep, int ret), TP_ARGS(ep, ret) ); DEFINE_EVENT(udc_log_ep, usb_ep_enable, TP_PROTO(struct usb_ep *ep, int ret), TP_ARGS(ep, ret) ); DEFINE_EVENT(udc_log_ep, usb_ep_disable, TP_PROTO(struct usb_ep *ep, int ret), TP_ARGS(ep, ret) ); DEFINE_EVENT(udc_log_ep, usb_ep_set_halt, TP_PROTO(struct usb_ep *ep, int ret), TP_ARGS(ep, ret) ); DEFINE_EVENT(udc_log_ep, usb_ep_clear_halt, TP_PROTO(struct usb_ep *ep, int ret), TP_ARGS(ep, ret) ); DEFINE_EVENT(udc_log_ep, usb_ep_set_wedge, TP_PROTO(struct usb_ep *ep, int ret), TP_ARGS(ep, ret) ); DEFINE_EVENT(udc_log_ep, usb_ep_fifo_status, TP_PROTO(struct usb_ep *ep, int ret), TP_ARGS(ep, ret) ); DEFINE_EVENT(udc_log_ep, usb_ep_fifo_flush, TP_PROTO(struct usb_ep *ep, int ret), TP_ARGS(ep, ret) ); DECLARE_EVENT_CLASS(udc_log_req, TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret), TP_ARGS(ep, req, ret), TP_STRUCT__entry( __string(name, ep->name) __field(unsigned, length) __field(unsigned, actual) __field(unsigned, num_sgs) __field(unsigned, num_mapped_sgs) __field(unsigned, stream_id) __field(unsigned, no_interrupt) __field(unsigned, zero) __field(unsigned, short_not_ok) __field(int, status) __field(int, ret) __field(struct usb_request *, req) ), TP_fast_assign( __assign_str(name, ep->name); __entry->length = req->length; __entry->actual = req->actual; __entry->num_sgs = req->num_sgs; __entry->num_mapped_sgs = req->num_mapped_sgs; __entry->stream_id = req->stream_id; __entry->no_interrupt = req->no_interrupt; __entry->zero = req->zero; __entry->short_not_ok = req->short_not_ok; __entry->status = req->status; __entry->ret = ret; __entry->req = req; ), TP_printk("%s: req %p length %d/%d sgs %d/%d stream %d %s%s%s status %d --> %d", __get_str(name),__entry->req, __entry->actual, __entry->length, __entry->num_mapped_sgs, __entry->num_sgs, __entry->stream_id, __entry->zero ? "Z" : "z", __entry->short_not_ok ? "S" : "s", __entry->no_interrupt ? "i" : "I", __entry->status, __entry->ret ) ); DEFINE_EVENT(udc_log_req, usb_ep_alloc_request, TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret), TP_ARGS(ep, req, ret) ); DEFINE_EVENT(udc_log_req, usb_ep_free_request, TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret), TP_ARGS(ep, req, ret) ); DEFINE_EVENT(udc_log_req, usb_ep_queue, TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret), TP_ARGS(ep, req, ret) ); DEFINE_EVENT(udc_log_req, usb_ep_dequeue, TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret), TP_ARGS(ep, req, ret) ); DEFINE_EVENT(udc_log_req, usb_gadget_giveback_request, TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret), TP_ARGS(ep, req, ret) ); #endif /* __UDC_TRACE_H */ /* this part has to be here */ #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH . #undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE trace #include <trace/define_trace.h> |
// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) 2007 Patrick McHardy <kaber@trash.net>
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/gen_stats.h>
#include <linux/jhash.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/gen_stats.h>
#include <net/netlink.h>
#include <net/netns/generic.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_RATEEST.h>
#include <net/netfilter/xt_rateest.h>

#define RATEEST_HSIZE	16

struct xt_rateest_net {
	struct mutex hash_lock;
	struct hlist_head hash[RATEEST_HSIZE];
};

static unsigned int xt_rateest_id;

static unsigned int jhash_rnd __read_mostly;

static unsigned int xt_rateest_hash(const char *name)
{
	return jhash(name, sizeof_field(struct xt_rateest, name), jhash_rnd) &
	       (RATEEST_HSIZE - 1);
}

static void xt_rateest_hash_insert(struct xt_rateest_net *xn,
				   struct xt_rateest *est)
{
	unsigned int h;

	h = xt_rateest_hash(est->name);
	hlist_add_head(&est->list, &xn->hash[h]);
}

static struct xt_rateest *__xt_rateest_lookup(struct xt_rateest_net *xn,
					      const char *name)
{
	struct xt_rateest *est;
	unsigned int h;

	h = xt_rateest_hash(name);
	hlist_for_each_entry(est, &xn->hash[h], list) {
		if (strcmp(est->name, name) == 0) {
			est->refcnt++;
			return est;
		}
	}

	return NULL;
}

struct xt_rateest *xt_rateest_lookup(struct net *net, const char *name)
{
	struct xt_rateest_net *xn = net_generic(net, xt_rateest_id);
	struct xt_rateest *est;

	mutex_lock(&xn->hash_lock);
	est = __xt_rateest_lookup(xn, name);
	mutex_unlock(&xn->hash_lock);
	return est;
}
EXPORT_SYMBOL_GPL(xt_rateest_lookup);

void xt_rateest_put(struct net *net, struct xt_rateest *est)
{
	struct xt_rateest_net *xn = net_generic(net, xt_rateest_id);

	mutex_lock(&xn->hash_lock);
	if (--est->refcnt == 0) {
		hlist_del(&est->list);
		gen_kill_estimator(&est->rate_est);
		/*
		 * gen_estimator est_timer() might access est->lock or bstats,
		 * wait a RCU grace period before freeing 'est'
		 */
		kfree_rcu(est, rcu);
	}
	mutex_unlock(&xn->hash_lock);
}
EXPORT_SYMBOL_GPL(xt_rateest_put);

static unsigned int
xt_rateest_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct xt_rateest_target_info *info = par->targinfo;
	struct gnet_stats_basic_sync *stats = &info->est->bstats;

	spin_lock_bh(&info->est->lock);
	u64_stats_add(&stats->bytes, skb->len);
	u64_stats_inc(&stats->packets);
	spin_unlock_bh(&info->est->lock);

	return XT_CONTINUE;
}

static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
{
	struct xt_rateest_net *xn = net_generic(par->net, xt_rateest_id);
	struct xt_rateest_target_info *info = par->targinfo;
	struct xt_rateest *est;
	struct {
		struct nlattr		opt;
		struct gnet_estimator	est;
	} cfg;
	int
ret; if (strnlen(info->name, sizeof(est->name)) >= sizeof(est->name)) return -ENAMETOOLONG; net_get_random_once(&jhash_rnd, sizeof(jhash_rnd)); mutex_lock(&xn->hash_lock); est = __xt_rateest_lookup(xn, info->name); if (est) { mutex_unlock(&xn->hash_lock); /* * If estimator parameters are specified, they must match the * existing estimator. */ if ((!info->interval && !info->ewma_log) || (info->interval != est->params.interval || info->ewma_log != est->params.ewma_log)) { xt_rateest_put(par->net, est); return -EINVAL; } info->est = est; return 0; } ret = -ENOMEM; est = kzalloc(sizeof(*est), GFP_KERNEL); if (!est) goto err1; gnet_stats_basic_sync_init(&est->bstats); strscpy(est->name, info->name, sizeof(est->name)); spin_lock_init(&est->lock); est->refcnt = 1; est->params.interval = info->interval; est->params.ewma_log = info->ewma_log; cfg.opt.nla_len = nla_attr_size(sizeof(cfg.est)); cfg.opt.nla_type = TCA_STATS_RATE_EST; cfg.est.interval = info->interval; cfg.est.ewma_log = info->ewma_log; ret = gen_new_estimator(&est->bstats, NULL, &est->rate_est, &est->lock, NULL, &cfg.opt); if (ret < 0) goto err2; info->est = est; xt_rateest_hash_insert(xn, est); mutex_unlock(&xn->hash_lock); return 0; err2: kfree(est); err1: mutex_unlock(&xn->hash_lock); return ret; } static void xt_rateest_tg_destroy(const struct xt_tgdtor_param *par) { struct xt_rateest_target_info *info = par->targinfo; xt_rateest_put(par->net, info->est); } static struct xt_target xt_rateest_tg_reg __read_mostly = { .name = "RATEEST", .revision = 0, .family = NFPROTO_UNSPEC, .target = xt_rateest_tg, .checkentry = xt_rateest_tg_checkentry, .destroy = xt_rateest_tg_destroy, .targetsize = sizeof(struct xt_rateest_target_info), .usersize = offsetof(struct xt_rateest_target_info, est), .me = THIS_MODULE, }; static __net_init int xt_rateest_net_init(struct net *net) { struct xt_rateest_net *xn = net_generic(net, xt_rateest_id); int i; mutex_init(&xn->hash_lock); for (i = 0; i < ARRAY_SIZE(xn->hash); i++) INIT_HLIST_HEAD(&xn->hash[i]); return 0; } static struct pernet_operations xt_rateest_net_ops = { .init = xt_rateest_net_init, .id = &xt_rateest_id, .size = sizeof(struct xt_rateest_net), }; static int __init xt_rateest_tg_init(void) { int err = register_pernet_subsys(&xt_rateest_net_ops); if (err) return err; return xt_register_target(&xt_rateest_tg_reg); } static void __exit xt_rateest_tg_fini(void) { xt_unregister_target(&xt_rateest_tg_reg); unregister_pernet_subsys(&xt_rateest_net_ops); } MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Xtables: packet rate estimator"); MODULE_ALIAS("ipt_RATEEST"); MODULE_ALIAS("ip6t_RATEEST"); module_init(xt_rateest_tg_init); module_exit(xt_rateest_tg_fini); |
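/*
 * Illustrative sketch only (not part of xt_RATEEST.c above): how a consumer
 * such as the xt_rateest *match* extension is expected to use the exported
 * xt_rateest_lookup()/xt_rateest_put() pair, attaching to an estimator that
 * the RATEEST target created earlier under the same name. The identifiers
 * prefixed with "example_" are invented for this sketch.
 */
struct example_match_priv {
	struct xt_rateest *est;
};

static int example_match_checkentry(struct net *net,
				    struct example_match_priv *priv,
				    const char *name)
{
	priv->est = xt_rateest_lookup(net, name);
	if (!priv->est)
		return -ENOENT;		/* the RATEEST target must have created it first */
	return 0;			/* reference is held until ->destroy() */
}

static void example_match_destroy(struct net *net,
				  struct example_match_priv *priv)
{
	xt_rateest_put(net, priv->est);	/* the last put frees the estimator via RCU */
}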
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	X.25 Packet Layer release 002
 *
 *	This is ALPHA test software. This code may break your machine,
 *	randomly fail to work with new releases, misbehave and/or generally
 *	screw up. It might even work.
 *
 *	This code REQUIRES 2.1.15 or higher
 *
 *	History
 *	X.25 001	Jonathan Naylor	  Started coding.
 *	X.25 002	Jonathan Naylor	  New timer architecture.
 *	mar/20/00	Daniela Squassoni Disabling/enabling of facilities
 *					  negotiation.
 *	2000-09-04	Henner Eisen	  dev_hold() / dev_put() for x25_neigh.
 */

#define pr_fmt(fmt) "X25: " fmt

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/uaccess.h>
#include <linux/init.h>
#include <net/x25.h>

LIST_HEAD(x25_neigh_list);
DEFINE_RWLOCK(x25_neigh_list_lock);

static void x25_t20timer_expiry(struct timer_list *);

static void x25_transmit_restart_confirmation(struct x25_neigh *nb);
static void x25_transmit_restart_request(struct x25_neigh *nb);

/*
 *	Linux set/reset timer routines
 */
static inline void x25_start_t20timer(struct x25_neigh *nb)
{
	mod_timer(&nb->t20timer, jiffies + nb->t20);
}

static void x25_t20timer_expiry(struct timer_list *t)
{
	struct x25_neigh *nb = from_timer(nb, t, t20timer);

	x25_transmit_restart_request(nb);

	x25_start_t20timer(nb);
}

static inline void x25_stop_t20timer(struct x25_neigh *nb)
{
	del_timer(&nb->t20timer);
}

/*
 *	This handles all restart and diagnostic frames.
 */
void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
		      unsigned short frametype)
{
	struct sk_buff *skbn;

	switch (frametype) {
	case X25_RESTART_REQUEST:
		switch (nb->state) {
		case X25_LINK_STATE_0:
			/* This can happen when the x25 module just gets loaded
			 * and doesn't know layer 2 has already connected
			 */
			nb->state = X25_LINK_STATE_3;
			x25_transmit_restart_confirmation(nb);
			break;
		case X25_LINK_STATE_2:
			x25_stop_t20timer(nb);
			nb->state = X25_LINK_STATE_3;
			break;
		case X25_LINK_STATE_3:
			/* clear existing virtual calls */
			x25_kill_by_neigh(nb);

			x25_transmit_restart_confirmation(nb);
			break;
		}
		break;

	case X25_RESTART_CONFIRMATION:
		switch (nb->state) {
		case X25_LINK_STATE_2:
			x25_stop_t20timer(nb);
			nb->state = X25_LINK_STATE_3;
			break;
		case X25_LINK_STATE_3:
			/* clear existing virtual calls */
			x25_kill_by_neigh(nb);

			x25_transmit_restart_request(nb);
			nb->state = X25_LINK_STATE_2;
			x25_start_t20timer(nb);
			break;
		}
		break;

	case X25_DIAGNOSTIC:
		if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4))
			break;

		pr_warn("diagnostic #%d - %02X %02X %02X\n",
			skb->data[3], skb->data[4],
			skb->data[5], skb->data[6]);
		break;

	default:
		pr_warn("received unknown %02X with LCI 000\n",
			frametype);
		break;
	}

	if (nb->state == X25_LINK_STATE_3)
		while ((skbn = skb_dequeue(&nb->queue)) != NULL)
			x25_send_frame(skbn, nb);
}

/*
 *	This routine is called when a Restart Request is needed
 */
static void x25_transmit_restart_request(struct x25_neigh *nb)
{
	unsigned char *dptr;
	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return;

	skb_reserve(skb, X25_MAX_L2_LEN);

	dptr = skb_put(skb, X25_STD_MIN_LEN + 2);

	*dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
	*dptr++ = 0x00;
	*dptr++ = X25_RESTART_REQUEST;
	*dptr++ = 0x00;
	*dptr++ = 0;

	skb->sk = NULL;

	x25_send_frame(skb, nb);
}

/*
 *	This routine is called when a Restart Confirmation is needed
 */
static void x25_transmit_restart_confirmation(struct x25_neigh *nb)
{
	unsigned char *dptr;
	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN;
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return;

	skb_reserve(skb, X25_MAX_L2_LEN);

	dptr = skb_put(skb, X25_STD_MIN_LEN);

	*dptr++ = nb->extended ? X25_GFI_EXTSEQ : X25_GFI_STDSEQ;
	*dptr++ = 0x00;
	*dptr++ = X25_RESTART_CONFIRMATION;

	skb->sk = NULL;

	x25_send_frame(skb, nb);
}

/*
 *	This routine is called when a Clear Request is needed outside of the context
 *	of a connected socket.
 */
void x25_transmit_clear_request(struct x25_neigh *nb, unsigned int lci,
				unsigned char cause)
{
	unsigned char *dptr;
	int len = X25_MAX_L2_LEN + X25_STD_MIN_LEN + 2;
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb)
		return;

	skb_reserve(skb, X25_MAX_L2_LEN);

	dptr = skb_put(skb, X25_STD_MIN_LEN + 2);

	*dptr++ = ((lci >> 8) & 0x0F) | (nb->extended ?
					 X25_GFI_EXTSEQ :
					 X25_GFI_STDSEQ);
	*dptr++ = (lci >> 0) & 0xFF;
	*dptr++ = X25_CLEAR_REQUEST;
	*dptr++ = cause;
	*dptr++ = 0x00;

	skb->sk = NULL;

	x25_send_frame(skb, nb);
}

void x25_transmit_link(struct sk_buff *skb, struct x25_neigh *nb)
{
	switch (nb->state) {
	case X25_LINK_STATE_0:
		skb_queue_tail(&nb->queue, skb);
		nb->state = X25_LINK_STATE_1;
		x25_establish_link(nb);
		break;
	case X25_LINK_STATE_1:
	case X25_LINK_STATE_2:
		skb_queue_tail(&nb->queue, skb);
		break;
	case X25_LINK_STATE_3:
		x25_send_frame(skb, nb);
		break;
	}
}

/*
 *	Called when the link layer has become established.
 */
void x25_link_established(struct x25_neigh *nb)
{
	switch (nb->state) {
	case X25_LINK_STATE_0:
	case X25_LINK_STATE_1:
		x25_transmit_restart_request(nb);
		nb->state = X25_LINK_STATE_2;
		x25_start_t20timer(nb);
		break;
	}
}

/*
 *	Called when the link layer has terminated, or an establishment
 *	request has failed.
 */
void x25_link_terminated(struct x25_neigh *nb)
{
	nb->state = X25_LINK_STATE_0;
	skb_queue_purge(&nb->queue);
	x25_stop_t20timer(nb);

	/* Out of order: clear existing virtual calls (X.25 03/93 4.6.3) */
	x25_kill_by_neigh(nb);
}

/*
 *	Add a new device.
 */
void x25_link_device_up(struct net_device *dev)
{
	struct x25_neigh *nb = kmalloc(sizeof(*nb), GFP_ATOMIC);

	if (!nb)
		return;

	skb_queue_head_init(&nb->queue);
	timer_setup(&nb->t20timer, x25_t20timer_expiry, 0);

	dev_hold(dev);
	nb->dev      = dev;
	nb->state    = X25_LINK_STATE_0;
	nb->extended = 0;
	/*
	 * Enables negotiation
	 */
	nb->global_facil_mask = X25_MASK_REVERSE |
				X25_MASK_THROUGHPUT |
				X25_MASK_PACKET_SIZE |
				X25_MASK_WINDOW_SIZE;
	nb->t20      = sysctl_x25_restart_request_timeout;
	refcount_set(&nb->refcnt, 1);

	write_lock_bh(&x25_neigh_list_lock);
	list_add(&nb->node, &x25_neigh_list);
	write_unlock_bh(&x25_neigh_list_lock);
}

/**
 *	__x25_remove_neigh - remove neighbour from x25_neigh_list
 *	@nb: - neigh to remove
 *
 *	Remove neighbour from x25_neigh_list. If it was there.
 *	Caller must hold x25_neigh_list_lock.
 */
static void __x25_remove_neigh(struct x25_neigh *nb)
{
	if (nb->node.next) {
		list_del(&nb->node);
		x25_neigh_put(nb);
	}
}

/*
 *	A device has been removed, remove its links.
 */
void x25_link_device_down(struct net_device *dev)
{
	struct x25_neigh *nb;
	struct list_head *entry, *tmp;

	write_lock_bh(&x25_neigh_list_lock);

	list_for_each_safe(entry, tmp, &x25_neigh_list) {
		nb = list_entry(entry, struct x25_neigh, node);

		if (nb->dev == dev) {
			__x25_remove_neigh(nb);
			dev_put(dev);
		}
	}

	write_unlock_bh(&x25_neigh_list_lock);
}

/*
 *	Given a device, return the neighbour address.
 */
struct x25_neigh *x25_get_neigh(struct net_device *dev)
{
	struct x25_neigh *nb, *use = NULL;

	read_lock_bh(&x25_neigh_list_lock);
	list_for_each_entry(nb, &x25_neigh_list, node) {
		if (nb->dev == dev) {
			use = nb;
			break;
		}
	}

	if (use)
		x25_neigh_hold(use);
	read_unlock_bh(&x25_neigh_list_lock);
	return use;
}

/*
 *	Handle the ioctls that control the subscription functions.
 */
int x25_subscr_ioctl(unsigned int cmd, void __user *arg)
{
	struct x25_subscrip_struct x25_subscr;
	struct x25_neigh *nb;
	struct net_device *dev;
	int rc = -EINVAL;

	if (cmd != SIOCX25GSUBSCRIP && cmd != SIOCX25SSUBSCRIP)
		goto out;

	rc = -EFAULT;
	if (copy_from_user(&x25_subscr, arg, sizeof(x25_subscr)))
		goto out;

	rc = -EINVAL;
	if ((dev = x25_dev_get(x25_subscr.device)) == NULL)
		goto out;

	if ((nb = x25_get_neigh(dev)) == NULL)
		goto out_dev_put;

	dev_put(dev);

	if (cmd == SIOCX25GSUBSCRIP) {
		read_lock_bh(&x25_neigh_list_lock);
		x25_subscr.extended	      = nb->extended;
		x25_subscr.global_facil_mask  = nb->global_facil_mask;
		read_unlock_bh(&x25_neigh_list_lock);
		rc = copy_to_user(arg, &x25_subscr,
				  sizeof(x25_subscr)) ? -EFAULT : 0;
	} else {
		rc = -EINVAL;
		if (!(x25_subscr.extended && x25_subscr.extended != 1)) {
			rc = 0;
			write_lock_bh(&x25_neigh_list_lock);
			nb->extended		  = x25_subscr.extended;
			nb->global_facil_mask	  = x25_subscr.global_facil_mask;
			write_unlock_bh(&x25_neigh_list_lock);
		}
	}
	x25_neigh_put(nb);
out:
	return rc;
out_dev_put:
	dev_put(dev);
	goto out;
}

/*
 *	Release all memory associated with X.25 neighbour structures.
 */
void __exit x25_link_free(void)
{
	struct x25_neigh *nb;
	struct list_head *entry, *tmp;

	write_lock_bh(&x25_neigh_list_lock);

	list_for_each_safe(entry, tmp, &x25_neigh_list) {
		struct net_device *dev;

		nb = list_entry(entry, struct x25_neigh, node);
		dev = nb->dev;
		__x25_remove_neigh(nb);
		dev_put(dev);
	}
	write_unlock_bh(&x25_neigh_list_lock);
}
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1993 Linus Torvalds
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 * Numa awareness, Christoph Lameter, SGI, June 2005
 * Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <linux/io.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/memcontrol.h>
#include <linux/llist.h>
#include <linux/uio.h>
#include <linux/bitops.h>
#include <linux/rbtree_augmented.h>
#include <linux/overflow.h>
#include <linux/pgtable.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#define CREATE_TRACE_POINTS
#include <trace/events/vmalloc.h>

#include "internal.h"
#include "pgalloc-track.h"

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;

static int __init set_nohugeiomap(char *str)
{
	ioremap_max_page_shift = PAGE_SHIFT;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);
#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
static const unsigned int ioremap_max_page_shift = PAGE_SHIFT;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
static bool __ro_after_init vmap_allow_huge = true;

static int __init set_nohugevmalloc(char *str)
{
	vmap_allow_huge = false;
	return 0;
}
early_param("nohugevmalloc", set_nohugevmalloc);
#else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
static const bool vmap_allow_huge = false;
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMALLOC */

bool is_vmalloc_addr(const void *x)
{
	unsigned long addr = (unsigned long)kasan_reset_tag(x);

	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
EXPORT_SYMBOL(is_vmalloc_addr);

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

/*** Page table manipulation functions ***/
static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			phys_addr_t phys_addr, pgprot_t prot,
			unsigned int max_page_shift, pgtbl_mod_mask *mask)
{
	pte_t *pte;
	u64 pfn;
	unsigned long size = PAGE_SIZE;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;
	do {
BUG_ON(!pte_none(ptep_get(pte))); #ifdef CONFIG_HUGETLB_PAGE size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift); if (size != PAGE_SIZE) { pte_t entry = pfn_pte(pfn, prot); entry = arch_make_huge_pte(entry, ilog2(size), 0); set_huge_pte_at(&init_mm, addr, pte, entry, size); pfn += PFN_DOWN(size); continue; } #endif set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot)); pfn++; } while (pte += PFN_DOWN(size), addr += size, addr != end); *mask |= PGTBL_PTE_MODIFIED; return 0; } static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, unsigned int max_page_shift) { if (max_page_shift < PMD_SHIFT) return 0; if (!arch_vmap_pmd_supported(prot)) return 0; if ((end - addr) != PMD_SIZE) return 0; if (!IS_ALIGNED(addr, PMD_SIZE)) return 0; if (!IS_ALIGNED(phys_addr, PMD_SIZE)) return 0; if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr)) return 0; return pmd_set_huge(pmd, phys_addr, prot); } static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, unsigned int max_page_shift, pgtbl_mod_mask *mask) { pmd_t *pmd; unsigned long next; pmd = pmd_alloc_track(&init_mm, pud, addr, mask); if (!pmd) return -ENOMEM; do { next = pmd_addr_end(addr, end); if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot, max_page_shift)) { *mask |= PGTBL_PMD_MODIFIED; continue; } if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask)) return -ENOMEM; } while (pmd++, phys_addr += (next - addr), addr = next, addr != end); return 0; } static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, unsigned int max_page_shift) { if (max_page_shift < PUD_SHIFT) return 0; if (!arch_vmap_pud_supported(prot)) return 0; if ((end - addr) != PUD_SIZE) return 0; if (!IS_ALIGNED(addr, PUD_SIZE)) return 0; if (!IS_ALIGNED(phys_addr, PUD_SIZE)) return 0; if (pud_present(*pud) && !pud_free_pmd_page(pud, addr)) return 0; return pud_set_huge(pud, phys_addr, prot); } static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, unsigned int max_page_shift, pgtbl_mod_mask *mask) { pud_t *pud; unsigned long next; pud = pud_alloc_track(&init_mm, p4d, addr, mask); if (!pud) return -ENOMEM; do { next = pud_addr_end(addr, end); if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot, max_page_shift)) { *mask |= PGTBL_PUD_MODIFIED; continue; } if (vmap_pmd_range(pud, addr, next, phys_addr, prot, max_page_shift, mask)) return -ENOMEM; } while (pud++, phys_addr += (next - addr), addr = next, addr != end); return 0; } static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, unsigned int max_page_shift) { if (max_page_shift < P4D_SHIFT) return 0; if (!arch_vmap_p4d_supported(prot)) return 0; if ((end - addr) != P4D_SIZE) return 0; if (!IS_ALIGNED(addr, P4D_SIZE)) return 0; if (!IS_ALIGNED(phys_addr, P4D_SIZE)) return 0; if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr)) return 0; return p4d_set_huge(p4d, phys_addr, prot); } static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, unsigned int max_page_shift, pgtbl_mod_mask *mask) { p4d_t *p4d; unsigned long next; p4d = p4d_alloc_track(&init_mm, pgd, addr, mask); if (!p4d) return -ENOMEM; do { next = p4d_addr_end(addr, end); if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot, max_page_shift)) { *mask |= 
PGTBL_P4D_MODIFIED; continue; } if (vmap_pud_range(p4d, addr, next, phys_addr, prot, max_page_shift, mask)) return -ENOMEM; } while (p4d++, phys_addr += (next - addr), addr = next, addr != end); return 0; } static int vmap_range_noflush(unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot, unsigned int max_page_shift) { pgd_t *pgd; unsigned long start; unsigned long next; int err; pgtbl_mod_mask mask = 0; might_sleep(); BUG_ON(addr >= end); start = addr; pgd = pgd_offset_k(addr); do { next = pgd_addr_end(addr, end); err = vmap_p4d_range(pgd, addr, next, phys_addr, prot, max_page_shift, &mask); if (err) break; } while (pgd++, phys_addr += (next - addr), addr = next, addr != end); if (mask & ARCH_PAGE_TABLE_SYNC_MASK) arch_sync_kernel_mappings(start, end); return err; } int ioremap_page_range(unsigned long addr, unsigned long end, phys_addr_t phys_addr, pgprot_t prot) { int err; err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot), ioremap_max_page_shift); flush_cache_vmap(addr, end); if (!err) err = kmsan_ioremap_page_range(addr, end, phys_addr, prot, ioremap_max_page_shift); return err; } static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, pgtbl_mod_mask *mask) { pte_t *pte; pte = pte_offset_kernel(pmd, addr); do { pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte); WARN_ON(!pte_none(ptent) && !pte_present(ptent)); } while (pte++, addr += PAGE_SIZE, addr != end); *mask |= PGTBL_PTE_MODIFIED; } static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, pgtbl_mod_mask *mask) { pmd_t *pmd; unsigned long next; int cleared; pmd = pmd_offset(pud, addr); do { next = pmd_addr_end(addr, end); cleared = pmd_clear_huge(pmd); if (cleared || pmd_bad(*pmd)) *mask |= PGTBL_PMD_MODIFIED; if (cleared) continue; if (pmd_none_or_clear_bad(pmd)) continue; vunmap_pte_range(pmd, addr, next, mask); cond_resched(); } while (pmd++, addr = next, addr != end); } static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, pgtbl_mod_mask *mask) { pud_t *pud; unsigned long next; int cleared; pud = pud_offset(p4d, addr); do { next = pud_addr_end(addr, end); cleared = pud_clear_huge(pud); if (cleared || pud_bad(*pud)) *mask |= PGTBL_PUD_MODIFIED; if (cleared) continue; if (pud_none_or_clear_bad(pud)) continue; vunmap_pmd_range(pud, addr, next, mask); } while (pud++, addr = next, addr != end); } static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, pgtbl_mod_mask *mask) { p4d_t *p4d; unsigned long next; p4d = p4d_offset(pgd, addr); do { next = p4d_addr_end(addr, end); p4d_clear_huge(p4d); if (p4d_bad(*p4d)) *mask |= PGTBL_P4D_MODIFIED; if (p4d_none_or_clear_bad(p4d)) continue; vunmap_pud_range(p4d, addr, next, mask); } while (p4d++, addr = next, addr != end); } /* * vunmap_range_noflush is similar to vunmap_range, but does not * flush caches or TLBs. * * The caller is responsible for calling flush_cache_vmap() before calling * this function, and flush_tlb_kernel_range after it has returned * successfully (and before the addresses are expected to cause a page fault * or be re-mapped for something else, if TLB flushes are being delayed or * coalesced). * * This is an internal function only. Do not use outside mm/. 
*/ void __vunmap_range_noflush(unsigned long start, unsigned long end) { unsigned long next; pgd_t *pgd; unsigned long addr = start; pgtbl_mod_mask mask = 0; BUG_ON(addr >= end); pgd = pgd_offset_k(addr); do { next = pgd_addr_end(addr, end); if (pgd_bad(*pgd)) mask |= PGTBL_PGD_MODIFIED; if (pgd_none_or_clear_bad(pgd)) continue; vunmap_p4d_range(pgd, addr, next, &mask); } while (pgd++, addr = next, addr != end); if (mask & ARCH_PAGE_TABLE_SYNC_MASK) arch_sync_kernel_mappings(start, end); } void vunmap_range_noflush(unsigned long start, unsigned long end) { kmsan_vunmap_range_noflush(start, end); __vunmap_range_noflush(start, end); } /** * vunmap_range - unmap kernel virtual addresses * @addr: start of the VM area to unmap * @end: end of the VM area to unmap (non-inclusive) * * Clears any present PTEs in the virtual address range, flushes TLBs and * caches. Any subsequent access to the address before it has been re-mapped * is a kernel bug. */ void vunmap_range(unsigned long addr, unsigned long end) { flush_cache_vunmap(addr, end); vunmap_range_noflush(addr, end); flush_tlb_kernel_range(addr, end); } static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr, pgtbl_mod_mask *mask) { pte_t *pte; /* * nr is a running index into the array which helps higher level * callers keep track of where we're up to. */ pte = pte_alloc_kernel_track(pmd, addr, mask); if (!pte) return -ENOMEM; do { struct page *page = pages[*nr]; if (WARN_ON(!pte_none(ptep_get(pte)))) return -EBUSY; if (WARN_ON(!page)) return -ENOMEM; if (WARN_ON(!pfn_valid(page_to_pfn(page)))) return -EINVAL; set_pte_at(&init_mm, addr, pte, mk_pte(page, prot)); (*nr)++; } while (pte++, addr += PAGE_SIZE, addr != end); *mask |= PGTBL_PTE_MODIFIED; return 0; } static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr, pgtbl_mod_mask *mask) { pmd_t *pmd; unsigned long next; pmd = pmd_alloc_track(&init_mm, pud, addr, mask); if (!pmd) return -ENOMEM; do { next = pmd_addr_end(addr, end); if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask)) return -ENOMEM; } while (pmd++, addr = next, addr != end); return 0; } static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr, pgtbl_mod_mask *mask) { pud_t *pud; unsigned long next; pud = pud_alloc_track(&init_mm, p4d, addr, mask); if (!pud) return -ENOMEM; do { next = pud_addr_end(addr, end); if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask)) return -ENOMEM; } while (pud++, addr = next, addr != end); return 0; } static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr, pgtbl_mod_mask *mask) { p4d_t *p4d; unsigned long next; p4d = p4d_alloc_track(&init_mm, pgd, addr, mask); if (!p4d) return -ENOMEM; do { next = p4d_addr_end(addr, end); if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask)) return -ENOMEM; } while (p4d++, addr = next, addr != end); return 0; } static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages) { unsigned long start = addr; pgd_t *pgd; unsigned long next; int err = 0; int nr = 0; pgtbl_mod_mask mask = 0; BUG_ON(addr >= end); pgd = pgd_offset_k(addr); do { next = pgd_addr_end(addr, end); if (pgd_bad(*pgd)) mask |= PGTBL_PGD_MODIFIED; err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask); if (err) return 
err; } while (pgd++, addr = next, addr != end); if (mask & ARCH_PAGE_TABLE_SYNC_MASK) arch_sync_kernel_mappings(start, end); return 0; } /* * vmap_pages_range_noflush is similar to vmap_pages_range, but does not * flush caches. * * The caller is responsible for calling flush_cache_vmap() after this * function returns successfully and before the addresses are accessed. * * This is an internal function only. Do not use outside mm/. */ int __vmap_pages_range_noflush(unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, unsigned int page_shift) { unsigned int i, nr = (end - addr) >> PAGE_SHIFT; WARN_ON(page_shift < PAGE_SHIFT); if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) || page_shift == PAGE_SHIFT) return vmap_small_pages_range_noflush(addr, end, prot, pages); for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) { int err; err = vmap_range_noflush(addr, addr + (1UL << page_shift), page_to_phys(pages[i]), prot, page_shift); if (err) return err; addr += 1UL << page_shift; } return 0; } int vmap_pages_range_noflush(unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, unsigned int page_shift) { int ret = kmsan_vmap_pages_range_noflush(addr, end, prot, pages, page_shift); if (ret) return ret; return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift); } /** * vmap_pages_range - map pages to a kernel virtual address * @addr: start of the VM area to map * @end: end of the VM area to map (non-inclusive) * @prot: page protection flags to use * @pages: pages to map (always PAGE_SIZE pages) * @page_shift: maximum shift that the pages may be mapped with, @pages must * be aligned and contiguous up to at least this shift. * * RETURNS: * 0 on success, -errno on failure. */ static int vmap_pages_range(unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, unsigned int page_shift) { int err; err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift); flush_cache_vmap(addr, end); return err; } int is_vmalloc_or_module_addr(const void *x) { /* * ARM, x86-64 and sparc64 put modules in a special place, * and fall back on vmalloc() if that fails. Others * just put it in the vmalloc space. */ #if defined(CONFIG_MODULES) && defined(MODULES_VADDR) unsigned long addr = (unsigned long)kasan_reset_tag(x); if (addr >= MODULES_VADDR && addr < MODULES_END) return 1; #endif return is_vmalloc_addr(x); } EXPORT_SYMBOL_GPL(is_vmalloc_or_module_addr); /* * Walk a vmap address to the struct page it maps. Huge vmap mappings will * return the tail page that corresponds to the base page address, which * matches small vmap mappings. 
*/ struct page *vmalloc_to_page(const void *vmalloc_addr) { unsigned long addr = (unsigned long) vmalloc_addr; struct page *page = NULL; pgd_t *pgd = pgd_offset_k(addr); p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *ptep, pte; /* * XXX we might need to change this if we add VIRTUAL_BUG_ON for * architectures that do not vmalloc module space */ VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr)); if (pgd_none(*pgd)) return NULL; if (WARN_ON_ONCE(pgd_leaf(*pgd))) return NULL; /* XXX: no allowance for huge pgd */ if (WARN_ON_ONCE(pgd_bad(*pgd))) return NULL; p4d = p4d_offset(pgd, addr); if (p4d_none(*p4d)) return NULL; if (p4d_leaf(*p4d)) return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT); if (WARN_ON_ONCE(p4d_bad(*p4d))) return NULL; pud = pud_offset(p4d, addr); if (pud_none(*pud)) return NULL; if (pud_leaf(*pud)) return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); if (WARN_ON_ONCE(pud_bad(*pud))) return NULL; pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) return NULL; if (pmd_leaf(*pmd)) return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); if (WARN_ON_ONCE(pmd_bad(*pmd))) return NULL; ptep = pte_offset_kernel(pmd, addr); pte = ptep_get(ptep); if (pte_present(pte)) page = pte_page(pte); return page; } EXPORT_SYMBOL(vmalloc_to_page); /* * Map a vmalloc()-space virtual address to the physical page frame number. */ unsigned long vmalloc_to_pfn(const void *vmalloc_addr) { return page_to_pfn(vmalloc_to_page(vmalloc_addr)); } EXPORT_SYMBOL(vmalloc_to_pfn); /*** Global kva allocator ***/ #define DEBUG_AUGMENT_PROPAGATE_CHECK 0 #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0 static DEFINE_SPINLOCK(vmap_area_lock); static DEFINE_SPINLOCK(free_vmap_area_lock); /* Export for kexec only */ LIST_HEAD(vmap_area_list); static struct rb_root vmap_area_root = RB_ROOT; static bool vmap_initialized __read_mostly; static struct rb_root purge_vmap_area_root = RB_ROOT; static LIST_HEAD(purge_vmap_area_list); static DEFINE_SPINLOCK(purge_vmap_area_lock); /* * This kmem_cache is used for vmap_area objects. Instead of * allocating from slab we reuse an object from this cache to * make things faster. Especially in "no edge" splitting of * free block. */ static struct kmem_cache *vmap_area_cachep; /* * This linked list is used in pair with free_vmap_area_root. * It gives O(1) access to prev/next to perform fast coalescing. */ static LIST_HEAD(free_vmap_area_list); /* * This augment red-black tree represents the free vmap space. * All vmap_area objects in this tree are sorted by va->va_start * address. It is used for allocation and merging when a vmap * object is released. * * Each vmap_area node contains a maximum available free block * of its sub-tree, right or left. Therefore it is possible to * find a lowest match of free area. */ static struct rb_root free_vmap_area_root = RB_ROOT; /* * Preload a CPU with one object for "no edge" split case. The * aim is to get rid of allocations from the atomic context, thus * to use more permissive allocation masks. */ static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node); static __always_inline unsigned long va_size(struct vmap_area *va) { return (va->va_end - va->va_start); } static __always_inline unsigned long get_subtree_max_size(struct rb_node *node) { struct vmap_area *va; va = rb_entry_safe(node, struct vmap_area, rb_node); return va ? 
va->subtree_max_size : 0; } RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb, struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size) static void reclaim_and_purge_vmap_areas(void); static BLOCKING_NOTIFIER_HEAD(vmap_notify_list); static void drain_vmap_area_work(struct work_struct *work); static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work); static atomic_long_t nr_vmalloc_pages; unsigned long vmalloc_nr_pages(void) { return atomic_long_read(&nr_vmalloc_pages); } /* Look up the first VA which satisfies addr < va_end, NULL if none. */ static struct vmap_area *find_vmap_area_exceed_addr(unsigned long addr) { struct vmap_area *va = NULL; struct rb_node *n = vmap_area_root.rb_node; addr = (unsigned long)kasan_reset_tag((void *)addr); while (n) { struct vmap_area *tmp; tmp = rb_entry(n, struct vmap_area, rb_node); if (tmp->va_end > addr) { va = tmp; if (tmp->va_start <= addr) break; n = n->rb_left; } else n = n->rb_right; } return va; } static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root) { struct rb_node *n = root->rb_node; addr = (unsigned long)kasan_reset_tag((void *)addr); while (n) { struct vmap_area *va; va = rb_entry(n, struct vmap_area, rb_node); if (addr < va->va_start) n = n->rb_left; else if (addr >= va->va_end) n = n->rb_right; else return va; } return NULL; } /* * This function returns back addresses of parent node * and its left or right link for further processing. * * Otherwise NULL is returned. In that case all further * steps regarding inserting of conflicting overlap range * have to be declined and actually considered as a bug. */ static __always_inline struct rb_node ** find_va_links(struct vmap_area *va, struct rb_root *root, struct rb_node *from, struct rb_node **parent) { struct vmap_area *tmp_va; struct rb_node **link; if (root) { link = &root->rb_node; if (unlikely(!*link)) { *parent = NULL; return link; } } else { link = &from; } /* * Go to the bottom of the tree. When we hit the last point * we end up with parent rb_node and correct direction, i name * it link, where the new va->rb_node will be attached to. */ do { tmp_va = rb_entry(*link, struct vmap_area, rb_node); /* * During the traversal we also do some sanity check. * Trigger the BUG() if there are sides(left/right) * or full overlaps. */ if (va->va_end <= tmp_va->va_start) link = &(*link)->rb_left; else if (va->va_start >= tmp_va->va_end) link = &(*link)->rb_right; else { WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n", va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end); return NULL; } } while (*link); *parent = &tmp_va->rb_node; return link; } static __always_inline struct list_head * get_va_next_sibling(struct rb_node *parent, struct rb_node **link) { struct list_head *list; if (unlikely(!parent)) /* * The red-black tree where we try to find VA neighbors * before merging or inserting is empty, i.e. it means * there is no free vmap space. Normally it does not * happen but we handle this case anyway. */ return NULL; list = &rb_entry(parent, struct vmap_area, rb_node)->list; return (&parent->rb_right == link ? list->next : list); } static __always_inline void __link_va(struct vmap_area *va, struct rb_root *root, struct rb_node *parent, struct rb_node **link, struct list_head *head, bool augment) { /* * VA is still not in the list, but we can * identify its future previous list_head node. 
*/ if (likely(parent)) { head = &rb_entry(parent, struct vmap_area, rb_node)->list; if (&parent->rb_right != link) head = head->prev; } /* Insert to the rb-tree */ rb_link_node(&va->rb_node, parent, link); if (augment) { /* * Some explanation here. Just perform simple insertion * to the tree. We do not set va->subtree_max_size to * its current size before calling rb_insert_augmented(). * It is because we populate the tree from the bottom * to parent levels when the node _is_ in the tree. * * Therefore we set subtree_max_size to zero after insertion, * to let __augment_tree_propagate_from() puts everything to * the correct order later on. */ rb_insert_augmented(&va->rb_node, root, &free_vmap_area_rb_augment_cb); va->subtree_max_size = 0; } else { rb_insert_color(&va->rb_node, root); } /* Address-sort this list */ list_add(&va->list, head); } static __always_inline void link_va(struct vmap_area *va, struct rb_root *root, struct rb_node *parent, struct rb_node **link, struct list_head *head) { __link_va(va, root, parent, link, head, false); } static __always_inline void link_va_augment(struct vmap_area *va, struct rb_root *root, struct rb_node *parent, struct rb_node **link, struct list_head *head) { __link_va(va, root, parent, link, head, true); } static __always_inline void __unlink_va(struct vmap_area *va, struct rb_root *root, bool augment) { if (WARN_ON(RB_EMPTY_NODE(&va->rb_node))) return; if (augment) rb_erase_augmented(&va->rb_node, root, &free_vmap_area_rb_augment_cb); else rb_erase(&va->rb_node, root); list_del_init(&va->list); RB_CLEAR_NODE(&va->rb_node); } static __always_inline void unlink_va(struct vmap_area *va, struct rb_root *root) { __unlink_va(va, root, false); } static __always_inline void unlink_va_augment(struct vmap_area *va, struct rb_root *root) { __unlink_va(va, root, true); } #if DEBUG_AUGMENT_PROPAGATE_CHECK /* * Gets called when remove the node and rotate. */ static __always_inline unsigned long compute_subtree_max_size(struct vmap_area *va) { return max3(va_size(va), get_subtree_max_size(va->rb_node.rb_left), get_subtree_max_size(va->rb_node.rb_right)); } static void augment_tree_propagate_check(void) { struct vmap_area *va; unsigned long computed_size; list_for_each_entry(va, &free_vmap_area_list, list) { computed_size = compute_subtree_max_size(va); if (computed_size != va->subtree_max_size) pr_emerg("tree is corrupted: %lu, %lu\n", va_size(va), va->subtree_max_size); } } #endif /* * This function populates subtree_max_size from bottom to upper * levels starting from VA point. The propagation must be done * when VA size is modified by changing its va_start/va_end. Or * in case of newly inserting of VA to the tree. * * It means that __augment_tree_propagate_from() must be called: * - After VA has been inserted to the tree(free path); * - After VA has been shrunk(allocation path); * - After VA has been increased(merging path). * * Please note that, it does not mean that upper parent nodes * and their subtree_max_size are recalculated all the time up * to the root node. * * 4--8 * /\ * / \ * / \ * 2--2 8--8 * * For example if we modify the node 4, shrinking it to 2, then * no any modification is required. If we shrink the node 2 to 1 * its subtree_max_size is updated only, and set to 1. If we shrink * the node 8 to 6, then its subtree_max_size is set to 6 and parent * node becomes 4--6. 
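 *
 * A rough sketch of what the propagation boils down to (the real code
 * is generated by RB_DECLARE_CALLBACKS_MAX() above; the loop below is
 * for illustration only):
 *
 *	for (node = &va->rb_node; node; node = rb_parent(node)) {
 *		tmp = rb_entry(node, struct vmap_area, rb_node);
 *		new = max3(va_size(tmp),
 *			   get_subtree_max_size(node->rb_left),
 *			   get_subtree_max_size(node->rb_right));
 *		if (tmp->subtree_max_size == new)
 *			break;		upper levels stay untouched
 *		tmp->subtree_max_size = new;
 *	}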
*/ static __always_inline void augment_tree_propagate_from(struct vmap_area *va) { /* * Populate the tree from bottom towards the root until * the calculated maximum available size of checked node * is equal to its current one. */ free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL); #if DEBUG_AUGMENT_PROPAGATE_CHECK augment_tree_propagate_check(); #endif } static void insert_vmap_area(struct vmap_area *va, struct rb_root *root, struct list_head *head) { struct rb_node **link; struct rb_node *parent; link = find_va_links(va, root, NULL, &parent); if (link) link_va(va, root, parent, link, head); } static void insert_vmap_area_augment(struct vmap_area *va, struct rb_node *from, struct rb_root *root, struct list_head *head) { struct rb_node **link; struct rb_node *parent; if (from) link = find_va_links(va, NULL, from, &parent); else link = find_va_links(va, root, NULL, &parent); if (link) { link_va_augment(va, root, parent, link, head); augment_tree_propagate_from(va); } } /* * Merge de-allocated chunk of VA memory with previous * and next free blocks. If coalesce is not done a new * free area is inserted. If VA has been merged, it is * freed. * * Please note, it can return NULL in case of overlap * ranges, followed by WARN() report. Despite it is a * buggy behaviour, a system can be alive and keep * ongoing. */ static __always_inline struct vmap_area * __merge_or_add_vmap_area(struct vmap_area *va, struct rb_root *root, struct list_head *head, bool augment) { struct vmap_area *sibling; struct list_head *next; struct rb_node **link; struct rb_node *parent; bool merged = false; /* * Find a place in the tree where VA potentially will be * inserted, unless it is merged with its sibling/siblings. */ link = find_va_links(va, root, NULL, &parent); if (!link) return NULL; /* * Get next node of VA to check if merging can be done. */ next = get_va_next_sibling(parent, link); if (unlikely(next == NULL)) goto insert; /* * start end * | | * |<------VA------>|<-----Next----->| * | | * start end */ if (next != head) { sibling = list_entry(next, struct vmap_area, list); if (sibling->va_start == va->va_end) { sibling->va_start = va->va_start; /* Free vmap_area object. */ kmem_cache_free(vmap_area_cachep, va); /* Point to the new merged area. */ va = sibling; merged = true; } } /* * start end * | | * |<-----Prev----->|<------VA------>| * | | * start end */ if (next->prev != head) { sibling = list_entry(next->prev, struct vmap_area, list); if (sibling->va_end == va->va_start) { /* * If both neighbors are coalesced, it is important * to unlink the "next" node first, followed by merging * with "previous" one. Otherwise the tree might not be * fully populated if a sibling's augmented value is * "normalized" because of rotation operations. */ if (merged) __unlink_va(va, root, augment); sibling->va_end = va->va_end; /* Free vmap_area object. */ kmem_cache_free(vmap_area_cachep, va); /* Point to the new merged area. 
*/ va = sibling; merged = true; } } insert: if (!merged) __link_va(va, root, parent, link, head, augment); return va; } static __always_inline struct vmap_area * merge_or_add_vmap_area(struct vmap_area *va, struct rb_root *root, struct list_head *head) { return __merge_or_add_vmap_area(va, root, head, false); } static __always_inline struct vmap_area * merge_or_add_vmap_area_augment(struct vmap_area *va, struct rb_root *root, struct list_head *head) { va = __merge_or_add_vmap_area(va, root, head, true); if (va) augment_tree_propagate_from(va); return va; } static __always_inline bool is_within_this_va(struct vmap_area *va, unsigned long size, unsigned long align, unsigned long vstart) { unsigned long nva_start_addr; if (va->va_start > vstart) nva_start_addr = ALIGN(va->va_start, align); else nva_start_addr = ALIGN(vstart, align); /* Can be overflowed due to big size or alignment. */ if (nva_start_addr + size < nva_start_addr || nva_start_addr < vstart) return false; return (nva_start_addr + size <= va->va_end); } /* * Find the first free block(lowest start address) in the tree, * that will accomplish the request corresponding to passing * parameters. Please note, with an alignment bigger than PAGE_SIZE, * a search length is adjusted to account for worst case alignment * overhead. */ static __always_inline struct vmap_area * find_vmap_lowest_match(struct rb_root *root, unsigned long size, unsigned long align, unsigned long vstart, bool adjust_search_size) { struct vmap_area *va; struct rb_node *node; unsigned long length; /* Start from the root. */ node = root->rb_node; /* Adjust the search size for alignment overhead. */ length = adjust_search_size ? size + align - 1 : size; while (node) { va = rb_entry(node, struct vmap_area, rb_node); if (get_subtree_max_size(node->rb_left) >= length && vstart < va->va_start) { node = node->rb_left; } else { if (is_within_this_va(va, size, align, vstart)) return va; /* * Does not make sense to go deeper towards the right * sub-tree if it does not have a free block that is * equal or bigger to the requested search length. */ if (get_subtree_max_size(node->rb_right) >= length) { node = node->rb_right; continue; } /* * OK. We roll back and find the first right sub-tree, * that will satisfy the search criteria. It can happen * due to "vstart" restriction or an alignment overhead * that is bigger then PAGE_SIZE. */ while ((node = rb_parent(node))) { va = rb_entry(node, struct vmap_area, rb_node); if (is_within_this_va(va, size, align, vstart)) return va; if (get_subtree_max_size(node->rb_right) >= length && vstart <= va->va_start) { /* * Shift the vstart forward. Please note, we update it with * parent's start address adding "1" because we do not want * to enter same sub-tree after it has already been checked * and no suitable free block found there. 
*/ vstart = va->va_start + 1; node = node->rb_right; break; } } } } return NULL; } #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK #include <linux/random.h> static struct vmap_area * find_vmap_lowest_linear_match(struct list_head *head, unsigned long size, unsigned long align, unsigned long vstart) { struct vmap_area *va; list_for_each_entry(va, head, list) { if (!is_within_this_va(va, size, align, vstart)) continue; return va; } return NULL; } static void find_vmap_lowest_match_check(struct rb_root *root, struct list_head *head, unsigned long size, unsigned long align) { struct vmap_area *va_1, *va_2; unsigned long vstart; unsigned int rnd; get_random_bytes(&rnd, sizeof(rnd)); vstart = VMALLOC_START + rnd; va_1 = find_vmap_lowest_match(root, size, align, vstart, false); va_2 = find_vmap_lowest_linear_match(head, size, align, vstart); if (va_1 != va_2) pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n", va_1, va_2, vstart); } #endif enum fit_type { NOTHING_FIT = 0, FL_FIT_TYPE = 1, /* full fit */ LE_FIT_TYPE = 2, /* left edge fit */ RE_FIT_TYPE = 3, /* right edge fit */ NE_FIT_TYPE = 4 /* no edge fit */ }; static __always_inline enum fit_type classify_va_fit_type(struct vmap_area *va, unsigned long nva_start_addr, unsigned long size) { enum fit_type type; /* Check if it is within VA. */ if (nva_start_addr < va->va_start || nva_start_addr + size > va->va_end) return NOTHING_FIT; /* Now classify. */ if (va->va_start == nva_start_addr) { if (va->va_end == nva_start_addr + size) type = FL_FIT_TYPE; else type = LE_FIT_TYPE; } else if (va->va_end == nva_start_addr + size) { type = RE_FIT_TYPE; } else { type = NE_FIT_TYPE; } return type; } static __always_inline int adjust_va_to_fit_type(struct rb_root *root, struct list_head *head, struct vmap_area *va, unsigned long nva_start_addr, unsigned long size) { struct vmap_area *lva = NULL; enum fit_type type = classify_va_fit_type(va, nva_start_addr, size); if (type == FL_FIT_TYPE) { /* * No need to split VA, it fully fits. * * | | * V NVA V * |---------------| */ unlink_va_augment(va, root); kmem_cache_free(vmap_area_cachep, va); } else if (type == LE_FIT_TYPE) { /* * Split left edge of fit VA. * * | | * V NVA V R * |-------|-------| */ va->va_start += size; } else if (type == RE_FIT_TYPE) { /* * Split right edge of fit VA. * * | | * L V NVA V * |-------|-------| */ va->va_end = nva_start_addr; } else if (type == NE_FIT_TYPE) { /* * Split no edge of fit VA. * * | | * L V NVA V R * |---|-------|---| */ lva = __this_cpu_xchg(ne_fit_preload_node, NULL); if (unlikely(!lva)) { /* * For percpu allocator we do not do any pre-allocation * and leave it as it is. The reason is it most likely * never ends up with NE_FIT_TYPE splitting. In case of * percpu allocations offsets and sizes are aligned to * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE * are its main fitting cases. * * There are a few exceptions though, as an example it is * a first allocation (early boot up) when we have "one" * big free space that has to be split. * * Also we can hit this path in case of regular "vmap" * allocations, if "this" current CPU was not preloaded. * See the comment in alloc_vmap_area() why. If so, then * GFP_NOWAIT is used instead to get an extra object for * split purpose. That is rare and most time does not * occur. * * What happens if an allocation gets failed. Basically, * an "overflow" path is triggered to purge lazily freed * areas to free some memory, then, the "retry" path is * triggered to repeat one more time. See more details * in alloc_vmap_area() function. 
*/ lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT); if (!lva) return -1; } /* * Build the remainder. */ lva->va_start = va->va_start; lva->va_end = nva_start_addr; /* * Shrink this VA to remaining size. */ va->va_start = nva_start_addr + size; } else { return -1; } if (type != FL_FIT_TYPE) { augment_tree_propagate_from(va); if (lva) /* type == NE_FIT_TYPE */ insert_vmap_area_augment(lva, &va->rb_node, root, head); } return 0; } /* * Returns a start address of the newly allocated area, if success. * Otherwise a vend is returned that indicates failure. */ static __always_inline unsigned long __alloc_vmap_area(struct rb_root *root, struct list_head *head, unsigned long size, unsigned long align, unsigned long vstart, unsigned long vend) { bool adjust_search_size = true; unsigned long nva_start_addr; struct vmap_area *va; int ret; /* * Do not adjust when: * a) align <= PAGE_SIZE, because it does not make any sense. * All blocks(their start addresses) are at least PAGE_SIZE * aligned anyway; * b) a short range where a requested size corresponds to exactly * specified [vstart:vend] interval and an alignment > PAGE_SIZE. * With adjusted search length an allocation would not succeed. */ if (align <= PAGE_SIZE || (align > PAGE_SIZE && (vend - vstart) == size)) adjust_search_size = false; va = find_vmap_lowest_match(root, size, align, vstart, adjust_search_size); if (unlikely(!va)) return vend; if (va->va_start > vstart) nva_start_addr = ALIGN(va->va_start, align); else nva_start_addr = ALIGN(vstart, align); /* Check the "vend" restriction. */ if (nva_start_addr + size > vend) return vend; /* Update the free vmap_area. */ ret = adjust_va_to_fit_type(root, head, va, nva_start_addr, size); if (WARN_ON_ONCE(ret)) return vend; #if DEBUG_AUGMENT_LOWEST_MATCH_CHECK find_vmap_lowest_match_check(root, head, size, align); #endif return nva_start_addr; } /* * Free a region of KVA allocated by alloc_vmap_area */ static void free_vmap_area(struct vmap_area *va) { /* * Remove from the busy tree/list. */ spin_lock(&vmap_area_lock); unlink_va(va, &vmap_area_root); spin_unlock(&vmap_area_lock); /* * Insert/Merge it back to the free tree/list. */ spin_lock(&free_vmap_area_lock); merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list); spin_unlock(&free_vmap_area_lock); } static inline void preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node) { struct vmap_area *va = NULL; /* * Preload this CPU with one extra vmap_area object. It is used * when fit type of free area is NE_FIT_TYPE. It guarantees that * a CPU that does an allocation is preloaded. * * We do it in non-atomic context, thus it allows us to use more * permissive allocation masks to be more stable under low memory * condition and high memory pressure. */ if (!this_cpu_read(ne_fit_preload_node)) va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); spin_lock(lock); if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va)) kmem_cache_free(vmap_area_cachep, va); } /* * Allocate a region of KVA of the specified size and alignment, within the * vstart and vend. 
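 *
 * A representative in-file caller is new_vmap_block() further below
 * (error handling trimmed):
 *
 *	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
 *			     VMALLOC_START, VMALLOC_END,
 *			     node, gfp_mask,
 *			     VMAP_RAM|VMAP_BLOCK);
 *	if (IS_ERR(va))
 *		return ERR_CAST(va);
 *
 * On success the returned area is already linked into the busy
 * tree/list; on failure an ERR_PTR() is returned.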
*/ static struct vmap_area *alloc_vmap_area(unsigned long size, unsigned long align, unsigned long vstart, unsigned long vend, int node, gfp_t gfp_mask, unsigned long va_flags) { struct vmap_area *va; unsigned long freed; unsigned long addr; int purged = 0; int ret; if (unlikely(!size || offset_in_page(size) || !is_power_of_2(align))) return ERR_PTR(-EINVAL); if (unlikely(!vmap_initialized)) return ERR_PTR(-EBUSY); might_sleep(); gfp_mask = gfp_mask & GFP_RECLAIM_MASK; va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); if (unlikely(!va)) return ERR_PTR(-ENOMEM); /* * Only scan the relevant parts containing pointers to other objects * to avoid false negatives. */ kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask); retry: preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node); addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list, size, align, vstart, vend); spin_unlock(&free_vmap_area_lock); trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend); /* * If an allocation fails, the "vend" address is * returned. Therefore trigger the overflow path. */ if (unlikely(addr == vend)) goto overflow; va->va_start = addr; va->va_end = addr + size; va->vm = NULL; va->flags = va_flags; spin_lock(&vmap_area_lock); insert_vmap_area(va, &vmap_area_root, &vmap_area_list); spin_unlock(&vmap_area_lock); BUG_ON(!IS_ALIGNED(va->va_start, align)); BUG_ON(va->va_start < vstart); BUG_ON(va->va_end > vend); ret = kasan_populate_vmalloc(addr, size); if (ret) { free_vmap_area(va); return ERR_PTR(ret); } return va; overflow: if (!purged) { reclaim_and_purge_vmap_areas(); purged = 1; goto retry; } freed = 0; blocking_notifier_call_chain(&vmap_notify_list, 0, &freed); if (freed > 0) { purged = 0; goto retry; } if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n", size); kmem_cache_free(vmap_area_cachep, va); return ERR_PTR(-EBUSY); } int register_vmap_purge_notifier(struct notifier_block *nb) { return blocking_notifier_chain_register(&vmap_notify_list, nb); } EXPORT_SYMBOL_GPL(register_vmap_purge_notifier); int unregister_vmap_purge_notifier(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&vmap_notify_list, nb); } EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier); /* * lazy_max_pages is the maximum amount of virtual address space we gather up * before attempting to purge with a TLB flush. * * There is a tradeoff here: a larger number will cover more kernel page tables * and take slightly longer to purge, but it will linearly reduce the number of * global TLB flushes that must be performed. It would seem natural to scale * this number up linearly with the number of CPUs (because vmapping activity * could also scale linearly with the number of CPUs), however it is likely * that in practice, workloads might be constrained in other ways that mean * vmap activity will not scale linearly with CPUs. Also, I want to be * conservative and not introduce a big latency on huge systems, so go with * a less aggressive log scale. It will still be an improvement over the old * code, and it will be simple to change the scale factor if we find that it * becomes a problem on bigger systems. */ static unsigned long lazy_max_pages(void) { unsigned int log; log = fls(num_online_cpus()); return log * (32UL * 1024 * 1024 / PAGE_SIZE); } static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0); /* * Serialize vmap purging. 
There is no actual critical section protected * by this lock, but we want to avoid concurrent calls for performance * reasons and to make the pcpu_get_vm_areas more deterministic. */ static DEFINE_MUTEX(vmap_purge_lock); /* for per-CPU blocks */ static void purge_fragmented_blocks_allcpus(void); /* * Purges all lazily-freed vmap areas. */ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end) { unsigned long resched_threshold; unsigned int num_purged_areas = 0; struct list_head local_purge_list; struct vmap_area *va, *n_va; lockdep_assert_held(&vmap_purge_lock); spin_lock(&purge_vmap_area_lock); purge_vmap_area_root = RB_ROOT; list_replace_init(&purge_vmap_area_list, &local_purge_list); spin_unlock(&purge_vmap_area_lock); if (unlikely(list_empty(&local_purge_list))) goto out; start = min(start, list_first_entry(&local_purge_list, struct vmap_area, list)->va_start); end = max(end, list_last_entry(&local_purge_list, struct vmap_area, list)->va_end); flush_tlb_kernel_range(start, end); resched_threshold = lazy_max_pages() << 1; spin_lock(&free_vmap_area_lock); list_for_each_entry_safe(va, n_va, &local_purge_list, list) { unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT; unsigned long orig_start = va->va_start; unsigned long orig_end = va->va_end; /* * Finally insert or merge lazily-freed area. It is * detached and there is no need to "unlink" it from * anything. */ va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list); if (!va) continue; if (is_vmalloc_or_module_addr((void *)orig_start)) kasan_release_vmalloc(orig_start, orig_end, va->va_start, va->va_end); atomic_long_sub(nr, &vmap_lazy_nr); num_purged_areas++; if (atomic_long_read(&vmap_lazy_nr) < resched_threshold) cond_resched_lock(&free_vmap_area_lock); } spin_unlock(&free_vmap_area_lock); out: trace_purge_vmap_area_lazy(start, end, num_purged_areas); return num_purged_areas > 0; } /* * Reclaim vmap areas by purging fragmented blocks and purge_vmap_area_list. */ static void reclaim_and_purge_vmap_areas(void) { mutex_lock(&vmap_purge_lock); purge_fragmented_blocks_allcpus(); __purge_vmap_area_lazy(ULONG_MAX, 0); mutex_unlock(&vmap_purge_lock); } static void drain_vmap_area_work(struct work_struct *work) { unsigned long nr_lazy; do { mutex_lock(&vmap_purge_lock); __purge_vmap_area_lazy(ULONG_MAX, 0); mutex_unlock(&vmap_purge_lock); /* Recheck if further work is required. */ nr_lazy = atomic_long_read(&vmap_lazy_nr); } while (nr_lazy > lazy_max_pages()); } /* * Free a vmap area, caller ensuring that the area has been unmapped, * unlinked and flush_cache_vunmap had been called for the correct * range previously. */ static void free_vmap_area_noflush(struct vmap_area *va) { unsigned long nr_lazy_max = lazy_max_pages(); unsigned long va_start = va->va_start; unsigned long nr_lazy; if (WARN_ON_ONCE(!list_empty(&va->list))) return; nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr); /* * Merge or place it to the purge tree/list. 
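 * Note that once the area has been placed on the purge tree/list it
 * may be merged into a neighbour and freed at any time, which is why
 * va_start and the lazy page count were sampled into locals above and
 * are what the tracepoint below reports.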
*/ spin_lock(&purge_vmap_area_lock); merge_or_add_vmap_area(va, &purge_vmap_area_root, &purge_vmap_area_list); spin_unlock(&purge_vmap_area_lock); trace_free_vmap_area_noflush(va_start, nr_lazy, nr_lazy_max); /* After this point, we may free va at any time */ if (unlikely(nr_lazy > nr_lazy_max)) schedule_work(&drain_vmap_work); } /* * Free and unmap a vmap area */ static void free_unmap_vmap_area(struct vmap_area *va) { flush_cache_vunmap(va->va_start, va->va_end); vunmap_range_noflush(va->va_start, va->va_end); if (debug_pagealloc_enabled_static()) flush_tlb_kernel_range(va->va_start, va->va_end); free_vmap_area_noflush(va); } struct vmap_area *find_vmap_area(unsigned long addr) { struct vmap_area *va; spin_lock(&vmap_area_lock); va = __find_vmap_area(addr, &vmap_area_root); spin_unlock(&vmap_area_lock); return va; } static struct vmap_area *find_unlink_vmap_area(unsigned long addr) { struct vmap_area *va; spin_lock(&vmap_area_lock); va = __find_vmap_area(addr, &vmap_area_root); if (va) unlink_va(va, &vmap_area_root); spin_unlock(&vmap_area_lock); return va; } /*** Per cpu kva allocator ***/ /* * vmap space is limited especially on 32 bit architectures. Ensure there is * room for at least 16 percpu vmap blocks per CPU. */ /* * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess * instead (we just need a rough idea) */ #if BITS_PER_LONG == 32 #define VMALLOC_SPACE (128UL*1024*1024) #else #define VMALLOC_SPACE (128UL*1024*1024*1024) #endif #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE) #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */ #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */ #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2) #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */ #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */ #define VMAP_BBMAP_BITS \ VMAP_MIN(VMAP_BBMAP_BITS_MAX, \ VMAP_MAX(VMAP_BBMAP_BITS_MIN, \ VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16)) #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE) /* * Purge threshold to prevent overeager purging of fragmented blocks for * regular operations: Purge if vb->free is less than 1/4 of the capacity. */ #define VMAP_PURGE_THRESHOLD (VMAP_BBMAP_BITS / 4) #define VMAP_RAM 0x1 /* indicates vm_map_ram area*/ #define VMAP_BLOCK 0x2 /* mark out the vmap_block sub-type*/ #define VMAP_FLAGS_MASK 0x3 struct vmap_block_queue { spinlock_t lock; struct list_head free; /* * An xarray requires an extra memory dynamically to * be allocated. If it is an issue, we can use rb-tree * instead. */ struct xarray vmap_blocks; }; struct vmap_block { spinlock_t lock; struct vmap_area *va; unsigned long free, dirty; DECLARE_BITMAP(used_map, VMAP_BBMAP_BITS); unsigned long dirty_min, dirty_max; /*< dirty range */ struct list_head free_list; struct rcu_head rcu_head; struct list_head purge; }; /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */ static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue); /* * In order to fast access to any "vmap_block" associated with a * specific address, we use a hash. * * A per-cpu vmap_block_queue is used in both ways, to serialize * an access to free block chains among CPUs(alloc path) and it * also acts as a vmap_block hash(alloc/free paths). It means we * overload it, since we already have the per-cpu array which is * used as a hash table. When used as a hash a 'cpu' passed to * per_cpu() is not actually a CPU but rather a hash index. 
* * A hash function is addr_to_vb_xa() which hashes any address * to a specific index(in a hash) it belongs to. This then uses a * per_cpu() macro to access an array with generated index. * * An example: * * CPU_1 CPU_2 CPU_0 * | | | * V V V * 0 10 20 30 40 50 60 * |------|------|------|------|------|------|...<vmap address space> * CPU0 CPU1 CPU2 CPU0 CPU1 CPU2 * * - CPU_1 invokes vm_unmap_ram(6), 6 belongs to CPU0 zone, thus * it access: CPU0/INDEX0 -> vmap_blocks -> xa_lock; * * - CPU_2 invokes vm_unmap_ram(11), 11 belongs to CPU1 zone, thus * it access: CPU1/INDEX1 -> vmap_blocks -> xa_lock; * * - CPU_0 invokes vm_unmap_ram(20), 20 belongs to CPU2 zone, thus * it access: CPU2/INDEX2 -> vmap_blocks -> xa_lock. * * This technique almost always avoids lock contention on insert/remove, * however xarray spinlocks protect against any contention that remains. */ static struct xarray * addr_to_vb_xa(unsigned long addr) { int index = (addr / VMAP_BLOCK_SIZE) % num_possible_cpus(); return &per_cpu(vmap_block_queue, index).vmap_blocks; } /* * We should probably have a fallback mechanism to allocate virtual memory * out of partially filled vmap blocks. However vmap block sizing should be * fairly reasonable according to the vmalloc size, so it shouldn't be a * big problem. */ static unsigned long addr_to_vb_idx(unsigned long addr) { addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1); addr /= VMAP_BLOCK_SIZE; return addr; } static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off) { unsigned long addr; addr = va_start + (pages_off << PAGE_SHIFT); BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start)); return (void *)addr; } /** * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this * block. Of course pages number can't exceed VMAP_BBMAP_BITS * @order: how many 2^order pages should be occupied in newly allocated block * @gfp_mask: flags for the page level allocator * * Return: virtual address in a newly allocated block or ERR_PTR(-errno) */ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) { struct vmap_block_queue *vbq; struct vmap_block *vb; struct vmap_area *va; struct xarray *xa; unsigned long vb_idx; int node, err; void *vaddr; node = numa_node_id(); vb = kmalloc_node(sizeof(struct vmap_block), gfp_mask & GFP_RECLAIM_MASK, node); if (unlikely(!vb)) return ERR_PTR(-ENOMEM); va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE, VMALLOC_START, VMALLOC_END, node, gfp_mask, VMAP_RAM|VMAP_BLOCK); if (IS_ERR(va)) { kfree(vb); return ERR_CAST(va); } vaddr = vmap_block_vaddr(va->va_start, 0); spin_lock_init(&vb->lock); vb->va = va; /* At least something should be left free */ BUG_ON(VMAP_BBMAP_BITS <= (1UL << order)); bitmap_zero(vb->used_map, VMAP_BBMAP_BITS); vb->free = VMAP_BBMAP_BITS - (1UL << order); vb->dirty = 0; vb->dirty_min = VMAP_BBMAP_BITS; vb->dirty_max = 0; bitmap_set(vb->used_map, 0, (1UL << order)); INIT_LIST_HEAD(&vb->free_list); xa = addr_to_vb_xa(va->va_start); vb_idx = addr_to_vb_idx(va->va_start); err = xa_insert(xa, vb_idx, vb, gfp_mask); if (err) { kfree(vb); free_vmap_area(va); return ERR_PTR(err); } vbq = raw_cpu_ptr(&vmap_block_queue); spin_lock(&vbq->lock); list_add_tail_rcu(&vb->free_list, &vbq->free); spin_unlock(&vbq->lock); return vaddr; } static void free_vmap_block(struct vmap_block *vb) { struct vmap_block *tmp; struct xarray *xa; xa = addr_to_vb_xa(vb->va->va_start); tmp = xa_erase(xa, addr_to_vb_idx(vb->va->va_start)); BUG_ON(tmp != vb); spin_lock(&vmap_area_lock); unlink_va(vb->va, &vmap_area_root); 
spin_unlock(&vmap_area_lock); free_vmap_area_noflush(vb->va); kfree_rcu(vb, rcu_head); } static bool purge_fragmented_block(struct vmap_block *vb, struct vmap_block_queue *vbq, struct list_head *purge_list, bool force_purge) { if (vb->free + vb->dirty != VMAP_BBMAP_BITS || vb->dirty == VMAP_BBMAP_BITS) return false; /* Don't overeagerly purge usable blocks unless requested */ if (!(force_purge || vb->free < VMAP_PURGE_THRESHOLD)) return false; /* prevent further allocs after releasing lock */ WRITE_ONCE(vb->free, 0); /* prevent purging it again */ WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS); vb->dirty_min = 0; vb->dirty_max = VMAP_BBMAP_BITS; spin_lock(&vbq->lock); list_del_rcu(&vb->free_list); spin_unlock(&vbq->lock); list_add_tail(&vb->purge, purge_list); return true; } static void free_purged_blocks(struct list_head *purge_list) { struct vmap_block *vb, *n_vb; list_for_each_entry_safe(vb, n_vb, purge_list, purge) { list_del(&vb->purge); free_vmap_block(vb); } } static void purge_fragmented_blocks(int cpu) { LIST_HEAD(purge); struct vmap_block *vb; struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); rcu_read_lock(); list_for_each_entry_rcu(vb, &vbq->free, free_list) { unsigned long free = READ_ONCE(vb->free); unsigned long dirty = READ_ONCE(vb->dirty); if (free + dirty != VMAP_BBMAP_BITS || dirty == VMAP_BBMAP_BITS) continue; spin_lock(&vb->lock); purge_fragmented_block(vb, vbq, &purge, true); spin_unlock(&vb->lock); } rcu_read_unlock(); free_purged_blocks(&purge); } static void purge_fragmented_blocks_allcpus(void) { int cpu; for_each_possible_cpu(cpu) purge_fragmented_blocks(cpu); } static void *vb_alloc(unsigned long size, gfp_t gfp_mask) { struct vmap_block_queue *vbq; struct vmap_block *vb; void *vaddr = NULL; unsigned int order; BUG_ON(offset_in_page(size)); BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); if (WARN_ON(size == 0)) { /* * Allocating 0 bytes isn't what caller wants since * get_order(0) returns funny result. Just warn and terminate * early. 
*/ return NULL; } order = get_order(size); rcu_read_lock(); vbq = raw_cpu_ptr(&vmap_block_queue); list_for_each_entry_rcu(vb, &vbq->free, free_list) { unsigned long pages_off; if (READ_ONCE(vb->free) < (1UL << order)) continue; spin_lock(&vb->lock); if (vb->free < (1UL << order)) { spin_unlock(&vb->lock); continue; } pages_off = VMAP_BBMAP_BITS - vb->free; vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); WRITE_ONCE(vb->free, vb->free - (1UL << order)); bitmap_set(vb->used_map, pages_off, (1UL << order)); if (vb->free == 0) { spin_lock(&vbq->lock); list_del_rcu(&vb->free_list); spin_unlock(&vbq->lock); } spin_unlock(&vb->lock); break; } rcu_read_unlock(); /* Allocate new block if nothing was found */ if (!vaddr) vaddr = new_vmap_block(order, gfp_mask); return vaddr; } static void vb_free(unsigned long addr, unsigned long size) { unsigned long offset; unsigned int order; struct vmap_block *vb; struct xarray *xa; BUG_ON(offset_in_page(size)); BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); flush_cache_vunmap(addr, addr + size); order = get_order(size); offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT; xa = addr_to_vb_xa(addr); vb = xa_load(xa, addr_to_vb_idx(addr)); spin_lock(&vb->lock); bitmap_clear(vb->used_map, offset, (1UL << order)); spin_unlock(&vb->lock); vunmap_range_noflush(addr, addr + size); if (debug_pagealloc_enabled_static()) flush_tlb_kernel_range(addr, addr + size); spin_lock(&vb->lock); /* Expand the not yet TLB flushed dirty range */ vb->dirty_min = min(vb->dirty_min, offset); vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order)); if (vb->dirty == VMAP_BBMAP_BITS) { BUG_ON(vb->free); spin_unlock(&vb->lock); free_vmap_block(vb); } else spin_unlock(&vb->lock); } static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) { LIST_HEAD(purge_list); int cpu; if (unlikely(!vmap_initialized)) return; mutex_lock(&vmap_purge_lock); for_each_possible_cpu(cpu) { struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); struct vmap_block *vb; unsigned long idx; rcu_read_lock(); xa_for_each(&vbq->vmap_blocks, idx, vb) { spin_lock(&vb->lock); /* * Try to purge a fragmented block first. If it's * not purgeable, check whether there is dirty * space to be flushed. */ if (!purge_fragmented_block(vb, vbq, &purge_list, false) && vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) { unsigned long va_start = vb->va->va_start; unsigned long s, e; s = va_start + (vb->dirty_min << PAGE_SHIFT); e = va_start + (vb->dirty_max << PAGE_SHIFT); start = min(s, start); end = max(e, end); /* Prevent that this is flushed again */ vb->dirty_min = VMAP_BBMAP_BITS; vb->dirty_max = 0; flush = 1; } spin_unlock(&vb->lock); } rcu_read_unlock(); } free_purged_blocks(&purge_list); if (!__purge_vmap_area_lazy(start, end) && flush) flush_tlb_kernel_range(start, end); mutex_unlock(&vmap_purge_lock); } /** * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer * * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily * to amortize TLB flushing overheads. What this means is that any page you * have now, may, in a former life, have been mapped into kernel virtual * address by the vmap layer and so there might be some CPUs with TLB entries * still referencing that page (additional to the regular 1:1 kernel mapping). * * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can * be sure that none of the pages we have control over will have any aliases * from the vmap layer. 
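 *
 * A hedged usage sketch (the caller-side helper below is hypothetical,
 * not something defined in this file): code that is about to change
 * attributes of pages which might still have lazily kept aliases would
 * typically do
 *
 *	vm_unmap_aliases();
 *	my_change_page_attrs(page);	hypothetical arch/driver helper
 *
 * so that no CPU retains a stale TLB entry for an old vmap alias of
 * that page.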
*/ void vm_unmap_aliases(void) { unsigned long start = ULONG_MAX, end = 0; int flush = 0; _vm_unmap_aliases(start, end, flush); } EXPORT_SYMBOL_GPL(vm_unmap_aliases); /** * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram * @mem: the pointer returned by vm_map_ram * @count: the count passed to that vm_map_ram call (cannot unmap partial) */ void vm_unmap_ram(const void *mem, unsigned int count) { unsigned long size = (unsigned long)count << PAGE_SHIFT; unsigned long addr = (unsigned long)kasan_reset_tag(mem); struct vmap_area *va; might_sleep(); BUG_ON(!addr); BUG_ON(addr < VMALLOC_START); BUG_ON(addr > VMALLOC_END); BUG_ON(!PAGE_ALIGNED(addr)); kasan_poison_vmalloc(mem, size); if (likely(count <= VMAP_MAX_ALLOC)) { debug_check_no_locks_freed(mem, size); vb_free(addr, size); return; } va = find_unlink_vmap_area(addr); if (WARN_ON_ONCE(!va)) return; debug_check_no_locks_freed((void *)va->va_start, (va->va_end - va->va_start)); free_unmap_vmap_area(va); } EXPORT_SYMBOL(vm_unmap_ram); /** * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space) * @pages: an array of pointers to the pages to be mapped * @count: number of pages * @node: prefer to allocate data structures on this node * * If you use this function for less than VMAP_MAX_ALLOC pages, it could be * faster than vmap so it's good. But if you mix long-life and short-life * objects with vm_map_ram(), it could consume lots of address space through * fragmentation (especially on a 32bit machine). You could see failures in * the end. Please use this function for short-lived objects. * * Returns: a pointer to the address that has been mapped, or %NULL on failure */ void *vm_map_ram(struct page **pages, unsigned int count, int node) { unsigned long size = (unsigned long)count << PAGE_SHIFT; unsigned long addr; void *mem; if (likely(count <= VMAP_MAX_ALLOC)) { mem = vb_alloc(size, GFP_KERNEL); if (IS_ERR(mem)) return NULL; addr = (unsigned long)mem; } else { struct vmap_area *va; va = alloc_vmap_area(size, PAGE_SIZE, VMALLOC_START, VMALLOC_END, node, GFP_KERNEL, VMAP_RAM); if (IS_ERR(va)) return NULL; addr = va->va_start; mem = (void *)addr; } if (vmap_pages_range(addr, addr + size, PAGE_KERNEL, pages, PAGE_SHIFT) < 0) { vm_unmap_ram(mem, count); return NULL; } /* * Mark the pages as accessible, now that they are mapped. * With hardware tag-based KASAN, marking is skipped for * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). */ mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_PROT_NORMAL); return mem; } EXPORT_SYMBOL(vm_map_ram); static struct vm_struct *vmlist __initdata; static inline unsigned int vm_area_page_order(struct vm_struct *vm) { #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC return vm->page_order; #else return 0; #endif } static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order) { #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC vm->page_order = order; #else BUG_ON(order != 0); #endif } /** * vm_area_add_early - add vmap area early during boot * @vm: vm_struct to add * * This function is used to add fixed kernel vm area to vmlist before * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags * should contain proper values and the other fields should be zero. * * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. 
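 *
 * A minimal sketch of a caller, with all names and values hypothetical:
 *
 *	static struct vm_struct early_area;		needs static lifetime
 *
 *	early_area.addr  = (void *)EARLY_FIXED_KVA;	assumed constant
 *	early_area.size  = SZ_2M;
 *	early_area.flags = VM_IOREMAP;
 *	vm_area_add_early(&early_area);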
*/ void __init vm_area_add_early(struct vm_struct *vm) { struct vm_struct *tmp, **p; BUG_ON(vmap_initialized); for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { if (tmp->addr >= vm->addr) { BUG_ON(tmp->addr < vm->addr + vm->size); break; } else BUG_ON(tmp->addr + tmp->size > vm->addr); } vm->next = *p; *p = vm; } /** * vm_area_register_early - register vmap area early during boot * @vm: vm_struct to register * @align: requested alignment * * This function is used to register kernel vm area before * vmalloc_init() is called. @vm->size and @vm->flags should contain * proper values on entry and other fields should be zero. On return, * vm->addr contains the allocated address. * * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. */ void __init vm_area_register_early(struct vm_struct *vm, size_t align) { unsigned long addr = ALIGN(VMALLOC_START, align); struct vm_struct *cur, **p; BUG_ON(vmap_initialized); for (p = &vmlist; (cur = *p) != NULL; p = &cur->next) { if ((unsigned long)cur->addr - addr >= vm->size) break; addr = ALIGN((unsigned long)cur->addr + cur->size, align); } BUG_ON(addr > VMALLOC_END - vm->size); vm->addr = (void *)addr; vm->next = *p; *p = vm; kasan_populate_early_vm_area_shadow(vm->addr, vm->size); } static void vmap_init_free_space(void) { unsigned long vmap_start = 1; const unsigned long vmap_end = ULONG_MAX; struct vmap_area *busy, *free; /* * B F B B B F * -|-----|.....|-----|-----|-----|.....|- * | The KVA space | * |<--------------------------------->| */ list_for_each_entry(busy, &vmap_area_list, list) { if (busy->va_start - vmap_start > 0) { free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); if (!WARN_ON_ONCE(!free)) { free->va_start = vmap_start; free->va_end = busy->va_start; insert_vmap_area_augment(free, NULL, &free_vmap_area_root, &free_vmap_area_list); } } vmap_start = busy->va_end; } if (vmap_end - vmap_start > 0) { free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); if (!WARN_ON_ONCE(!free)) { free->va_start = vmap_start; free->va_end = vmap_end; insert_vmap_area_augment(free, NULL, &free_vmap_area_root, &free_vmap_area_list); } } } static inline void setup_vmalloc_vm_locked(struct vm_struct *vm, struct vmap_area *va, unsigned long flags, const void *caller) { vm->flags = flags; vm->addr = (void *)va->va_start; vm->size = va->va_end - va->va_start; vm->caller = caller; va->vm = vm; } static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, unsigned long flags, const void *caller) { spin_lock(&vmap_area_lock); setup_vmalloc_vm_locked(vm, va, flags, caller); spin_unlock(&vmap_area_lock); } static void clear_vm_uninitialized_flag(struct vm_struct *vm) { /* * Before removing VM_UNINITIALIZED, * we should make sure that vm has proper values. * Pair with smp_rmb() in show_numa_info(). 
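 *
 * A rough sketch of the pairing (the reader side lives where the
 * smp_rmb() mentioned above is; shown here only for illustration):
 *
 *	writer (this path)		reader
 *	------------------		------
 *	set up vm fields		read vm->flags
 *	smp_wmb()			if VM_UNINITIALIZED: skip entry
 *	clear VM_UNINITIALIZED		smp_rmb()
 *					read vm fields safely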
*/ smp_wmb(); vm->flags &= ~VM_UNINITIALIZED; } static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long align, unsigned long shift, unsigned long flags, unsigned long start, unsigned long end, int node, gfp_t gfp_mask, const void *caller) { struct vmap_area *va; struct vm_struct *area; unsigned long requested_size = size; BUG_ON(in_interrupt()); size = ALIGN(size, 1ul << shift); if (unlikely(!size)) return NULL; if (flags & VM_IOREMAP) align = 1ul << clamp_t(int, get_count_order_long(size), PAGE_SHIFT, IOREMAP_MAX_ORDER); area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); if (unlikely(!area)) return NULL; if (!(flags & VM_NO_GUARD)) size += PAGE_SIZE; va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0); if (IS_ERR(va)) { kfree(area); return NULL; } setup_vmalloc_vm(area, va, flags, caller); /* * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a * best-effort approach, as they can be mapped outside of vmalloc code. * For VM_ALLOC mappings, the pages are marked as accessible after * getting mapped in __vmalloc_node_range(). * With hardware tag-based KASAN, marking is skipped for * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). */ if (!(flags & VM_ALLOC)) area->addr = kasan_unpoison_vmalloc(area->addr, requested_size, KASAN_VMALLOC_PROT_NORMAL); return area; } struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, unsigned long start, unsigned long end, const void *caller) { return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end, NUMA_NO_NODE, GFP_KERNEL, caller); } /** * get_vm_area - reserve a contiguous kernel virtual area * @size: size of the area * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC * * Search an area of @size in the kernel virtual mapping area, * and reserved it for out purposes. Returns the area descriptor * on success or %NULL on failure. * * Return: the area descriptor on success or %NULL on failure. */ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) { return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, VMALLOC_START, VMALLOC_END, NUMA_NO_NODE, GFP_KERNEL, __builtin_return_address(0)); } struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, const void *caller) { return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, VMALLOC_START, VMALLOC_END, NUMA_NO_NODE, GFP_KERNEL, caller); } /** * find_vm_area - find a continuous kernel virtual area * @addr: base address * * Search for the kernel VM area starting at @addr, and return it. * It is up to the caller to do all required locking to keep the returned * pointer valid. * * Return: the area descriptor on success or %NULL on failure. */ struct vm_struct *find_vm_area(const void *addr) { struct vmap_area *va; va = find_vmap_area((unsigned long)addr); if (!va) return NULL; return va->vm; } /** * remove_vm_area - find and remove a continuous kernel virtual area * @addr: base address * * Search for the kernel VM area starting at @addr, and remove it. * This function returns the found VM area, but using it is NOT safe * on SMP machines, except for its size or flags. * * Return: the area descriptor on success or %NULL on failure. 
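 *
 * Callers such as vfree() and vunmap() below pair this with releasing
 * the backing memory and kfree() of the returned vm_struct; a caller
 * managing its own pages would roughly do
 *
 *	vm = remove_vm_area(addr);
 *	if (vm) {
 *		... release any backing pages the caller owns ...
 *		kfree(vm);
 *	}
 *
 * because the vm_struct itself is not freed here.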
*/ struct vm_struct *remove_vm_area(const void *addr) { struct vmap_area *va; struct vm_struct *vm; might_sleep(); if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n", addr)) return NULL; va = find_unlink_vmap_area((unsigned long)addr); if (!va || !va->vm) return NULL; vm = va->vm; debug_check_no_locks_freed(vm->addr, get_vm_area_size(vm)); debug_check_no_obj_freed(vm->addr, get_vm_area_size(vm)); kasan_free_module_shadow(vm); kasan_poison_vmalloc(vm->addr, get_vm_area_size(vm)); free_unmap_vmap_area(va); return vm; } static inline void set_area_direct_map(const struct vm_struct *area, int (*set_direct_map)(struct page *page)) { int i; /* HUGE_VMALLOC passes small pages to set_direct_map */ for (i = 0; i < area->nr_pages; i++) if (page_address(area->pages[i])) set_direct_map(area->pages[i]); } /* * Flush the vm mapping and reset the direct map. */ static void vm_reset_perms(struct vm_struct *area) { unsigned long start = ULONG_MAX, end = 0; unsigned int page_order = vm_area_page_order(area); int flush_dmap = 0; int i; /* * Find the start and end range of the direct mappings to make sure that * the vm_unmap_aliases() flush includes the direct map. */ for (i = 0; i < area->nr_pages; i += 1U << page_order) { unsigned long addr = (unsigned long)page_address(area->pages[i]); if (addr) { unsigned long page_size; page_size = PAGE_SIZE << page_order; start = min(addr, start); end = max(addr + page_size, end); flush_dmap = 1; } } /* * Set direct map to something invalid so that it won't be cached if * there are any accesses after the TLB flush, then flush the TLB and * reset the direct map permissions to the default. */ set_area_direct_map(area, set_direct_map_invalid_noflush); _vm_unmap_aliases(start, end, flush_dmap); set_area_direct_map(area, set_direct_map_default_noflush); } static void delayed_vfree_work(struct work_struct *w) { struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq); struct llist_node *t, *llnode; llist_for_each_safe(llnode, t, llist_del_all(&p->list)) vfree(llnode); } /** * vfree_atomic - release memory allocated by vmalloc() * @addr: memory base address * * This one is just like vfree() but can be called in any atomic context * except NMIs. */ void vfree_atomic(const void *addr) { struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred); BUG_ON(in_nmi()); kmemleak_free(addr); /* * Use raw_cpu_ptr() because this can be called from preemptible * context. Preemption is absolutely fine here, because the llist_add() * implementation is lockless, so it works even if we are adding to * another cpu's list. schedule_work() should be fine with this too. */ if (addr && llist_add((struct llist_node *)addr, &p->list)) schedule_work(&p->wq); } /** * vfree - Release memory allocated by vmalloc() * @addr: Memory base address * * Free the virtually continuous memory area starting at @addr, as obtained * from one of the vmalloc() family of APIs. This will usually also free the * physical memory underlying the virtual allocation, but that memory is * reference counted, so it will not be freed until the last user goes away. * * If @addr is NULL, no operation is performed. * * Context: * May sleep if called *not* from interrupt context. * Must not be called in NMI context (strictly speaking, it could be * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling * conventions for vfree() arch-dependent would be a really bad idea). 
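 *
 * Note that when called from interrupt context the work is simply
 * deferred via vfree_atomic(), as can be seen at the top of the
 * function below.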
*/ void vfree(const void *addr) { struct vm_struct *vm; int i; if (unlikely(in_interrupt())) { vfree_atomic(addr); return; } BUG_ON(in_nmi()); kmemleak_free(addr); might_sleep(); if (!addr) return; vm = remove_vm_area(addr); if (unlikely(!vm)) { WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", addr); return; } if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS)) vm_reset_perms(vm); for (i = 0; i < vm->nr_pages; i++) { struct page *page = vm->pages[i]; BUG_ON(!page); mod_memcg_page_state(page, MEMCG_VMALLOC, -1); /* * High-order allocs for huge vmallocs are split, so * can be freed as an array of order-0 allocations */ __free_page(page); cond_resched(); } atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages); kvfree(vm->pages); kfree(vm); } EXPORT_SYMBOL(vfree); /** * vunmap - release virtual mapping obtained by vmap() * @addr: memory base address * * Free the virtually contiguous memory area starting at @addr, * which was created from the page array passed to vmap(). * * Must not be called in interrupt context. */ void vunmap(const void *addr) { struct vm_struct *vm; BUG_ON(in_interrupt()); might_sleep(); if (!addr) return; vm = remove_vm_area(addr); if (unlikely(!vm)) { WARN(1, KERN_ERR "Trying to vunmap() nonexistent vm area (%p)\n", addr); return; } kfree(vm); } EXPORT_SYMBOL(vunmap); /** * vmap - map an array of pages into virtually contiguous space * @pages: array of page pointers * @count: number of pages to map * @flags: vm_area->flags * @prot: page protection for the mapping * * Maps @count pages from @pages into contiguous kernel virtual space. * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself * (which must be kmalloc or vmalloc memory) and one reference per pages in it * are transferred from the caller to vmap(), and will be freed / dropped when * vfree() is called on the return value. * * Return: the address of the area or %NULL on failure */ void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot) { struct vm_struct *area; unsigned long addr; unsigned long size; /* In bytes */ might_sleep(); if (WARN_ON_ONCE(flags & VM_FLUSH_RESET_PERMS)) return NULL; /* * Your top guard is someone else's bottom guard. Not having a top * guard compromises someone else's mappings too. */ if (WARN_ON_ONCE(flags & VM_NO_GUARD)) flags &= ~VM_NO_GUARD; if (count > totalram_pages()) return NULL; size = (unsigned long)count << PAGE_SHIFT; area = get_vm_area_caller(size, flags, __builtin_return_address(0)); if (!area) return NULL; addr = (unsigned long)area->addr; if (vmap_pages_range(addr, addr + size, pgprot_nx(prot), pages, PAGE_SHIFT) < 0) { vunmap(area->addr); return NULL; } if (flags & VM_MAP_PUT_PAGES) { area->pages = pages; area->nr_pages = count; } return area->addr; } EXPORT_SYMBOL(vmap); #ifdef CONFIG_VMAP_PFN struct vmap_pfn_data { unsigned long *pfns; pgprot_t prot; unsigned int idx; }; static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private) { struct vmap_pfn_data *data = private; unsigned long pfn = data->pfns[data->idx]; pte_t ptent; if (WARN_ON_ONCE(pfn_valid(pfn))) return -EINVAL; ptent = pte_mkspecial(pfn_pte(pfn, data->prot)); set_pte_at(&init_mm, addr, pte, ptent); data->idx++; return 0; } /** * vmap_pfn - map an array of PFNs into virtually contiguous space * @pfns: array of PFNs * @count: number of pages to map * @prot: page protection for the mapping * * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns * the start address of the mapping. 
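 *
 * The PFNs must not have struct pages behind them (pfn_valid() PFNs
 * are rejected by the pte callback below). A hedged usage sketch, all
 * caller-side names hypothetical:
 *
 *	unsigned long pfns[16];		filled with e.g. device memory PFNs
 *	void *vaddr;
 *
 *	vaddr = vmap_pfn(pfns, 16, PAGE_KERNEL);
 *	if (!vaddr)
 *		return -ENOMEM;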
*/ void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot) { struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) }; struct vm_struct *area; area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP, __builtin_return_address(0)); if (!area) return NULL; if (apply_to_page_range(&init_mm, (unsigned long)area->addr, count * PAGE_SIZE, vmap_pfn_apply, &data)) { free_vm_area(area); return NULL; } flush_cache_vmap((unsigned long)area->addr, (unsigned long)area->addr + count * PAGE_SIZE); return area->addr; } EXPORT_SYMBOL_GPL(vmap_pfn); #endif /* CONFIG_VMAP_PFN */ static inline unsigned int vm_area_alloc_pages(gfp_t gfp, int nid, unsigned int order, unsigned int nr_pages, struct page **pages) { unsigned int nr_allocated = 0; gfp_t alloc_gfp = gfp; bool nofail = false; struct page *page; int i; /* * For order-0 pages we make use of bulk allocator, if * the page array is partly or not at all populated due * to fails, fallback to a single page allocator that is * more permissive. */ if (!order) { /* bulk allocator doesn't support nofail req. officially */ gfp_t bulk_gfp = gfp & ~__GFP_NOFAIL; while (nr_allocated < nr_pages) { unsigned int nr, nr_pages_request; /* * A maximum allowed request is hard-coded and is 100 * pages per call. That is done in order to prevent a * long preemption off scenario in the bulk-allocator * so the range is [1:100]. */ nr_pages_request = min(100U, nr_pages - nr_allocated); /* memory allocation should consider mempolicy, we can't * wrongly use nearest node when nid == NUMA_NO_NODE, * otherwise memory may be allocated in only one node, * but mempolicy wants to alloc memory by interleaving. */ if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE) nr = alloc_pages_bulk_array_mempolicy(bulk_gfp, nr_pages_request, pages + nr_allocated); else nr = alloc_pages_bulk_array_node(bulk_gfp, nid, nr_pages_request, pages + nr_allocated); nr_allocated += nr; cond_resched(); /* * If zero or pages were obtained partly, * fallback to a single page allocator. */ if (nr != nr_pages_request) break; } } else if (gfp & __GFP_NOFAIL) { /* * Higher order nofail allocations are really expensive and * potentially dangerous (pre-mature OOM, disruptive reclaim * and compaction etc. */ alloc_gfp &= ~__GFP_NOFAIL; nofail = true; } /* High-order pages or fallback path if "bulk" fails. */ while (nr_allocated < nr_pages) { if (fatal_signal_pending(current)) break; if (nid == NUMA_NO_NODE) page = alloc_pages(alloc_gfp, order); else page = alloc_pages_node(nid, alloc_gfp, order); if (unlikely(!page)) { if (!nofail) break; /* fall back to the zero order allocations */ alloc_gfp |= __GFP_NOFAIL; order = 0; continue; } /* * Higher order allocations must be able to be treated as * indepdenent small pages by callers (as they can with * small-page vmallocs). Some drivers do their own refcounting * on vmalloc_to_page() pages, some use page->mapping, * page->lru, etc. */ if (order) split_page(page, order); /* * Careful, we allocate and map page-order pages, but * tracking is done per PAGE_SIZE page so as to keep the * vm_struct APIs independent of the physical/mapped size. 
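 *
 * For example, a single order-2 page allocated above ends up as four
 * consecutive order-0 entries in the pages[] array (it was split by
 * split_page() above), and nr_allocated advances by 1U << order.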
*/ for (i = 0; i < (1U << order); i++) pages[nr_allocated + i] = page + i; cond_resched(); nr_allocated += 1U << order; } return nr_allocated; } static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot, unsigned int page_shift, int node) { const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; bool nofail = gfp_mask & __GFP_NOFAIL; unsigned long addr = (unsigned long)area->addr; unsigned long size = get_vm_area_size(area); unsigned long array_size; unsigned int nr_small_pages = size >> PAGE_SHIFT; unsigned int page_order; unsigned int flags; int ret; array_size = (unsigned long)nr_small_pages * sizeof(struct page *); if (!(gfp_mask & (GFP_DMA | GFP_DMA32))) gfp_mask |= __GFP_HIGHMEM; /* Please note that the recursion is strictly bounded. */ if (array_size > PAGE_SIZE) { area->pages = __vmalloc_node(array_size, 1, nested_gfp, node, area->caller); } else { area->pages = kmalloc_node(array_size, nested_gfp, node); } if (!area->pages) { warn_alloc(gfp_mask, NULL, "vmalloc error: size %lu, failed to allocated page array size %lu", nr_small_pages * PAGE_SIZE, array_size); free_vm_area(area); return NULL; } set_vm_area_page_order(area, page_shift - PAGE_SHIFT); page_order = vm_area_page_order(area); area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN, node, page_order, nr_small_pages, area->pages); atomic_long_add(area->nr_pages, &nr_vmalloc_pages); if (gfp_mask & __GFP_ACCOUNT) { int i; for (i = 0; i < area->nr_pages; i++) mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1); } /* * If not enough pages were obtained to accomplish an * allocation request, free them via vfree() if any. */ if (area->nr_pages != nr_small_pages) { /* * vm_area_alloc_pages() can fail due to insufficient memory but * also:- * * - a pending fatal signal * - insufficient huge page-order pages * * Since we always retry allocations at order-0 in the huge page * case a warning for either is spurious. */ if (!fatal_signal_pending(current) && page_order == 0) warn_alloc(gfp_mask, NULL, "vmalloc error: size %lu, failed to allocate pages", area->nr_pages * PAGE_SIZE); goto fail; } /* * page tables allocations ignore external gfp mask, enforce it * by the scope API */ if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO) flags = memalloc_nofs_save(); else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0) flags = memalloc_noio_save(); do { ret = vmap_pages_range(addr, addr + size, prot, area->pages, page_shift); if (nofail && (ret < 0)) schedule_timeout_uninterruptible(1); } while (nofail && (ret < 0)); if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO) memalloc_nofs_restore(flags); else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0) memalloc_noio_restore(flags); if (ret < 0) { warn_alloc(gfp_mask, NULL, "vmalloc error: size %lu, failed to map pages", area->nr_pages * PAGE_SIZE); goto fail; } return area->addr; fail: vfree(area->addr); return NULL; } /** * __vmalloc_node_range - allocate virtually contiguous memory * @size: allocation size * @align: desired alignment * @start: vm area range start * @end: vm area range end * @gfp_mask: flags for the page level allocator * @prot: protection mask for the allocated pages * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD) * @node: node to use for allocation or NUMA_NO_NODE * @caller: caller's return address * * Allocate enough pages to cover @size from the page level * allocator with @gfp_mask flags. Please note that the full set of gfp * flags are not supported. GFP_KERNEL, GFP_NOFS and GFP_NOIO are all * supported. 
* Zone modifiers are not supported. From the reclaim modifiers * __GFP_DIRECT_RECLAIM is required (aka GFP_NOWAIT is not supported) * and only __GFP_NOFAIL is supported (i.e. __GFP_NORETRY and * __GFP_RETRY_MAYFAIL are not supported). * * __GFP_NOWARN can be used to suppress failures messages. * * Map them into contiguous kernel virtual space, using a pagetable * protection of @prot. * * Return: the address of the area or %NULL on failure */ void *__vmalloc_node_range(unsigned long size, unsigned long align, unsigned long start, unsigned long end, gfp_t gfp_mask, pgprot_t prot, unsigned long vm_flags, int node, const void *caller) { struct vm_struct *area; void *ret; kasan_vmalloc_flags_t kasan_flags = KASAN_VMALLOC_NONE; unsigned long real_size = size; unsigned long real_align = align; unsigned int shift = PAGE_SHIFT; if (WARN_ON_ONCE(!size)) return NULL; if ((size >> PAGE_SHIFT) > totalram_pages()) { warn_alloc(gfp_mask, NULL, "vmalloc error: size %lu, exceeds total pages", real_size); return NULL; } if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) { unsigned long size_per_node; /* * Try huge pages. Only try for PAGE_KERNEL allocations, * others like modules don't yet expect huge pages in * their allocations due to apply_to_page_range not * supporting them. */ size_per_node = size; if (node == NUMA_NO_NODE) size_per_node /= num_online_nodes(); if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE) shift = PMD_SHIFT; else shift = arch_vmap_pte_supported_shift(size_per_node); align = max(real_align, 1UL << shift); size = ALIGN(real_size, 1UL << shift); } again: area = __get_vm_area_node(real_size, align, shift, VM_ALLOC | VM_UNINITIALIZED | vm_flags, start, end, node, gfp_mask, caller); if (!area) { bool nofail = gfp_mask & __GFP_NOFAIL; warn_alloc(gfp_mask, NULL, "vmalloc error: size %lu, vm_struct allocation failed%s", real_size, (nofail) ? ". Retrying." : ""); if (nofail) { schedule_timeout_uninterruptible(1); goto again; } goto fail; } /* * Prepare arguments for __vmalloc_area_node() and * kasan_unpoison_vmalloc(). */ if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) { if (kasan_hw_tags_enabled()) { /* * Modify protection bits to allow tagging. * This must be done before mapping. */ prot = arch_vmap_pgprot_tagged(prot); /* * Skip page_alloc poisoning and zeroing for physical * pages backing VM_ALLOC mapping. Memory is instead * poisoned and zeroed by kasan_unpoison_vmalloc(). */ gfp_mask |= __GFP_SKIP_KASAN | __GFP_SKIP_ZERO; } /* Take note that the mapping is PAGE_KERNEL. */ kasan_flags |= KASAN_VMALLOC_PROT_NORMAL; } /* Allocate physical pages and map them into vmalloc space. */ ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node); if (!ret) goto fail; /* * Mark the pages as accessible, now that they are mapped. * The condition for setting KASAN_VMALLOC_INIT should complement the * one in post_alloc_hook() with regards to the __GFP_SKIP_ZERO check * to make sure that memory is initialized under the same conditions. * Tag-based KASAN modes only assign tags to normal non-executable * allocations, see __kasan_unpoison_vmalloc(). */ kasan_flags |= KASAN_VMALLOC_VM_ALLOC; if (!want_init_on_free() && want_init_on_alloc(gfp_mask) && (gfp_mask & __GFP_SKIP_ZERO)) kasan_flags |= KASAN_VMALLOC_INIT; /* KASAN_VMALLOC_PROT_NORMAL already set if required. */ area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags); /* * In this function, newly allocated vm_struct has VM_UNINITIALIZED * flag. It means that vm_struct is not fully initialized. 
* Now, it is fully initialized, so remove this flag here. */ clear_vm_uninitialized_flag(area); size = PAGE_ALIGN(size); if (!(vm_flags & VM_DEFER_KMEMLEAK)) kmemleak_vmalloc(area, size, gfp_mask); return area->addr; fail: if (shift > PAGE_SHIFT) { shift = PAGE_SHIFT; align = real_align; size = real_size; goto again; } return NULL; } /** * __vmalloc_node - allocate virtually contiguous memory * @size: allocation size * @align: desired alignment * @gfp_mask: flags for the page level allocator * @node: node to use for allocation or NUMA_NO_NODE * @caller: caller's return address * * Allocate enough pages to cover @size from the page level allocator with * @gfp_mask flags. Map them into contiguous kernel virtual space. * * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL * and __GFP_NOFAIL are not supported * * Any use of gfp flags outside of GFP_KERNEL should be consulted * with mm people. * * Return: pointer to the allocated memory or %NULL on error */ void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask, int node, const void *caller) { return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END, gfp_mask, PAGE_KERNEL, 0, node, caller); } /* * This is only for performance analysis of vmalloc and stress purpose. * It is required by vmalloc test module, therefore do not use it other * than that. */ #ifdef CONFIG_TEST_VMALLOC_MODULE EXPORT_SYMBOL_GPL(__vmalloc_node); #endif void *__vmalloc(unsigned long size, gfp_t gfp_mask) { return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE, __builtin_return_address(0)); } EXPORT_SYMBOL(__vmalloc); /** * vmalloc - allocate virtually contiguous memory * @size: allocation size * * Allocate enough pages to cover @size from the page level * allocator and map them into contiguous kernel virtual space. * * For tight control over page level allocator and protection flags * use __vmalloc() instead. * * Return: pointer to the allocated memory or %NULL on error */ void *vmalloc(unsigned long size) { return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE, __builtin_return_address(0)); } EXPORT_SYMBOL(vmalloc); /** * vmalloc_huge - allocate virtually contiguous memory, allow huge pages * @size: allocation size * @gfp_mask: flags for the page level allocator * * Allocate enough pages to cover @size from the page level * allocator and map them into contiguous kernel virtual space. * If @size is greater than or equal to PMD_SIZE, allow using * huge pages for the memory * * Return: pointer to the allocated memory or %NULL on error */ void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) { return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP, NUMA_NO_NODE, __builtin_return_address(0)); } EXPORT_SYMBOL_GPL(vmalloc_huge); /** * vzalloc - allocate virtually contiguous memory with zero fill * @size: allocation size * * Allocate enough pages to cover @size from the page level * allocator and map them into contiguous kernel virtual space. * The memory allocated is set to zero. * * For tight control over page level allocator and protection flags * use __vmalloc() instead. 
* * Return: pointer to the allocated memory or %NULL on error */ void *vzalloc(unsigned long size) { return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE, __builtin_return_address(0)); } EXPORT_SYMBOL(vzalloc); /** * vmalloc_user - allocate zeroed virtually contiguous memory for userspace * @size: allocation size * * The resulting memory area is zeroed so it can be mapped to userspace * without leaking data. * * Return: pointer to the allocated memory or %NULL on error */ void *vmalloc_user(unsigned long size) { return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL, VM_USERMAP, NUMA_NO_NODE, __builtin_return_address(0)); } EXPORT_SYMBOL(vmalloc_user); /** * vmalloc_node - allocate memory on a specific node * @size: allocation size * @node: numa node * * Allocate enough pages to cover @size from the page level * allocator and map them into contiguous kernel virtual space. * * For tight control over page level allocator and protection flags * use __vmalloc() instead. * * Return: pointer to the allocated memory or %NULL on error */ void *vmalloc_node(unsigned long size, int node) { return __vmalloc_node(size, 1, GFP_KERNEL, node, __builtin_return_address(0)); } EXPORT_SYMBOL(vmalloc_node); /** * vzalloc_node - allocate memory on a specific node with zero fill * @size: allocation size * @node: numa node * * Allocate enough pages to cover @size from the page level * allocator and map them into contiguous kernel virtual space. * The memory allocated is set to zero. * * Return: pointer to the allocated memory or %NULL on error */ void *vzalloc_node(unsigned long size, int node) { return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node, __builtin_return_address(0)); } EXPORT_SYMBOL(vzalloc_node); #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA) #define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL) #else /* * 64b systems should always have either DMA or DMA32 zones. For others * GFP_DMA32 should do the right thing and use the normal zone. */ #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) #endif /** * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) * @size: allocation size * * Allocate enough 32bit PA addressable pages to cover @size from the * page level allocator and map them into contiguous kernel virtual space. * * Return: pointer to the allocated memory or %NULL on error */ void *vmalloc_32(unsigned long size) { return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE, __builtin_return_address(0)); } EXPORT_SYMBOL(vmalloc_32); /** * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory * @size: allocation size * * The resulting memory area is 32bit addressable and zeroed so it can be * mapped to userspace without leaking data. * * Return: pointer to the allocated memory or %NULL on error */ void *vmalloc_32_user(unsigned long size) { return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, VM_USERMAP, NUMA_NO_NODE, __builtin_return_address(0)); } EXPORT_SYMBOL(vmalloc_32_user); /* * Atomically zero bytes in the iterator. * * Returns the number of zeroed bytes. 
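 * (This may be less than @count if the iterator cannot accept all of the
 * requested bytes, for example because the destination pages cannot be
 * accessed without faulting.)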
*/ static size_t zero_iter(struct iov_iter *iter, size_t count) { size_t remains = count; while (remains > 0) { size_t num, copied; num = min_t(size_t, remains, PAGE_SIZE); copied = copy_page_to_iter_nofault(ZERO_PAGE(0), 0, num, iter); remains -= copied; if (copied < num) break; } return count - remains; } /* * small helper routine, copy contents to iter from addr. * If the page is not present, fill zero. * * Returns the number of copied bytes. */ static size_t aligned_vread_iter(struct iov_iter *iter, const char *addr, size_t count) { size_t remains = count; struct page *page; while (remains > 0) { unsigned long offset, length; size_t copied = 0; offset = offset_in_page(addr); length = PAGE_SIZE - offset; if (length > remains) length = remains; page = vmalloc_to_page(addr); /* * To do safe access to this _mapped_ area, we need lock. But * adding lock here means that we need to add overhead of * vmalloc()/vfree() calls for this _debug_ interface, rarely * used. Instead of that, we'll use an local mapping via * copy_page_to_iter_nofault() and accept a small overhead in * this access function. */ if (page) copied = copy_page_to_iter_nofault(page, offset, length, iter); else copied = zero_iter(iter, length); addr += copied; remains -= copied; if (copied != length) break; } return count - remains; } /* * Read from a vm_map_ram region of memory. * * Returns the number of copied bytes. */ static size_t vmap_ram_vread_iter(struct iov_iter *iter, const char *addr, size_t count, unsigned long flags) { char *start; struct vmap_block *vb; struct xarray *xa; unsigned long offset; unsigned int rs, re; size_t remains, n; /* * If it's area created by vm_map_ram() interface directly, but * not further subdividing and delegating management to vmap_block, * handle it here. */ if (!(flags & VMAP_BLOCK)) return aligned_vread_iter(iter, addr, count); remains = count; /* * Area is split into regions and tracked with vmap_block, read out * each region and zero fill the hole between regions. */ xa = addr_to_vb_xa((unsigned long) addr); vb = xa_load(xa, addr_to_vb_idx((unsigned long)addr)); if (!vb) goto finished_zero; spin_lock(&vb->lock); if (bitmap_empty(vb->used_map, VMAP_BBMAP_BITS)) { spin_unlock(&vb->lock); goto finished_zero; } for_each_set_bitrange(rs, re, vb->used_map, VMAP_BBMAP_BITS) { size_t copied; if (remains == 0) goto finished; start = vmap_block_vaddr(vb->va->va_start, rs); if (addr < start) { size_t to_zero = min_t(size_t, start - addr, remains); size_t zeroed = zero_iter(iter, to_zero); addr += zeroed; remains -= zeroed; if (remains == 0 || zeroed != to_zero) goto finished; } /*it could start reading from the middle of used region*/ offset = offset_in_page(addr); n = ((re - rs + 1) << PAGE_SHIFT) - offset; if (n > remains) n = remains; copied = aligned_vread_iter(iter, start + offset, n); addr += copied; remains -= copied; if (copied != n) goto finished; } spin_unlock(&vb->lock); finished_zero: /* zero-fill the left dirty or free regions */ return count - remains + zero_iter(iter, remains); finished: /* We couldn't copy/zero everything */ spin_unlock(&vb->lock); return count - remains; } /** * vread_iter() - read vmalloc area in a safe way to an iterator. * @iter: the iterator to which data should be written. * @addr: vm address. * @count: number of bytes to be read. * * This function checks that addr is a valid vmalloc'ed area, and * copy data from that area to a given buffer. 
If the given memory range * of [addr...addr+count) includes some valid address, data is copied to * proper area of @buf. If there are memory holes, they'll be zero-filled. * IOREMAP area is treated as memory hole and no copy is done. * * If [addr...addr+count) doesn't includes any intersects with alive * vm_struct area, returns 0. @buf should be kernel's buffer. * * Note: In usual ops, vread() is never necessary because the caller * should know vmalloc() area is valid and can use memcpy(). * This is for routines which have to access vmalloc area without * any information, as /proc/kcore. * * Return: number of bytes for which addr and buf should be increased * (same number as @count) or %0 if [addr...addr+count) doesn't * include any intersection with valid vmalloc area */ long vread_iter(struct iov_iter *iter, const char *addr, size_t count) { struct vmap_area *va; struct vm_struct *vm; char *vaddr; size_t n, size, flags, remains; addr = kasan_reset_tag(addr); /* Don't allow overflow */ if ((unsigned long) addr + count < count) count = -(unsigned long) addr; remains = count; spin_lock(&vmap_area_lock); va = find_vmap_area_exceed_addr((unsigned long)addr); if (!va) goto finished_zero; /* no intersects with alive vmap_area */ if ((unsigned long)addr + remains <= va->va_start) goto finished_zero; list_for_each_entry_from(va, &vmap_area_list, list) { size_t copied; if (remains == 0) goto finished; vm = va->vm; flags = va->flags & VMAP_FLAGS_MASK; /* * VMAP_BLOCK indicates a sub-type of vm_map_ram area, need * be set together with VMAP_RAM. */ WARN_ON(flags == VMAP_BLOCK); if (!vm && !flags) continue; if (vm && (vm->flags & VM_UNINITIALIZED)) continue; /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */ smp_rmb(); vaddr = (char *) va->va_start; size = vm ? get_vm_area_size(vm) : va_size(va); if (addr >= vaddr + size) continue; if (addr < vaddr) { size_t to_zero = min_t(size_t, vaddr - addr, remains); size_t zeroed = zero_iter(iter, to_zero); addr += zeroed; remains -= zeroed; if (remains == 0 || zeroed != to_zero) goto finished; } n = vaddr + size - addr; if (n > remains) n = remains; if (flags & VMAP_RAM) copied = vmap_ram_vread_iter(iter, addr, n, flags); else if (!(vm && (vm->flags & VM_IOREMAP))) copied = aligned_vread_iter(iter, addr, n); else /* IOREMAP area is treated as memory hole */ copied = zero_iter(iter, n); addr += copied; remains -= copied; if (copied != n) goto finished; } finished_zero: spin_unlock(&vmap_area_lock); /* zero-fill memory holes */ return count - remains + zero_iter(iter, remains); finished: /* Nothing remains, or We couldn't copy/zero everything. */ spin_unlock(&vmap_area_lock); return count - remains; } /** * remap_vmalloc_range_partial - map vmalloc pages to userspace * @vma: vma to cover * @uaddr: target user address to start at * @kaddr: virtual address of vmalloc kernel memory * @pgoff: offset from @kaddr to start at * @size: size of map area * * Returns: 0 for success, -Exxx on failure * * This function checks that @kaddr is a valid vmalloc'ed area, * and that it is big enough to cover the range starting at * @uaddr in @vma. Will return failure if that criteria isn't * met. 
* * Similar to remap_pfn_range() (see mm/memory.c) */ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, void *kaddr, unsigned long pgoff, unsigned long size) { struct vm_struct *area; unsigned long off; unsigned long end_index; if (check_shl_overflow(pgoff, PAGE_SHIFT, &off)) return -EINVAL; size = PAGE_ALIGN(size); if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr)) return -EINVAL; area = find_vm_area(kaddr); if (!area) return -EINVAL; if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT))) return -EINVAL; if (check_add_overflow(size, off, &end_index) || end_index > get_vm_area_size(area)) return -EINVAL; kaddr += off; do { struct page *page = vmalloc_to_page(kaddr); int ret; ret = vm_insert_page(vma, uaddr, page); if (ret) return ret; uaddr += PAGE_SIZE; kaddr += PAGE_SIZE; size -= PAGE_SIZE; } while (size > 0); vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP); return 0; } /** * remap_vmalloc_range - map vmalloc pages to userspace * @vma: vma to cover (map full range of vma) * @addr: vmalloc memory * @pgoff: number of pages into addr before first page to map * * Returns: 0 for success, -Exxx on failure * * This function checks that addr is a valid vmalloc'ed area, and * that it is big enough to cover the vma. Will return failure if * that criteria isn't met. * * Similar to remap_pfn_range() (see mm/memory.c) */ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, unsigned long pgoff) { return remap_vmalloc_range_partial(vma, vma->vm_start, addr, pgoff, vma->vm_end - vma->vm_start); } EXPORT_SYMBOL(remap_vmalloc_range); void free_vm_area(struct vm_struct *area) { struct vm_struct *ret; ret = remove_vm_area(area->addr); BUG_ON(ret != area); kfree(area); } EXPORT_SYMBOL_GPL(free_vm_area); #ifdef CONFIG_SMP static struct vmap_area *node_to_va(struct rb_node *n) { return rb_entry_safe(n, struct vmap_area, rb_node); } /** * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to * @addr: target address * * Returns: vmap_area if it is found. If there is no such area * the first highest(reverse order) vmap_area is returned * i.e. va->va_start < addr && va->va_end < addr or NULL * if there are no any areas before @addr. */ static struct vmap_area * pvm_find_va_enclose_addr(unsigned long addr) { struct vmap_area *va, *tmp; struct rb_node *n; n = free_vmap_area_root.rb_node; va = NULL; while (n) { tmp = rb_entry(n, struct vmap_area, rb_node); if (tmp->va_start <= addr) { va = tmp; if (tmp->va_end >= addr) break; n = n->rb_right; } else { n = n->rb_left; } } return va; } /** * pvm_determine_end_from_reverse - find the highest aligned address * of free block below VMALLOC_END * @va: * in - the VA we start the search(reverse order); * out - the VA with the highest aligned end address. 
* @align: alignment for required highest address * * Returns: determined end address within vmap_area */ static unsigned long pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align) { unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); unsigned long addr; if (likely(*va)) { list_for_each_entry_from_reverse((*va), &free_vmap_area_list, list) { addr = min((*va)->va_end & ~(align - 1), vmalloc_end); if ((*va)->va_start < addr) return addr; } } return 0; } /** * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator * @offsets: array containing offset of each area * @sizes: array containing size of each area * @nr_vms: the number of areas to allocate * @align: alignment, all entries in @offsets and @sizes must be aligned to this * * Returns: kmalloc'd vm_struct pointer array pointing to allocated * vm_structs on success, %NULL on failure * * Percpu allocator wants to use congruent vm areas so that it can * maintain the offsets among percpu areas. This function allocates * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to * be scattered pretty far, distance between two areas easily going up * to gigabytes. To avoid interacting with regular vmallocs, these * areas are allocated from top. * * Despite its complicated look, this allocator is rather simple. It * does everything top-down and scans free blocks from the end looking * for matching base. While scanning, if any of the areas do not fit the * base address is pulled down to fit the area. Scanning is repeated till * all the areas fit and then all necessary data structures are inserted * and the result is returned. */ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, const size_t *sizes, int nr_vms, size_t align) { const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align); const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); struct vmap_area **vas, *va; struct vm_struct **vms; int area, area2, last_area, term_area; unsigned long base, start, size, end, last_end, orig_start, orig_end; bool purged = false; /* verify parameters and allocate data structures */ BUG_ON(offset_in_page(align) || !is_power_of_2(align)); for (last_area = 0, area = 0; area < nr_vms; area++) { start = offsets[area]; end = start + sizes[area]; /* is everything aligned properly? */ BUG_ON(!IS_ALIGNED(offsets[area], align)); BUG_ON(!IS_ALIGNED(sizes[area], align)); /* detect the area with the highest address */ if (start > offsets[last_area]) last_area = area; for (area2 = area + 1; area2 < nr_vms; area2++) { unsigned long start2 = offsets[area2]; unsigned long end2 = start2 + sizes[area2]; BUG_ON(start2 < end && start < end2); } } last_end = offsets[last_area] + sizes[last_area]; if (vmalloc_end - vmalloc_start < last_end) { WARN_ON(true); return NULL; } vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL); vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL); if (!vas || !vms) goto err_free2; for (area = 0; area < nr_vms; area++) { vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL); vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); if (!vas[area] || !vms[area]) goto err_free; } retry: spin_lock(&free_vmap_area_lock); /* start scanning - we scan from the top, begin with the last area */ area = term_area = last_area; start = offsets[area]; end = start + sizes[area]; va = pvm_find_va_enclose_addr(vmalloc_end); base = pvm_determine_end_from_reverse(&va, align) - end; while (true) { /* * base might have underflowed, add last_end before * comparing. 
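 * In effect this checks base < vmalloc_start; adding last_end to both
 * sides also catches the case where the unsigned subtraction that
 * computed base wrapped around.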
*/ if (base + last_end < vmalloc_start + last_end) goto overflow; /* * Fitting base has not been found. */ if (va == NULL) goto overflow; /* * If required width exceeds current VA block, move * base downwards and then recheck. */ if (base + end > va->va_end) { base = pvm_determine_end_from_reverse(&va, align) - end; term_area = area; continue; } /* * If this VA does not fit, move base downwards and recheck. */ if (base + start < va->va_start) { va = node_to_va(rb_prev(&va->rb_node)); base = pvm_determine_end_from_reverse(&va, align) - end; term_area = area; continue; } /* * This area fits, move on to the previous one. If * the previous one is the terminal one, we're done. */ area = (area + nr_vms - 1) % nr_vms; if (area == term_area) break; start = offsets[area]; end = start + sizes[area]; va = pvm_find_va_enclose_addr(base + end); } /* we've found a fitting base, insert all va's */ for (area = 0; area < nr_vms; area++) { int ret; start = base + offsets[area]; size = sizes[area]; va = pvm_find_va_enclose_addr(start); if (WARN_ON_ONCE(va == NULL)) /* It is a BUG(), but trigger recovery instead. */ goto recovery; ret = adjust_va_to_fit_type(&free_vmap_area_root, &free_vmap_area_list, va, start, size); if (WARN_ON_ONCE(unlikely(ret))) /* It is a BUG(), but trigger recovery instead. */ goto recovery; /* Allocated area. */ va = vas[area]; va->va_start = start; va->va_end = start + size; } spin_unlock(&free_vmap_area_lock); /* populate the kasan shadow space */ for (area = 0; area < nr_vms; area++) { if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area])) goto err_free_shadow; } /* insert all vm's */ spin_lock(&vmap_area_lock); for (area = 0; area < nr_vms; area++) { insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list); setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC, pcpu_get_vm_areas); } spin_unlock(&vmap_area_lock); /* * Mark allocated areas as accessible. Do it now as a best-effort * approach, as they can be mapped outside of vmalloc code. * With hardware tag-based KASAN, marking is skipped for * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc(). */ for (area = 0; area < nr_vms; area++) vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr, vms[area]->size, KASAN_VMALLOC_PROT_NORMAL); kfree(vas); return vms; recovery: /* * Remove previously allocated areas. There is no * need in removing these areas from the busy tree, * because they are inserted only on the final step * and when pcpu_get_vm_areas() is success. */ while (area--) { orig_start = vas[area]->va_start; orig_end = vas[area]->va_end; va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, &free_vmap_area_list); if (va) kasan_release_vmalloc(orig_start, orig_end, va->va_start, va->va_end); vas[area] = NULL; } overflow: spin_unlock(&free_vmap_area_lock); if (!purged) { reclaim_and_purge_vmap_areas(); purged = true; /* Before "retry", check if we recover. */ for (area = 0; area < nr_vms; area++) { if (vas[area]) continue; vas[area] = kmem_cache_zalloc( vmap_area_cachep, GFP_KERNEL); if (!vas[area]) goto err_free; } goto retry; } err_free: for (area = 0; area < nr_vms; area++) { if (vas[area]) kmem_cache_free(vmap_area_cachep, vas[area]); kfree(vms[area]); } err_free2: kfree(vas); kfree(vms); return NULL; err_free_shadow: spin_lock(&free_vmap_area_lock); /* * We release all the vmalloc shadows, even the ones for regions that * hadn't been successfully added. This relies on kasan_release_vmalloc * being able to tolerate this case. 
*/ for (area = 0; area < nr_vms; area++) { orig_start = vas[area]->va_start; orig_end = vas[area]->va_end; va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, &free_vmap_area_list); if (va) kasan_release_vmalloc(orig_start, orig_end, va->va_start, va->va_end); vas[area] = NULL; kfree(vms[area]); } spin_unlock(&free_vmap_area_lock); kfree(vas); kfree(vms); return NULL; } /** * pcpu_free_vm_areas - free vmalloc areas for percpu allocator * @vms: vm_struct pointer array returned by pcpu_get_vm_areas() * @nr_vms: the number of allocated areas * * Free vm_structs and the array allocated by pcpu_get_vm_areas(). */ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) { int i; for (i = 0; i < nr_vms; i++) free_vm_area(vms[i]); kfree(vms); } #endif /* CONFIG_SMP */ #ifdef CONFIG_PRINTK bool vmalloc_dump_obj(void *object) { void *objp = (void *)PAGE_ALIGN((unsigned long)object); const void *caller; struct vm_struct *vm; struct vmap_area *va; unsigned long addr; unsigned int nr_pages; if (!spin_trylock(&vmap_area_lock)) return false; va = __find_vmap_area((unsigned long)objp, &vmap_area_root); if (!va) { spin_unlock(&vmap_area_lock); return false; } vm = va->vm; if (!vm) { spin_unlock(&vmap_area_lock); return false; } addr = (unsigned long)vm->addr; caller = vm->caller; nr_pages = vm->nr_pages; spin_unlock(&vmap_area_lock); pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n", nr_pages, addr, caller); return true; } #endif #ifdef CONFIG_PROC_FS static void *s_start(struct seq_file *m, loff_t *pos) __acquires(&vmap_purge_lock) __acquires(&vmap_area_lock) { mutex_lock(&vmap_purge_lock); spin_lock(&vmap_area_lock); return seq_list_start(&vmap_area_list, *pos); } static void *s_next(struct seq_file *m, void *p, loff_t *pos) { return seq_list_next(p, &vmap_area_list, pos); } static void s_stop(struct seq_file *m, void *p) __releases(&vmap_area_lock) __releases(&vmap_purge_lock) { spin_unlock(&vmap_area_lock); mutex_unlock(&vmap_purge_lock); } static void show_numa_info(struct seq_file *m, struct vm_struct *v) { if (IS_ENABLED(CONFIG_NUMA)) { unsigned int nr, *counters = m->private; unsigned int step = 1U << vm_area_page_order(v); if (!counters) return; if (v->flags & VM_UNINITIALIZED) return; /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */ smp_rmb(); memset(counters, 0, nr_node_ids * sizeof(unsigned int)); for (nr = 0; nr < v->nr_pages; nr += step) counters[page_to_nid(v->pages[nr])] += step; for_each_node_state(nr, N_HIGH_MEMORY) if (counters[nr]) seq_printf(m, " N%u=%u", nr, counters[nr]); } } static void show_purge_info(struct seq_file *m) { struct vmap_area *va; spin_lock(&purge_vmap_area_lock); list_for_each_entry(va, &purge_vmap_area_list, list) { seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n", (void *)va->va_start, (void *)va->va_end, va->va_end - va->va_start); } spin_unlock(&purge_vmap_area_lock); } static int s_show(struct seq_file *m, void *p) { struct vmap_area *va; struct vm_struct *v; va = list_entry(p, struct vmap_area, list); if (!va->vm) { if (va->flags & VMAP_RAM) seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n", (void *)va->va_start, (void *)va->va_end, va->va_end - va->va_start); goto final; } v = va->vm; seq_printf(m, "0x%pK-0x%pK %7ld", v->addr, v->addr + v->size, v->size); if (v->caller) seq_printf(m, " %pS", v->caller); if (v->nr_pages) seq_printf(m, " pages=%d", v->nr_pages); if (v->phys_addr) seq_printf(m, " phys=%pa", &v->phys_addr); if (v->flags & VM_IOREMAP) seq_puts(m, " ioremap"); if (v->flags & VM_ALLOC) seq_puts(m, " 
vmalloc"); if (v->flags & VM_MAP) seq_puts(m, " vmap"); if (v->flags & VM_USERMAP) seq_puts(m, " user"); if (v->flags & VM_DMA_COHERENT) seq_puts(m, " dma-coherent"); if (is_vmalloc_addr(v->pages)) seq_puts(m, " vpages"); show_numa_info(m, v); seq_putc(m, '\n'); /* * As a final step, dump "unpurged" areas. */ final: if (list_is_last(&va->list, &vmap_area_list)) show_purge_info(m); return 0; } static const struct seq_operations vmalloc_op = { .start = s_start, .next = s_next, .stop = s_stop, .show = s_show, }; static int __init proc_vmalloc_init(void) { if (IS_ENABLED(CONFIG_NUMA)) proc_create_seq_private("vmallocinfo", 0400, NULL, &vmalloc_op, nr_node_ids * sizeof(unsigned int), NULL); else proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op); return 0; } module_init(proc_vmalloc_init); #endif void __init vmalloc_init(void) { struct vmap_area *va; struct vm_struct *tmp; int i; /* * Create the cache for vmap_area objects. */ vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC); for_each_possible_cpu(i) { struct vmap_block_queue *vbq; struct vfree_deferred *p; vbq = &per_cpu(vmap_block_queue, i); spin_lock_init(&vbq->lock); INIT_LIST_HEAD(&vbq->free); p = &per_cpu(vfree_deferred, i); init_llist_head(&p->list); INIT_WORK(&p->wq, delayed_vfree_work); xa_init(&vbq->vmap_blocks); } /* Import existing vmlist entries. */ for (tmp = vmlist; tmp; tmp = tmp->next) { va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); if (WARN_ON_ONCE(!va)) continue; va->va_start = (unsigned long)tmp->addr; va->va_end = va->va_start + tmp->size; va->vm = tmp; insert_vmap_area(va, &vmap_area_root, &vmap_area_list); } /* * Now we can initialize a free vmap space. */ vmap_init_free_space(); vmap_initialized = true; } |
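/*
 * Minimal usage sketch of the vmalloc()/vfree() interface documented above.
 * This is an illustrative example only, not part of mm/vmalloc.c; the
 * demo_vmalloc_roundtrip() helper and the 1 MiB size are assumptions chosen
 * for demonstration.
 */
#include <linux/errno.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

static int demo_vmalloc_roundtrip(void)
{
	/* Virtually contiguous but possibly physically scattered memory. */
	char *buf = vmalloc(SZ_1M);

	if (!buf)
		return -ENOMEM;

	/* The mapping behaves like ordinary kernel memory. */
	memset(buf, 0, SZ_1M);

	/* Release the pages and the virtual mapping. */
	vfree(buf);
	return 0;
}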
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated Copyright (C) 2011 ProFUSION Embedded Systems Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ /* Bluetooth HCI core.
*/ #include <linux/export.h> #include <linux/rfkill.h> #include <linux/debugfs.h> #include <linux/crypto.h> #include <linux/kcov.h> #include <linux/property.h> #include <linux/suspend.h> #include <linux/wait.h> #include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #include <net/bluetooth/mgmt.h> #include "hci_request.h" #include "hci_debugfs.h" #include "smp.h" #include "leds.h" #include "msft.h" #include "aosp.h" #include "hci_codec.h" static void hci_rx_work(struct work_struct *work); static void hci_cmd_work(struct work_struct *work); static void hci_tx_work(struct work_struct *work); /* HCI device list */ LIST_HEAD(hci_dev_list); DEFINE_RWLOCK(hci_dev_list_lock); /* HCI callback list */ LIST_HEAD(hci_cb_list); DEFINE_MUTEX(hci_cb_list_lock); /* HCI ID Numbering */ static DEFINE_IDA(hci_index_ida); static int hci_scan_req(struct hci_request *req, unsigned long opt) { __u8 scan = opt; BT_DBG("%s %x", req->hdev->name, scan); /* Inquiry and Page scans */ hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); return 0; } static int hci_auth_req(struct hci_request *req, unsigned long opt) { __u8 auth = opt; BT_DBG("%s %x", req->hdev->name, auth); /* Authentication */ hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth); return 0; } static int hci_encrypt_req(struct hci_request *req, unsigned long opt) { __u8 encrypt = opt; BT_DBG("%s %x", req->hdev->name, encrypt); /* Encryption */ hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt); return 0; } static int hci_linkpol_req(struct hci_request *req, unsigned long opt) { __le16 policy = cpu_to_le16(opt); BT_DBG("%s %x", req->hdev->name, policy); /* Default link policy */ hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy); return 0; } /* Get HCI device by index. * Device is held on return. 
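 * The caller must drop the reference with hci_dev_put() when it is
 * done with the device.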
*/ struct hci_dev *hci_dev_get(int index) { struct hci_dev *hdev = NULL, *d; BT_DBG("%d", index); if (index < 0) return NULL; read_lock(&hci_dev_list_lock); list_for_each_entry(d, &hci_dev_list, list) { if (d->id == index) { hdev = hci_dev_hold(d); break; } } read_unlock(&hci_dev_list_lock); return hdev; } /* ---- Inquiry support ---- */ bool hci_discovery_active(struct hci_dev *hdev) { struct discovery_state *discov = &hdev->discovery; switch (discov->state) { case DISCOVERY_FINDING: case DISCOVERY_RESOLVING: return true; default: return false; } } void hci_discovery_set_state(struct hci_dev *hdev, int state) { int old_state = hdev->discovery.state; BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state); if (old_state == state) return; hdev->discovery.state = state; switch (state) { case DISCOVERY_STOPPED: hci_update_passive_scan(hdev); if (old_state != DISCOVERY_STARTING) mgmt_discovering(hdev, 0); break; case DISCOVERY_STARTING: break; case DISCOVERY_FINDING: mgmt_discovering(hdev, 1); break; case DISCOVERY_RESOLVING: break; case DISCOVERY_STOPPING: break; } } void hci_inquiry_cache_flush(struct hci_dev *hdev) { struct discovery_state *cache = &hdev->discovery; struct inquiry_entry *p, *n; list_for_each_entry_safe(p, n, &cache->all, all) { list_del(&p->all); kfree(p); } INIT_LIST_HEAD(&cache->unknown); INIT_LIST_HEAD(&cache->resolve); } struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) { struct discovery_state *cache = &hdev->discovery; struct inquiry_entry *e; BT_DBG("cache %p, %pMR", cache, bdaddr); list_for_each_entry(e, &cache->all, all) { if (!bacmp(&e->data.bdaddr, bdaddr)) return e; } return NULL; } struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev, bdaddr_t *bdaddr) { struct discovery_state *cache = &hdev->discovery; struct inquiry_entry *e; BT_DBG("cache %p, %pMR", cache, bdaddr); list_for_each_entry(e, &cache->unknown, list) { if (!bacmp(&e->data.bdaddr, bdaddr)) return e; } return NULL; } struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev, bdaddr_t *bdaddr, int state) { struct discovery_state *cache = &hdev->discovery; struct inquiry_entry *e; BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state); list_for_each_entry(e, &cache->resolve, list) { if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state) return e; if (!bacmp(&e->data.bdaddr, bdaddr)) return e; } return NULL; } void hci_inquiry_cache_update_resolve(struct hci_dev *hdev, struct inquiry_entry *ie) { struct discovery_state *cache = &hdev->discovery; struct list_head *pos = &cache->resolve; struct inquiry_entry *p; list_del(&ie->list); list_for_each_entry(p, &cache->resolve, list) { if (p->name_state != NAME_PENDING && abs(p->data.rssi) >= abs(ie->data.rssi)) break; pos = &p->list; } list_add(&ie->list, pos); } u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data, bool name_known) { struct discovery_state *cache = &hdev->discovery; struct inquiry_entry *ie; u32 flags = 0; BT_DBG("cache %p, %pMR", cache, &data->bdaddr); hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR); if (!data->ssp_mode) flags |= MGMT_DEV_FOUND_LEGACY_PAIRING; ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr); if (ie) { if (!ie->data.ssp_mode) flags |= MGMT_DEV_FOUND_LEGACY_PAIRING; if (ie->name_state == NAME_NEEDED && data->rssi != ie->data.rssi) { ie->data.rssi = data->rssi; hci_inquiry_cache_update_resolve(hdev, ie); } goto update; } /* Entry not in the cache. Add new one. 
*/ ie = kzalloc(sizeof(*ie), GFP_KERNEL); if (!ie) { flags |= MGMT_DEV_FOUND_CONFIRM_NAME; goto done; } list_add(&ie->all, &cache->all); if (name_known) { ie->name_state = NAME_KNOWN; } else { ie->name_state = NAME_NOT_KNOWN; list_add(&ie->list, &cache->unknown); } update: if (name_known && ie->name_state != NAME_KNOWN && ie->name_state != NAME_PENDING) { ie->name_state = NAME_KNOWN; list_del(&ie->list); } memcpy(&ie->data, data, sizeof(*data)); ie->timestamp = jiffies; cache->timestamp = jiffies; if (ie->name_state == NAME_NOT_KNOWN) flags |= MGMT_DEV_FOUND_CONFIRM_NAME; done: return flags; } static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf) { struct discovery_state *cache = &hdev->discovery; struct inquiry_info *info = (struct inquiry_info *) buf; struct inquiry_entry *e; int copied = 0; list_for_each_entry(e, &cache->all, all) { struct inquiry_data *data = &e->data; if (copied >= num) break; bacpy(&info->bdaddr, &data->bdaddr); info->pscan_rep_mode = data->pscan_rep_mode; info->pscan_period_mode = data->pscan_period_mode; info->pscan_mode = data->pscan_mode; memcpy(info->dev_class, data->dev_class, 3); info->clock_offset = data->clock_offset; info++; copied++; } BT_DBG("cache %p, copied %d", cache, copied); return copied; } static int hci_inq_req(struct hci_request *req, unsigned long opt) { struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt; struct hci_dev *hdev = req->hdev; struct hci_cp_inquiry cp; BT_DBG("%s", hdev->name); if (test_bit(HCI_INQUIRY, &hdev->flags)) return 0; /* Start Inquiry */ memcpy(&cp.lap, &ir->lap, 3); cp.length = ir->length; cp.num_rsp = ir->num_rsp; hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp); return 0; } int hci_inquiry(void __user *arg) { __u8 __user *ptr = arg; struct hci_inquiry_req ir; struct hci_dev *hdev; int err = 0, do_inquiry = 0, max_rsp; long timeo; __u8 *buf; if (copy_from_user(&ir, ptr, sizeof(ir))) return -EFAULT; hdev = hci_dev_get(ir.dev_id); if (!hdev) return -ENODEV; if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { err = -EBUSY; goto done; } if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { err = -EOPNOTSUPP; goto done; } if (hdev->dev_type != HCI_PRIMARY) { err = -EOPNOTSUPP; goto done; } if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { err = -EOPNOTSUPP; goto done; } /* Restrict maximum inquiry length to 60 seconds */ if (ir.length > 60) { err = -EINVAL; goto done; } hci_dev_lock(hdev); if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) { hci_inquiry_cache_flush(hdev); do_inquiry = 1; } hci_dev_unlock(hdev); timeo = ir.length * msecs_to_jiffies(2000); if (do_inquiry) { err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir, timeo, NULL); if (err < 0) goto done; /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is * cleared). If it is interrupted by a signal, return -EINTR. */ if (wait_on_bit(&hdev->flags, HCI_INQUIRY, TASK_INTERRUPTIBLE)) { err = -EINTR; goto done; } } /* for unlimited number of responses we will use buffer with * 255 entries */ max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp; /* cache_dump can't sleep. Therefore we allocate temp buffer and then * copy it to the user space. 
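 * The cache is dumped under hdev->lock into this kernel buffer, and the
 * potentially faulting copy_to_user() is performed only after the lock
 * has been released.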
*/ buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL); if (!buf) { err = -ENOMEM; goto done; } hci_dev_lock(hdev); ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf); hci_dev_unlock(hdev); BT_DBG("num_rsp %d", ir.num_rsp); if (!copy_to_user(ptr, &ir, sizeof(ir))) { ptr += sizeof(ir); if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) * ir.num_rsp)) err = -EFAULT; } else err = -EFAULT; kfree(buf); done: hci_dev_put(hdev); return err; } static int hci_dev_do_open(struct hci_dev *hdev) { int ret = 0; BT_DBG("%s %p", hdev->name, hdev); hci_req_sync_lock(hdev); ret = hci_dev_open_sync(hdev); hci_req_sync_unlock(hdev); return ret; } /* ---- HCI ioctl helpers ---- */ int hci_dev_open(__u16 dev) { struct hci_dev *hdev; int err; hdev = hci_dev_get(dev); if (!hdev) return -ENODEV; /* Devices that are marked as unconfigured can only be powered * up as user channel. Trying to bring them up as normal devices * will result into a failure. Only user channel operation is * possible. * * When this function is called for a user channel, the flag * HCI_USER_CHANNEL will be set first before attempting to * open the device. */ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { err = -EOPNOTSUPP; goto done; } /* We need to ensure that no other power on/off work is pending * before proceeding to call hci_dev_do_open. This is * particularly important if the setup procedure has not yet * completed. */ if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) cancel_delayed_work(&hdev->power_off); /* After this call it is guaranteed that the setup procedure * has finished. This means that error conditions like RFKILL * or no valid public or static random address apply. */ flush_workqueue(hdev->req_workqueue); /* For controllers not using the management interface and that * are brought up using legacy ioctl, set the HCI_BONDABLE bit * so that pairing works for them. Once the management interface * is in use this bit will be cleared again and userspace has * to explicitly enable it. */ if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && !hci_dev_test_flag(hdev, HCI_MGMT)) hci_dev_set_flag(hdev, HCI_BONDABLE); err = hci_dev_do_open(hdev); done: hci_dev_put(hdev); return err; } int hci_dev_do_close(struct hci_dev *hdev) { int err; BT_DBG("%s %p", hdev->name, hdev); hci_req_sync_lock(hdev); err = hci_dev_close_sync(hdev); hci_req_sync_unlock(hdev); return err; } int hci_dev_close(__u16 dev) { struct hci_dev *hdev; int err; hdev = hci_dev_get(dev); if (!hdev) return -ENODEV; if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { err = -EBUSY; goto done; } cancel_work_sync(&hdev->power_on); if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) cancel_delayed_work(&hdev->power_off); err = hci_dev_do_close(hdev); done: hci_dev_put(hdev); return err; } static int hci_dev_do_reset(struct hci_dev *hdev) { int ret; BT_DBG("%s %p", hdev->name, hdev); hci_req_sync_lock(hdev); /* Drop queues */ skb_queue_purge(&hdev->rx_q); skb_queue_purge(&hdev->cmd_q); /* Cancel these to avoid queueing non-chained pending work */ hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE); /* Wait for * * if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE)) * queue_delayed_work(&hdev->{cmd,ncmd}_timer) * * inside RCU section to see the flag or complete scheduling. */ synchronize_rcu(); /* Explicitly cancel works in case scheduled after setting the flag. 
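 * With HCI_CMD_DRAIN_WORKQUEUE set and the RCU grace period above
 * completed, the timers can no longer be re-armed, so a plain
 * cancel_delayed_work() is sufficient here.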
*/ cancel_delayed_work(&hdev->cmd_timer); cancel_delayed_work(&hdev->ncmd_timer); /* Avoid potential lockdep warnings from the *_flush() calls by * ensuring the workqueue is empty up front. */ drain_workqueue(hdev->workqueue); hci_dev_lock(hdev); hci_inquiry_cache_flush(hdev); hci_conn_hash_flush(hdev); hci_dev_unlock(hdev); if (hdev->flush) hdev->flush(hdev); hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE); atomic_set(&hdev->cmd_cnt, 1); hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0; hdev->iso_cnt = 0; ret = hci_reset_sync(hdev); hci_req_sync_unlock(hdev); return ret; } int hci_dev_reset(__u16 dev) { struct hci_dev *hdev; int err; hdev = hci_dev_get(dev); if (!hdev) return -ENODEV; if (!test_bit(HCI_UP, &hdev->flags)) { err = -ENETDOWN; goto done; } if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { err = -EBUSY; goto done; } if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { err = -EOPNOTSUPP; goto done; } err = hci_dev_do_reset(hdev); done: hci_dev_put(hdev); return err; } int hci_dev_reset_stat(__u16 dev) { struct hci_dev *hdev; int ret = 0; hdev = hci_dev_get(dev); if (!hdev) return -ENODEV; if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { ret = -EBUSY; goto done; } if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { ret = -EOPNOTSUPP; goto done; } memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); done: hci_dev_put(hdev); return ret; } static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan) { bool conn_changed, discov_changed; BT_DBG("%s scan 0x%02x", hdev->name, scan); if ((scan & SCAN_PAGE)) conn_changed = !hci_dev_test_and_set_flag(hdev, HCI_CONNECTABLE); else conn_changed = hci_dev_test_and_clear_flag(hdev, HCI_CONNECTABLE); if ((scan & SCAN_INQUIRY)) { discov_changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE); } else { hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); discov_changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE); } if (!hci_dev_test_flag(hdev, HCI_MGMT)) return; if (conn_changed || discov_changed) { /* In case this was disabled through mgmt */ hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) hci_update_adv_data(hdev, hdev->cur_adv_instance); mgmt_new_settings(hdev); } } int hci_dev_cmd(unsigned int cmd, void __user *arg) { struct hci_dev *hdev; struct hci_dev_req dr; int err = 0; if (copy_from_user(&dr, arg, sizeof(dr))) return -EFAULT; hdev = hci_dev_get(dr.dev_id); if (!hdev) return -ENODEV; if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { err = -EBUSY; goto done; } if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { err = -EOPNOTSUPP; goto done; } if (hdev->dev_type != HCI_PRIMARY) { err = -EOPNOTSUPP; goto done; } if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { err = -EOPNOTSUPP; goto done; } switch (cmd) { case HCISETAUTH: err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt, HCI_INIT_TIMEOUT, NULL); break; case HCISETENCRYPT: if (!lmp_encrypt_capable(hdev)) { err = -EOPNOTSUPP; break; } if (!test_bit(HCI_AUTH, &hdev->flags)) { /* Auth must be enabled first */ err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt, HCI_INIT_TIMEOUT, NULL); if (err) break; } err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt, HCI_INIT_TIMEOUT, NULL); break; case HCISETSCAN: err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt, HCI_INIT_TIMEOUT, NULL); /* Ensure that the connectable and discoverable states * get correctly modified as this was a non-mgmt change. 
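 * hci_update_passive_scan_state() translates the raw scan setting into
 * the HCI_CONNECTABLE/HCI_DISCOVERABLE flags and, when the management
 * interface is in use, emits a New Settings event.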
*/ if (!err) hci_update_passive_scan_state(hdev, dr.dev_opt); break; case HCISETLINKPOL: err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt, HCI_INIT_TIMEOUT, NULL); break; case HCISETLINKMODE: hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT); break; case HCISETPTYPE: if (hdev->pkt_type == (__u16) dr.dev_opt) break; hdev->pkt_type = (__u16) dr.dev_opt; mgmt_phy_configuration_changed(hdev, NULL); break; case HCISETACLMTU: hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1); hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0); break; case HCISETSCOMTU: hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1); hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0); break; default: err = -EINVAL; break; } done: hci_dev_put(hdev); return err; } int hci_get_dev_list(void __user *arg) { struct hci_dev *hdev; struct hci_dev_list_req *dl; struct hci_dev_req *dr; int n = 0, size, err; __u16 dev_num; if (get_user(dev_num, (__u16 __user *) arg)) return -EFAULT; if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr)) return -EINVAL; size = sizeof(*dl) + dev_num * sizeof(*dr); dl = kzalloc(size, GFP_KERNEL); if (!dl) return -ENOMEM; dr = dl->dev_req; read_lock(&hci_dev_list_lock); list_for_each_entry(hdev, &hci_dev_list, list) { unsigned long flags = hdev->flags; /* When the auto-off is configured it means the transport * is running, but in that case still indicate that the * device is actually down. */ if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) flags &= ~BIT(HCI_UP); (dr + n)->dev_id = hdev->id; (dr + n)->dev_opt = flags; if (++n >= dev_num) break; } read_unlock(&hci_dev_list_lock); dl->dev_num = n; size = sizeof(*dl) + n * sizeof(*dr); err = copy_to_user(arg, dl, size); kfree(dl); return err ? -EFAULT : 0; } int hci_get_dev_info(void __user *arg) { struct hci_dev *hdev; struct hci_dev_info di; unsigned long flags; int err = 0; if (copy_from_user(&di, arg, sizeof(di))) return -EFAULT; hdev = hci_dev_get(di.dev_id); if (!hdev) return -ENODEV; /* When the auto-off is configured it means the transport * is running, but in that case still indicate that the * device is actually down. 
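 * (The HCI_UP bit is masked out of the flags reported to userspace
 * below.)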
*/ if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) flags = hdev->flags & ~BIT(HCI_UP); else flags = hdev->flags; strcpy(di.name, hdev->name); di.bdaddr = hdev->bdaddr; di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4); di.flags = flags; di.pkt_type = hdev->pkt_type; if (lmp_bredr_capable(hdev)) { di.acl_mtu = hdev->acl_mtu; di.acl_pkts = hdev->acl_pkts; di.sco_mtu = hdev->sco_mtu; di.sco_pkts = hdev->sco_pkts; } else { di.acl_mtu = hdev->le_mtu; di.acl_pkts = hdev->le_pkts; di.sco_mtu = 0; di.sco_pkts = 0; } di.link_policy = hdev->link_policy; di.link_mode = hdev->link_mode; memcpy(&di.stat, &hdev->stat, sizeof(di.stat)); memcpy(&di.features, &hdev->features, sizeof(di.features)); if (copy_to_user(arg, &di, sizeof(di))) err = -EFAULT; hci_dev_put(hdev); return err; } /* ---- Interface to HCI drivers ---- */ static int hci_rfkill_set_block(void *data, bool blocked) { struct hci_dev *hdev = data; BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked); if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) return -EBUSY; if (blocked) { hci_dev_set_flag(hdev, HCI_RFKILLED); if (!hci_dev_test_flag(hdev, HCI_SETUP) && !hci_dev_test_flag(hdev, HCI_CONFIG)) hci_dev_do_close(hdev); } else { hci_dev_clear_flag(hdev, HCI_RFKILLED); } return 0; } static const struct rfkill_ops hci_rfkill_ops = { .set_block = hci_rfkill_set_block, }; static void hci_power_on(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, power_on); int err; BT_DBG("%s", hdev->name); if (test_bit(HCI_UP, &hdev->flags) && hci_dev_test_flag(hdev, HCI_MGMT) && hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) { cancel_delayed_work(&hdev->power_off); err = hci_powered_update_sync(hdev); mgmt_power_on(hdev, err); return; } err = hci_dev_do_open(hdev); if (err < 0) { hci_dev_lock(hdev); mgmt_set_powered_failed(hdev, err); hci_dev_unlock(hdev); return; } /* During the HCI setup phase, a few error conditions are * ignored and they need to be checked now. If they are still * valid, it is important to turn the device back off. */ if (hci_dev_test_flag(hdev, HCI_RFKILLED) || hci_dev_test_flag(hdev, HCI_UNCONFIGURED) || (hdev->dev_type == HCI_PRIMARY && !bacmp(&hdev->bdaddr, BDADDR_ANY) && !bacmp(&hdev->static_addr, BDADDR_ANY))) { hci_dev_clear_flag(hdev, HCI_AUTO_OFF); hci_dev_do_close(hdev); } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) { queue_delayed_work(hdev->req_workqueue, &hdev->power_off, HCI_AUTO_OFF_TIMEOUT); } if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) { /* For unconfigured devices, set the HCI_RAW flag * so that userspace can easily identify them. */ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) set_bit(HCI_RAW, &hdev->flags); /* For fully configured devices, this will send * the Index Added event. For unconfigured devices, * it will send the Unconfigured Index Added event. * * Devices with HCI_QUIRK_RAW_DEVICE are ignored * and no event will be sent. */ mgmt_index_added(hdev); } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) { /* Once the controller is configured, it * is important to clear the HCI_RAW flag. */ if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) clear_bit(HCI_RAW, &hdev->flags); /* Powering on the controller with HCI_CONFIG set only * happens with the transition from unconfigured to * configured. This will send the Index Added event.
*/ mgmt_index_added(hdev); } } static void hci_power_off(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, power_off.work); BT_DBG("%s", hdev->name); hci_dev_do_close(hdev); } static void hci_error_reset(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset); BT_DBG("%s", hdev->name); if (hdev->hw_error) hdev->hw_error(hdev, hdev->hw_error_code); else bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code); if (hci_dev_do_close(hdev)) return; hci_dev_do_open(hdev); } void hci_uuids_clear(struct hci_dev *hdev) { struct bt_uuid *uuid, *tmp; list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) { list_del(&uuid->list); kfree(uuid); } } void hci_link_keys_clear(struct hci_dev *hdev) { struct link_key *key, *tmp; list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) { list_del_rcu(&key->list); kfree_rcu(key, rcu); } } void hci_smp_ltks_clear(struct hci_dev *hdev) { struct smp_ltk *k, *tmp; list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) { list_del_rcu(&k->list); kfree_rcu(k, rcu); } } void hci_smp_irks_clear(struct hci_dev *hdev) { struct smp_irk *k, *tmp; list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) { list_del_rcu(&k->list); kfree_rcu(k, rcu); } } void hci_blocked_keys_clear(struct hci_dev *hdev) { struct blocked_key *b, *tmp; list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) { list_del_rcu(&b->list); kfree_rcu(b, rcu); } } bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16]) { bool blocked = false; struct blocked_key *b; rcu_read_lock(); list_for_each_entry_rcu(b, &hdev->blocked_keys, list) { if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) { blocked = true; break; } } rcu_read_unlock(); return blocked; } struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) { struct link_key *k; rcu_read_lock(); list_for_each_entry_rcu(k, &hdev->link_keys, list) { if (bacmp(bdaddr, &k->bdaddr) == 0) { rcu_read_unlock(); if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LINKKEY, k->val)) { bt_dev_warn_ratelimited(hdev, "Link key blocked for %pMR", &k->bdaddr); return NULL; } return k; } } rcu_read_unlock(); return NULL; } static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, u8 key_type, u8 old_key_type) { /* Legacy key */ if (key_type < 0x03) return true; /* Debug keys are insecure so don't store them persistently */ if (key_type == HCI_LK_DEBUG_COMBINATION) return false; /* Changed combination key and there's no previous one */ if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff) return false; /* Security mode 3 case */ if (!conn) return true; /* BR/EDR key derived using SC from an LE link */ if (conn->type == LE_LINK) return true; /* Neither local nor remote side had no-bonding as requirement */ if (conn->auth_type > 0x01 && conn->remote_auth > 0x01) return true; /* Local side had dedicated bonding as requirement */ if (conn->auth_type == 0x02 || conn->auth_type == 0x03) return true; /* Remote side had dedicated bonding as requirement */ if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) return true; /* If none of the above criteria match, then don't store the key * persistently */ return false; } static u8 ltk_role(u8 type) { if (type == SMP_LTK) return HCI_ROLE_MASTER; return HCI_ROLE_SLAVE; } struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 role) { struct smp_ltk *k; rcu_read_lock(); list_for_each_entry_rcu(k, &hdev->long_term_keys, 
list) { if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr)) continue; if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) { rcu_read_unlock(); if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK, k->val)) { bt_dev_warn_ratelimited(hdev, "LTK blocked for %pMR", &k->bdaddr); return NULL; } return k; } } rcu_read_unlock(); return NULL; } struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa) { struct smp_irk *irk_to_return = NULL; struct smp_irk *irk; rcu_read_lock(); list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { if (!bacmp(&irk->rpa, rpa)) { irk_to_return = irk; goto done; } } list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { if (smp_irk_matches(hdev, irk->val, rpa)) { bacpy(&irk->rpa, rpa); irk_to_return = irk; goto done; } } done: if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK, irk_to_return->val)) { bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR", &irk_to_return->bdaddr); irk_to_return = NULL; } rcu_read_unlock(); return irk_to_return; } struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) { struct smp_irk *irk_to_return = NULL; struct smp_irk *irk; /* Identity Address must be public or static random */ if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0) return NULL; rcu_read_lock(); list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { if (addr_type == irk->addr_type && bacmp(bdaddr, &irk->bdaddr) == 0) { irk_to_return = irk; goto done; } } done: if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK, irk_to_return->val)) { bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR", &irk_to_return->bdaddr); irk_to_return = NULL; } rcu_read_unlock(); return irk_to_return; } struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len, bool *persistent) { struct link_key *key, *old_key; u8 old_key_type; old_key = hci_find_link_key(hdev, bdaddr); if (old_key) { old_key_type = old_key->type; key = old_key; } else { old_key_type = conn ? 
conn->key_type : 0xff; key = kzalloc(sizeof(*key), GFP_KERNEL); if (!key) return NULL; list_add_rcu(&key->list, &hdev->link_keys); } BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type); /* Some buggy controller combinations generate a changed * combination key for legacy pairing even when there's no * previous key */ if (type == HCI_LK_CHANGED_COMBINATION && (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) { type = HCI_LK_COMBINATION; if (conn) conn->key_type = type; } bacpy(&key->bdaddr, bdaddr); memcpy(key->val, val, HCI_LINK_KEY_SIZE); key->pin_len = pin_len; if (type == HCI_LK_CHANGED_COMBINATION) key->type = old_key_type; else key->type = type; if (persistent) *persistent = hci_persistent_key(hdev, conn, type, old_key_type); return key; } struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type, u8 authenticated, u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand) { struct smp_ltk *key, *old_key; u8 role = ltk_role(type); old_key = hci_find_ltk(hdev, bdaddr, addr_type, role); if (old_key) key = old_key; else { key = kzalloc(sizeof(*key), GFP_KERNEL); if (!key) return NULL; list_add_rcu(&key->list, &hdev->long_term_keys); } bacpy(&key->bdaddr, bdaddr); key->bdaddr_type = addr_type; memcpy(key->val, tk, sizeof(key->val)); key->authenticated = authenticated; key->ediv = ediv; key->rand = rand; key->enc_size = enc_size; key->type = type; return key; } struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 val[16], bdaddr_t *rpa) { struct smp_irk *irk; irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type); if (!irk) { irk = kzalloc(sizeof(*irk), GFP_KERNEL); if (!irk) return NULL; bacpy(&irk->bdaddr, bdaddr); irk->addr_type = addr_type; list_add_rcu(&irk->list, &hdev->identity_resolving_keys); } memcpy(irk->val, val, 16); bacpy(&irk->rpa, rpa); return irk; } int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) { struct link_key *key; key = hci_find_link_key(hdev, bdaddr); if (!key) return -ENOENT; BT_DBG("%s removing %pMR", hdev->name, bdaddr); list_del_rcu(&key->list); kfree_rcu(key, rcu); return 0; } int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) { struct smp_ltk *k, *tmp; int removed = 0; list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) { if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type) continue; BT_DBG("%s removing %pMR", hdev->name, bdaddr); list_del_rcu(&k->list); kfree_rcu(k, rcu); removed++; } return removed ? 
0 : -ENOENT; } void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) { struct smp_irk *k, *tmp; list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) { if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type) continue; BT_DBG("%s removing %pMR", hdev->name, bdaddr); list_del_rcu(&k->list); kfree_rcu(k, rcu); } } bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) { struct smp_ltk *k; struct smp_irk *irk; u8 addr_type; if (type == BDADDR_BREDR) { if (hci_find_link_key(hdev, bdaddr)) return true; return false; } /* Convert to HCI addr type which struct smp_ltk uses */ if (type == BDADDR_LE_PUBLIC) addr_type = ADDR_LE_DEV_PUBLIC; else addr_type = ADDR_LE_DEV_RANDOM; irk = hci_get_irk(hdev, bdaddr, addr_type); if (irk) { bdaddr = &irk->bdaddr; addr_type = irk->addr_type; } rcu_read_lock(); list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) { rcu_read_unlock(); return true; } } rcu_read_unlock(); return false; } /* HCI command timer function */ static void hci_cmd_timeout(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_timer.work); if (hdev->sent_cmd) { struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data; u16 opcode = __le16_to_cpu(sent->opcode); bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode); } else { bt_dev_err(hdev, "command tx timeout"); } if (hdev->cmd_timeout) hdev->cmd_timeout(hdev); atomic_set(&hdev->cmd_cnt, 1); queue_work(hdev->workqueue, &hdev->cmd_work); } /* HCI ncmd timer function */ static void hci_ncmd_timeout(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, ncmd_timer.work); bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0"); /* During HCI_INIT phase no events can be injected if the ncmd timer * triggers since the procedure has its own timeout handling. 
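 * Outside of initialization the injected Hardware Error event is fed
 * through hci_recv_frame(), i.e. it is handled exactly like an event
 * reported by the controller itself.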
*/ if (test_bit(HCI_INIT, &hdev->flags)) return; /* This is an irrecoverable state, inject hardware error event */ hci_reset_dev(hdev); } struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) { struct oob_data *data; list_for_each_entry(data, &hdev->remote_oob_data, list) { if (bacmp(bdaddr, &data->bdaddr) != 0) continue; if (data->bdaddr_type != bdaddr_type) continue; return data; } return NULL; } int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) { struct oob_data *data; data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type); if (!data) return -ENOENT; BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type); list_del(&data->list); kfree(data); return 0; } void hci_remote_oob_data_clear(struct hci_dev *hdev) { struct oob_data *data, *n; list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) { list_del(&data->list); kfree(data); } } int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type, u8 *hash192, u8 *rand192, u8 *hash256, u8 *rand256) { struct oob_data *data; data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type); if (!data) { data = kmalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; bacpy(&data->bdaddr, bdaddr); data->bdaddr_type = bdaddr_type; list_add(&data->list, &hdev->remote_oob_data); } if (hash192 && rand192) { memcpy(data->hash192, hash192, sizeof(data->hash192)); memcpy(data->rand192, rand192, sizeof(data->rand192)); if (hash256 && rand256) data->present = 0x03; } else { memset(data->hash192, 0, sizeof(data->hash192)); memset(data->rand192, 0, sizeof(data->rand192)); if (hash256 && rand256) data->present = 0x02; else data->present = 0x00; } if (hash256 && rand256) { memcpy(data->hash256, hash256, sizeof(data->hash256)); memcpy(data->rand256, rand256, sizeof(data->rand256)); } else { memset(data->hash256, 0, sizeof(data->hash256)); memset(data->rand256, 0, sizeof(data->rand256)); if (hash192 && rand192) data->present = 0x01; } BT_DBG("%s for %pMR", hdev->name, bdaddr); return 0; } /* This function requires the caller holds hdev->lock */ struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance) { struct adv_info *adv_instance; list_for_each_entry(adv_instance, &hdev->adv_instances, list) { if (adv_instance->instance == instance) return adv_instance; } return NULL; } /* This function requires the caller holds hdev->lock */ struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance) { struct adv_info *cur_instance; cur_instance = hci_find_adv_instance(hdev, instance); if (!cur_instance) return NULL; if (cur_instance == list_last_entry(&hdev->adv_instances, struct adv_info, list)) return list_first_entry(&hdev->adv_instances, struct adv_info, list); else return list_next_entry(cur_instance, list); } /* This function requires the caller holds hdev->lock */ int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance) { struct adv_info *adv_instance; adv_instance = hci_find_adv_instance(hdev, instance); if (!adv_instance) return -ENOENT; BT_DBG("%s removing %dMR", hdev->name, instance); if (hdev->cur_adv_instance == instance) { if (hdev->adv_instance_timeout) { cancel_delayed_work(&hdev->adv_instance_expire); hdev->adv_instance_timeout = 0; } hdev->cur_adv_instance = 0x00; } cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); list_del(&adv_instance->list); kfree(adv_instance); hdev->adv_instance_cnt--; return 0; } void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired) { struct 
adv_info *adv_instance, *n; list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) adv_instance->rpa_expired = rpa_expired; } /* This function requires the caller holds hdev->lock */ void hci_adv_instances_clear(struct hci_dev *hdev) { struct adv_info *adv_instance, *n; if (hdev->adv_instance_timeout) { cancel_delayed_work(&hdev->adv_instance_expire); hdev->adv_instance_timeout = 0; } list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) { cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); list_del(&adv_instance->list); kfree(adv_instance); } hdev->adv_instance_cnt = 0; hdev->cur_adv_instance = 0x00; } static void adv_instance_rpa_expired(struct work_struct *work) { struct adv_info *adv_instance = container_of(work, struct adv_info, rpa_expired_cb.work); BT_DBG(""); adv_instance->rpa_expired = true; } /* This function requires the caller holds hdev->lock */ struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, u16 adv_data_len, u8 *adv_data, u16 scan_rsp_len, u8 *scan_rsp_data, u16 timeout, u16 duration, s8 tx_power, u32 min_interval, u32 max_interval, u8 mesh_handle) { struct adv_info *adv; adv = hci_find_adv_instance(hdev, instance); if (adv) { memset(adv->adv_data, 0, sizeof(adv->adv_data)); memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data)); memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data)); } else { if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets || instance < 1 || instance > hdev->le_num_of_adv_sets + 1) return ERR_PTR(-EOVERFLOW); adv = kzalloc(sizeof(*adv), GFP_KERNEL); if (!adv) return ERR_PTR(-ENOMEM); adv->pending = true; adv->instance = instance; list_add(&adv->list, &hdev->adv_instances); hdev->adv_instance_cnt++; } adv->flags = flags; adv->min_interval = min_interval; adv->max_interval = max_interval; adv->tx_power = tx_power; /* Defining a mesh_handle changes the timing units to ms, * rather than seconds, and ties the instance to the requested * mesh_tx queue. 
*/ adv->mesh = mesh_handle; hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data, scan_rsp_len, scan_rsp_data); adv->timeout = timeout; adv->remaining_time = timeout; if (duration == 0) adv->duration = hdev->def_multi_adv_rotation_duration; else adv->duration = duration; INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired); BT_DBG("%s for %dMR", hdev->name, instance); return adv; } /* This function requires the caller holds hdev->lock */ struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance, u32 flags, u8 data_len, u8 *data, u32 min_interval, u32 max_interval) { struct adv_info *adv; adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL, 0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE, min_interval, max_interval, 0); if (IS_ERR(adv)) return adv; adv->periodic = true; adv->per_adv_data_len = data_len; if (data) memcpy(adv->per_adv_data, data, data_len); return adv; } /* This function requires the caller holds hdev->lock */ int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance, u16 adv_data_len, u8 *adv_data, u16 scan_rsp_len, u8 *scan_rsp_data) { struct adv_info *adv; adv = hci_find_adv_instance(hdev, instance); /* If advertisement doesn't exist, we can't modify its data */ if (!adv) return -ENOENT; if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) { memset(adv->adv_data, 0, sizeof(adv->adv_data)); memcpy(adv->adv_data, adv_data, adv_data_len); adv->adv_data_len = adv_data_len; adv->adv_data_changed = true; } if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) { memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data)); memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len); adv->scan_rsp_len = scan_rsp_len; adv->scan_rsp_changed = true; } /* Mark as changed if there are flags which would affect it */ if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) || adv->flags & MGMT_ADV_FLAG_LOCAL_NAME) adv->scan_rsp_changed = true; return 0; } /* This function requires the caller holds hdev->lock */ u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance) { u32 flags; struct adv_info *adv; if (instance == 0x00) { /* Instance 0 always manages the "Tx Power" and "Flags" * fields */ flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS; /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting * corresponds to the "connectable" instance flag. */ if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) flags |= MGMT_ADV_FLAG_CONNECTABLE; if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) flags |= MGMT_ADV_FLAG_LIMITED_DISCOV; else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) flags |= MGMT_ADV_FLAG_DISCOV; return flags; } adv = hci_find_adv_instance(hdev, instance); /* Return 0 when we got an invalid instance identifier. */ if (!adv) return 0; return adv->flags; } bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance) { struct adv_info *adv; /* Instance 0x00 always sets the local name */ if (instance == 0x00) return true; adv = hci_find_adv_instance(hdev, instance); if (!adv) return false; if (adv->flags & MGMT_ADV_FLAG_APPEARANCE || adv->flags & MGMT_ADV_FLAG_LOCAL_NAME) return true; return adv->scan_rsp_len ? true : false; } /* This function requires the caller holds hdev->lock */ void hci_adv_monitors_clear(struct hci_dev *hdev) { struct adv_monitor *monitor; int handle; idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) hci_free_adv_monitor(hdev, monitor); idr_destroy(&hdev->adv_monitors_idr); } /* Frees the monitor structure and does some bookkeeping.
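 * The monitor's patterns are freed, its handle is removed from the
 * adv_monitors_idr and, if the monitor had been registered, the monitor
 * count is decremented and mgmt is notified of the removal.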
* This function requires the caller holds hdev->lock. */ void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) { struct adv_pattern *pattern; struct adv_pattern *tmp; if (!monitor) return; list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) { list_del(&pattern->list); kfree(pattern); } if (monitor->handle) idr_remove(&hdev->adv_monitors_idr, monitor->handle); if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) { hdev->adv_monitors_cnt--; mgmt_adv_monitor_removed(hdev, monitor->handle); } kfree(monitor); } /* Assigns handle to a monitor, and if offloading is supported and power is on, * also attempts to forward the request to the controller. * This function requires the caller holds hci_req_sync_lock. */ int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) { int min, max, handle; int status = 0; if (!monitor) return -EINVAL; hci_dev_lock(hdev); min = HCI_MIN_ADV_MONITOR_HANDLE; max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES; handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max, GFP_KERNEL); hci_dev_unlock(hdev); if (handle < 0) return handle; monitor->handle = handle; if (!hdev_is_powered(hdev)) return status; switch (hci_get_adv_monitor_offload_ext(hdev)) { case HCI_ADV_MONITOR_EXT_NONE: bt_dev_dbg(hdev, "add monitor %d status %d", monitor->handle, status); /* Message was not forwarded to controller - not an error */ break; case HCI_ADV_MONITOR_EXT_MSFT: status = msft_add_monitor_pattern(hdev, monitor); bt_dev_dbg(hdev, "add monitor %d msft status %d", handle, status); break; } return status; } /* Attempts to tell the controller and free the monitor. If somehow the * controller doesn't have a corresponding handle, remove anyway. * This function requires the caller holds hci_req_sync_lock. 
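 * If the MSFT path reports -ENOENT the monitor is still freed locally,
 * since the controller no longer knows about that handle anyway.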
*/ static int hci_remove_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) { int status = 0; int handle; switch (hci_get_adv_monitor_offload_ext(hdev)) { case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */ bt_dev_dbg(hdev, "remove monitor %d status %d", monitor->handle, status); goto free_monitor; case HCI_ADV_MONITOR_EXT_MSFT: handle = monitor->handle; status = msft_remove_monitor(hdev, monitor); bt_dev_dbg(hdev, "remove monitor %d msft status %d", handle, status); break; } /* In case no matching handle registered, just free the monitor */ if (status == -ENOENT) goto free_monitor; return status; free_monitor: if (status == -ENOENT) bt_dev_warn(hdev, "Removing monitor with no matching handle %d", monitor->handle); hci_free_adv_monitor(hdev, monitor); return status; } /* This function requires the caller holds hci_req_sync_lock */ int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle) { struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle); if (!monitor) return -EINVAL; return hci_remove_adv_monitor(hdev, monitor); } /* This function requires the caller holds hci_req_sync_lock */ int hci_remove_all_adv_monitor(struct hci_dev *hdev) { struct adv_monitor *monitor; int idr_next_id = 0; int status = 0; while (1) { monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id); if (!monitor) break; status = hci_remove_adv_monitor(hdev, monitor); if (status) return status; idr_next_id++; } return status; } /* This function requires the caller holds hdev->lock */ bool hci_is_adv_monitoring(struct hci_dev *hdev) { return !idr_is_empty(&hdev->adv_monitors_idr); } int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev) { if (msft_monitor_supported(hdev)) return HCI_ADV_MONITOR_EXT_MSFT; return HCI_ADV_MONITOR_EXT_NONE; } struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list, bdaddr_t *bdaddr, u8 type) { struct bdaddr_list *b; list_for_each_entry(b, bdaddr_list, list) { if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) return b; } return NULL; } struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk( struct list_head *bdaddr_list, bdaddr_t *bdaddr, u8 type) { struct bdaddr_list_with_irk *b; list_for_each_entry(b, bdaddr_list, list) { if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) return b; } return NULL; } struct bdaddr_list_with_flags * hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list, bdaddr_t *bdaddr, u8 type) { struct bdaddr_list_with_flags *b; list_for_each_entry(b, bdaddr_list, list) { if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) return b; } return NULL; } void hci_bdaddr_list_clear(struct list_head *bdaddr_list) { struct bdaddr_list *b, *n; list_for_each_entry_safe(b, n, bdaddr_list, list) { list_del(&b->list); kfree(b); } } int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type) { struct bdaddr_list *entry; if (!bacmp(bdaddr, BDADDR_ANY)) return -EBADF; if (hci_bdaddr_list_lookup(list, bdaddr, type)) return -EEXIST; entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return -ENOMEM; bacpy(&entry->bdaddr, bdaddr); entry->bdaddr_type = type; list_add(&entry->list, list); return 0; } int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr, u8 type, u8 *peer_irk, u8 *local_irk) { struct bdaddr_list_with_irk *entry; if (!bacmp(bdaddr, BDADDR_ANY)) return -EBADF; if (hci_bdaddr_list_lookup(list, bdaddr, type)) return -EEXIST; entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return -ENOMEM; bacpy(&entry->bdaddr, 
bdaddr); entry->bdaddr_type = type; if (peer_irk) memcpy(entry->peer_irk, peer_irk, 16); if (local_irk) memcpy(entry->local_irk, local_irk, 16); list_add(&entry->list, list); return 0; } int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr, u8 type, u32 flags) { struct bdaddr_list_with_flags *entry; if (!bacmp(bdaddr, BDADDR_ANY)) return -EBADF; if (hci_bdaddr_list_lookup(list, bdaddr, type)) return -EEXIST; entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return -ENOMEM; bacpy(&entry->bdaddr, bdaddr); entry->bdaddr_type = type; entry->flags = flags; list_add(&entry->list, list); return 0; } int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type) { struct bdaddr_list *entry; if (!bacmp(bdaddr, BDADDR_ANY)) { hci_bdaddr_list_clear(list); return 0; } entry = hci_bdaddr_list_lookup(list, bdaddr, type); if (!entry) return -ENOENT; list_del(&entry->list); kfree(entry); return 0; } int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr, u8 type) { struct bdaddr_list_with_irk *entry; if (!bacmp(bdaddr, BDADDR_ANY)) { hci_bdaddr_list_clear(list); return 0; } entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type); if (!entry) return -ENOENT; list_del(&entry->list); kfree(entry); return 0; } int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr, u8 type) { struct bdaddr_list_with_flags *entry; if (!bacmp(bdaddr, BDADDR_ANY)) { hci_bdaddr_list_clear(list); return 0; } entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type); if (!entry) return -ENOENT; list_del(&entry->list); kfree(entry); return 0; } /* This function requires the caller holds hdev->lock */ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) { struct hci_conn_params *params; list_for_each_entry(params, &hdev->le_conn_params, list) { if (bacmp(¶ms->addr, addr) == 0 && params->addr_type == addr_type) { return params; } } return NULL; } /* This function requires the caller holds hdev->lock or rcu_read_lock */ struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list, bdaddr_t *addr, u8 addr_type) { struct hci_conn_params *param; rcu_read_lock(); list_for_each_entry_rcu(param, list, action) { if (bacmp(¶m->addr, addr) == 0 && param->addr_type == addr_type) { rcu_read_unlock(); return param; } } rcu_read_unlock(); return NULL; } /* This function requires the caller holds hdev->lock */ void hci_pend_le_list_del_init(struct hci_conn_params *param) { if (list_empty(¶m->action)) return; list_del_rcu(¶m->action); synchronize_rcu(); INIT_LIST_HEAD(¶m->action); } /* This function requires the caller holds hdev->lock */ void hci_pend_le_list_add(struct hci_conn_params *param, struct list_head *list) { list_add_rcu(¶m->action, list); } /* This function requires the caller holds hdev->lock */ struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) { struct hci_conn_params *params; params = hci_conn_params_lookup(hdev, addr, addr_type); if (params) return params; params = kzalloc(sizeof(*params), GFP_KERNEL); if (!params) { bt_dev_err(hdev, "out of memory"); return NULL; } bacpy(¶ms->addr, addr); params->addr_type = addr_type; list_add(¶ms->list, &hdev->le_conn_params); INIT_LIST_HEAD(¶ms->action); params->conn_min_interval = hdev->le_conn_min_interval; params->conn_max_interval = hdev->le_conn_max_interval; params->conn_latency = hdev->le_conn_latency; params->supervision_timeout = hdev->le_supv_timeout; params->auto_connect = 
HCI_AUTO_CONN_DISABLED; BT_DBG("addr %pMR (type %u)", addr, addr_type); return params; } void hci_conn_params_free(struct hci_conn_params *params) { hci_pend_le_list_del_init(params); if (params->conn) { hci_conn_drop(params->conn); hci_conn_put(params->conn); } list_del(¶ms->list); kfree(params); } /* This function requires the caller holds hdev->lock */ void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) { struct hci_conn_params *params; params = hci_conn_params_lookup(hdev, addr, addr_type); if (!params) return; hci_conn_params_free(params); hci_update_passive_scan(hdev); BT_DBG("addr %pMR (type %u)", addr, addr_type); } /* This function requires the caller holds hdev->lock */ void hci_conn_params_clear_disabled(struct hci_dev *hdev) { struct hci_conn_params *params, *tmp; list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) { if (params->auto_connect != HCI_AUTO_CONN_DISABLED) continue; /* If trying to establish one time connection to disabled * device, leave the params, but mark them as just once. */ if (params->explicit_connect) { params->auto_connect = HCI_AUTO_CONN_EXPLICIT; continue; } hci_conn_params_free(params); } BT_DBG("All LE disabled connection parameters were removed"); } /* This function requires the caller holds hdev->lock */ static void hci_conn_params_clear_all(struct hci_dev *hdev) { struct hci_conn_params *params, *tmp; list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) hci_conn_params_free(params); BT_DBG("All LE connection parameters were removed"); } /* Copy the Identity Address of the controller. * * If the controller has a public BD_ADDR, then by default use that one. * If this is a LE only controller without a public address, default to * the static random address. * * For debugging purposes it is possible to force controllers with a * public address to use the static random address instead. * * In case BR/EDR has been disabled on a dual-mode controller and * userspace has configured a static address, then that address * becomes the identity address instead of the public BR/EDR address. */ void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *bdaddr_type) { if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || !bacmp(&hdev->bdaddr, BDADDR_ANY) || (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && bacmp(&hdev->static_addr, BDADDR_ANY))) { bacpy(bdaddr, &hdev->static_addr); *bdaddr_type = ADDR_LE_DEV_RANDOM; } else { bacpy(bdaddr, &hdev->bdaddr); *bdaddr_type = ADDR_LE_DEV_PUBLIC; } } static void hci_clear_wake_reason(struct hci_dev *hdev) { hci_dev_lock(hdev); hdev->wake_reason = 0; bacpy(&hdev->wake_addr, BDADDR_ANY); hdev->wake_addr_type = 0; hci_dev_unlock(hdev); } static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action, void *data) { struct hci_dev *hdev = container_of(nb, struct hci_dev, suspend_notifier); int ret = 0; /* Userspace has full control of this device. Do nothing. */ if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) return NOTIFY_DONE; /* To avoid a potential race with hci_unregister_dev. 
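 * A reference on hdev is held for the duration of the notifier callback
 * and dropped again before returning.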
*/ hci_dev_hold(hdev); if (action == PM_SUSPEND_PREPARE) ret = hci_suspend_dev(hdev); else if (action == PM_POST_SUSPEND) ret = hci_resume_dev(hdev); if (ret) bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d", action, ret); hci_dev_put(hdev); return NOTIFY_DONE; } /* Alloc HCI device */ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv) { struct hci_dev *hdev; unsigned int alloc_size; alloc_size = sizeof(*hdev); if (sizeof_priv) { /* Fixme: May need ALIGN-ment? */ alloc_size += sizeof_priv; } hdev = kzalloc(alloc_size, GFP_KERNEL); if (!hdev) return NULL; hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); hdev->esco_type = (ESCO_HV1); hdev->link_mode = (HCI_LM_ACCEPT); hdev->num_iac = 0x01; /* One IAC support is mandatory */ hdev->io_capability = 0x03; /* No Input No Output */ hdev->manufacturer = 0xffff; /* Default to internal use */ hdev->inq_tx_power = HCI_TX_POWER_INVALID; hdev->adv_tx_power = HCI_TX_POWER_INVALID; hdev->adv_instance_cnt = 0; hdev->cur_adv_instance = 0x00; hdev->adv_instance_timeout = 0; hdev->advmon_allowlist_duration = 300; hdev->advmon_no_filter_duration = 500; hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */ hdev->sniff_max_interval = 800; hdev->sniff_min_interval = 80; hdev->le_adv_channel_map = 0x07; hdev->le_adv_min_interval = 0x0800; hdev->le_adv_max_interval = 0x0800; hdev->le_scan_interval = 0x0060; hdev->le_scan_window = 0x0030; hdev->le_scan_int_suspend = 0x0400; hdev->le_scan_window_suspend = 0x0012; hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT; hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN; hdev->le_scan_int_adv_monitor = 0x0060; hdev->le_scan_window_adv_monitor = 0x0030; hdev->le_scan_int_connect = 0x0060; hdev->le_scan_window_connect = 0x0060; hdev->le_conn_min_interval = 0x0018; hdev->le_conn_max_interval = 0x0028; hdev->le_conn_latency = 0x0000; hdev->le_supv_timeout = 0x002a; hdev->le_def_tx_len = 0x001b; hdev->le_def_tx_time = 0x0148; hdev->le_max_tx_len = 0x001b; hdev->le_max_tx_time = 0x0148; hdev->le_max_rx_len = 0x001b; hdev->le_max_rx_time = 0x0148; hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE; hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE; hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M; hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M; hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES; hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION; hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT; hdev->min_le_tx_power = HCI_TX_POWER_INVALID; hdev->max_le_tx_power = HCI_TX_POWER_INVALID; hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT; hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT; hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE; hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE; hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT; hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE; /* default 1.28 sec page scan */ hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD; hdev->def_page_scan_int = 0x0800; hdev->def_page_scan_window = 0x0012; mutex_init(&hdev->lock); mutex_init(&hdev->req_lock); ida_init(&hdev->unset_handle_ida); INIT_LIST_HEAD(&hdev->mesh_pending); INIT_LIST_HEAD(&hdev->mgmt_pending); INIT_LIST_HEAD(&hdev->reject_list); INIT_LIST_HEAD(&hdev->accept_list); INIT_LIST_HEAD(&hdev->uuids); INIT_LIST_HEAD(&hdev->link_keys); INIT_LIST_HEAD(&hdev->long_term_keys); INIT_LIST_HEAD(&hdev->identity_resolving_keys); INIT_LIST_HEAD(&hdev->remote_oob_data); INIT_LIST_HEAD(&hdev->le_accept_list); INIT_LIST_HEAD(&hdev->le_resolv_list); INIT_LIST_HEAD(&hdev->le_conn_params); 
INIT_LIST_HEAD(&hdev->pend_le_conns); INIT_LIST_HEAD(&hdev->pend_le_reports); INIT_LIST_HEAD(&hdev->conn_hash.list); INIT_LIST_HEAD(&hdev->adv_instances); INIT_LIST_HEAD(&hdev->blocked_keys); INIT_LIST_HEAD(&hdev->monitored_devices); INIT_LIST_HEAD(&hdev->local_codecs); INIT_WORK(&hdev->rx_work, hci_rx_work); INIT_WORK(&hdev->cmd_work, hci_cmd_work); INIT_WORK(&hdev->tx_work, hci_tx_work); INIT_WORK(&hdev->power_on, hci_power_on); INIT_WORK(&hdev->error_reset, hci_error_reset); hci_cmd_sync_init(hdev); INIT_DELAYED_WORK(&hdev->power_off, hci_power_off); skb_queue_head_init(&hdev->rx_q); skb_queue_head_init(&hdev->cmd_q); skb_queue_head_init(&hdev->raw_q); init_waitqueue_head(&hdev->req_wait_q); INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout); INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout); hci_devcd_setup(hdev); hci_request_setup(hdev); hci_init_sysfs(hdev); discovery_init(hdev); return hdev; } EXPORT_SYMBOL(hci_alloc_dev_priv); /* Free HCI device */ void hci_free_dev(struct hci_dev *hdev) { /* will free via device release */ put_device(&hdev->dev); } EXPORT_SYMBOL(hci_free_dev); /* Register HCI device */ int hci_register_dev(struct hci_dev *hdev) { int id, error; if (!hdev->open || !hdev->close || !hdev->send) return -EINVAL; /* Do not allow HCI_AMP devices to register at index 0, * so the index can be used as the AMP controller ID. */ switch (hdev->dev_type) { case HCI_PRIMARY: id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL); break; case HCI_AMP: id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL); break; default: return -EINVAL; } if (id < 0) return id; error = dev_set_name(&hdev->dev, "hci%u", id); if (error) return error; hdev->name = dev_name(&hdev->dev); hdev->id = id; BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name); if (!hdev->workqueue) { error = -ENOMEM; goto err; } hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name); if (!hdev->req_workqueue) { destroy_workqueue(hdev->workqueue); error = -ENOMEM; goto err; } if (!IS_ERR_OR_NULL(bt_debugfs)) hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs); error = device_add(&hdev->dev); if (error < 0) goto err_wqueue; hci_leds_init(hdev); hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev); if (hdev->rfkill) { if (rfkill_register(hdev->rfkill) < 0) { rfkill_destroy(hdev->rfkill); hdev->rfkill = NULL; } } if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) hci_dev_set_flag(hdev, HCI_RFKILLED); hci_dev_set_flag(hdev, HCI_SETUP); hci_dev_set_flag(hdev, HCI_AUTO_OFF); if (hdev->dev_type == HCI_PRIMARY) { /* Assume BR/EDR support until proven otherwise (such as * through reading supported features during init. */ hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); } write_lock(&hci_dev_list_lock); list_add(&hdev->list, &hci_dev_list); write_unlock(&hci_dev_list_lock); /* Devices that are marked for raw-only usage are unconfigured * and should not be included in normal operation. */ if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) hci_dev_set_flag(hdev, HCI_UNCONFIGURED); /* Mark Remote Wakeup connection flag as supported if driver has wakeup * callback. 
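 * The flag then appears in the set of supported connection flags that
 * is exposed through the management interface.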
*/ if (hdev->wakeup) hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP; hci_sock_dev_event(hdev, HCI_DEV_REG); hci_dev_hold(hdev); error = hci_register_suspend_notifier(hdev); if (error) BT_WARN("register suspend notifier failed error:%d\n", error); queue_work(hdev->req_workqueue, &hdev->power_on); idr_init(&hdev->adv_monitors_idr); msft_register(hdev); return id; err_wqueue: debugfs_remove_recursive(hdev->debugfs); destroy_workqueue(hdev->workqueue); destroy_workqueue(hdev->req_workqueue); err: ida_simple_remove(&hci_index_ida, hdev->id); return error; } EXPORT_SYMBOL(hci_register_dev); /* Unregister HCI device */ void hci_unregister_dev(struct hci_dev *hdev) { BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); mutex_lock(&hdev->unregister_lock); hci_dev_set_flag(hdev, HCI_UNREGISTER); mutex_unlock(&hdev->unregister_lock); write_lock(&hci_dev_list_lock); list_del(&hdev->list); write_unlock(&hci_dev_list_lock); cancel_work_sync(&hdev->power_on); hci_cmd_sync_clear(hdev); hci_unregister_suspend_notifier(hdev); msft_unregister(hdev); hci_dev_do_close(hdev); if (!test_bit(HCI_INIT, &hdev->flags) && !hci_dev_test_flag(hdev, HCI_SETUP) && !hci_dev_test_flag(hdev, HCI_CONFIG)) { hci_dev_lock(hdev); mgmt_index_removed(hdev); hci_dev_unlock(hdev); } /* mgmt_index_removed should take care of emptying the * pending list */ BUG_ON(!list_empty(&hdev->mgmt_pending)); hci_sock_dev_event(hdev, HCI_DEV_UNREG); if (hdev->rfkill) { rfkill_unregister(hdev->rfkill); rfkill_destroy(hdev->rfkill); } device_del(&hdev->dev); /* Actual cleanup is deferred until hci_release_dev(). */ hci_dev_put(hdev); } EXPORT_SYMBOL(hci_unregister_dev); /* Release HCI device */ void hci_release_dev(struct hci_dev *hdev) { debugfs_remove_recursive(hdev->debugfs); kfree_const(hdev->hw_info); kfree_const(hdev->fw_info); destroy_workqueue(hdev->workqueue); destroy_workqueue(hdev->req_workqueue); hci_dev_lock(hdev); hci_bdaddr_list_clear(&hdev->reject_list); hci_bdaddr_list_clear(&hdev->accept_list); hci_uuids_clear(hdev); hci_link_keys_clear(hdev); hci_smp_ltks_clear(hdev); hci_smp_irks_clear(hdev); hci_remote_oob_data_clear(hdev); hci_adv_instances_clear(hdev); hci_adv_monitors_clear(hdev); hci_bdaddr_list_clear(&hdev->le_accept_list); hci_bdaddr_list_clear(&hdev->le_resolv_list); hci_conn_params_clear_all(hdev); hci_discovery_filter_clear(hdev); hci_blocked_keys_clear(hdev); hci_codec_list_clear(&hdev->local_codecs); hci_dev_unlock(hdev); ida_destroy(&hdev->unset_handle_ida); ida_simple_remove(&hci_index_ida, hdev->id); kfree_skb(hdev->sent_cmd); kfree_skb(hdev->recv_event); kfree(hdev); } EXPORT_SYMBOL(hci_release_dev); int hci_register_suspend_notifier(struct hci_dev *hdev) { int ret = 0; if (!hdev->suspend_notifier.notifier_call && !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) { hdev->suspend_notifier.notifier_call = hci_suspend_notifier; ret = register_pm_notifier(&hdev->suspend_notifier); } return ret; } int hci_unregister_suspend_notifier(struct hci_dev *hdev) { int ret = 0; if (hdev->suspend_notifier.notifier_call) { ret = unregister_pm_notifier(&hdev->suspend_notifier); if (!ret) hdev->suspend_notifier.notifier_call = NULL; } return ret; } /* Suspend HCI device */ int hci_suspend_dev(struct hci_dev *hdev) { int ret; bt_dev_dbg(hdev, ""); /* Suspend should only act on when powered. 
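 * If the device is not powered, is being unregistered or is already
 * powering down there is nothing to quiesce, so the checks below return
 * early with success.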
*/ if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_UNREGISTER)) return 0; /* If powering down don't attempt to suspend */ if (mgmt_powering_down(hdev)) return 0; /* Cancel potentially blocking sync operation before suspend */ __hci_cmd_sync_cancel(hdev, -EHOSTDOWN); hci_req_sync_lock(hdev); ret = hci_suspend_sync(hdev); hci_req_sync_unlock(hdev); hci_clear_wake_reason(hdev); mgmt_suspending(hdev, hdev->suspend_state); hci_sock_dev_event(hdev, HCI_DEV_SUSPEND); return ret; } EXPORT_SYMBOL(hci_suspend_dev); /* Resume HCI device */ int hci_resume_dev(struct hci_dev *hdev) { int ret; bt_dev_dbg(hdev, ""); /* Resume should only act on when powered. */ if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_UNREGISTER)) return 0; /* If powering down don't attempt to resume */ if (mgmt_powering_down(hdev)) return 0; hci_req_sync_lock(hdev); ret = hci_resume_sync(hdev); hci_req_sync_unlock(hdev); mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr, hdev->wake_addr_type); hci_sock_dev_event(hdev, HCI_DEV_RESUME); return ret; } EXPORT_SYMBOL(hci_resume_dev); /* Reset HCI device */ int hci_reset_dev(struct hci_dev *hdev) { static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 }; struct sk_buff *skb; skb = bt_skb_alloc(3, GFP_ATOMIC); if (!skb) return -ENOMEM; hci_skb_pkt_type(skb) = HCI_EVENT_PKT; skb_put_data(skb, hw_err, 3); bt_dev_err(hdev, "Injecting HCI hardware error event"); /* Send Hardware Error to upper stack */ return hci_recv_frame(hdev, skb); } EXPORT_SYMBOL(hci_reset_dev); /* Receive frame from HCI drivers */ int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb) { if (!hdev || (!test_bit(HCI_UP, &hdev->flags) && !test_bit(HCI_INIT, &hdev->flags))) { kfree_skb(skb); return -ENXIO; } switch (hci_skb_pkt_type(skb)) { case HCI_EVENT_PKT: break; case HCI_ACLDATA_PKT: /* Detect if ISO packet has been sent as ACL */ if (hci_conn_num(hdev, ISO_LINK)) { __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle); __u8 type; type = hci_conn_lookup_type(hdev, hci_handle(handle)); if (type == ISO_LINK) hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; } break; case HCI_SCODATA_PKT: break; case HCI_ISODATA_PKT: break; default: kfree_skb(skb); return -EINVAL; } /* Incoming skb */ bt_cb(skb)->incoming = 1; /* Time stamp */ __net_timestamp(skb); skb_queue_tail(&hdev->rx_q, skb); queue_work(hdev->workqueue, &hdev->rx_work); return 0; } EXPORT_SYMBOL(hci_recv_frame); /* Receive diagnostic message from HCI drivers */ int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb) { /* Mark as diagnostic packet */ hci_skb_pkt_type(skb) = HCI_DIAG_PKT; /* Time stamp */ __net_timestamp(skb); skb_queue_tail(&hdev->rx_q, skb); queue_work(hdev->workqueue, &hdev->rx_work); return 0; } EXPORT_SYMBOL(hci_recv_diag); void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...) { va_list vargs; va_start(vargs, fmt); kfree_const(hdev->hw_info); hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs); va_end(vargs); } EXPORT_SYMBOL(hci_set_hw_info); void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...) 
{ va_list vargs; va_start(vargs, fmt); kfree_const(hdev->fw_info); hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs); va_end(vargs); } EXPORT_SYMBOL(hci_set_fw_info); /* ---- Interface to upper protocols ---- */ int hci_register_cb(struct hci_cb *cb) { BT_DBG("%p name %s", cb, cb->name); mutex_lock(&hci_cb_list_lock); list_add_tail(&cb->list, &hci_cb_list); mutex_unlock(&hci_cb_list_lock); return 0; } EXPORT_SYMBOL(hci_register_cb); int hci_unregister_cb(struct hci_cb *cb) { BT_DBG("%p name %s", cb, cb->name); mutex_lock(&hci_cb_list_lock); list_del(&cb->list); mutex_unlock(&hci_cb_list_lock); return 0; } EXPORT_SYMBOL(hci_unregister_cb); static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { int err; BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb), skb->len); /* Time stamp */ __net_timestamp(skb); /* Send copy to monitor */ hci_send_to_monitor(hdev, skb); if (atomic_read(&hdev->promisc)) { /* Send copy to the sockets */ hci_send_to_sock(hdev, skb); } /* Get rid of skb owner, prior to sending to the driver. */ skb_orphan(skb); if (!test_bit(HCI_RUNNING, &hdev->flags)) { kfree_skb(skb); return -EINVAL; } err = hdev->send(hdev, skb); if (err < 0) { bt_dev_err(hdev, "sending frame failed (%d)", err); kfree_skb(skb); return err; } return 0; } /* Send HCI command */ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, const void *param) { struct sk_buff *skb; BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen); skb = hci_prepare_cmd(hdev, opcode, plen, param); if (!skb) { bt_dev_err(hdev, "no memory for command"); return -ENOMEM; } /* Stand-alone HCI commands must be flagged as * single-command requests. */ bt_cb(skb)->hci.req_flags |= HCI_REQ_START; skb_queue_tail(&hdev->cmd_q, skb); queue_work(hdev->workqueue, &hdev->cmd_work); return 0; } int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param) { struct sk_buff *skb; if (hci_opcode_ogf(opcode) != 0x3f) { /* A controller receiving a command shall respond with either * a Command Status Event or a Command Complete Event. * Therefore, all standard HCI commands must be sent via the * standard API, using hci_send_cmd or hci_cmd_sync helpers. * Some vendors do not comply with this rule for vendor-specific * commands and do not return any event. We want to support * unresponded commands for such cases only. 
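 *
 * A minimal sketch of such a case (the OGF 0x3f / OCF 0x0001 opcode and
 * the "param" payload are hypothetical; the vendor command is assumed
 * to produce no Command Status or Command Complete event):
 *
 *	err = __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
 *			     sizeof(param), &param);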
*/ bt_dev_err(hdev, "unresponded command not supported"); return -EINVAL; } skb = hci_prepare_cmd(hdev, opcode, plen, param); if (!skb) { bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)", opcode); return -ENOMEM; } hci_send_frame(hdev, skb); return 0; } EXPORT_SYMBOL(__hci_cmd_send); /* Get data from the previously sent command */ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) { struct hci_command_hdr *hdr; if (!hdev->sent_cmd) return NULL; hdr = (void *) hdev->sent_cmd->data; if (hdr->opcode != cpu_to_le16(opcode)) return NULL; BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE; } /* Get data from last received event */ void *hci_recv_event_data(struct hci_dev *hdev, __u8 event) { struct hci_event_hdr *hdr; int offset; if (!hdev->recv_event) return NULL; hdr = (void *)hdev->recv_event->data; offset = sizeof(*hdr); if (hdr->evt != event) { /* In case of LE metaevent check the subevent match */ if (hdr->evt == HCI_EV_LE_META) { struct hci_ev_le_meta *ev; ev = (void *)hdev->recv_event->data + offset; offset += sizeof(*ev); if (ev->subevent == event) goto found; } return NULL; } found: bt_dev_dbg(hdev, "event 0x%2.2x", event); return hdev->recv_event->data + offset; } /* Send ACL data */ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags) { struct hci_acl_hdr *hdr; int len = skb->len; skb_push(skb, HCI_ACL_HDR_SIZE); skb_reset_transport_header(skb); hdr = (struct hci_acl_hdr *)skb_transport_header(skb); hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags)); hdr->dlen = cpu_to_le16(len); } static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue, struct sk_buff *skb, __u16 flags) { struct hci_conn *conn = chan->conn; struct hci_dev *hdev = conn->hdev; struct sk_buff *list; skb->len = skb_headlen(skb); skb->data_len = 0; hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT; switch (hdev->dev_type) { case HCI_PRIMARY: hci_add_acl_hdr(skb, conn->handle, flags); break; case HCI_AMP: hci_add_acl_hdr(skb, chan->handle, flags); break; default: bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type); return; } list = skb_shinfo(skb)->frag_list; if (!list) { /* Non fragmented */ BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len); skb_queue_tail(queue, skb); } else { /* Fragmented */ BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); skb_shinfo(skb)->frag_list = NULL; /* Queue all fragments atomically. We need to use spin_lock_bh * here because of 6LoWPAN links, as there this function is * called from softirq and using normal spin lock could cause * deadlocks. 
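 *
 * (With a plain spin_lock(), the process-context holder could be
 * interrupted on the same CPU by the softirq path acquiring the same
 * lock, which would then spin forever; disabling bottom halves with
 * spin_lock_bh() prevents that.)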
*/ spin_lock_bh(&queue->lock); __skb_queue_tail(queue, skb); flags &= ~ACL_START; flags |= ACL_CONT; do { skb = list; list = list->next; hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT; hci_add_acl_hdr(skb, conn->handle, flags); BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); __skb_queue_tail(queue, skb); } while (list); spin_unlock_bh(&queue->lock); } } void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags) { struct hci_dev *hdev = chan->conn->hdev; BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags); hci_queue_acl(chan, &chan->data_q, skb, flags); queue_work(hdev->workqueue, &hdev->tx_work); } /* Send SCO data */ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) { struct hci_dev *hdev = conn->hdev; struct hci_sco_hdr hdr; BT_DBG("%s len %d", hdev->name, skb->len); hdr.handle = cpu_to_le16(conn->handle); hdr.dlen = skb->len; skb_push(skb, HCI_SCO_HDR_SIZE); skb_reset_transport_header(skb); memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE); hci_skb_pkt_type(skb) = HCI_SCODATA_PKT; skb_queue_tail(&conn->data_q, skb); queue_work(hdev->workqueue, &hdev->tx_work); } /* Send ISO data */ static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags) { struct hci_iso_hdr *hdr; int len = skb->len; skb_push(skb, HCI_ISO_HDR_SIZE); skb_reset_transport_header(skb); hdr = (struct hci_iso_hdr *)skb_transport_header(skb); hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags)); hdr->dlen = cpu_to_le16(len); } static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue, struct sk_buff *skb) { struct hci_dev *hdev = conn->hdev; struct sk_buff *list; __u16 flags; skb->len = skb_headlen(skb); skb->data_len = 0; hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; list = skb_shinfo(skb)->frag_list; flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00); hci_add_iso_hdr(skb, conn->handle, flags); if (!list) { /* Non fragmented */ BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len); skb_queue_tail(queue, skb); } else { /* Fragmented */ BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); skb_shinfo(skb)->frag_list = NULL; __skb_queue_tail(queue, skb); do { skb = list; list = list->next; hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END, 0x00); hci_add_iso_hdr(skb, conn->handle, flags); BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); __skb_queue_tail(queue, skb); } while (list); } } void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb) { struct hci_dev *hdev = conn->hdev; BT_DBG("%s len %d", hdev->name, skb->len); hci_queue_iso(conn, &conn->data_q, skb); queue_work(hdev->workqueue, &hdev->tx_work); } /* ---- HCI TX task (outgoing data) ---- */ /* HCI Connection scheduler */ static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote) { struct hci_dev *hdev; int cnt, q; if (!conn) { *quote = 0; return; } hdev = conn->hdev; switch (conn->type) { case ACL_LINK: cnt = hdev->acl_cnt; break; case AMP_LINK: cnt = hdev->block_cnt; break; case SCO_LINK: case ESCO_LINK: cnt = hdev->sco_cnt; break; case LE_LINK: cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; break; case ISO_LINK: cnt = hdev->iso_mtu ? hdev->iso_cnt : hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; break; default: cnt = 0; bt_dev_err(hdev, "unknown link type %d", conn->type); } q = cnt / num; *quote = q ? 
q : 1; } static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *conn = NULL, *c; unsigned int num = 0, min = ~0; /* We don't have to lock device here. Connections are always * added and removed with TX task disabled. */ rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { if (c->type != type || skb_queue_empty(&c->data_q)) continue; if (c->state != BT_CONNECTED && c->state != BT_CONFIG) continue; num++; if (c->sent < min) { min = c->sent; conn = c; } if (hci_conn_num(hdev, type) == num) break; } rcu_read_unlock(); hci_quote_sent(conn, num, quote); BT_DBG("conn %p quote %d", conn, *quote); return conn; } static void hci_link_tx_to(struct hci_dev *hdev, __u8 type) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; bt_dev_err(hdev, "link tx timeout"); rcu_read_lock(); /* Kill stalled connections */ list_for_each_entry_rcu(c, &h->list, list) { if (c->type == type && c->sent) { bt_dev_err(hdev, "killing stalled connection %pMR", &c->dst); /* hci_disconnect might sleep, so, we have to release * the RCU read lock before calling it. */ rcu_read_unlock(); hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM); rcu_read_lock(); } } rcu_read_unlock(); } static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, int *quote) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_chan *chan = NULL; unsigned int num = 0, min = ~0, cur_prio = 0; struct hci_conn *conn; int conn_num = 0; BT_DBG("%s", hdev->name); rcu_read_lock(); list_for_each_entry_rcu(conn, &h->list, list) { struct hci_chan *tmp; if (conn->type != type) continue; if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) continue; conn_num++; list_for_each_entry_rcu(tmp, &conn->chan_list, list) { struct sk_buff *skb; if (skb_queue_empty(&tmp->data_q)) continue; skb = skb_peek(&tmp->data_q); if (skb->priority < cur_prio) continue; if (skb->priority > cur_prio) { num = 0; min = ~0; cur_prio = skb->priority; } num++; if (conn->sent < min) { min = conn->sent; chan = tmp; } } if (hci_conn_num(hdev, type) == conn_num) break; } rcu_read_unlock(); if (!chan) return NULL; hci_quote_sent(chan->conn, num, quote); BT_DBG("chan %p quote %d", chan, *quote); return chan; } static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *conn; int num = 0; BT_DBG("%s", hdev->name); rcu_read_lock(); list_for_each_entry_rcu(conn, &h->list, list) { struct hci_chan *chan; if (conn->type != type) continue; if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) continue; num++; list_for_each_entry_rcu(chan, &conn->chan_list, list) { struct sk_buff *skb; if (chan->sent) { chan->sent = 0; continue; } if (skb_queue_empty(&chan->data_q)) continue; skb = skb_peek(&chan->data_q); if (skb->priority >= HCI_PRIO_MAX - 1) continue; skb->priority = HCI_PRIO_MAX - 1; BT_DBG("chan %p skb %p promoted to %d", chan, skb, skb->priority); } if (hci_conn_num(hdev, type) == num) break; } rcu_read_unlock(); } static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb) { /* Calculate count of blocks used by this packet */ return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len); } static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type) { unsigned long last_tx; if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) return; switch (type) { case LE_LINK: last_tx = hdev->le_last_tx; break; default: last_tx = hdev->acl_last_tx; break; } /* tx timeout must be 
longer than maximum link supervision timeout * (40.9 seconds) */ if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT)) hci_link_tx_to(hdev, type); } /* Schedule SCO */ static void hci_sched_sco(struct hci_dev *hdev) { struct hci_conn *conn; struct sk_buff *skb; int quote; BT_DBG("%s", hdev->name); if (!hci_conn_num(hdev, SCO_LINK)) return; while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) { while (quote-- && (skb = skb_dequeue(&conn->data_q))) { BT_DBG("skb %p len %d", skb, skb->len); hci_send_frame(hdev, skb); conn->sent++; if (conn->sent == ~0) conn->sent = 0; } } } static void hci_sched_esco(struct hci_dev *hdev) { struct hci_conn *conn; struct sk_buff *skb; int quote; BT_DBG("%s", hdev->name); if (!hci_conn_num(hdev, ESCO_LINK)) return; while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, "e))) { while (quote-- && (skb = skb_dequeue(&conn->data_q))) { BT_DBG("skb %p len %d", skb, skb->len); hci_send_frame(hdev, skb); conn->sent++; if (conn->sent == ~0) conn->sent = 0; } } } static void hci_sched_acl_pkt(struct hci_dev *hdev) { unsigned int cnt = hdev->acl_cnt; struct hci_chan *chan; struct sk_buff *skb; int quote; __check_timeout(hdev, cnt, ACL_LINK); while (hdev->acl_cnt && (chan = hci_chan_sent(hdev, ACL_LINK, "e))) { u32 priority = (skb_peek(&chan->data_q))->priority; while (quote-- && (skb = skb_peek(&chan->data_q))) { BT_DBG("chan %p skb %p len %d priority %u", chan, skb, skb->len, skb->priority); /* Stop if priority has changed */ if (skb->priority < priority) break; skb = skb_dequeue(&chan->data_q); hci_conn_enter_active_mode(chan->conn, bt_cb(skb)->force_active); hci_send_frame(hdev, skb); hdev->acl_last_tx = jiffies; hdev->acl_cnt--; chan->sent++; chan->conn->sent++; /* Send pending SCO packets right away */ hci_sched_sco(hdev); hci_sched_esco(hdev); } } if (cnt != hdev->acl_cnt) hci_prio_recalculate(hdev, ACL_LINK); } static void hci_sched_acl_blk(struct hci_dev *hdev) { unsigned int cnt = hdev->block_cnt; struct hci_chan *chan; struct sk_buff *skb; int quote; u8 type; BT_DBG("%s", hdev->name); if (hdev->dev_type == HCI_AMP) type = AMP_LINK; else type = ACL_LINK; __check_timeout(hdev, cnt, type); while (hdev->block_cnt > 0 && (chan = hci_chan_sent(hdev, type, "e))) { u32 priority = (skb_peek(&chan->data_q))->priority; while (quote > 0 && (skb = skb_peek(&chan->data_q))) { int blocks; BT_DBG("chan %p skb %p len %d priority %u", chan, skb, skb->len, skb->priority); /* Stop if priority has changed */ if (skb->priority < priority) break; skb = skb_dequeue(&chan->data_q); blocks = __get_blocks(hdev, skb); if (blocks > hdev->block_cnt) return; hci_conn_enter_active_mode(chan->conn, bt_cb(skb)->force_active); hci_send_frame(hdev, skb); hdev->acl_last_tx = jiffies; hdev->block_cnt -= blocks; quote -= blocks; chan->sent += blocks; chan->conn->sent += blocks; } } if (cnt != hdev->block_cnt) hci_prio_recalculate(hdev, type); } static void hci_sched_acl(struct hci_dev *hdev) { BT_DBG("%s", hdev->name); /* No ACL link over BR/EDR controller */ if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY) return; /* No AMP link over AMP controller */ if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP) return; switch (hdev->flow_ctl_mode) { case HCI_FLOW_CTL_MODE_PACKET_BASED: hci_sched_acl_pkt(hdev); break; case HCI_FLOW_CTL_MODE_BLOCK_BASED: hci_sched_acl_blk(hdev); break; } } static void hci_sched_le(struct hci_dev *hdev) { struct hci_chan *chan; struct sk_buff *skb; int quote, cnt, tmp; BT_DBG("%s", hdev->name); if 
(!hci_conn_num(hdev, LE_LINK)) return; cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt; __check_timeout(hdev, cnt, LE_LINK); tmp = cnt; while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) { u32 priority = (skb_peek(&chan->data_q))->priority; while (quote-- && (skb = skb_peek(&chan->data_q))) { BT_DBG("chan %p skb %p len %d priority %u", chan, skb, skb->len, skb->priority); /* Stop if priority has changed */ if (skb->priority < priority) break; skb = skb_dequeue(&chan->data_q); hci_send_frame(hdev, skb); hdev->le_last_tx = jiffies; cnt--; chan->sent++; chan->conn->sent++; /* Send pending SCO packets right away */ hci_sched_sco(hdev); hci_sched_esco(hdev); } } if (hdev->le_pkts) hdev->le_cnt = cnt; else hdev->acl_cnt = cnt; if (cnt != tmp) hci_prio_recalculate(hdev, LE_LINK); } /* Schedule CIS */ static void hci_sched_iso(struct hci_dev *hdev) { struct hci_conn *conn; struct sk_buff *skb; int quote, *cnt; BT_DBG("%s", hdev->name); if (!hci_conn_num(hdev, ISO_LINK)) return; cnt = hdev->iso_pkts ? &hdev->iso_cnt : hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt; while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, "e))) { while (quote-- && (skb = skb_dequeue(&conn->data_q))) { BT_DBG("skb %p len %d", skb, skb->len); hci_send_frame(hdev, skb); conn->sent++; if (conn->sent == ~0) conn->sent = 0; (*cnt)--; } } } static void hci_tx_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work); struct sk_buff *skb; BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt); if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { /* Schedule queues and send stuff to HCI driver */ hci_sched_sco(hdev); hci_sched_esco(hdev); hci_sched_iso(hdev); hci_sched_acl(hdev); hci_sched_le(hdev); } /* Send next queued raw (unknown type) packet */ while ((skb = skb_dequeue(&hdev->raw_q))) hci_send_frame(hdev, skb); } /* ----- HCI RX task (incoming data processing) ----- */ /* ACL data packet */ static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_acl_hdr *hdr = (void *) skb->data; struct hci_conn *conn; __u16 handle, flags; skb_pull(skb, HCI_ACL_HDR_SIZE); handle = __le16_to_cpu(hdr->handle); flags = hci_flags(handle); handle = hci_handle(handle); BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len, handle, flags); hdev->stat.acl_rx++; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, handle); hci_dev_unlock(hdev); if (conn) { hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF); /* Send to upper protocol */ l2cap_recv_acldata(conn, skb, flags); return; } else { bt_dev_err(hdev, "ACL packet for unknown connection handle %d", handle); } kfree_skb(skb); } /* SCO data packet */ static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_sco_hdr *hdr = (void *) skb->data; struct hci_conn *conn; __u16 handle, flags; skb_pull(skb, HCI_SCO_HDR_SIZE); handle = __le16_to_cpu(hdr->handle); flags = hci_flags(handle); handle = hci_handle(handle); BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len, handle, flags); hdev->stat.sco_rx++; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, handle); hci_dev_unlock(hdev); if (conn) { /* Send to upper protocol */ hci_skb_pkt_status(skb) = flags & 0x03; sco_recv_scodata(conn, skb); return; } else { bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d", handle); } kfree_skb(skb); } static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb) 
{ struct hci_iso_hdr *hdr; struct hci_conn *conn; __u16 handle, flags; hdr = skb_pull_data(skb, sizeof(*hdr)); if (!hdr) { bt_dev_err(hdev, "ISO packet too small"); goto drop; } handle = __le16_to_cpu(hdr->handle); flags = hci_flags(handle); handle = hci_handle(handle); bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len, handle, flags); hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, handle); hci_dev_unlock(hdev); if (!conn) { bt_dev_err(hdev, "ISO packet for unknown connection handle %d", handle); goto drop; } /* Send to upper protocol */ iso_recv(conn, skb, flags); return; drop: kfree_skb(skb); } static bool hci_req_is_complete(struct hci_dev *hdev) { struct sk_buff *skb; skb = skb_peek(&hdev->cmd_q); if (!skb) return true; return (bt_cb(skb)->hci.req_flags & HCI_REQ_START); } static void hci_resend_last(struct hci_dev *hdev) { struct hci_command_hdr *sent; struct sk_buff *skb; u16 opcode; if (!hdev->sent_cmd) return; sent = (void *) hdev->sent_cmd->data; opcode = __le16_to_cpu(sent->opcode); if (opcode == HCI_OP_RESET) return; skb = skb_clone(hdev->sent_cmd, GFP_KERNEL); if (!skb) return; skb_queue_head(&hdev->cmd_q, skb); queue_work(hdev->workqueue, &hdev->cmd_work); } void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status, hci_req_complete_t *req_complete, hci_req_complete_skb_t *req_complete_skb) { struct sk_buff *skb; unsigned long flags; BT_DBG("opcode 0x%04x status 0x%02x", opcode, status); /* If the completed command doesn't match the last one that was * sent we need to do special handling of it. */ if (!hci_sent_cmd_data(hdev, opcode)) { /* Some CSR based controllers generate a spontaneous * reset complete event during init and any pending * command will never be completed. In such a case we * need to resend whatever was the last sent * command. */ if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET) hci_resend_last(hdev); return; } /* If we reach this point this event matches the last command sent */ hci_dev_clear_flag(hdev, HCI_CMD_PENDING); /* If the command succeeded and there's still more commands in * this request the request is not yet complete. */ if (!status && !hci_req_is_complete(hdev)) return; /* If this was the last command in a request the complete * callback would be found in hdev->sent_cmd instead of the * command queue (hdev->cmd_q). */ if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) { *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb; return; } if (bt_cb(hdev->sent_cmd)->hci.req_complete) { *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete; return; } /* Remove all pending commands belonging to this request */ spin_lock_irqsave(&hdev->cmd_q.lock, flags); while ((skb = __skb_dequeue(&hdev->cmd_q))) { if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) { __skb_queue_head(&hdev->cmd_q, skb); break; } if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) *req_complete_skb = bt_cb(skb)->hci.req_complete_skb; else *req_complete = bt_cb(skb)->hci.req_complete; dev_kfree_skb_irq(skb); } spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); } static void hci_rx_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work); struct sk_buff *skb; BT_DBG("%s", hdev->name); /* The kcov_remote functions used for collecting packet parsing * coverage information from this background thread and associate * the coverage with the syscall's thread which originally injected * the packet. This helps fuzzing the kernel. 
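 *
 * Concretely, every skb dequeued in the loop below is bracketed by
 * kcov_remote_start_common(skb_get_kcov_handle(skb)) and
 * kcov_remote_stop(), so the parsing coverage of each packet is
 * attributed individually.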
*/ for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) { kcov_remote_start_common(skb_get_kcov_handle(skb)); /* Send copy to monitor */ hci_send_to_monitor(hdev, skb); if (atomic_read(&hdev->promisc)) { /* Send copy to the sockets */ hci_send_to_sock(hdev, skb); } /* If the device has been opened in HCI_USER_CHANNEL, * userspace has exclusive access to the device. * While the device is in HCI_INIT we still need to pass * data packets to the driver in order to complete its setup(). */ if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && !test_bit(HCI_INIT, &hdev->flags)) { kfree_skb(skb); continue; } if (test_bit(HCI_INIT, &hdev->flags)) { /* Don't process data packets in these states. */ switch (hci_skb_pkt_type(skb)) { case HCI_ACLDATA_PKT: case HCI_SCODATA_PKT: case HCI_ISODATA_PKT: kfree_skb(skb); continue; } } /* Process frame */ switch (hci_skb_pkt_type(skb)) { case HCI_EVENT_PKT: BT_DBG("%s Event packet", hdev->name); hci_event_packet(hdev, skb); break; case HCI_ACLDATA_PKT: BT_DBG("%s ACL data packet", hdev->name); hci_acldata_packet(hdev, skb); break; case HCI_SCODATA_PKT: BT_DBG("%s SCO data packet", hdev->name); hci_scodata_packet(hdev, skb); break; case HCI_ISODATA_PKT: BT_DBG("%s ISO data packet", hdev->name); hci_isodata_packet(hdev, skb); break; default: kfree_skb(skb); break; } } } static void hci_cmd_work(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work); struct sk_buff *skb; BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name, atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q)); /* Send queued commands */ if (atomic_read(&hdev->cmd_cnt)) { skb = skb_dequeue(&hdev->cmd_q); if (!skb) return; kfree_skb(hdev->sent_cmd); hdev->sent_cmd = skb_clone(skb, GFP_KERNEL); if (hdev->sent_cmd) { int res; if (hci_req_status_pend(hdev)) hci_dev_set_flag(hdev, HCI_CMD_PENDING); atomic_dec(&hdev->cmd_cnt); res = hci_send_frame(hdev, skb); if (res < 0) __hci_cmd_sync_cancel(hdev, -res); rcu_read_lock(); if (test_bit(HCI_RESET, &hdev->flags) || hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE)) cancel_delayed_work(&hdev->cmd_timer); else queue_delayed_work(hdev->workqueue, &hdev->cmd_timer, HCI_CMD_TIMEOUT); rcu_read_unlock(); } else { skb_queue_head(&hdev->cmd_q, skb); queue_work(hdev->workqueue, &hdev->cmd_work); } } }
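/*
 * Usage sketch (illustrative only, not part of this file's logic): a
 * transport driver that has read a complete HCI event from its hardware
 * would typically hand it to the core via hci_recv_frame(). "buf" and
 * "count" are hypothetical driver-local variables.
 *
 *	skb = bt_skb_alloc(count, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	skb_put_data(skb, buf, count);
 *	return hci_recv_frame(hdev, skb);
 */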