/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth kernel library. */

#define pr_fmt(fmt) "Bluetooth: " fmt

#include <linux/export.h>

#include <net/bluetooth/bluetooth.h>

/**
 * baswap() - Swaps the order of a bd address
 * @dst: Pointer to a bdaddr_t struct that will store the swapped
 *       bd address.
 * @src: Pointer to the bdaddr_t struct to be swapped.
 *
 * This function reverses the byte order of a Bluetooth device
 * address.
 */
void baswap(bdaddr_t *dst, const bdaddr_t *src)
{
	const unsigned char *s = (const unsigned char *)src;
	unsigned char *d = (unsigned char *)dst;
	unsigned int i;

	for (i = 0; i < 6; i++)
		d[i] = s[5 - i];
}
EXPORT_SYMBOL(baswap);

/**
 * bt_to_errno() - Bluetooth error codes to standard errno
 * @code: Bluetooth error code to be converted
 *
 * This function takes a Bluetooth error code as input and converts
 * it to an equivalent Unix/standard errno value.
 *
 * Return:
 *
 * If the bt error code is known, an equivalent Unix errno value
 * is returned.
 * If the given bt error code is not known, ENOSYS is returned.
*/ int bt_to_errno(__u16 code) { switch (code) { case 0: return 0; case 0x01: return EBADRQC; case 0x02: return ENOTCONN; case 0x03: return EIO; case 0x04: case 0x3c: return EHOSTDOWN; case 0x05: return EACCES; case 0x06: return EBADE; case 0x07: return ENOMEM; case 0x08: return ETIMEDOUT; case 0x09: return EMLINK; case 0x0a: return EMLINK; case 0x0b: return EALREADY; case 0x0c: return EBUSY; case 0x0d: case 0x0e: case 0x0f: return ECONNREFUSED; case 0x10: return ETIMEDOUT; case 0x11: case 0x27: case 0x29: case 0x20: return EOPNOTSUPP; case 0x12: return EINVAL; case 0x13: case 0x14: case 0x15: return ECONNRESET; case 0x16: return ECONNABORTED; case 0x17: return ELOOP; case 0x18: return EACCES; case 0x1a: return EPROTONOSUPPORT; case 0x1b: return ECONNREFUSED; case 0x19: case 0x1e: case 0x23: case 0x24: case 0x25: return EPROTO; default: return ENOSYS; } } EXPORT_SYMBOL(bt_to_errno); /** * bt_status() - Standard errno value to Bluetooth error code * @err: Unix/standard errno value to be converted * * This function converts a standard/Unix errno value to an * equivalent Bluetooth error code. * * Return: Bluetooth error code. * * If the given errno is not found, 0x1f is returned by default * which indicates an unspecified error. * For err >= 0, no conversion is performed, and the same value * is immediately returned. */ __u8 bt_status(int err) { if (err >= 0) return err; switch (err) { case -EBADRQC: return 0x01; case -ENOTCONN: return 0x02; case -EIO: return 0x03; case -EHOSTDOWN: return 0x04; case -EACCES: return 0x05; case -EBADE: return 0x06; case -ENOMEM: return 0x07; case -ETIMEDOUT: return 0x08; case -EMLINK: return 0x09; case -EALREADY: return 0x0b; case -EBUSY: return 0x0c; case -ECONNREFUSED: return 0x0d; case -EOPNOTSUPP: return 0x11; case -EINVAL: return 0x12; case -ECONNRESET: return 0x13; case -ECONNABORTED: return 0x16; case -ELOOP: return 0x17; case -EPROTONOSUPPORT: return 0x1a; case -EPROTO: return 0x19; default: return 0x1f; } } EXPORT_SYMBOL(bt_status); /** * bt_info() - Log Bluetooth information message * @format: Message's format string */ void bt_info(const char *format, ...) { struct va_format vaf; va_list args; va_start(args, format); vaf.fmt = format; vaf.va = &args; pr_info("%pV", &vaf); va_end(args); } EXPORT_SYMBOL(bt_info); /** * bt_warn() - Log Bluetooth warning message * @format: Message's format string */ void bt_warn(const char *format, ...) { struct va_format vaf; va_list args; va_start(args, format); vaf.fmt = format; vaf.va = &args; pr_warn("%pV", &vaf); va_end(args); } EXPORT_SYMBOL(bt_warn); /** * bt_err() - Log Bluetooth error message * @format: Message's format string */ void bt_err(const char *format, ...) { struct va_format vaf; va_list args; va_start(args, format); vaf.fmt = format; vaf.va = &args; pr_err("%pV", &vaf); va_end(args); } EXPORT_SYMBOL(bt_err); #ifdef CONFIG_BT_FEATURE_DEBUG static bool debug_enable; void bt_dbg_set(bool enable) { debug_enable = enable; } bool bt_dbg_get(void) { return debug_enable; } /** * bt_dbg() - Log Bluetooth debugging message * @format: Message's format string */ void bt_dbg(const char *format, ...) 
{ struct va_format vaf; va_list args; if (likely(!debug_enable)) return; va_start(args, format); vaf.fmt = format; vaf.va = &args; printk(KERN_DEBUG pr_fmt("%pV"), &vaf); va_end(args); } EXPORT_SYMBOL(bt_dbg); #endif /** * bt_warn_ratelimited() - Log rate-limited Bluetooth warning message * @format: Message's format string * * This functions works like bt_warn, but it uses rate limiting * to prevent the message from being logged too often. */ void bt_warn_ratelimited(const char *format, ...) { struct va_format vaf; va_list args; va_start(args, format); vaf.fmt = format; vaf.va = &args; pr_warn_ratelimited("%pV", &vaf); va_end(args); } EXPORT_SYMBOL(bt_warn_ratelimited); /** * bt_err_ratelimited() - Log rate-limited Bluetooth error message * @format: Message's format string * * This functions works like bt_err, but it uses rate limiting * to prevent the message from being logged too often. */ void bt_err_ratelimited(const char *format, ...) { struct va_format vaf; va_list args; va_start(args, format); vaf.fmt = format; vaf.va = &args; pr_err_ratelimited("%pV", &vaf); va_end(args); } EXPORT_SYMBOL(bt_err_ratelimited);
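/*
 * Editor's illustrative sketch, not part of the original file: it shows how a
 * caller might combine bt_to_errno() with the bt_err() logging helper defined
 * above when checking an HCI status byte. The function name and the context
 * in which the status arrives are hypothetical.
 */
#if 0	/* example only, not compiled */
static int example_handle_cmd_status(u8 status)
{
	/* bt_to_errno() returns 0 on success or a positive errno value. */
	int err = bt_to_errno(status);

	if (err)
		bt_err("HCI command failed: status 0x%2.2x (errno %d)",
		       status, err);

	/* Kernel convention: hand a negative errno back to the caller. */
	return -err;
}
#endif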
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Bluetooth HCI UART driver for marvell devices
 *
 * Copyright (C) 2016 Marvell International Ltd.
* Copyright (C) 2016 Intel Corporation */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/firmware.h> #include <linux/module.h> #include <linux/tty.h> #include <linux/of.h> #include <linux/serdev.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "hci_uart.h" #define HCI_FW_REQ_PKT 0xA5 #define HCI_CHIP_VER_PKT 0xAA #define MRVL_ACK 0x5A #define MRVL_NAK 0xBF #define MRVL_RAW_DATA 0x1F #define MRVL_SET_BAUDRATE 0xFC09 enum { STATE_CHIP_VER_PENDING, STATE_FW_REQ_PENDING, STATE_FW_LOADED, }; struct mrvl_data { struct sk_buff *rx_skb; struct sk_buff_head txq; struct sk_buff_head rawq; unsigned long flags; unsigned int tx_len; u8 id, rev; }; struct mrvl_serdev { struct hci_uart hu; }; struct hci_mrvl_pkt { __le16 lhs; __le16 rhs; } __packed; #define HCI_MRVL_PKT_SIZE 4 static int mrvl_open(struct hci_uart *hu) { struct mrvl_data *mrvl; int ret; BT_DBG("hu %p", hu); if (!hci_uart_has_flow_control(hu)) return -EOPNOTSUPP; mrvl = kzalloc(sizeof(*mrvl), GFP_KERNEL); if (!mrvl) return -ENOMEM; skb_queue_head_init(&mrvl->txq); skb_queue_head_init(&mrvl->rawq); set_bit(STATE_CHIP_VER_PENDING, &mrvl->flags); hu->priv = mrvl; if (hu->serdev) { ret = serdev_device_open(hu->serdev); if (ret) goto err; } return 0; err: kfree(mrvl); return ret; } static int mrvl_close(struct hci_uart *hu) { struct mrvl_data *mrvl = hu->priv; BT_DBG("hu %p", hu); if (hu->serdev) serdev_device_close(hu->serdev); skb_queue_purge(&mrvl->txq); skb_queue_purge(&mrvl->rawq); kfree_skb(mrvl->rx_skb); kfree(mrvl); hu->priv = NULL; return 0; } static int mrvl_flush(struct hci_uart *hu) { struct mrvl_data *mrvl = hu->priv; BT_DBG("hu %p", hu); skb_queue_purge(&mrvl->txq); skb_queue_purge(&mrvl->rawq); return 0; } static struct sk_buff *mrvl_dequeue(struct hci_uart *hu) { struct mrvl_data *mrvl = hu->priv; struct sk_buff *skb; skb = skb_dequeue(&mrvl->txq); if (!skb) { /* Any raw data ? 
*/ skb = skb_dequeue(&mrvl->rawq); } else { /* Prepend skb with frame type */ memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); } return skb; } static int mrvl_enqueue(struct hci_uart *hu, struct sk_buff *skb) { struct mrvl_data *mrvl = hu->priv; skb_queue_tail(&mrvl->txq, skb); return 0; } static void mrvl_send_ack(struct hci_uart *hu, unsigned char type) { struct mrvl_data *mrvl = hu->priv; struct sk_buff *skb; /* No H4 payload, only 1 byte header */ skb = bt_skb_alloc(0, GFP_ATOMIC); if (!skb) { bt_dev_err(hu->hdev, "Unable to alloc ack/nak packet"); return; } hci_skb_pkt_type(skb) = type; skb_queue_tail(&mrvl->txq, skb); hci_uart_tx_wakeup(hu); } static int mrvl_recv_fw_req(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_mrvl_pkt *pkt = (void *)skb->data; struct hci_uart *hu = hci_get_drvdata(hdev); struct mrvl_data *mrvl = hu->priv; int ret = 0; if ((pkt->lhs ^ pkt->rhs) != 0xffff) { bt_dev_err(hdev, "Corrupted mrvl header"); mrvl_send_ack(hu, MRVL_NAK); ret = -EINVAL; goto done; } mrvl_send_ack(hu, MRVL_ACK); if (!test_bit(STATE_FW_REQ_PENDING, &mrvl->flags)) { bt_dev_err(hdev, "Received unexpected firmware request"); ret = -EINVAL; goto done; } mrvl->tx_len = le16_to_cpu(pkt->lhs); clear_bit(STATE_FW_REQ_PENDING, &mrvl->flags); smp_mb__after_atomic(); wake_up_bit(&mrvl->flags, STATE_FW_REQ_PENDING); done: kfree_skb(skb); return ret; } static int mrvl_recv_chip_ver(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_mrvl_pkt *pkt = (void *)skb->data; struct hci_uart *hu = hci_get_drvdata(hdev); struct mrvl_data *mrvl = hu->priv; u16 version = le16_to_cpu(pkt->lhs); int ret = 0; if ((pkt->lhs ^ pkt->rhs) != 0xffff) { bt_dev_err(hdev, "Corrupted mrvl header"); mrvl_send_ack(hu, MRVL_NAK); ret = -EINVAL; goto done; } mrvl_send_ack(hu, MRVL_ACK); if (!test_bit(STATE_CHIP_VER_PENDING, &mrvl->flags)) { bt_dev_err(hdev, "Received unexpected chip version"); goto done; } mrvl->id = version; mrvl->rev = version >> 8; bt_dev_info(hdev, "Controller id = %x, rev = %x", mrvl->id, mrvl->rev); clear_bit(STATE_CHIP_VER_PENDING, &mrvl->flags); smp_mb__after_atomic(); wake_up_bit(&mrvl->flags, STATE_CHIP_VER_PENDING); done: kfree_skb(skb); return ret; } #define HCI_RECV_CHIP_VER \ .type = HCI_CHIP_VER_PKT, \ .hlen = HCI_MRVL_PKT_SIZE, \ .loff = 0, \ .lsize = 0, \ .maxlen = HCI_MRVL_PKT_SIZE #define HCI_RECV_FW_REQ \ .type = HCI_FW_REQ_PKT, \ .hlen = HCI_MRVL_PKT_SIZE, \ .loff = 0, \ .lsize = 0, \ .maxlen = HCI_MRVL_PKT_SIZE static const struct h4_recv_pkt mrvl_recv_pkts[] = { { H4_RECV_ACL, .recv = hci_recv_frame }, { H4_RECV_SCO, .recv = hci_recv_frame }, { H4_RECV_EVENT, .recv = hci_recv_frame }, { HCI_RECV_FW_REQ, .recv = mrvl_recv_fw_req }, { HCI_RECV_CHIP_VER, .recv = mrvl_recv_chip_ver }, }; static int mrvl_recv(struct hci_uart *hu, const void *data, int count) { struct mrvl_data *mrvl = hu->priv; if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) return -EUNATCH; /* We might receive some noise when there is no firmware loaded. Therefore, * we drop data if the firmware is not loaded yet and if there is no fw load * request pending. 
*/ if (!test_bit(STATE_FW_REQ_PENDING, &mrvl->flags) && !test_bit(STATE_FW_LOADED, &mrvl->flags)) return count; mrvl->rx_skb = h4_recv_buf(hu, mrvl->rx_skb, data, count, mrvl_recv_pkts, ARRAY_SIZE(mrvl_recv_pkts)); if (IS_ERR(mrvl->rx_skb)) { int err = PTR_ERR(mrvl->rx_skb); bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err); mrvl->rx_skb = NULL; return err; } return count; } static int mrvl_load_firmware(struct hci_dev *hdev, const char *name) { struct hci_uart *hu = hci_get_drvdata(hdev); struct mrvl_data *mrvl = hu->priv; const struct firmware *fw = NULL; const u8 *fw_ptr, *fw_max; int err; err = request_firmware(&fw, name, &hdev->dev); if (err < 0) { bt_dev_err(hdev, "Failed to load firmware file %s", name); return err; } fw_ptr = fw->data; fw_max = fw->data + fw->size; bt_dev_info(hdev, "Loading %s", name); set_bit(STATE_FW_REQ_PENDING, &mrvl->flags); while (fw_ptr <= fw_max) { struct sk_buff *skb; /* Controller drives the firmware load by sending firmware * request packets containing the expected fragment size. */ err = wait_on_bit_timeout(&mrvl->flags, STATE_FW_REQ_PENDING, TASK_INTERRUPTIBLE, msecs_to_jiffies(2000)); if (err == 1) { bt_dev_err(hdev, "Firmware load interrupted"); err = -EINTR; break; } else if (err) { bt_dev_err(hdev, "Firmware request timeout"); err = -ETIMEDOUT; break; } bt_dev_dbg(hdev, "Firmware request, expecting %d bytes", mrvl->tx_len); if (fw_ptr == fw_max) { /* Controller requests a null size once firmware is * fully loaded. If controller expects more data, there * is an issue. */ if (!mrvl->tx_len) { bt_dev_info(hdev, "Firmware loading complete"); } else { bt_dev_err(hdev, "Firmware loading failure"); err = -EINVAL; } break; } if (fw_ptr + mrvl->tx_len > fw_max) { mrvl->tx_len = fw_max - fw_ptr; bt_dev_dbg(hdev, "Adjusting tx_len to %d", mrvl->tx_len); } skb = bt_skb_alloc(mrvl->tx_len, GFP_KERNEL); if (!skb) { bt_dev_err(hdev, "Failed to alloc mem for FW packet"); err = -ENOMEM; break; } bt_cb(skb)->pkt_type = MRVL_RAW_DATA; skb_put_data(skb, fw_ptr, mrvl->tx_len); fw_ptr += mrvl->tx_len; set_bit(STATE_FW_REQ_PENDING, &mrvl->flags); skb_queue_tail(&mrvl->rawq, skb); hci_uart_tx_wakeup(hu); } release_firmware(fw); return err; } static int mrvl_setup(struct hci_uart *hu) { int err; struct mrvl_data *mrvl = hu->priv; hci_uart_set_flow_control(hu, true); err = mrvl_load_firmware(hu->hdev, "mrvl/helper_uart_3000000.bin"); if (err) { bt_dev_err(hu->hdev, "Unable to download firmware helper"); return -EINVAL; } /* Let the final ack go out before switching the baudrate */ hci_uart_wait_until_sent(hu); if (hu->serdev) serdev_device_set_baudrate(hu->serdev, hu->oper_speed); else hci_uart_set_baudrate(hu, hu->oper_speed); hci_uart_set_flow_control(hu, false); err = mrvl_load_firmware(hu->hdev, "mrvl/uart8897_bt.bin"); if (err) return err; set_bit(STATE_FW_LOADED, &mrvl->flags); return 0; } static int mrvl_set_baudrate(struct hci_uart *hu, unsigned int speed) { int err; struct mrvl_data *mrvl = hu->priv; __le32 speed_le = cpu_to_le32(speed); /* The firmware might be loaded by the Wifi driver over SDIO. We wait * up to 10s for the CTS to go up. Afterward, we know that the firmware * is ready. 
*/ err = serdev_device_wait_for_cts(hu->serdev, true, 10000); if (err) { bt_dev_err(hu->hdev, "Wait for CTS failed with %d\n", err); return err; } set_bit(STATE_FW_LOADED, &mrvl->flags); err = __hci_cmd_sync_status(hu->hdev, MRVL_SET_BAUDRATE, sizeof(speed_le), &speed_le, HCI_INIT_TIMEOUT); if (err) { bt_dev_err(hu->hdev, "send command failed: %d", err); return err; } serdev_device_set_baudrate(hu->serdev, speed); /* We forcefully have to send a command to the bluetooth module so that * the driver detects it after a baudrate change. This is foreseen by * hci_serdev by setting HCI_UART_VND_DETECT which then causes a dummy * local version read. */ set_bit(HCI_UART_VND_DETECT, &hu->hdev_flags); return 0; } static const struct hci_uart_proto mrvl_proto_8897 = { .id = HCI_UART_MRVL, .name = "Marvell", .init_speed = 115200, .oper_speed = 3000000, .open = mrvl_open, .close = mrvl_close, .flush = mrvl_flush, .setup = mrvl_setup, .recv = mrvl_recv, .enqueue = mrvl_enqueue, .dequeue = mrvl_dequeue, }; static const struct hci_uart_proto mrvl_proto_8997 = { .id = HCI_UART_MRVL, .name = "Marvell 8997", .init_speed = 115200, .oper_speed = 3000000, .open = mrvl_open, .close = mrvl_close, .flush = mrvl_flush, .set_baudrate = mrvl_set_baudrate, .recv = mrvl_recv, .enqueue = mrvl_enqueue, .dequeue = mrvl_dequeue, }; static int mrvl_serdev_probe(struct serdev_device *serdev) { struct mrvl_serdev *mrvldev; const struct hci_uart_proto *mrvl_proto = device_get_match_data(&serdev->dev); mrvldev = devm_kzalloc(&serdev->dev, sizeof(*mrvldev), GFP_KERNEL); if (!mrvldev) return -ENOMEM; mrvldev->hu.oper_speed = mrvl_proto->oper_speed; if (mrvl_proto->set_baudrate) of_property_read_u32(serdev->dev.of_node, "max-speed", &mrvldev->hu.oper_speed); mrvldev->hu.serdev = serdev; serdev_device_set_drvdata(serdev, mrvldev); return hci_uart_register_device(&mrvldev->hu, mrvl_proto); } static void mrvl_serdev_remove(struct serdev_device *serdev) { struct mrvl_serdev *mrvldev = serdev_device_get_drvdata(serdev); hci_uart_unregister_device(&mrvldev->hu); } static const struct of_device_id __maybe_unused mrvl_bluetooth_of_match[] = { { .compatible = "mrvl,88w8897", .data = &mrvl_proto_8897}, { .compatible = "mrvl,88w8997", .data = &mrvl_proto_8997}, { }, }; MODULE_DEVICE_TABLE(of, mrvl_bluetooth_of_match); static struct serdev_device_driver mrvl_serdev_driver = { .probe = mrvl_serdev_probe, .remove = mrvl_serdev_remove, .driver = { .name = "hci_uart_mrvl", .of_match_table = of_match_ptr(mrvl_bluetooth_of_match), }, }; int __init mrvl_init(void) { serdev_device_driver_register(&mrvl_serdev_driver); return hci_uart_register_proto(&mrvl_proto_8897); } int __exit mrvl_deinit(void) { serdev_device_driver_unregister(&mrvl_serdev_driver); return hci_uart_unregister_proto(&mrvl_proto_8897); }
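/*
 * Editor's illustrative sketch, not part of the driver: it restates the
 * header integrity rule applied in mrvl_recv_fw_req() and mrvl_recv_chip_ver()
 * above, where the two 16-bit fields of struct hci_mrvl_pkt must be bitwise
 * complements of each other before the packet is ACKed. The function name is
 * hypothetical.
 */
#if 0	/* example only, not compiled */
static bool example_mrvl_hdr_valid(const struct hci_mrvl_pkt *pkt)
{
	/* lhs carries the payload (fragment length or chip version); rhs is
	 * expected to be its bitwise complement, so the XOR of the raw
	 * fields must be all ones.
	 */
	return (pkt->lhs ^ pkt->rhs) == 0xffff;
}
#endif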
// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <net/tcp.h>
#include <net/tcp_ecn.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>
#include <net/rstreason.h>
#include <net/psp.h>

static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by caller.
		 */
		return TCP_TW_ACK_OOW;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

static void twsk_rcv_nxt_update(struct tcp_timewait_sock *tcptw, u32 seq,
				u32 rcv_nxt)
{
#ifdef CONFIG_TCP_AO
	struct tcp_ao_info *ao;

	ao = rcu_dereference(tcptw->ao_info);
	if (unlikely(ao && seq < rcv_nxt))
		WRITE_ONCE(ao->rcv_sne, ao->rcv_sne + 1);
#endif
	WRITE_ONCE(tcptw->tw_rcv_nxt, seq);
}

/*
 * * Main purpose of TIME-WAIT state is to close connection gracefully,
 *   when one of ends sits in LAST-ACK or CLOSING retransmitting FIN
 *   (and, probably, tail of data) and one or more our ACKs are lost.
 * * What is TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which results in wrong conclusion, that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   It is not quite correct. This timeout is calculated so that it exceeds
 *   maximal retransmission timeout enough to allow to lose one (or more)
 *   segments sent by peer and our ACKs. This time may be calculated from RTO.
 * * When TIME-WAIT socket receives RST, it means that another end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * Second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of RFCs.
* When you compare it to RFCs, please, read section SEGMENT ARRIVES * from the very beginning. * * NOTE. With recycling (and later with fin-wait-2) TW bucket * is _not_ stateless. It means, that strictly speaking we must * spinlock it. I do not want! Well, probability of misbehaviour * is ridiculously low and, seems, we could use some mb() tricks * to avoid misread sequence numbers, states etc. --ANK * * We don't need to initialize tmp_out.sack_ok as we don't use the results */ enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, const struct tcphdr *th, u32 *tw_isn, enum skb_drop_reason *drop_reason) { struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); u32 rcv_nxt = READ_ONCE(tcptw->tw_rcv_nxt); struct tcp_options_received tmp_opt; enum skb_drop_reason psp_drop; bool paws_reject = false; int ts_recent_stamp; /* Instead of dropping immediately, wait to see what value is * returned. We will accept a non psp-encapsulated syn in the * case where TCP_TW_SYN is returned. */ psp_drop = psp_twsk_rx_policy_check(tw, skb); tmp_opt.saw_tstamp = 0; ts_recent_stamp = READ_ONCE(tcptw->tw_ts_recent_stamp); if (th->doff > (sizeof(*th) >> 2) && ts_recent_stamp) { tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL); if (tmp_opt.saw_tstamp) { if (tmp_opt.rcv_tsecr) tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset; tmp_opt.ts_recent = READ_ONCE(tcptw->tw_ts_recent); tmp_opt.ts_recent_stamp = ts_recent_stamp; paws_reject = tcp_paws_reject(&tmp_opt, th->rst); } } if (READ_ONCE(tw->tw_substate) == TCP_FIN_WAIT2) { /* Just repeat all the checks of tcp_rcv_state_process() */ if (psp_drop) goto out_put; /* Out of window, send ACK */ if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq, rcv_nxt, rcv_nxt + tcptw->tw_rcv_wnd)) return tcp_timewait_check_oow_rate_limit( tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2); if (th->rst) goto kill; if (th->syn && !before(TCP_SKB_CB(skb)->seq, rcv_nxt)) return TCP_TW_RST; /* Dup ACK? */ if (!th->ack || !after(TCP_SKB_CB(skb)->end_seq, rcv_nxt) || TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) { inet_twsk_put(tw); return TCP_TW_SUCCESS; } /* New data or FIN. If new data arrive after half-duplex close, * reset. */ if (!th->fin || TCP_SKB_CB(skb)->end_seq != rcv_nxt + 1) return TCP_TW_RST; /* FIN arrived, enter true time-wait state. */ WRITE_ONCE(tw->tw_substate, TCP_TIME_WAIT); twsk_rcv_nxt_update(tcptw, TCP_SKB_CB(skb)->end_seq, rcv_nxt); if (tmp_opt.saw_tstamp) { u64 ts = tcp_clock_ms(); WRITE_ONCE(tw->tw_entry_stamp, ts); WRITE_ONCE(tcptw->tw_ts_recent_stamp, div_u64(ts, MSEC_PER_SEC)); WRITE_ONCE(tcptw->tw_ts_recent, tmp_opt.rcv_tsval); } inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN); return TCP_TW_ACK; } /* * Now real TIME-WAIT state. * * RFC 1122: * "When a connection is [...] on TIME-WAIT state [...] * [a TCP] MAY accept a new SYN from the remote TCP to * reopen the connection directly, if it: * * (1) assigns its initial sequence number for the new * connection to be larger than the largest sequence * number it used on the previous connection incarnation, * and * * (2) returns to TIME-WAIT state if the SYN turns out * to be an old duplicate". */ if (!paws_reject && (TCP_SKB_CB(skb)->seq == rcv_nxt && (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) { /* In window segment, it may be only reset or bare ack. */ if (psp_drop) goto out_put; if (th->rst) { /* This is TIME_WAIT assassination, in two flavors. * Oh well... nobody has a sufficient solution to this * protocol bug yet. 
*/ if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) { kill: inet_twsk_deschedule_put(tw); return TCP_TW_SUCCESS; } } else { inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN); } if (tmp_opt.saw_tstamp) { WRITE_ONCE(tcptw->tw_ts_recent, tmp_opt.rcv_tsval); WRITE_ONCE(tcptw->tw_ts_recent_stamp, ktime_get_seconds()); } inet_twsk_put(tw); return TCP_TW_SUCCESS; } /* Out of window segment. All the segments are ACKed immediately. The only exception is new SYN. We accept it, if it is not old duplicate and we are not in danger to be killed by delayed old duplicates. RFC check is that it has newer sequence number works at rates <40Mbit/sec. However, if paws works, it is reliable AND even more, we even may relax silly seq space cutoff. RED-PEN: we violate main RFC requirement, if this SYN will appear old duplicate (i.e. we receive RST in reply to SYN-ACK), we must return socket to time-wait state. It is not good, but not fatal yet. */ if (th->syn && !th->rst && !th->ack && !paws_reject && (after(TCP_SKB_CB(skb)->seq, rcv_nxt) || (tmp_opt.saw_tstamp && (s32)(READ_ONCE(tcptw->tw_ts_recent) - tmp_opt.rcv_tsval) < 0))) { u32 isn = tcptw->tw_snd_nxt + 65535 + 2; if (isn == 0) isn++; *tw_isn = isn; return TCP_TW_SYN; } if (psp_drop) goto out_put; if (paws_reject) { *drop_reason = SKB_DROP_REASON_TCP_RFC7323_TW_PAWS; __NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWS_TW_REJECTED); } if (!th->rst) { /* In this case we must reset the TIMEWAIT timer. * * If it is ACKless SYN it may be both old duplicate * and new good SYN with random sequence number <rcv_nxt. * Do not reschedule in the last case. */ if (paws_reject || th->ack) inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN); return tcp_timewait_check_oow_rate_limit( tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT); } out_put: inet_twsk_put(tw); return TCP_TW_SUCCESS; } EXPORT_IPV6_MOD(tcp_timewait_state_process); static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw) { #ifdef CONFIG_TCP_MD5SIG const struct tcp_sock *tp = tcp_sk(sk); struct tcp_md5sig_key *key; /* * The timewait bucket does not have the key DB from the * sock structure. We just make a quick copy of the * md5 key being used (if indeed we are using one) * so the timewait ack generating code has the key. */ tcptw->tw_md5_key = NULL; if (!static_branch_unlikely(&tcp_md5_needed.key)) return; key = tp->af_specific->md5_lookup(sk, sk); if (key) { tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC); if (!tcptw->tw_md5_key) return; if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key)) goto out_free; } return; out_free: WARN_ON_ONCE(1); kfree(tcptw->tw_md5_key); tcptw->tw_md5_key = NULL; #endif } /* * Move a socket to time-wait or dead fin-wait-2 state. 
*/ void tcp_time_wait(struct sock *sk, int state, int timeo) { const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct net *net = sock_net(sk); struct inet_timewait_sock *tw; tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state); if (tw) { struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1); tw->tw_mark = sk->sk_mark; tw->tw_priority = READ_ONCE(sk->sk_priority); tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale; /* refreshed when we enter true TIME-WAIT state */ tw->tw_entry_stamp = tcp_time_stamp_ms(tp); tcptw->tw_rcv_nxt = tp->rcv_nxt; tcptw->tw_snd_nxt = tp->snd_nxt; tcptw->tw_rcv_wnd = tcp_receive_window(tp); tcptw->tw_ts_recent = tp->rx_opt.ts_recent; tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp; tcptw->tw_ts_offset = tp->tsoffset; tw->tw_usec_ts = tp->tcp_usec_ts; tcptw->tw_last_oow_ack_time = 0; tcptw->tw_tx_delay = tp->tcp_tx_delay; tw->tw_txhash = sk->sk_txhash; tw->tw_tx_queue_mapping = sk->sk_tx_queue_mapping; #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING tw->tw_rx_queue_mapping = sk->sk_rx_queue_mapping; #endif #if IS_ENABLED(CONFIG_IPV6) if (tw->tw_family == PF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); tw->tw_v6_daddr = sk->sk_v6_daddr; tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr; tw->tw_tclass = np->tclass; tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK); tw->tw_ipv6only = sk->sk_ipv6only; } #endif tcp_time_wait_init(sk, tcptw); tcp_ao_time_wait(tcptw, tp); /* Get the TIME_WAIT timeout firing. */ if (timeo < rto) timeo = rto; if (state == TCP_TIME_WAIT) timeo = TCP_TIMEWAIT_LEN; /* Linkage updates. * Note that access to tw after this point is illegal. */ inet_twsk_hashdance_schedule(tw, sk, net->ipv4.tcp_death_row.hashinfo, timeo); } else { /* Sorry, if we're out of memory, just CLOSE this * socket up. We've got bigger problems than * non-graceful socket closings. */ NET_INC_STATS(net, LINUX_MIB_TCPTIMEWAITOVERFLOW); } tcp_update_metrics(sk); tcp_done(sk); } EXPORT_SYMBOL(tcp_time_wait); void tcp_twsk_destructor(struct sock *sk) { #ifdef CONFIG_TCP_MD5SIG if (static_branch_unlikely(&tcp_md5_needed.key)) { struct tcp_timewait_sock *twsk = tcp_twsk(sk); if (twsk->tw_md5_key) { kfree(twsk->tw_md5_key); static_branch_slow_dec_deferred(&tcp_md5_needed); } } #endif tcp_ao_destroy_sock(sk, true); psp_twsk_assoc_free(inet_twsk(sk)); } void tcp_twsk_purge(struct list_head *net_exit_list) { bool purged_once = false; struct net *net; list_for_each_entry(net, net_exit_list, exit_list) { if (net->ipv4.tcp_death_row.hashinfo->pernet) { /* Even if tw_refcount == 1, we must clean up kernel reqsk */ inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo); } else if (!purged_once) { inet_twsk_purge(&tcp_hashinfo); purged_once = true; } } } /* Warning : This function is called without sk_listener being locked. * Be sure to read socket fields once, as their value could change under us. */ void tcp_openreq_init_rwin(struct request_sock *req, const struct sock *sk_listener, const struct dst_entry *dst) { struct inet_request_sock *ireq = inet_rsk(req); const struct tcp_sock *tp = tcp_sk(sk_listener); int full_space = tcp_full_space(sk_listener); u32 window_clamp; __u8 rcv_wscale; u32 rcv_wnd; int mss; mss = tcp_mss_clamp(tp, dst_metric_advmss(dst)); window_clamp = READ_ONCE(tp->window_clamp); /* Set this up on the first call only */ req->rsk_window_clamp = window_clamp ? 
: dst_metric(dst, RTAX_WINDOW); /* limit the window selection if the user enforce a smaller rx buffer */ if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK && (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0)) req->rsk_window_clamp = full_space; rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req); if (rcv_wnd == 0) rcv_wnd = dst_metric(dst, RTAX_INITRWND); else if (full_space < rcv_wnd * mss) full_space = rcv_wnd * mss; /* tcp_full_space because it is guaranteed to be the first packet */ tcp_select_initial_window(sk_listener, full_space, mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), &req->rsk_rcv_wnd, &req->rsk_window_clamp, ireq->wscale_ok, &rcv_wscale, rcv_wnd); ireq->rcv_wscale = rcv_wscale; } static void tcp_ecn_openreq_child(struct sock *sk, const struct request_sock *req, const struct sk_buff *skb) { const struct tcp_request_sock *treq = tcp_rsk(req); struct tcp_sock *tp = tcp_sk(sk); if (treq->accecn_ok) { tcp_ecn_mode_set(tp, TCP_ECN_MODE_ACCECN); tp->syn_ect_snt = treq->syn_ect_snt; tcp_accecn_third_ack(sk, skb, treq->syn_ect_snt); tp->saw_accecn_opt = treq->saw_accecn_opt; tp->prev_ecnfield = treq->syn_ect_rcv; tp->accecn_opt_demand = 1; tcp_ecn_received_counters_payload(sk, skb); } else { tcp_ecn_mode_set(tp, inet_rsk(req)->ecn_ok ? TCP_ECN_MODE_RFC3168 : TCP_ECN_DISABLED); } } void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst) { struct inet_connection_sock *icsk = inet_csk(sk); u32 ca_key = dst_metric(dst, RTAX_CC_ALGO); bool ca_got_dst = false; if (ca_key != TCP_CA_UNSPEC) { const struct tcp_congestion_ops *ca; rcu_read_lock(); ca = tcp_ca_find_key(ca_key); if (likely(ca && bpf_try_module_get(ca, ca->owner))) { icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst); icsk->icsk_ca_ops = ca; ca_got_dst = true; } rcu_read_unlock(); } /* If no valid choice made yet, assign current system default ca. */ if (!ca_got_dst && (!icsk->icsk_ca_setsockopt || !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner))) tcp_assign_congestion_control(sk); tcp_set_ca_state(sk, TCP_CA_Open); } EXPORT_IPV6_MOD_GPL(tcp_ca_openreq_child); static void smc_check_reset_syn_req(const struct tcp_sock *oldtp, struct request_sock *req, struct tcp_sock *newtp) { #if IS_ENABLED(CONFIG_SMC) struct inet_request_sock *ireq; if (static_branch_unlikely(&tcp_have_smc)) { ireq = inet_rsk(req); if (oldtp->syn_smc && !ireq->smc_ok) newtp->syn_smc = 0; } #endif } /* This is not only more efficient than what we used to do, it eliminates * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM * * Actually, we could lots of memory writes here. tp of listening * socket contains all necessary default parameters. 
*/ struct sock *tcp_create_openreq_child(const struct sock *sk, struct request_sock *req, struct sk_buff *skb) { struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC); const struct inet_request_sock *ireq = inet_rsk(req); struct tcp_request_sock *treq = tcp_rsk(req); struct inet_connection_sock *newicsk; const struct tcp_sock *oldtp; struct tcp_sock *newtp; u32 seq; if (!newsk) return NULL; newicsk = inet_csk(newsk); newtp = tcp_sk(newsk); oldtp = tcp_sk(sk); smc_check_reset_syn_req(oldtp, req, newtp); /* Now setup tcp_sock */ newtp->pred_flags = 0; seq = treq->rcv_isn + 1; newtp->rcv_wup = seq; WRITE_ONCE(newtp->copied_seq, seq); WRITE_ONCE(newtp->rcv_nxt, seq); newtp->segs_in = 1; seq = treq->snt_isn + 1; newtp->snd_sml = newtp->snd_una = seq; WRITE_ONCE(newtp->snd_nxt, seq); newtp->snd_up = seq; INIT_LIST_HEAD(&newtp->tsq_node); INIT_LIST_HEAD(&newtp->tsorted_sent_queue); tcp_init_wl(newtp, treq->rcv_isn); minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U); newicsk->icsk_ack.lrcvtime = tcp_jiffies32; newtp->lsndtime = tcp_jiffies32; newsk->sk_txhash = READ_ONCE(treq->txhash); newtp->total_retrans = req->num_retrans; tcp_init_xmit_timers(newsk); WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1); if (sock_flag(newsk, SOCK_KEEPOPEN)) tcp_reset_keepalive_timer(newsk, keepalive_time_when(newtp)); newtp->rx_opt.tstamp_ok = ireq->tstamp_ok; newtp->rx_opt.sack_ok = ireq->sack_ok; newtp->window_clamp = req->rsk_window_clamp; newtp->rcv_ssthresh = req->rsk_rcv_wnd; newtp->rcv_wnd = req->rsk_rcv_wnd; newtp->rx_opt.wscale_ok = ireq->wscale_ok; if (newtp->rx_opt.wscale_ok) { newtp->rx_opt.snd_wscale = ireq->snd_wscale; newtp->rx_opt.rcv_wscale = ireq->rcv_wscale; } else { newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0; newtp->window_clamp = min(newtp->window_clamp, 65535U); } newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale; newtp->max_window = newtp->snd_wnd; if (newtp->rx_opt.tstamp_ok) { newtp->tcp_usec_ts = treq->req_usec_ts; newtp->rx_opt.ts_recent = req->ts_recent; newtp->rx_opt.ts_recent_stamp = ktime_get_seconds(); newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; } else { newtp->tcp_usec_ts = 0; newtp->rx_opt.ts_recent_stamp = 0; newtp->tcp_header_len = sizeof(struct tcphdr); } if (req->num_timeout) { newtp->total_rto = req->num_timeout; newtp->undo_marker = treq->snt_isn; if (newtp->tcp_usec_ts) { newtp->retrans_stamp = treq->snt_synack; newtp->total_rto_time = (u32)(tcp_clock_us() - newtp->retrans_stamp) / USEC_PER_MSEC; } else { newtp->retrans_stamp = div_u64(treq->snt_synack, USEC_PER_SEC / TCP_TS_HZ); newtp->total_rto_time = tcp_clock_ms() - newtp->retrans_stamp; } newtp->total_rto_recoveries = 1; } newtp->tsoffset = treq->ts_off; #ifdef CONFIG_TCP_MD5SIG newtp->md5sig_info = NULL; /*XXX*/ #endif #ifdef CONFIG_TCP_AO newtp->ao_info = NULL; if (tcp_rsk_used_ao(req)) { struct tcp_ao_key *ao_key; ao_key = treq->af_specific->ao_lookup(sk, req, tcp_rsk(req)->ao_keyid, -1); if (ao_key) newtp->tcp_header_len += tcp_ao_len_aligned(ao_key); } #endif if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len) newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; newtp->rx_opt.mss_clamp = req->mss; tcp_ecn_openreq_child(newsk, req, skb); newtp->fastopen_req = NULL; RCU_INIT_POINTER(newtp->fastopen_rsk, NULL); newtp->bpf_chg_cc_inprogress = 0; tcp_bpf_clone(sk, newsk); __TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS); xa_init_flags(&newsk->sk_user_frags, XA_FLAGS_ALLOC1); return newsk; } 
EXPORT_SYMBOL(tcp_create_openreq_child); /* * Process an incoming packet for SYN_RECV sockets represented as a * request_sock. Normally sk is the listener socket but for TFO it * points to the child socket. * * XXX (TFO) - The current impl contains a special check for ack * validation and inside tcp_v4_reqsk_send_ack(). Can we do better? * * We don't need to initialize tmp_opt.sack_ok as we don't use the results * * Note: If @fastopen is true, this can be called from process context. * Otherwise, this is from BH context. */ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, struct request_sock *req, bool fastopen, bool *req_stolen, enum skb_drop_reason *drop_reason) { struct tcp_options_received tmp_opt; struct sock *child; const struct tcphdr *th = tcp_hdr(skb); __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK); bool tsecr_reject = false; bool paws_reject = false; bool own_req; tmp_opt.saw_tstamp = 0; tmp_opt.accecn = 0; if (th->doff > (sizeof(struct tcphdr)>>2)) { tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL); if (tmp_opt.saw_tstamp) { tmp_opt.ts_recent = req->ts_recent; if (tmp_opt.rcv_tsecr) { if (inet_rsk(req)->tstamp_ok && !fastopen) tsecr_reject = !between(tmp_opt.rcv_tsecr, tcp_rsk(req)->snt_tsval_first, READ_ONCE(tcp_rsk(req)->snt_tsval_last)); tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off; } /* We do not store true stamp, but it is not required, * it can be estimated (approximately) * from another data. */ tmp_opt.ts_recent_stamp = ktime_get_seconds() - tcp_reqsk_timeout(req) / HZ; paws_reject = tcp_paws_reject(&tmp_opt, th->rst); } } /* Check for pure retransmitted SYN. */ if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn && flg == TCP_FLAG_SYN && !paws_reject) { /* * RFC793 draws (Incorrectly! It was fixed in RFC1122) * this case on figure 6 and figure 8, but formal * protocol description says NOTHING. * To be more exact, it says that we should send ACK, * because this segment (at least, if it has no data) * is out of window. * * CONCLUSION: RFC793 (even with RFC1122) DOES NOT * describe SYN-RECV state. All the description * is wrong, we cannot believe to it and should * rely only on common sense and implementation * experience. * * Enforce "SYN-ACK" according to figure 8, figure 6 * of RFC793, fixed by RFC1122. * * Note that even if there is new data in the SYN packet * they will be thrown away too. * * Reset timer after retransmitting SYNACK, similar to * the idea of fast retransmit in recovery. */ if (!tcp_oow_rate_limited(sock_net(sk), skb, LINUX_MIB_TCPACKSKIPPEDSYNRECV, &tcp_rsk(req)->last_oow_ack_time) && !tcp_rtx_synack(sk, req)) { unsigned long expires = jiffies; expires += tcp_reqsk_timeout(req); if (!fastopen) mod_timer_pending(&req->rsk_timer, expires); else req->rsk_timer.expires = expires; } return NULL; } /* Further reproduces section "SEGMENT ARRIVES" for state SYN-RECEIVED of RFC793. It is broken, however, it does not work only when SYNs are crossed. You would think that SYN crossing is impossible here, since we should have a SYN_SENT socket (from connect()) on our end, but this is not true if the crossed SYNs were sent to both ends by a malicious third party. We must defend against this, and to do that we first verify the ACK (as per RFC793, page 36) and reset if it is invalid. Is this a true full defense? To convince ourselves, let us consider a way in which the ACK test can still pass in this 'malicious crossed SYNs' case. 
Malicious sender sends identical SYNs (and thus identical sequence numbers) to both A and B: A: gets SYN, seq=7 B: gets SYN, seq=7 By our good fortune, both A and B select the same initial send sequence number of seven :-) A: sends SYN|ACK, seq=7, ack_seq=8 B: sends SYN|ACK, seq=7, ack_seq=8 So we are now A eating this SYN|ACK, ACK test passes. So does sequence test, SYN is truncated, and thus we consider it a bare ACK. If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this bare ACK. Otherwise, we create an established connection. Both ends (listening sockets) accept the new incoming connection and try to talk to each other. 8-) Note: This case is both harmless, and rare. Possibility is about the same as us discovering intelligent life on another plant tomorrow. But generally, we should (RFC lies!) to accept ACK from SYNACK both here and in tcp_rcv_state_process(). tcp_rcv_state_process() does not, hence, we do not too. Note that the case is absolutely generic: we cannot optimize anything here without violating protocol. All the checks must be made before attempt to create socket. */ /* RFC793 page 36: "If the connection is in any non-synchronized state ... * and the incoming segment acknowledges something not yet * sent (the segment carries an unacceptable ACK) ... * a reset is sent." * * Invalid ACK: reset will be sent by listening socket. * Note that the ACK validity check for a Fast Open socket is done * elsewhere and is checked directly against the child socket rather * than req because user data may have been sent out. */ if ((flg & TCP_FLAG_ACK) && !fastopen && (TCP_SKB_CB(skb)->ack_seq != tcp_rsk(req)->snt_isn + 1)) return sk; /* RFC793: "first check sequence number". */ if (paws_reject || tsecr_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq, tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + tcp_synack_window(req))) { /* Out of window: send ACK and drop. */ if (!(flg & TCP_FLAG_RST) && !tcp_oow_rate_limited(sock_net(sk), skb, LINUX_MIB_TCPACKSKIPPEDSYNRECV, &tcp_rsk(req)->last_oow_ack_time)) req->rsk_ops->send_ack(sk, skb, req); if (paws_reject) { SKB_DR_SET(*drop_reason, TCP_RFC7323_PAWS); NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); } else if (tsecr_reject) { SKB_DR_SET(*drop_reason, TCP_RFC7323_TSECR); NET_INC_STATS(sock_net(sk), LINUX_MIB_TSECRREJECTED); } else { SKB_DR_SET(*drop_reason, TCP_OVERWINDOW); } return NULL; } /* In sequence, PAWS is OK. */ if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) { /* Truncate SYN, it is out of window starting at tcp_rsk(req)->rcv_isn + 1. */ flg &= ~TCP_FLAG_SYN; } /* RFC793: "second check the RST bit" and * "fourth, check the SYN bit" */ if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) { TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS); goto embryonic_reset; } /* ACK sequence verified above, just make sure ACK is * set. If ACK not set, just silently drop the packet. * * XXX (TFO) - if we ever allow "data after SYN", the * following check needs to be removed. */ if (!(flg & TCP_FLAG_ACK)) return NULL; if (tcp_rsk(req)->accecn_ok && tmp_opt.accecn && tcp_rsk(req)->saw_accecn_opt < TCP_ACCECN_OPT_COUNTER_SEEN) { u8 saw_opt = tcp_accecn_option_init(skb, tmp_opt.accecn); tcp_rsk(req)->saw_accecn_opt = saw_opt; if (tcp_rsk(req)->saw_accecn_opt == TCP_ACCECN_OPT_FAIL_SEEN) { u8 fail_mode = TCP_ACCECN_OPT_FAIL_RECV; tcp_rsk(req)->accecn_fail_mode |= fail_mode; } } /* For Fast Open no more processing is needed (sk is the * child socket). 
*/ if (fastopen) return sk; /* While TCP_DEFER_ACCEPT is active, drop bare ACK. */ if (req->num_timeout < READ_ONCE(inet_csk(sk)->icsk_accept_queue.rskq_defer_accept) && TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) { inet_rsk(req)->acked = 1; __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP); return NULL; } /* OK, ACK is valid, create big socket and * feed this segment to it. It will repeat all * the tests. THIS SEGMENT MUST MOVE SOCKET TO * ESTABLISHED STATE. If it will be dropped after * socket is created, wait for troubles. */ child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, req, &own_req); if (!child) goto listen_overflow; if (own_req && tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt)) tcp_sk(child)->rx_opt.ts_recent = tmp_opt.rcv_tsval; if (own_req && rsk_drop_req(req)) { reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req); inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req); return child; } sock_rps_save_rxhash(child, skb); tcp_synack_rtt_meas(child, req); *req_stolen = !own_req; return inet_csk_complete_hashdance(sk, child, req, own_req); listen_overflow: SKB_DR_SET(*drop_reason, TCP_LISTEN_OVERFLOW); if (sk != req->rsk_listener) __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE); if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) { inet_rsk(req)->acked = 1; return NULL; } embryonic_reset: if (!(flg & TCP_FLAG_RST)) { /* Received a bad SYN pkt - for TFO We try not to reset * the local connection unless it's really necessary to * avoid becoming vulnerable to outside attack aiming at * resetting legit local connections. */ req->rsk_ops->send_reset(sk, skb, SK_RST_REASON_INVALID_SYN); } else if (fastopen) { /* received a valid RST pkt */ reqsk_fastopen_remove(sk, req, true); tcp_reset(sk, skb); } if (!fastopen) { bool unlinked = inet_csk_reqsk_queue_drop(sk, req); if (unlinked) __NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS); *req_stolen = !unlinked; } return NULL; } EXPORT_IPV6_MOD(tcp_check_req); /* * Queue segment on the new socket if the new socket is active, * otherwise we just shortcircuit this and continue with * the new socket. * * For the vast majority of cases child->sk_state will be TCP_SYN_RECV * when entering. But other states are possible due to a race condition * where after __inet_lookup_established() fails but before the listener * locked is obtained, other packets cause the same connection to * be created. */ enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child, struct sk_buff *skb) __releases(&((child)->sk_lock.slock)) { enum skb_drop_reason reason = SKB_NOT_DROPPED_YET; int state = child->sk_state; /* record sk_napi_id and sk_rx_queue_mapping of child. */ sk_mark_napi_id_set(child, skb); tcp_segs_in(tcp_sk(child), skb); if (!sock_owned_by_user(child)) { reason = tcp_rcv_state_process(child, skb); /* Wakeup parent, send SIGIO */ if (state == TCP_SYN_RECV && child->sk_state != state) parent->sk_data_ready(parent); } else { /* Alas, it is possible again, because we do lookup * in main socket hash table and lock on listening * socket does not protect us more. */ __sk_add_backlog(child, skb); } bh_unlock_sock(child); sock_put(child); return reason; } EXPORT_IPV6_MOD(tcp_child_process);
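/*
 * Editor's illustrative sketch, not part of the original file: worked values
 * for the tcp_in_window() helper defined near the top of this file, showing
 * that a zero-length segment is acceptable when it sits exactly at either
 * edge of the receive window. The function name and the numbers are made up
 * for illustration.
 */
#if 0	/* example only, not compiled */
static void example_tcp_in_window(void)
{
	u32 rcv_nxt = 1000, rcv_wnd = 500;

	/* Bare ACK sitting exactly at the left window edge: accepted. */
	WARN_ON(!tcp_in_window(1000, 1000, rcv_nxt, rcv_nxt + rcv_wnd));

	/* Zero-length segment exactly at the right window edge: accepted. */
	WARN_ON(!tcp_in_window(1500, 1500, rcv_nxt, rcv_nxt + rcv_wnd));

	/* One byte past the advertised window: rejected. */
	WARN_ON(tcp_in_window(1501, 1501, rcv_nxt, rcv_nxt + rcv_wnd));
}
#endif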
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ufs/util.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

struct ufs_buffer_head * _ubh_bread_ (struct ufs_sb_private_info * uspi,
	struct super_block *sb, u64 fragment, u64 size)
{
	struct ufs_buffer_head * ubh;
	unsigned i, j ;
	u64 count = 0;

	if (size & ~uspi->s_fmask)
		return NULL;
	count = size >> uspi->s_fshift;
	if (count > UFS_MAXFRAG)
		return NULL;
	ubh = kmalloc (sizeof (struct ufs_buffer_head), GFP_NOFS);
	if (!ubh)
		return NULL;
	ubh->fragment = fragment;
	ubh->count = count;
	for (i = 0; i < count; i++)
		if (!(ubh->bh[i] = sb_bread(sb, fragment + i)))
			goto failed;
	for (; i < UFS_MAXFRAG; i++)
		ubh->bh[i] = NULL;
	return ubh;
failed:
	for (j = 0; j < i; j++)
		brelse (ubh->bh[j]);
	kfree(ubh);
	return NULL;
}

struct ufs_buffer_head * ubh_bread_uspi (struct ufs_sb_private_info * uspi,
	struct super_block *sb, u64 fragment, u64 size)
{
	unsigned i, j;
	u64 count = 0;

	if (size & ~uspi->s_fmask)
		return NULL;
	count = size >> uspi->s_fshift;
	if (count <= 0 || count > UFS_MAXFRAG)
		return NULL;
	USPI_UBH(uspi)->fragment = fragment;
	USPI_UBH(uspi)->count = count;
	for (i = 0; i < count; i++)
		if (!(USPI_UBH(uspi)->bh[i] = sb_bread(sb, fragment + i)))
			goto failed;
	for (; i < UFS_MAXFRAG; i++)
		USPI_UBH(uspi)->bh[i] = NULL;
	return USPI_UBH(uspi);
failed:
	for (j = 0; j < i; j++)
		brelse (USPI_UBH(uspi)->bh[j]);
	return NULL;
}

void ubh_brelse (struct ufs_buffer_head * ubh)
{
	unsigned i;

	if (!ubh)
		return;
	for (i = 0; i < ubh->count; i++)
		brelse (ubh->bh[i]);
	kfree (ubh);
}

void ubh_brelse_uspi (struct ufs_sb_private_info * uspi)
{
	unsigned i;

	if (!USPI_UBH(uspi))
		return;
	for ( i = 0; i < USPI_UBH(uspi)->count; i++ ) {
		brelse (USPI_UBH(uspi)->bh[i]);
		USPI_UBH(uspi)->bh[i] = NULL;
	}
}

void ubh_mark_buffer_dirty (struct ufs_buffer_head * ubh)
{
	unsigned i;

	if (!ubh)
		return;
	for ( i = 0; i < ubh->count; i++ )
		mark_buffer_dirty (ubh->bh[i]);
}

void ubh_sync_block(struct ufs_buffer_head *ubh)
{
	if (ubh) {
		unsigned i;

		for (i = 0; i < ubh->count; i++)
			write_dirty_buffer(ubh->bh[i], 0);

		for (i = 0; i < ubh->count; i++)
			wait_on_buffer(ubh->bh[i]);
	}
}

void ubh_bforget (struct ufs_buffer_head * ubh)
{
	unsigned i;

	if (!ubh)
		return;
	for ( i = 0; i < ubh->count; i++ )
		if ( ubh->bh[i] )
			bforget (ubh->bh[i]);
}

int ubh_buffer_dirty (struct ufs_buffer_head * ubh)
{
	unsigned i;
	unsigned result = 0;

	if (!ubh)
		return 0;
	for ( i = 0; i < ubh->count; i++ )
		result |= buffer_dirty(ubh->bh[i]);
	return result;
}

dev_t ufs_get_inode_dev(struct super_block *sb, struct ufs_inode_info
*ufsi) { __u32 fs32; dev_t dev; if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86) fs32 = fs32_to_cpu(sb, ufsi->i_u1.i_data[1]); else fs32 = fs32_to_cpu(sb, ufsi->i_u1.i_data[0]); switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) { case UFS_ST_SUNx86: case UFS_ST_SUN: if ((fs32 & 0xffff0000) == 0 || (fs32 & 0xffff0000) == 0xffff0000) dev = old_decode_dev(fs32 & 0x7fff); else dev = MKDEV(sysv_major(fs32), sysv_minor(fs32)); break; default: dev = old_decode_dev(fs32); break; } return dev; } void ufs_set_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi, dev_t dev) { __u32 fs32; switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) { case UFS_ST_SUNx86: case UFS_ST_SUN: fs32 = sysv_encode_dev(dev); if ((fs32 & 0xffff8000) == 0) { fs32 = old_encode_dev(dev); } break; default: fs32 = old_encode_dev(dev); break; } if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86) ufsi->i_u1.i_data[1] = cpu_to_fs32(sb, fs32); else ufsi->i_u1.i_data[0] = cpu_to_fs32(sb, fs32); } /** * ufs_get_locked_folio() - locate, pin and lock a pagecache folio, if not exist * read it from disk. * @mapping: the address_space to search * @index: the page index * * Locates the desired pagecache folio, if not exist we'll read it, * locks it, increments its reference * count and returns its address. * */ struct folio *ufs_get_locked_folio(struct address_space *mapping, pgoff_t index) { struct inode *inode = mapping->host; struct folio *folio = filemap_lock_folio(mapping, index); if (IS_ERR(folio)) { folio = read_mapping_folio(mapping, index, NULL); if (IS_ERR(folio)) { printk(KERN_ERR "ufs_change_blocknr: read_mapping_folio error: ino %lu, index: %lu\n", mapping->host->i_ino, index); return folio; } folio_lock(folio); if (unlikely(folio->mapping == NULL)) { /* Truncate got there first */ folio_unlock(folio); folio_put(folio); return NULL; } } if (!folio_buffers(folio)) create_empty_buffers(folio, 1 << inode->i_blkbits, 0); return folio; }
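/*
 * Hypothetical caller sketch -- not part of util.c; the helper name below is
 * made up.  ufs_get_locked_folio() returns NULL when the folio was truncated
 * away, an ERR_PTR() when reading it in failed, or a locked, referenced folio
 * with buffers attached; the caller is responsible for unlocking it and
 * dropping the reference when done.
 */
static int ufs_touch_cached_block(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio = ufs_get_locked_folio(mapping, index);

	if (!folio)
		return 0;			/* raced with truncate: nothing to do */
	if (IS_ERR(folio))
		return PTR_ERR(folio);		/* read from disk failed */

	/* ... inspect or update folio_buffers(folio) here ... */

	folio_unlock(folio);
	folio_put(folio);
	return 0;
}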
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 */
#ifndef _LINUX_RADIX_TREE_H
#define _LINUX_RADIX_TREE_H

#include <linux/bitops.h>
#include <linux/gfp_types.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/math.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/xarray.h>
#include <linux/local_lock.h>

/* Keep unconverted code working */
#define radix_tree_root		xarray
#define radix_tree_node		xa_node

struct radix_tree_preload {
	local_lock_t lock;
	unsigned nr;
	/* nodes->parent points to next preallocated node */
	struct radix_tree_node *nodes;
};
DECLARE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);

/*
 * The bottom two bits of the slot determine how the remaining bits in the
 * slot are interpreted:
 *
 * 00 - data pointer
 * 10 - internal entry
 * x1 - value entry
 *
 * The internal entry may be a pointer to the next level in the tree, a
 * sibling entry, or an indicator that the entry in this slot has been moved
 * to another location in the tree and the lookup should be restarted.  While
 * NULL fits the 'data pointer' pattern, it means that there is no entry in
 * the tree for this index (no matter what level of the tree it is found at).
 * This means that storing a NULL entry in the tree is the same as deleting
 * the entry from the tree.
*/ #define RADIX_TREE_ENTRY_MASK 3UL #define RADIX_TREE_INTERNAL_NODE 2UL static inline bool radix_tree_is_internal_node(void *ptr) { return ((unsigned long)ptr & RADIX_TREE_ENTRY_MASK) == RADIX_TREE_INTERNAL_NODE; } /*** radix-tree API starts here ***/ #define RADIX_TREE_MAP_SHIFT XA_CHUNK_SHIFT #define RADIX_TREE_MAP_SIZE (1UL << RADIX_TREE_MAP_SHIFT) #define RADIX_TREE_MAP_MASK (RADIX_TREE_MAP_SIZE-1) #define RADIX_TREE_MAX_TAGS XA_MAX_MARKS #define RADIX_TREE_TAG_LONGS XA_MARK_LONGS #define RADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long)) #define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \ RADIX_TREE_MAP_SHIFT)) /* The IDR tag is stored in the low bits of xa_flags */ #define ROOT_IS_IDR ((__force gfp_t)4) /* The top bits of xa_flags are used to store the root tags */ #define ROOT_TAG_SHIFT (__GFP_BITS_SHIFT) #define RADIX_TREE_INIT(name, mask) XARRAY_INIT(name, mask) #define RADIX_TREE(name, mask) \ struct radix_tree_root name = RADIX_TREE_INIT(name, mask) #define INIT_RADIX_TREE(root, mask) xa_init_flags(root, mask) static inline bool radix_tree_empty(const struct radix_tree_root *root) { return root->xa_head == NULL; } /** * struct radix_tree_iter - radix tree iterator state * * @index: index of current slot * @next_index: one beyond the last index for this chunk * @tags: bit-mask for tag-iterating * @node: node that contains current slot * * This radix tree iterator works in terms of "chunks" of slots. A chunk is a * subinterval of slots contained within one radix tree leaf node. It is * described by a pointer to its first slot and a struct radix_tree_iter * which holds the chunk's position in the tree and its size. For tagged * iteration radix_tree_iter also holds the slots' bit-mask for one chosen * radix tree tag. */ struct radix_tree_iter { unsigned long index; unsigned long next_index; unsigned long tags; struct radix_tree_node *node; }; /** * Radix-tree synchronization * * The radix-tree API requires that users provide all synchronisation (with * specific exceptions, noted below). * * Synchronization of access to the data items being stored in the tree, and * management of their lifetimes must be completely managed by API users. * * For API usage, in general, * - any function _modifying_ the tree or tags (inserting or deleting * items, setting or clearing tags) must exclude other modifications, and * exclude any functions reading the tree. * - any function _reading_ the tree or tags (looking up items or tags, * gang lookups) must exclude modifications to the tree, but may occur * concurrently with other readers. * * The notable exceptions to this rule are the following functions: * __radix_tree_lookup * radix_tree_lookup * radix_tree_lookup_slot * radix_tree_tag_get * radix_tree_gang_lookup * radix_tree_gang_lookup_tag * radix_tree_gang_lookup_tag_slot * radix_tree_tagged * * The first 7 functions are able to be called locklessly, using RCU. The * caller must ensure calls to these functions are made within rcu_read_lock() * regions. Other readers (lock-free or otherwise) and modifications may be * running concurrently. * * It is still required that the caller manage the synchronization and lifetimes * of the items. So if RCU lock-free lookups are used, typically this would mean * that the items have their own locks, or are amenable to lock-free access; and * that the items are freed by RCU (or only freed after having been deleted from * the radix tree *and* a synchronize_rcu() grace period). 
* * (Note, rcu_assign_pointer and rcu_dereference are not needed to control * access to data items when inserting into or looking up from the radix tree) * * Note that the value returned by radix_tree_tag_get() may not be relied upon * if only the RCU read lock is held. Functions to set/clear tags and to * delete nodes running concurrently with it may affect its result such that * two consecutive reads in the same locked section may return different * values. If reliability is required, modification functions must also be * excluded from concurrency. * * radix_tree_tagged is able to be called without locking or RCU. */ /** * radix_tree_deref_slot - dereference a slot * @slot: slot pointer, returned by radix_tree_lookup_slot * * For use with radix_tree_lookup_slot(). Caller must hold tree at least read * locked across slot lookup and dereference. Not required if write lock is * held (ie. items cannot be concurrently inserted). * * radix_tree_deref_retry must be used to confirm validity of the pointer if * only the read lock is held. * * Return: entry stored in that slot. */ static inline void *radix_tree_deref_slot(void __rcu **slot) { return rcu_dereference(*slot); } /** * radix_tree_deref_slot_protected - dereference a slot with tree lock held * @slot: slot pointer, returned by radix_tree_lookup_slot * * Similar to radix_tree_deref_slot. The caller does not hold the RCU read * lock but it must hold the tree lock to prevent parallel updates. * * Return: entry stored in that slot. */ static inline void *radix_tree_deref_slot_protected(void __rcu **slot, spinlock_t *treelock) { return rcu_dereference_protected(*slot, lockdep_is_held(treelock)); } /** * radix_tree_deref_retry - check radix_tree_deref_slot * @arg: pointer returned by radix_tree_deref_slot * Returns: 0 if retry is not required, otherwise retry is required * * radix_tree_deref_retry must be used with radix_tree_deref_slot. */ static inline int radix_tree_deref_retry(void *arg) { return unlikely(radix_tree_is_internal_node(arg)); } /** * radix_tree_exception - radix_tree_deref_slot returned either exception? * @arg: value returned by radix_tree_deref_slot * Returns: 0 if well-aligned pointer, non-0 if either kind of exception. 
*/ static inline int radix_tree_exception(void *arg) { return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK); } int radix_tree_insert(struct radix_tree_root *, unsigned long index, void *); void *__radix_tree_lookup(const struct radix_tree_root *, unsigned long index, struct radix_tree_node **nodep, void __rcu ***slotp); void *radix_tree_lookup(const struct radix_tree_root *, unsigned long); void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *, unsigned long index); void __radix_tree_replace(struct radix_tree_root *, struct radix_tree_node *, void __rcu **slot, void *entry); void radix_tree_iter_replace(struct radix_tree_root *, const struct radix_tree_iter *, void __rcu **slot, void *entry); void radix_tree_replace_slot(struct radix_tree_root *, void __rcu **slot, void *entry); void radix_tree_iter_delete(struct radix_tree_root *, struct radix_tree_iter *iter, void __rcu **slot); void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *); void *radix_tree_delete(struct radix_tree_root *, unsigned long); unsigned int radix_tree_gang_lookup(const struct radix_tree_root *, void **results, unsigned long first_index, unsigned int max_items); int radix_tree_preload(gfp_t gfp_mask); int radix_tree_maybe_preload(gfp_t gfp_mask); void radix_tree_init(void); void *radix_tree_tag_set(struct radix_tree_root *, unsigned long index, unsigned int tag); void *radix_tree_tag_clear(struct radix_tree_root *, unsigned long index, unsigned int tag); int radix_tree_tag_get(const struct radix_tree_root *, unsigned long index, unsigned int tag); void radix_tree_iter_tag_clear(struct radix_tree_root *, const struct radix_tree_iter *iter, unsigned int tag); unsigned int radix_tree_gang_lookup_tag(const struct radix_tree_root *, void **results, unsigned long first_index, unsigned int max_items, unsigned int tag); unsigned int radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *, void __rcu ***results, unsigned long first_index, unsigned int max_items, unsigned int tag); int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag); static inline void radix_tree_preload_end(void) { local_unlock(&radix_tree_preloads.lock); } void __rcu **idr_get_free(struct radix_tree_root *root, struct radix_tree_iter *iter, gfp_t gfp, unsigned long max); enum { RADIX_TREE_ITER_TAG_MASK = 0x0f, /* tag index in lower nybble */ RADIX_TREE_ITER_TAGGED = 0x10, /* lookup tagged slots */ RADIX_TREE_ITER_CONTIG = 0x20, /* stop at first hole */ }; /** * radix_tree_iter_init - initialize radix tree iterator * * @iter: pointer to iterator state * @start: iteration starting index * Returns: NULL */ static __always_inline void __rcu ** radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start) { /* * Leave iter->tags uninitialized. radix_tree_next_chunk() will fill it * in the case of a successful tagged chunk lookup. If the lookup was * unsuccessful or non-tagged then nobody cares about ->tags. * * Set index to zero to bypass next_index overflow protection. * See the comment in radix_tree_next_chunk() for details. */ iter->index = 0; iter->next_index = start; return NULL; } /** * radix_tree_next_chunk - find next chunk of slots for iteration * * @root: radix tree root * @iter: iterator state * @flags: RADIX_TREE_ITER_* flags and tag index * Returns: pointer to chunk first slot, or NULL if there no more left * * This function looks up the next chunk in the radix tree starting from * @iter->next_index. It returns a pointer to the chunk's first slot. 
* Also it fills @iter with data about chunk: position in the tree (index), * its end (next_index), and constructs a bit mask for tagged iterating (tags). */ void __rcu **radix_tree_next_chunk(const struct radix_tree_root *, struct radix_tree_iter *iter, unsigned flags); /** * radix_tree_iter_lookup - look up an index in the radix tree * @root: radix tree root * @iter: iterator state * @index: key to look up * * If @index is present in the radix tree, this function returns the slot * containing it and updates @iter to describe the entry. If @index is not * present, it returns NULL. */ static inline void __rcu ** radix_tree_iter_lookup(const struct radix_tree_root *root, struct radix_tree_iter *iter, unsigned long index) { radix_tree_iter_init(iter, index); return radix_tree_next_chunk(root, iter, RADIX_TREE_ITER_CONTIG); } /** * radix_tree_iter_retry - retry this chunk of the iteration * @iter: iterator state * * If we iterate over a tree protected only by the RCU lock, a race * against deletion or creation may result in seeing a slot for which * radix_tree_deref_retry() returns true. If so, call this function * and continue the iteration. */ static inline __must_check void __rcu **radix_tree_iter_retry(struct radix_tree_iter *iter) { iter->next_index = iter->index; iter->tags = 0; return NULL; } static inline unsigned long __radix_tree_iter_add(struct radix_tree_iter *iter, unsigned long slots) { return iter->index + slots; } /** * radix_tree_iter_resume - resume iterating when the chunk may be invalid * @slot: pointer to current slot * @iter: iterator state * Returns: New slot pointer * * If the iterator needs to release then reacquire a lock, the chunk may * have been invalidated by an insertion or deletion. Call this function * before releasing the lock to continue the iteration from the next index. */ void __rcu **__must_check radix_tree_iter_resume(void __rcu **slot, struct radix_tree_iter *iter); /** * radix_tree_chunk_size - get current chunk size * * @iter: pointer to radix tree iterator * Returns: current chunk size */ static __always_inline long radix_tree_chunk_size(struct radix_tree_iter *iter) { return iter->next_index - iter->index; } /** * radix_tree_next_slot - find next slot in chunk * * @slot: pointer to current slot * @iter: pointer to iterator state * @flags: RADIX_TREE_ITER_*, should be constant * Returns: pointer to next slot, or NULL if there no more left * * This function updates @iter->index in the case of a successful lookup. * For tagged lookup it also eats @iter->tags. * * There are several cases where 'slot' can be passed in as NULL to this * function. These cases result from the use of radix_tree_iter_resume() or * radix_tree_iter_retry(). In these cases we don't end up dereferencing * 'slot' because either: * a) we are doing tagged iteration and iter->tags has been set to 0, or * b) we are doing non-tagged iteration, and iter->index and iter->next_index * have been set up so that radix_tree_chunk_size() returns 1 or 0. 
*/ static __always_inline void __rcu **radix_tree_next_slot(void __rcu **slot, struct radix_tree_iter *iter, unsigned flags) { if (flags & RADIX_TREE_ITER_TAGGED) { iter->tags >>= 1; if (unlikely(!iter->tags)) return NULL; if (likely(iter->tags & 1ul)) { iter->index = __radix_tree_iter_add(iter, 1); slot++; goto found; } if (!(flags & RADIX_TREE_ITER_CONTIG)) { unsigned offset = __ffs(iter->tags); iter->tags >>= offset++; iter->index = __radix_tree_iter_add(iter, offset); slot += offset; goto found; } } else { long count = radix_tree_chunk_size(iter); while (--count > 0) { slot++; iter->index = __radix_tree_iter_add(iter, 1); if (likely(*slot)) goto found; if (flags & RADIX_TREE_ITER_CONTIG) { /* forbid switching to the next chunk */ iter->next_index = 0; break; } } } return NULL; found: return slot; } /** * radix_tree_for_each_slot - iterate over non-empty slots * * @slot: the void** variable for pointer to slot * @root: the struct radix_tree_root pointer * @iter: the struct radix_tree_iter pointer * @start: iteration starting index * * @slot points to radix tree slot, @iter->index contains its index. */ #define radix_tree_for_each_slot(slot, root, iter, start) \ for (slot = radix_tree_iter_init(iter, start) ; \ slot || (slot = radix_tree_next_chunk(root, iter, 0)) ; \ slot = radix_tree_next_slot(slot, iter, 0)) /** * radix_tree_for_each_tagged - iterate over tagged slots * * @slot: the void** variable for pointer to slot * @root: the struct radix_tree_root pointer * @iter: the struct radix_tree_iter pointer * @start: iteration starting index * @tag: tag index * * @slot points to radix tree slot, @iter->index contains its index. */ #define radix_tree_for_each_tagged(slot, root, iter, start, tag) \ for (slot = radix_tree_iter_init(iter, start) ; \ slot || (slot = radix_tree_next_chunk(root, iter, \ RADIX_TREE_ITER_TAGGED | tag)) ; \ slot = radix_tree_next_slot(slot, iter, \ RADIX_TREE_ITER_TAGGED | tag)) #endif /* _LINUX_RADIX_TREE_H */
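/*
 * Hypothetical usage sketch -- not part of this header; the tree, lock and
 * function names are made up.  It only uses declarations from above
 * (RADIX_TREE(), radix_tree_preload(), radix_tree_insert(),
 * radix_tree_for_each_slot(), radix_tree_deref_slot_protected()) and follows
 * the synchronization rules documented above: modifications are serialized by
 * a spinlock, and preload nodes are allocated before taking it.
 */
static RADIX_TREE(my_tree, GFP_ATOMIC);
static DEFINE_SPINLOCK(my_tree_lock);

static int my_tree_store(unsigned long index, void *item)
{
	int err;

	err = radix_tree_preload(GFP_KERNEL);	/* preallocate nodes, may sleep */
	if (err)
		return err;
	spin_lock(&my_tree_lock);
	err = radix_tree_insert(&my_tree, index, item);
	spin_unlock(&my_tree_lock);
	radix_tree_preload_end();
	return err;
}

static void my_tree_show_all(void)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	spin_lock(&my_tree_lock);
	radix_tree_for_each_slot(slot, &my_tree, &iter, 0)
		pr_info("index %lu -> %p\n", iter.index,
			radix_tree_deref_slot_protected(slot, &my_tree_lock));
	spin_unlock(&my_tree_lock);
}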
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/string.h>

#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>

#include "udl_drv.h"
#include "udl_edid.h"

static int udl_read_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
{
	struct udl_device *udl = data;
	struct drm_device *dev = &udl->drm;
	struct usb_device *udev = udl_to_usb_device(udl);
	u8 *read_buff;
	int idx, ret;
	size_t i;

	read_buff = kmalloc(2, GFP_KERNEL);
	if (!read_buff)
		return -ENOMEM;

	if (!drm_dev_enter(dev, &idx)) {
		ret = -ENODEV;
		goto err_kfree;
	}

	for (i = 0; i < len; i++) {
		int bval = (i + block * EDID_LENGTH) << 8;

		ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				      0x02, (0x80 | (0x02 << 5)), bval,
				      0xA1, read_buff, 2, USB_CTRL_GET_TIMEOUT);
		if (ret < 0) {
			drm_err(dev, "Read EDID byte %zu failed err %x\n", i, ret);
			goto err_drm_dev_exit;
		} else if (ret < 1) {
			ret = -EIO;
			drm_err(dev, "Read EDID byte %zu failed\n", i);
			goto err_drm_dev_exit;
		}

		buf[i] = read_buff[1];
	}

	drm_dev_exit(idx);
	kfree(read_buff);

	return 0;

err_drm_dev_exit:
	drm_dev_exit(idx);
err_kfree:
	kfree(read_buff);
	return ret;
}

bool udl_probe_edid(struct udl_device *udl)
{
	u8 hdr[8];
	int ret;

	ret = udl_read_edid_block(udl, hdr, 0, sizeof(hdr));
	if (ret)
		return false;

	/*
	 * The adapter sends all-zeros if no monitor has been
	 * connected. We consider anything else a connection.
	 */
	return !mem_is_zero(hdr, sizeof(hdr));
}

const struct drm_edid *udl_edid_read(struct drm_connector *connector)
{
	struct udl_device *udl = to_udl(connector->dev);

	return drm_edid_read_custom(connector, udl_read_edid_block, udl);
}
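/*
 * Hypothetical illustration -- not part of udl_edid.c; the struct and
 * function names are made up.  It shows the drm_edid_read_custom() callback
 * contract used above: copy "len" bytes of 128-byte EDID block "block" into
 * "buf" and return 0, or a negative errno.  Here the EDID is assumed to sit
 * in a caller-provided buffer instead of being fetched over USB.  A driver
 * would pass this callback and its context to drm_edid_read_custom() in the
 * same way udl_edid_read() does above.
 */
struct cached_edid {
	const u8 *blob;
	size_t size;
};

static int cached_read_edid_block(void *data, u8 *buf, unsigned int block,
				  size_t len)
{
	struct cached_edid *edid = data;
	size_t offset = (size_t)block * EDID_LENGTH;

	if (offset + len > edid->size)
		return -EINVAL;
	memcpy(buf, edid->blob + offset, len);
	return 0;
}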
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/open.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/string.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fsnotify.h>
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/namei.h>
#include <linux/backing-dev.h>
#include <linux/capability.h>
#include <linux/securebits.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/rcupdate.h>
#include <linux/audit.h>
#include <linux/falloc.h>
#include <linux/fs_struct.h>
#include <linux/dnotify.h>
#include <linux/compat.h>
#include <linux/mnt_idmapping.h>
#include <linux/filelock.h>

#include "internal.h"

int do_truncate(struct mnt_idmap *idmap, struct dentry *dentry,
		loff_t length, unsigned int time_attrs, struct file *filp)
{
	int ret;
	struct iattr newattrs;

	/* Not pretty: "inode->i_size" shouldn't really be signed. But it is. */
	if (length < 0)
		return -EINVAL;

	newattrs.ia_size = length;
	newattrs.ia_valid = ATTR_SIZE | time_attrs;
	if (filp) {
		newattrs.ia_file = filp;
		newattrs.ia_valid |= ATTR_FILE;
	}

	/* Remove suid, sgid, and file capabilities on truncate too */
	ret = dentry_needs_remove_privs(idmap, dentry);
	if (ret < 0)
		return ret;
	if (ret)
		newattrs.ia_valid |= ret | ATTR_FORCE;

	ret = inode_lock_killable(dentry->d_inode);
	if (ret)
		return ret;

	/* Note any delegations or leases have already been broken: */
	ret = notify_change(idmap, dentry, &newattrs, NULL);

	inode_unlock(dentry->d_inode);
	return ret;
}

int vfs_truncate(const struct path *path, loff_t length)
{
	struct mnt_idmap *idmap;
	struct inode *inode;
	int error;

	inode = path->dentry->d_inode;

	/* For directories it's -EISDIR, for other non-regulars - -EINVAL */
	if (S_ISDIR(inode->i_mode))
		return -EISDIR;
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	idmap = mnt_idmap(path->mnt);
	error = inode_permission(idmap, inode, MAY_WRITE);
	if (error)
		return error;

	error = fsnotify_truncate_perm(path, length);
	if (error)
		return error;

	error = mnt_want_write(path->mnt);
	if (error)
		return error;

	error = -EPERM;
	if (IS_APPEND(inode))
		goto mnt_drop_write_and_out;

	error = get_write_access(inode);
	if (error)
		goto mnt_drop_write_and_out;

	/*
	 * Make sure that there are no leases.  get_write_access() protects
	 * against the truncate racing with a lease-granting setlease().
*/ error = break_lease(inode, O_WRONLY); if (error) goto put_write_and_out; error = security_path_truncate(path); if (!error) error = do_truncate(idmap, path->dentry, length, 0, NULL); put_write_and_out: put_write_access(inode); mnt_drop_write_and_out: mnt_drop_write(path->mnt); return error; } EXPORT_SYMBOL_GPL(vfs_truncate); int do_sys_truncate(const char __user *pathname, loff_t length) { unsigned int lookup_flags = LOOKUP_FOLLOW; struct path path; int error; if (length < 0) /* sorry, but loff_t says... */ return -EINVAL; retry: error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path); if (!error) { error = vfs_truncate(&path, length); path_put(&path); } if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } return error; } SYSCALL_DEFINE2(truncate, const char __user *, path, long, length) { return do_sys_truncate(path, length); } #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE2(truncate, const char __user *, path, compat_off_t, length) { return do_sys_truncate(path, length); } #endif int do_ftruncate(struct file *file, loff_t length, int small) { struct inode *inode; struct dentry *dentry; int error; /* explicitly opened as large or we are on 64-bit box */ if (file->f_flags & O_LARGEFILE) small = 0; dentry = file->f_path.dentry; inode = dentry->d_inode; if (!S_ISREG(inode->i_mode) || !(file->f_mode & FMODE_WRITE)) return -EINVAL; /* Cannot ftruncate over 2^31 bytes without large file support */ if (small && length > MAX_NON_LFS) return -EINVAL; /* Check IS_APPEND on real upper inode */ if (IS_APPEND(file_inode(file))) return -EPERM; error = security_file_truncate(file); if (error) return error; error = fsnotify_truncate_perm(&file->f_path, length); if (error) return error; scoped_guard(super_write, inode->i_sb) return do_truncate(file_mnt_idmap(file), dentry, length, ATTR_MTIME | ATTR_CTIME, file); } int do_sys_ftruncate(unsigned int fd, loff_t length, int small) { if (length < 0) return -EINVAL; CLASS(fd, f)(fd); if (fd_empty(f)) return -EBADF; return do_ftruncate(fd_file(f), length, small); } SYSCALL_DEFINE2(ftruncate, unsigned int, fd, off_t, length) { return do_sys_ftruncate(fd, length, 1); } #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE2(ftruncate, unsigned int, fd, compat_off_t, length) { return do_sys_ftruncate(fd, length, 1); } #endif /* LFS versions of truncate are only needed on 32 bit machines */ #if BITS_PER_LONG == 32 SYSCALL_DEFINE2(truncate64, const char __user *, path, loff_t, length) { return do_sys_truncate(path, length); } SYSCALL_DEFINE2(ftruncate64, unsigned int, fd, loff_t, length) { return do_sys_ftruncate(fd, length, 0); } #endif /* BITS_PER_LONG == 32 */ #if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_TRUNCATE64) COMPAT_SYSCALL_DEFINE3(truncate64, const char __user *, pathname, compat_arg_u64_dual(length)) { return ksys_truncate(pathname, compat_arg_u64_glue(length)); } #endif #if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_FTRUNCATE64) COMPAT_SYSCALL_DEFINE3(ftruncate64, unsigned int, fd, compat_arg_u64_dual(length)) { return ksys_ftruncate(fd, compat_arg_u64_glue(length)); } #endif int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len) { struct inode *inode = file_inode(file); int ret; loff_t sum; if (offset < 0 || len <= 0) return -EINVAL; if (mode & ~(FALLOC_FL_MODE_MASK | FALLOC_FL_KEEP_SIZE)) return -EOPNOTSUPP; /* * Modes are exclusive, even if that is not obvious from the encoding * as bit masks and the mix with the flag in the same namespace. 
* * To make things even more complicated, FALLOC_FL_ALLOCATE_RANGE is * encoded as no bit set. */ switch (mode & FALLOC_FL_MODE_MASK) { case FALLOC_FL_ALLOCATE_RANGE: case FALLOC_FL_UNSHARE_RANGE: case FALLOC_FL_ZERO_RANGE: break; case FALLOC_FL_PUNCH_HOLE: if (!(mode & FALLOC_FL_KEEP_SIZE)) return -EOPNOTSUPP; break; case FALLOC_FL_COLLAPSE_RANGE: case FALLOC_FL_INSERT_RANGE: case FALLOC_FL_WRITE_ZEROES: if (mode & FALLOC_FL_KEEP_SIZE) return -EOPNOTSUPP; break; default: return -EOPNOTSUPP; } if (!(file->f_mode & FMODE_WRITE)) return -EBADF; /* * On append-only files only space preallocation is supported. */ if ((mode & ~FALLOC_FL_KEEP_SIZE) && IS_APPEND(inode)) return -EPERM; if (IS_IMMUTABLE(inode)) return -EPERM; /* * We cannot allow any fallocate operation on an active swapfile */ if (IS_SWAPFILE(inode)) return -ETXTBSY; /* * Revalidate the write permissions, in case security policy has * changed since the files were opened. */ ret = security_file_permission(file, MAY_WRITE); if (ret) return ret; ret = fsnotify_file_area_perm(file, MAY_WRITE, &offset, len); if (ret) return ret; if (S_ISFIFO(inode->i_mode)) return -ESPIPE; if (S_ISDIR(inode->i_mode)) return -EISDIR; if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) return -ENODEV; /* Check for wraparound */ if (check_add_overflow(offset, len, &sum)) return -EFBIG; if (sum > inode->i_sb->s_maxbytes) return -EFBIG; if (!file->f_op->fallocate) return -EOPNOTSUPP; file_start_write(file); ret = file->f_op->fallocate(file, mode, offset, len); /* * Create inotify and fanotify events. * * To keep the logic simple always create events if fallocate succeeds. * This implies that events are even created if the file size remains * unchanged, e.g. when using flag FALLOC_FL_KEEP_SIZE. */ if (ret == 0) fsnotify_modify(file); file_end_write(file); return ret; } EXPORT_SYMBOL_GPL(vfs_fallocate); int ksys_fallocate(int fd, int mode, loff_t offset, loff_t len) { CLASS(fd, f)(fd); if (fd_empty(f)) return -EBADF; return vfs_fallocate(fd_file(f), mode, offset, len); } SYSCALL_DEFINE4(fallocate, int, fd, int, mode, loff_t, offset, loff_t, len) { return ksys_fallocate(fd, mode, offset, len); } #if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_FALLOCATE) COMPAT_SYSCALL_DEFINE6(fallocate, int, fd, int, mode, compat_arg_u64_dual(offset), compat_arg_u64_dual(len)) { return ksys_fallocate(fd, mode, compat_arg_u64_glue(offset), compat_arg_u64_glue(len)); } #endif /* * access() needs to use the real uid/gid, not the effective uid/gid. * We do this by temporarily clearing all FS-related capabilities and * switching the fsuid/fsgid around to the real ones. * * Creating new credentials is expensive, so we try to skip doing it, * which we can if the result would match what we already got. */ static bool access_need_override_creds(int flags) { const struct cred *cred; if (flags & AT_EACCESS) return false; cred = current_cred(); if (!uid_eq(cred->fsuid, cred->uid) || !gid_eq(cred->fsgid, cred->gid)) return true; if (!issecure(SECURE_NO_SETUID_FIXUP)) { kuid_t root_uid = make_kuid(cred->user_ns, 0); if (!uid_eq(cred->uid, root_uid)) { if (!cap_isclear(cred->cap_effective)) return true; } else { if (!cap_isidentical(cred->cap_effective, cred->cap_permitted)) return true; } } return false; } static const struct cred *access_override_creds(void) { struct cred *override_cred; override_cred = prepare_creds(); if (!override_cred) return NULL; /* * XXX access_need_override_creds performs checks in hopes of skipping * this work. 
Make sure it stays in sync if making any changes in this * routine. */ override_cred->fsuid = override_cred->uid; override_cred->fsgid = override_cred->gid; if (!issecure(SECURE_NO_SETUID_FIXUP)) { /* Clear the capabilities if we switch to a non-root user */ kuid_t root_uid = make_kuid(override_cred->user_ns, 0); if (!uid_eq(override_cred->uid, root_uid)) cap_clear(override_cred->cap_effective); else override_cred->cap_effective = override_cred->cap_permitted; } /* * The new set of credentials can *only* be used in * task-synchronous circumstances, and does not need * RCU freeing, unless somebody then takes a separate * reference to it. * * NOTE! This is _only_ true because this credential * is used purely for override_creds() that installs * it as the subjective cred. Other threads will be * accessing ->real_cred, not the subjective cred. * * If somebody _does_ make a copy of this (using the * 'get_current_cred()' function), that will clear the * non_rcu field, because now that other user may be * expecting RCU freeing. But normal thread-synchronous * cred accesses will keep things non-racy to avoid RCU * freeing. */ override_cred->non_rcu = 1; return override_creds(override_cred); } static int do_faccessat(int dfd, const char __user *filename, int mode, int flags) { struct path path; struct inode *inode; int res; unsigned int lookup_flags = LOOKUP_FOLLOW; const struct cred *old_cred = NULL; if (mode & ~S_IRWXO) /* where's F_OK, X_OK, W_OK, R_OK? */ return -EINVAL; if (flags & ~(AT_EACCESS | AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) return -EINVAL; if (flags & AT_SYMLINK_NOFOLLOW) lookup_flags &= ~LOOKUP_FOLLOW; if (flags & AT_EMPTY_PATH) lookup_flags |= LOOKUP_EMPTY; if (access_need_override_creds(flags)) { old_cred = access_override_creds(); if (!old_cred) return -ENOMEM; } retry: res = user_path_at(dfd, filename, lookup_flags, &path); if (res) goto out; inode = d_backing_inode(path.dentry); if ((mode & MAY_EXEC) && S_ISREG(inode->i_mode)) { /* * MAY_EXEC on regular files is denied if the fs is mounted * with the "noexec" flag. */ res = -EACCES; if (path_noexec(&path)) goto out_path_release; } res = inode_permission(mnt_idmap(path.mnt), inode, mode | MAY_ACCESS); /* SuS v2 requires we report a read only fs too */ if (res || !(mode & S_IWOTH) || special_file(inode->i_mode)) goto out_path_release; /* * This is a rare case where using __mnt_is_readonly() * is OK without a mnt_want/drop_write() pair. Since * no actual write to the fs is performed here, we do * not need to telegraph to that to anyone. * * By doing this, we accept that this access is * inherently racy and know that the fs may change * state before we even see this result. 
*/ if (__mnt_is_readonly(path.mnt)) res = -EROFS; out_path_release: path_put(&path); if (retry_estale(res, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } out: if (old_cred) put_cred(revert_creds(old_cred)); return res; } SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode) { return do_faccessat(dfd, filename, mode, 0); } SYSCALL_DEFINE4(faccessat2, int, dfd, const char __user *, filename, int, mode, int, flags) { return do_faccessat(dfd, filename, mode, flags); } SYSCALL_DEFINE2(access, const char __user *, filename, int, mode) { return do_faccessat(AT_FDCWD, filename, mode, 0); } SYSCALL_DEFINE1(chdir, const char __user *, filename) { struct path path; int error; unsigned int lookup_flags = LOOKUP_FOLLOW | LOOKUP_DIRECTORY; retry: error = user_path_at(AT_FDCWD, filename, lookup_flags, &path); if (error) goto out; error = path_permission(&path, MAY_EXEC | MAY_CHDIR); if (error) goto dput_and_out; set_fs_pwd(current->fs, &path); dput_and_out: path_put(&path); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } out: return error; } SYSCALL_DEFINE1(fchdir, unsigned int, fd) { CLASS(fd_raw, f)(fd); int error; if (fd_empty(f)) return -EBADF; if (!d_can_lookup(fd_file(f)->f_path.dentry)) return -ENOTDIR; error = file_permission(fd_file(f), MAY_EXEC | MAY_CHDIR); if (!error) set_fs_pwd(current->fs, &fd_file(f)->f_path); return error; } SYSCALL_DEFINE1(chroot, const char __user *, filename) { struct path path; int error; unsigned int lookup_flags = LOOKUP_FOLLOW | LOOKUP_DIRECTORY; retry: error = user_path_at(AT_FDCWD, filename, lookup_flags, &path); if (error) goto out; error = path_permission(&path, MAY_EXEC | MAY_CHDIR); if (error) goto dput_and_out; error = -EPERM; if (!ns_capable(current_user_ns(), CAP_SYS_CHROOT)) goto dput_and_out; error = security_path_chroot(&path); if (error) goto dput_and_out; set_fs_root(current->fs, &path); error = 0; dput_and_out: path_put(&path); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } out: return error; } int chmod_common(const struct path *path, umode_t mode) { struct inode *inode = path->dentry->d_inode; struct delegated_inode delegated_inode = { }; struct iattr newattrs; int error; error = mnt_want_write(path->mnt); if (error) return error; retry_deleg: error = inode_lock_killable(inode); if (error) goto out_mnt_unlock; error = security_path_chmod(path, mode); if (error) goto out_unlock; newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO); newattrs.ia_valid = ATTR_MODE | ATTR_CTIME; error = notify_change(mnt_idmap(path->mnt), path->dentry, &newattrs, &delegated_inode); out_unlock: inode_unlock(inode); if (is_delegated(&delegated_inode)) { error = break_deleg_wait(&delegated_inode); if (!error) goto retry_deleg; } out_mnt_unlock: mnt_drop_write(path->mnt); return error; } int vfs_fchmod(struct file *file, umode_t mode) { audit_file(file); return chmod_common(&file->f_path, mode); } SYSCALL_DEFINE2(fchmod, unsigned int, fd, umode_t, mode) { CLASS(fd, f)(fd); if (fd_empty(f)) return -EBADF; return vfs_fchmod(fd_file(f), mode); } static int do_fchmodat(int dfd, const char __user *filename, umode_t mode, unsigned int flags) { struct path path; int error; unsigned int lookup_flags; if (unlikely(flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH))) return -EINVAL; lookup_flags = (flags & AT_SYMLINK_NOFOLLOW) ? 
0 : LOOKUP_FOLLOW; if (flags & AT_EMPTY_PATH) lookup_flags |= LOOKUP_EMPTY; retry: error = user_path_at(dfd, filename, lookup_flags, &path); if (!error) { error = chmod_common(&path, mode); path_put(&path); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } } return error; } SYSCALL_DEFINE4(fchmodat2, int, dfd, const char __user *, filename, umode_t, mode, unsigned int, flags) { return do_fchmodat(dfd, filename, mode, flags); } SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, umode_t, mode) { return do_fchmodat(dfd, filename, mode, 0); } SYSCALL_DEFINE2(chmod, const char __user *, filename, umode_t, mode) { return do_fchmodat(AT_FDCWD, filename, mode, 0); } /* * Check whether @kuid is valid and if so generate and set vfsuid_t in * ia_vfsuid. * * Return: true if @kuid is valid, false if not. */ static inline bool setattr_vfsuid(struct iattr *attr, kuid_t kuid) { if (!uid_valid(kuid)) return false; attr->ia_valid |= ATTR_UID; attr->ia_vfsuid = VFSUIDT_INIT(kuid); return true; } /* * Check whether @kgid is valid and if so generate and set vfsgid_t in * ia_vfsgid. * * Return: true if @kgid is valid, false if not. */ static inline bool setattr_vfsgid(struct iattr *attr, kgid_t kgid) { if (!gid_valid(kgid)) return false; attr->ia_valid |= ATTR_GID; attr->ia_vfsgid = VFSGIDT_INIT(kgid); return true; } int chown_common(const struct path *path, uid_t user, gid_t group) { struct mnt_idmap *idmap; struct user_namespace *fs_userns; struct inode *inode = path->dentry->d_inode; struct delegated_inode delegated_inode = { }; int error; struct iattr newattrs; kuid_t uid; kgid_t gid; uid = make_kuid(current_user_ns(), user); gid = make_kgid(current_user_ns(), group); idmap = mnt_idmap(path->mnt); fs_userns = i_user_ns(inode); retry_deleg: newattrs.ia_vfsuid = INVALID_VFSUID; newattrs.ia_vfsgid = INVALID_VFSGID; newattrs.ia_valid = ATTR_CTIME; if ((user != (uid_t)-1) && !setattr_vfsuid(&newattrs, uid)) return -EINVAL; if ((group != (gid_t)-1) && !setattr_vfsgid(&newattrs, gid)) return -EINVAL; error = inode_lock_killable(inode); if (error) return error; if (!S_ISDIR(inode->i_mode)) newattrs.ia_valid |= ATTR_KILL_SUID | ATTR_KILL_PRIV | setattr_should_drop_sgid(idmap, inode); /* Continue to send actual fs values, not the mount values. */ error = security_path_chown( path, from_vfsuid(idmap, fs_userns, newattrs.ia_vfsuid), from_vfsgid(idmap, fs_userns, newattrs.ia_vfsgid)); if (!error) error = notify_change(idmap, path->dentry, &newattrs, &delegated_inode); inode_unlock(inode); if (is_delegated(&delegated_inode)) { error = break_deleg_wait(&delegated_inode); if (!error) goto retry_deleg; } return error; } int do_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group, int flag) { struct path path; int error = -EINVAL; int lookup_flags; if ((flag & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0) goto out; lookup_flags = (flag & AT_SYMLINK_NOFOLLOW) ? 
0 : LOOKUP_FOLLOW; if (flag & AT_EMPTY_PATH) lookup_flags |= LOOKUP_EMPTY; retry: error = user_path_at(dfd, filename, lookup_flags, &path); if (error) goto out; error = mnt_want_write(path.mnt); if (error) goto out_release; error = chown_common(&path, user, group); mnt_drop_write(path.mnt); out_release: path_put(&path); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } out: return error; } SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user, gid_t, group, int, flag) { return do_fchownat(dfd, filename, user, group, flag); } SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group) { return do_fchownat(AT_FDCWD, filename, user, group, 0); } SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group) { return do_fchownat(AT_FDCWD, filename, user, group, AT_SYMLINK_NOFOLLOW); } int vfs_fchown(struct file *file, uid_t user, gid_t group) { int error; error = mnt_want_write_file(file); if (error) return error; audit_file(file); error = chown_common(&file->f_path, user, group); mnt_drop_write_file(file); return error; } int ksys_fchown(unsigned int fd, uid_t user, gid_t group) { CLASS(fd, f)(fd); if (fd_empty(f)) return -EBADF; return vfs_fchown(fd_file(f), user, group); } SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group) { return ksys_fchown(fd, user, group); } static inline int file_get_write_access(struct file *f) { int error; error = get_write_access(f->f_inode); if (unlikely(error)) return error; error = mnt_get_write_access(f->f_path.mnt); if (unlikely(error)) goto cleanup_inode; if (unlikely(f->f_mode & FMODE_BACKING)) { error = mnt_get_write_access(backing_file_user_path(f)->mnt); if (unlikely(error)) goto cleanup_mnt; } return 0; cleanup_mnt: mnt_put_write_access(f->f_path.mnt); cleanup_inode: put_write_access(f->f_inode); return error; } static int do_dentry_open(struct file *f, int (*open)(struct inode *, struct file *)) { static const struct file_operations empty_fops = {}; struct inode *inode = f->f_path.dentry->d_inode; int error; path_get(&f->f_path); f->f_inode = inode; f->f_mapping = inode->i_mapping; f->f_wb_err = filemap_sample_wb_err(f->f_mapping); f->f_sb_err = file_sample_sb_err(f); if (unlikely(f->f_flags & O_PATH)) { f->f_mode = FMODE_PATH | FMODE_OPENED; file_set_fsnotify_mode(f, FMODE_NONOTIFY); f->f_op = &empty_fops; return 0; } if ((f->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) { i_readcount_inc(inode); } else if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) { error = file_get_write_access(f); if (unlikely(error)) goto cleanup_file; f->f_mode |= FMODE_WRITER; } /* POSIX.1-2008/SUSv4 Section XSI 2.9.7 */ if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)) f->f_mode |= FMODE_ATOMIC_POS; f->f_op = fops_get(inode->i_fop); if (WARN_ON(!f->f_op)) { error = -ENODEV; goto cleanup_all; } error = security_file_open(f); if (unlikely(error)) goto cleanup_all; /* * Call fsnotify open permission hook and set FMODE_NONOTIFY_* bits * according to existing permission watches. * If FMODE_NONOTIFY mode was already set for an fanotify fd or for a * pseudo file, this call will not change the mode. 
*/ error = fsnotify_open_perm_and_set_mode(f); if (unlikely(error)) goto cleanup_all; error = break_lease(file_inode(f), f->f_flags); if (unlikely(error)) goto cleanup_all; /* normally all 3 are set; ->open() can clear them if needed */ f->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE; if (!open) open = f->f_op->open; if (open) { error = open(inode, f); if (error) goto cleanup_all; } f->f_mode |= FMODE_OPENED; if ((f->f_mode & FMODE_READ) && likely(f->f_op->read || f->f_op->read_iter)) f->f_mode |= FMODE_CAN_READ; if ((f->f_mode & FMODE_WRITE) && likely(f->f_op->write || f->f_op->write_iter)) f->f_mode |= FMODE_CAN_WRITE; if ((f->f_mode & FMODE_LSEEK) && !f->f_op->llseek) f->f_mode &= ~FMODE_LSEEK; if (f->f_mapping->a_ops && f->f_mapping->a_ops->direct_IO) f->f_mode |= FMODE_CAN_ODIRECT; f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC); f->f_iocb_flags = iocb_flags(f); file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping); if ((f->f_flags & O_DIRECT) && !(f->f_mode & FMODE_CAN_ODIRECT)) return -EINVAL; /* * XXX: Huge page cache doesn't support writing yet. Drop all page * cache for this file before processing writes. */ if (f->f_mode & FMODE_WRITE) { /* * Depends on full fence from get_write_access() to synchronize * against collapse_file() regarding i_writecount and nr_thps * updates. Ensures subsequent insertion of THPs into the page * cache will fail. */ if (filemap_nr_thps(inode->i_mapping)) { struct address_space *mapping = inode->i_mapping; filemap_invalidate_lock(inode->i_mapping); /* * unmap_mapping_range just need to be called once * here, because the private pages is not need to be * unmapped mapping (e.g. data segment of dynamic * shared libraries here). */ unmap_mapping_range(mapping, 0, 0, 0); truncate_inode_pages(mapping, 0); filemap_invalidate_unlock(inode->i_mapping); } } return 0; cleanup_all: if (WARN_ON_ONCE(error > 0)) error = -EINVAL; fops_put(f->f_op); put_file_access(f); cleanup_file: path_put(&f->f_path); f->__f_path.mnt = NULL; f->__f_path.dentry = NULL; f->f_inode = NULL; return error; } /** * finish_open - finish opening a file * @file: file pointer * @dentry: pointer to dentry * @open: open callback * * This can be used to finish opening a file passed to i_op->atomic_open(). * * If the open callback is set to NULL, then the standard f_op->open() * filesystem callback is substituted. * * NB: the dentry reference is _not_ consumed. If, for example, the dentry is * the return value of d_splice_alias(), then the caller needs to perform dput() * on it after finish_open(). * * Returns zero on success or -errno if the open failed. */ int finish_open(struct file *file, struct dentry *dentry, int (*open)(struct inode *, struct file *)) { BUG_ON(file->f_mode & FMODE_OPENED); /* once it's opened, it's opened */ file->__f_path.dentry = dentry; return do_dentry_open(file, open); } EXPORT_SYMBOL(finish_open); /** * finish_no_open - finish ->atomic_open() without opening the file * * @file: file pointer * @dentry: dentry, ERR_PTR(-E...) or NULL (as returned from ->lookup()) * * This can be used to set the result of a lookup in ->atomic_open(). * * NB: unlike finish_open() this function does consume the dentry reference and * the caller need not dput() it. * * Returns 0 or -E..., which must be the return value of ->atomic_open() after * having called this function. 
*/ int finish_no_open(struct file *file, struct dentry *dentry) { if (IS_ERR(dentry)) return PTR_ERR(dentry); file->__f_path.dentry = dentry; return 0; } EXPORT_SYMBOL(finish_no_open); char *file_path(struct file *filp, char *buf, int buflen) { return d_path(&filp->f_path, buf, buflen); } EXPORT_SYMBOL(file_path); /** * vfs_open - open the file at the given path * @path: path to open * @file: newly allocated file with f_flag initialized */ int vfs_open(const struct path *path, struct file *file) { int ret; file->__f_path = *path; ret = do_dentry_open(file, NULL); if (!ret) { /* * Once we return a file with FMODE_OPENED, __fput() will call * fsnotify_close(), so we need fsnotify_open() here for * symmetry. */ fsnotify_open(file); } return ret; } struct file *dentry_open(const struct path *path, int flags, const struct cred *cred) { int error; struct file *f; /* We must always pass in a valid mount pointer. */ BUG_ON(!path->mnt); f = alloc_empty_file(flags, cred); if (!IS_ERR(f)) { error = vfs_open(path, f); if (error) { fput(f); f = ERR_PTR(error); } } return f; } EXPORT_SYMBOL(dentry_open); struct file *dentry_open_nonotify(const struct path *path, int flags, const struct cred *cred) { struct file *f = alloc_empty_file(flags, cred); if (!IS_ERR(f)) { int error; file_set_fsnotify_mode(f, FMODE_NONOTIFY); error = vfs_open(path, f); if (error) { fput(f); f = ERR_PTR(error); } } return f; } /** * dentry_create - Create and open a file * @path: path to create * @flags: O_ flags * @mode: mode bits for new file * @cred: credentials to use * * Caller must hold the parent directory's lock, and have prepared * a negative dentry, placed in @path->dentry, for the new file. * * Caller sets @path->mnt to the vfsmount of the filesystem where * the new file is to be created. The parent directory and the * negative dentry must reside on the same filesystem instance. * * On success, returns a "struct file *". Otherwise a ERR_PTR * is returned. */ struct file *dentry_create(const struct path *path, int flags, umode_t mode, const struct cred *cred) { struct file *f; int error; f = alloc_empty_file(flags, cred); if (IS_ERR(f)) return f; error = vfs_create(mnt_idmap(path->mnt), path->dentry, mode, NULL); if (!error) error = vfs_open(path, f); if (unlikely(error)) { fput(f); return ERR_PTR(error); } return f; } EXPORT_SYMBOL(dentry_create); /** * kernel_file_open - open a file for kernel internal use * @path: path of the file to open * @flags: open flags * @cred: credentials for open * * Open a file for use by in-kernel consumers. The file is not accounted * against nr_files and must not be installed into the file descriptor * table. * * Return: Opened file on success, an error pointer on failure. */ struct file *kernel_file_open(const struct path *path, int flags, const struct cred *cred) { struct file *f; int error; f = alloc_empty_file_noaccount(flags, cred); if (IS_ERR(f)) return f; error = vfs_open(path, f); if (error) { fput(f); return ERR_PTR(error); } return f; } EXPORT_SYMBOL_GPL(kernel_file_open); #define WILL_CREATE(flags) (flags & (O_CREAT | __O_TMPFILE)) #define O_PATH_FLAGS (O_DIRECTORY | O_NOFOLLOW | O_PATH | O_CLOEXEC) inline struct open_how build_open_how(int flags, umode_t mode) { struct open_how how = { .flags = flags & VALID_OPEN_FLAGS, .mode = mode & S_IALLUGO, }; /* O_PATH beats everything else. */ if (how.flags & O_PATH) how.flags &= O_PATH_FLAGS; /* Modes should only be set for create-like flags. 
*/ if (!WILL_CREATE(how.flags)) how.mode = 0; return how; } inline int build_open_flags(const struct open_how *how, struct open_flags *op) { u64 flags = how->flags; u64 strip = O_CLOEXEC; int lookup_flags = 0; int acc_mode = ACC_MODE(flags); BUILD_BUG_ON_MSG(upper_32_bits(VALID_OPEN_FLAGS), "struct open_flags doesn't yet handle flags > 32 bits"); /* * Strip flags that aren't relevant in determining struct open_flags. */ flags &= ~strip; /* * Older syscalls implicitly clear all of the invalid flags or argument * values before calling build_open_flags(), but openat2(2) checks all * of its arguments. */ if (flags & ~VALID_OPEN_FLAGS) return -EINVAL; if (how->resolve & ~VALID_RESOLVE_FLAGS) return -EINVAL; /* Scoping flags are mutually exclusive. */ if ((how->resolve & RESOLVE_BENEATH) && (how->resolve & RESOLVE_IN_ROOT)) return -EINVAL; /* Deal with the mode. */ if (WILL_CREATE(flags)) { if (how->mode & ~S_IALLUGO) return -EINVAL; op->mode = how->mode | S_IFREG; } else { if (how->mode != 0) return -EINVAL; op->mode = 0; } /* * Block bugs where O_DIRECTORY | O_CREAT created regular files. * Note that blocking O_DIRECTORY | O_CREAT here also protects * O_TMPFILE below which requires O_DIRECTORY being raised. */ if ((flags & (O_DIRECTORY | O_CREAT)) == (O_DIRECTORY | O_CREAT)) return -EINVAL; /* Now handle the creative implementation of O_TMPFILE. */ if (flags & __O_TMPFILE) { /* * In order to ensure programs get explicit errors when trying * to use O_TMPFILE on old kernels we enforce that O_DIRECTORY * is raised alongside __O_TMPFILE. */ if (!(flags & O_DIRECTORY)) return -EINVAL; if (!(acc_mode & MAY_WRITE)) return -EINVAL; } if (flags & O_PATH) { /* O_PATH only permits certain other flags to be set. */ if (flags & ~O_PATH_FLAGS) return -EINVAL; acc_mode = 0; } /* * O_SYNC is implemented as __O_SYNC|O_DSYNC. As many places only * check for O_DSYNC when they need any syncing at all, we enforce that * it is always set instead of having to deal with possibly weird * behaviour for malicious applications setting only __O_SYNC. */ if (flags & __O_SYNC) flags |= O_DSYNC; op->open_flag = flags; /* O_TRUNC implies we need access checks for write permissions */ if (flags & O_TRUNC) acc_mode |= MAY_WRITE; /* Allow the LSM permission hook to distinguish append access from general write access. */ if (flags & O_APPEND) acc_mode |= MAY_APPEND; op->acc_mode = acc_mode; op->intent = flags & O_PATH ?
0 : LOOKUP_OPEN; if (flags & O_CREAT) { op->intent |= LOOKUP_CREATE; if (flags & O_EXCL) { op->intent |= LOOKUP_EXCL; flags |= O_NOFOLLOW; } } if (flags & O_DIRECTORY) lookup_flags |= LOOKUP_DIRECTORY; if (!(flags & O_NOFOLLOW)) lookup_flags |= LOOKUP_FOLLOW; if (how->resolve & RESOLVE_NO_XDEV) lookup_flags |= LOOKUP_NO_XDEV; if (how->resolve & RESOLVE_NO_MAGICLINKS) lookup_flags |= LOOKUP_NO_MAGICLINKS; if (how->resolve & RESOLVE_NO_SYMLINKS) lookup_flags |= LOOKUP_NO_SYMLINKS; if (how->resolve & RESOLVE_BENEATH) lookup_flags |= LOOKUP_BENEATH; if (how->resolve & RESOLVE_IN_ROOT) lookup_flags |= LOOKUP_IN_ROOT; if (how->resolve & RESOLVE_CACHED) { /* Don't bother even trying for create/truncate/tmpfile open */ if (flags & (O_TRUNC | O_CREAT | __O_TMPFILE)) return -EAGAIN; lookup_flags |= LOOKUP_CACHED; } op->lookup_flags = lookup_flags; return 0; } /** * file_open_name - open file and return file pointer * * @name: struct filename containing path to open * @flags: open flags as per the open(2) second argument * @mode: mode for the new file if O_CREAT is set, else ignored * * This is the helper to open a file from kernelspace if you really * have to. But in general you should not do this, so please move * along, nothing to see here.. */ struct file *file_open_name(struct filename *name, int flags, umode_t mode) { struct open_flags op; struct open_how how = build_open_how(flags, mode); int err = build_open_flags(&how, &op); if (err) return ERR_PTR(err); return do_filp_open(AT_FDCWD, name, &op); } /** * filp_open - open file and return file pointer * * @filename: path to open * @flags: open flags as per the open(2) second argument * @mode: mode for the new file if O_CREAT is set, else ignored * * This is the helper to open a file from kernelspace if you really * have to. But in general you should not do this, so please move * along, nothing to see here..
*/ struct file *filp_open(const char *filename, int flags, umode_t mode) { struct filename *name = getname_kernel(filename); struct file *file = ERR_CAST(name); if (!IS_ERR(name)) { file = file_open_name(name, flags, mode); putname(name); } return file; } EXPORT_SYMBOL(filp_open); struct file *file_open_root(const struct path *root, const char *filename, int flags, umode_t mode) { struct open_flags op; struct open_how how = build_open_how(flags, mode); int err = build_open_flags(&how, &op); if (err) return ERR_PTR(err); return do_file_open_root(root, filename, &op); } EXPORT_SYMBOL(file_open_root); static int do_sys_openat2(int dfd, const char __user *filename, struct open_how *how) { struct open_flags op; struct filename *tmp __free(putname) = NULL; int err; err = build_open_flags(how, &op); if (unlikely(err)) return err; tmp = getname(filename); if (IS_ERR(tmp)) return PTR_ERR(tmp); return FD_ADD(how->flags, do_filp_open(dfd, tmp, &op)); } int do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode) { struct open_how how = build_open_how(flags, mode); return do_sys_openat2(dfd, filename, &how); } SYSCALL_DEFINE3(open, const char __user *, filename, int, flags, umode_t, mode) { if (force_o_largefile()) flags |= O_LARGEFILE; return do_sys_open(AT_FDCWD, filename, flags, mode); } SYSCALL_DEFINE4(openat, int, dfd, const char __user *, filename, int, flags, umode_t, mode) { if (force_o_largefile()) flags |= O_LARGEFILE; return do_sys_open(dfd, filename, flags, mode); } SYSCALL_DEFINE4(openat2, int, dfd, const char __user *, filename, struct open_how __user *, how, size_t, usize) { int err; struct open_how tmp; BUILD_BUG_ON(sizeof(struct open_how) < OPEN_HOW_SIZE_VER0); BUILD_BUG_ON(sizeof(struct open_how) != OPEN_HOW_SIZE_LATEST); if (unlikely(usize < OPEN_HOW_SIZE_VER0)) return -EINVAL; if (unlikely(usize > PAGE_SIZE)) return -E2BIG; err = copy_struct_from_user(&tmp, sizeof(tmp), how, usize); if (err) return err; audit_openat2_how(&tmp); /* O_LARGEFILE is only allowed for non-O_PATH. */ if (!(tmp.flags & O_PATH) && force_o_largefile()) tmp.flags |= O_LARGEFILE; return do_sys_openat2(dfd, filename, &tmp); } #ifdef CONFIG_COMPAT /* * Exactly like sys_open(), except that it doesn't set the * O_LARGEFILE flag. */ COMPAT_SYSCALL_DEFINE3(open, const char __user *, filename, int, flags, umode_t, mode) { return do_sys_open(AT_FDCWD, filename, flags, mode); } /* * Exactly like sys_openat(), except that it doesn't set the * O_LARGEFILE flag. */ COMPAT_SYSCALL_DEFINE4(openat, int, dfd, const char __user *, filename, int, flags, umode_t, mode) { return do_sys_open(dfd, filename, flags, mode); } #endif #ifndef __alpha__ /* * For backward compatibility? Maybe this should be moved * into arch/i386 instead? */ SYSCALL_DEFINE2(creat, const char __user *, pathname, umode_t, mode) { int flags = O_CREAT | O_WRONLY | O_TRUNC; if (force_o_largefile()) flags |= O_LARGEFILE; return do_sys_open(AT_FDCWD, pathname, flags, mode); } #endif /* * "id" is the POSIX thread ID. We use the * files pointer for this.. 
*/ static int filp_flush(struct file *filp, fl_owner_t id) { int retval = 0; if (CHECK_DATA_CORRUPTION(file_count(filp) == 0, filp, "VFS: Close: file count is 0 (f_op=%ps)", filp->f_op)) { return 0; } if (filp->f_op->flush) retval = filp->f_op->flush(filp, id); if (likely(!(filp->f_mode & FMODE_PATH))) { dnotify_flush(filp, id); locks_remove_posix(filp, id); } return retval; } int filp_close(struct file *filp, fl_owner_t id) { int retval; retval = filp_flush(filp, id); fput_close(filp); return retval; } EXPORT_SYMBOL(filp_close); /* * Careful here! We test whether the file pointer is NULL before * releasing the fd. This ensures that one clone task can't release * an fd while another clone is opening it. */ SYSCALL_DEFINE1(close, unsigned int, fd) { int retval; struct file *file; file = file_close_fd(fd); if (!file) return -EBADF; retval = filp_flush(file, current->files); /* * We're returning to user space. Don't bother * with any delayed fput() cases. */ fput_close_sync(file); if (likely(retval == 0)) return 0; /* can't restart close syscall because file table entry was cleared */ if (retval == -ERESTARTSYS || retval == -ERESTARTNOINTR || retval == -ERESTARTNOHAND || retval == -ERESTART_RESTARTBLOCK) retval = -EINTR; return retval; } /* * This routine simulates a hangup on the tty, to arrange that users * are given clean terminals at login time. */ SYSCALL_DEFINE0(vhangup) { if (capable(CAP_SYS_TTY_CONFIG)) { tty_vhangup_self(); return 0; } return -EPERM; } /* * Called when an inode is about to be opened. * We use this to disallow opening large files on 32bit systems if * the caller didn't specify O_LARGEFILE. On 64bit systems we force * on this flag in sys_open. */ int generic_file_open(struct inode * inode, struct file * filp) { if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS) return -EOVERFLOW; return 0; } EXPORT_SYMBOL(generic_file_open); /* * This is used by subsystems that don't want seekable * file descriptors. The function is not supposed to ever fail, the only * reason it returns an 'int' and not 'void' is so that it can be plugged * directly into a file_operations structure. */ int nonseekable_open(struct inode *inode, struct file *filp) { filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); return 0; } EXPORT_SYMBOL(nonseekable_open); /* * stream_open is used by subsystems that want stream-like file descriptors. * Such file descriptors are not seekable and don't have a notion of position * (file.f_pos is always 0 and ppos passed to .read()/.write() is always NULL). * Contrary to file descriptors of other regular files, .read() and .write() * can run simultaneously. * * stream_open never fails and is marked to return int so that it could be * directly used as file_operations.open. */ int stream_open(struct inode *inode, struct file *filp) { filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE | FMODE_ATOMIC_POS); filp->f_mode |= FMODE_STREAM; return 0; } EXPORT_SYMBOL(stream_open);
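/*
 * Illustrative usage sketch (not part of the original file): how an in-kernel
 * consumer might use the helpers above. The device and config-file names and
 * the demo_* identifiers are hypothetical; only stream_open(), filp_open(),
 * filp_close() and the file_operations layout come from the code above.
 */
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>

/* A character device that wants stream-like (non-seekable) semantics. */
static int demo_chardev_open(struct inode *inode, struct file *filp)
{
	/* Clears FMODE_LSEEK/FMODE_PREAD/FMODE_PWRITE and sets FMODE_STREAM. */
	return stream_open(inode, filp);
}

static const struct file_operations demo_chardev_fops = {
	.owner	= THIS_MODULE,
	.open	= demo_chardev_open,	/* stream_open() never fails */
};

/* Opening and closing a file from kernel space, if it really is needed. */
static int demo_read_config(void)
{
	struct file *filp;

	filp = filp_open("/etc/demo.conf", O_RDONLY, 0);	/* hypothetical path */
	if (IS_ERR(filp))
		return PTR_ERR(filp);
	/* ... kernel_read(filp, ...) would go here ... */
	return filp_close(filp, NULL);
}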
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE * Copyright (C) 2016 - 2020 Christoph Hellwig */ #include <linux/init.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/kmod.h> #include <linux/major.h> #include <linux/device_cgroup.h> #include <linux/blkdev.h> #include <linux/blk-integrity.h> #include <linux/backing-dev.h> #include <linux/module.h> #include <linux/blkpg.h> #include <linux/magic.h> #include <linux/buffer_head.h> #include <linux/swap.h> #include <linux/writeback.h> #include <linux/mount.h> #include <linux/pseudo_fs.h> #include <linux/uio.h> #include <linux/namei.h> #include <linux/security.h> #include <linux/part_stat.h> #include <linux/uaccess.h> #include
<linux/stat.h> #include "../fs/internal.h" #include "blk.h" /* Should we allow writing to mounted block devices? */ static bool bdev_allow_write_mounted = IS_ENABLED(CONFIG_BLK_DEV_WRITE_MOUNTED); struct bdev_inode { struct block_device bdev; struct inode vfs_inode; }; static inline struct bdev_inode *BDEV_I(struct inode *inode) { return container_of(inode, struct bdev_inode, vfs_inode); } static inline struct inode *BD_INODE(struct block_device *bdev) { return &container_of(bdev, struct bdev_inode, bdev)->vfs_inode; } struct block_device *I_BDEV(struct inode *inode) { return &BDEV_I(inode)->bdev; } EXPORT_SYMBOL(I_BDEV); struct block_device *file_bdev(struct file *bdev_file) { return I_BDEV(bdev_file->f_mapping->host); } EXPORT_SYMBOL(file_bdev); static void bdev_write_inode(struct block_device *bdev) { struct inode *inode = BD_INODE(bdev); int ret; spin_lock(&inode->i_lock); while (inode_state_read(inode) & I_DIRTY) { spin_unlock(&inode->i_lock); ret = write_inode_now(inode, true); if (ret) pr_warn_ratelimited( "VFS: Dirty inode writeback failed for block device %pg (err=%d).\n", bdev, ret); spin_lock(&inode->i_lock); } spin_unlock(&inode->i_lock); } /* Kill _all_ buffers and pagecache, dirty or not. */ static void kill_bdev(struct block_device *bdev) { struct address_space *mapping = bdev->bd_mapping; if (mapping_empty(mapping)) return; invalidate_bh_lrus(); truncate_inode_pages(mapping, 0); } /* Invalidate clean unused buffers and pagecache. */ void invalidate_bdev(struct block_device *bdev) { struct address_space *mapping = bdev->bd_mapping; if (mapping->nrpages) { invalidate_bh_lrus(); lru_add_drain_all(); /* make sure all lru add caches are flushed */ invalidate_mapping_pages(mapping, 0, -1); } } EXPORT_SYMBOL(invalidate_bdev); /* * Drop all buffers & page cache for the given bdev range. This function bails * out with an error if the bdev has another exclusive owner (such as a * filesystem). */ int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode, loff_t lstart, loff_t lend) { /* * If we don't hold an exclusive handle for the device, upgrade to one * while we discard the buffer cache to avoid discarding buffers * under a live filesystem. */ if (!(mode & BLK_OPEN_EXCL)) { int err = bd_prepare_to_claim(bdev, truncate_bdev_range, NULL); if (err) goto invalidate; } truncate_inode_pages_range(bdev->bd_mapping, lstart, lend); if (!(mode & BLK_OPEN_EXCL)) bd_abort_claiming(bdev, truncate_bdev_range); return 0; invalidate: /* * Someone else has the device open exclusively. Try invalidating * instead. The 'end' argument is inclusive so the rounding is safe. */ return invalidate_inode_pages2_range(bdev->bd_mapping, lstart >> PAGE_SHIFT, lend >> PAGE_SHIFT); } static void set_init_blocksize(struct block_device *bdev) { unsigned int bsize = bdev_logical_block_size(bdev); loff_t size = i_size_read(BD_INODE(bdev)); while (bsize < PAGE_SIZE) { if (size & bsize) break; bsize <<= 1; } BD_INODE(bdev)->i_blkbits = blksize_bits(bsize); mapping_set_folio_min_order(BD_INODE(bdev)->i_mapping, get_order(bsize)); } /** * bdev_validate_blocksize - check that this block size is acceptable * @bdev: blockdevice to check * @block_size: block size to check * * For block device users that do not use buffer heads or the block device * page cache, make sure that this block size can be used with the device. * * Return: On success zero is returned, negative error code on failure.
*/ int bdev_validate_blocksize(struct block_device *bdev, int block_size) { if (blk_validate_block_size(block_size)) return -EINVAL; /* Size cannot be smaller than the size supported by the device */ if (block_size < bdev_logical_block_size(bdev)) return -EINVAL; return 0; } EXPORT_SYMBOL_GPL(bdev_validate_blocksize); int set_blocksize(struct file *file, int size) { struct inode *inode = file->f_mapping->host; struct block_device *bdev = I_BDEV(inode); int ret; ret = bdev_validate_blocksize(bdev, size); if (ret) return ret; if (!file->private_data) return -EINVAL; /* Don't change the size if it is same as current */ if (inode->i_blkbits != blksize_bits(size)) { /* * Flush and truncate the pagecache before we reconfigure the * mapping geometry because folio sizes are variable now. If a * reader has already allocated a folio whose size is smaller * than the new min_order but invokes readahead after the new * min_order becomes visible, readahead will think there are * "zero" blocks per folio and crash. Take the inode and * invalidation locks to avoid racing with * read/write/fallocate. */ inode_lock(inode); filemap_invalidate_lock(inode->i_mapping); sync_blockdev(bdev); kill_bdev(bdev); inode->i_blkbits = blksize_bits(size); mapping_set_folio_min_order(inode->i_mapping, get_order(size)); kill_bdev(bdev); filemap_invalidate_unlock(inode->i_mapping); inode_unlock(inode); } return 0; } EXPORT_SYMBOL(set_blocksize); static int sb_validate_large_blocksize(struct super_block *sb, int size) { const char *err_str = NULL; if (!(sb->s_type->fs_flags & FS_LBS)) err_str = "not supported by filesystem"; else if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) err_str = "is only supported with CONFIG_TRANSPARENT_HUGEPAGE"; if (!err_str) return 0; pr_warn_ratelimited("%s: block size(%d) > page size(%lu) %s\n", sb->s_type->name, size, PAGE_SIZE, err_str); return -EINVAL; } int sb_set_blocksize(struct super_block *sb, int size) { if (size > PAGE_SIZE && sb_validate_large_blocksize(sb, size)) return 0; if (set_blocksize(sb->s_bdev_file, size)) return 0; /* If we get here, we know size is validated */ sb->s_blocksize = size; sb->s_blocksize_bits = blksize_bits(size); return sb->s_blocksize; } EXPORT_SYMBOL(sb_set_blocksize); int __must_check sb_min_blocksize(struct super_block *sb, int size) { int minsize = bdev_logical_block_size(sb->s_bdev); if (size < minsize) size = minsize; return sb_set_blocksize(sb, size); } EXPORT_SYMBOL(sb_min_blocksize); int sync_blockdev_nowait(struct block_device *bdev) { if (!bdev) return 0; return filemap_flush(bdev->bd_mapping); } EXPORT_SYMBOL_GPL(sync_blockdev_nowait); /* * Write out and wait upon all the dirty data associated with a block * device via its mapping. Does not take the superblock lock. */ int sync_blockdev(struct block_device *bdev) { if (!bdev) return 0; return filemap_write_and_wait(bdev->bd_mapping); } EXPORT_SYMBOL(sync_blockdev); int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend) { return filemap_write_and_wait_range(bdev->bd_mapping, lstart, lend); } EXPORT_SYMBOL(sync_blockdev_range); /** * bdev_freeze - lock a filesystem and force it into a consistent state * @bdev: blockdevice to lock * * If a superblock is found on this device, we take the s_umount semaphore * on it to make sure nobody unmounts until the snapshot creation is done. * The reference counter (bd_fsfreeze_count) guarantees that only the last * unfreeze process can unfreeze the frozen filesystem actually when multiple * freeze requests arrive simultaneously. 
It is incremented in bdev_freeze() and * decremented in bdev_thaw(). When it reaches 0, bdev_thaw() actually * unfreezes the filesystem. * * Return: On success zero is returned, negative error code on failure. */ int bdev_freeze(struct block_device *bdev) { int error = 0; mutex_lock(&bdev->bd_fsfreeze_mutex); if (atomic_inc_return(&bdev->bd_fsfreeze_count) > 1) { mutex_unlock(&bdev->bd_fsfreeze_mutex); return 0; } mutex_lock(&bdev->bd_holder_lock); if (bdev->bd_holder_ops && bdev->bd_holder_ops->freeze) { error = bdev->bd_holder_ops->freeze(bdev); lockdep_assert_not_held(&bdev->bd_holder_lock); } else { mutex_unlock(&bdev->bd_holder_lock); error = sync_blockdev(bdev); } if (error) atomic_dec(&bdev->bd_fsfreeze_count); mutex_unlock(&bdev->bd_fsfreeze_mutex); return error; } EXPORT_SYMBOL(bdev_freeze); /** * bdev_thaw - unlock filesystem * @bdev: blockdevice to unlock * * Unlocks the filesystem and marks it writeable again after bdev_freeze(). * * Return: On success zero is returned, negative error code on failure. */ int bdev_thaw(struct block_device *bdev) { int error = -EINVAL, nr_freeze; mutex_lock(&bdev->bd_fsfreeze_mutex); /* * If this returns < 0 it means that @bd_fsfreeze_count was * already 0 and no decrement was performed. */ nr_freeze = atomic_dec_if_positive(&bdev->bd_fsfreeze_count); if (nr_freeze < 0) goto out; error = 0; if (nr_freeze > 0) goto out; mutex_lock(&bdev->bd_holder_lock); if (bdev->bd_holder_ops && bdev->bd_holder_ops->thaw) { error = bdev->bd_holder_ops->thaw(bdev); lockdep_assert_not_held(&bdev->bd_holder_lock); } else { mutex_unlock(&bdev->bd_holder_lock); } if (error) atomic_inc(&bdev->bd_fsfreeze_count); out: mutex_unlock(&bdev->bd_fsfreeze_mutex); return error; } EXPORT_SYMBOL(bdev_thaw); /* * pseudo-fs */ static __cacheline_aligned_in_smp DEFINE_MUTEX(bdev_lock); static struct kmem_cache *bdev_cachep __ro_after_init; static struct inode *bdev_alloc_inode(struct super_block *sb) { struct bdev_inode *ei = alloc_inode_sb(sb, bdev_cachep, GFP_KERNEL); if (!ei) return NULL; memset(&ei->bdev, 0, sizeof(ei->bdev)); if (security_bdev_alloc(&ei->bdev)) { kmem_cache_free(bdev_cachep, ei); return NULL; } return &ei->vfs_inode; } static void bdev_free_inode(struct inode *inode) { struct block_device *bdev = I_BDEV(inode); free_percpu(bdev->bd_stats); kfree(bdev->bd_meta_info); security_bdev_free(bdev); if (!bdev_is_partition(bdev)) { if (bdev->bd_disk && bdev->bd_disk->bdi) bdi_put(bdev->bd_disk->bdi); kfree(bdev->bd_disk); } if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR) blk_free_ext_minor(MINOR(bdev->bd_dev)); kmem_cache_free(bdev_cachep, BDEV_I(inode)); } static void init_once(void *data) { struct bdev_inode *ei = data; inode_init_once(&ei->vfs_inode); } static void bdev_evict_inode(struct inode *inode) { truncate_inode_pages_final(&inode->i_data); invalidate_inode_buffers(inode); /* is it needed here?
*/ clear_inode(inode); } static const struct super_operations bdev_sops = { .statfs = simple_statfs, .alloc_inode = bdev_alloc_inode, .free_inode = bdev_free_inode, .drop_inode = inode_just_drop, .evict_inode = bdev_evict_inode, }; static int bd_init_fs_context(struct fs_context *fc) { struct pseudo_fs_context *ctx = init_pseudo(fc, BDEVFS_MAGIC); if (!ctx) return -ENOMEM; fc->s_iflags |= SB_I_CGROUPWB; ctx->ops = &bdev_sops; return 0; } static struct file_system_type bd_type = { .name = "bdev", .init_fs_context = bd_init_fs_context, .kill_sb = kill_anon_super, }; struct super_block *blockdev_superblock __ro_after_init; static struct vfsmount *blockdev_mnt __ro_after_init; EXPORT_SYMBOL_GPL(blockdev_superblock); void __init bdev_cache_init(void) { int err; bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode), 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| SLAB_ACCOUNT|SLAB_PANIC), init_once); err = register_filesystem(&bd_type); if (err) panic("Cannot register bdev pseudo-fs"); blockdev_mnt = kern_mount(&bd_type); if (IS_ERR(blockdev_mnt)) panic("Cannot create bdev pseudo-fs"); blockdev_superblock = blockdev_mnt->mnt_sb; /* For writeback */ } struct block_device *bdev_alloc(struct gendisk *disk, u8 partno) { struct block_device *bdev; struct inode *inode; inode = new_inode(blockdev_superblock); if (!inode) return NULL; inode->i_mode = S_IFBLK; inode->i_rdev = 0; inode->i_data.a_ops = &def_blk_aops; mapping_set_gfp_mask(&inode->i_data, GFP_USER); bdev = I_BDEV(inode); mutex_init(&bdev->bd_fsfreeze_mutex); spin_lock_init(&bdev->bd_size_lock); mutex_init(&bdev->bd_holder_lock); atomic_set(&bdev->__bd_flags, partno); bdev->bd_mapping = &inode->i_data; bdev->bd_queue = disk->queue; if (partno && bdev_test_flag(disk->part0, BD_HAS_SUBMIT_BIO)) bdev_set_flag(bdev, BD_HAS_SUBMIT_BIO); bdev->bd_stats = alloc_percpu(struct disk_stats); if (!bdev->bd_stats) { iput(inode); return NULL; } bdev->bd_disk = disk; return bdev; } void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors) { spin_lock(&bdev->bd_size_lock); i_size_write(BD_INODE(bdev), (loff_t)sectors << SECTOR_SHIFT); bdev->bd_nr_sectors = sectors; spin_unlock(&bdev->bd_size_lock); } void bdev_add(struct block_device *bdev, dev_t dev) { struct inode *inode = BD_INODE(bdev); if (bdev_stable_writes(bdev)) mapping_set_stable_writes(bdev->bd_mapping); bdev->bd_dev = dev; inode->i_rdev = dev; inode->i_ino = dev; insert_inode_hash(inode); } void bdev_unhash(struct block_device *bdev) { remove_inode_hash(BD_INODE(bdev)); } void bdev_drop(struct block_device *bdev) { iput(BD_INODE(bdev)); } long nr_blockdev_pages(void) { struct inode *inode; long ret = 0; spin_lock(&blockdev_superblock->s_inode_list_lock); list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) ret += inode->i_mapping->nrpages; spin_unlock(&blockdev_superblock->s_inode_list_lock); return ret; } /** * bd_may_claim - test whether a block device can be claimed * @bdev: block device of interest * @holder: holder trying to claim @bdev * @hops: holder ops * * Test whether @bdev can be claimed by @holder. * * RETURNS: * %true if @bdev can be claimed, %false otherwise. */ static bool bd_may_claim(struct block_device *bdev, void *holder, const struct blk_holder_ops *hops) { struct block_device *whole = bdev_whole(bdev); lockdep_assert_held(&bdev_lock); if (bdev->bd_holder) { /* * The same holder can always re-claim. 
*/ if (bdev->bd_holder == holder) { if (WARN_ON_ONCE(bdev->bd_holder_ops != hops)) return false; return true; } return false; } /* * If the whole device's holder is set to bd_may_claim, a partition on * the device is claimed, but not the whole device. */ if (whole != bdev && whole->bd_holder && whole->bd_holder != bd_may_claim) return false; return true; } /** * bd_prepare_to_claim - claim a block device * @bdev: block device of interest * @holder: holder trying to claim @bdev * @hops: holder ops. * * Claim @bdev. This function fails if @bdev is already claimed by another * holder and waits if another claiming is in progress. On successful return, * the caller has ownership of bd_claiming and bd_holder[s]. * * RETURNS: * 0 if @bdev can be claimed, -EBUSY otherwise. */ int bd_prepare_to_claim(struct block_device *bdev, void *holder, const struct blk_holder_ops *hops) { struct block_device *whole = bdev_whole(bdev); if (WARN_ON_ONCE(!holder)) return -EINVAL; retry: mutex_lock(&bdev_lock); /* if someone else claimed, fail */ if (!bd_may_claim(bdev, holder, hops)) { mutex_unlock(&bdev_lock); return -EBUSY; } /* if claiming is already in progress, wait for it to finish */ if (whole->bd_claiming) { wait_queue_head_t *wq = __var_waitqueue(&whole->bd_claiming); DEFINE_WAIT(wait); prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE); mutex_unlock(&bdev_lock); schedule(); finish_wait(wq, &wait); goto retry; } /* yay, all mine */ whole->bd_claiming = holder; mutex_unlock(&bdev_lock); return 0; } EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */ static void bd_clear_claiming(struct block_device *whole, void *holder) { lockdep_assert_held(&bdev_lock); /* tell others that we're done */ BUG_ON(whole->bd_claiming != holder); whole->bd_claiming = NULL; wake_up_var(&whole->bd_claiming); } /** * bd_finish_claiming - finish claiming of a block device * @bdev: block device of interest * @holder: holder that has claimed @bdev * @hops: block device holder operations * * Finish exclusive open of a block device. Mark the device as exclusively * open by the holder and wake up all waiters for exclusive open to finish. */ static void bd_finish_claiming(struct block_device *bdev, void *holder, const struct blk_holder_ops *hops) { struct block_device *whole = bdev_whole(bdev); mutex_lock(&bdev_lock); BUG_ON(!bd_may_claim(bdev, holder, hops)); /* * Note that for a whole device bd_holders will be incremented twice, * and bd_holder will be set to bd_may_claim before being set to holder */ whole->bd_holders++; whole->bd_holder = bd_may_claim; bdev->bd_holders++; mutex_lock(&bdev->bd_holder_lock); bdev->bd_holder = holder; bdev->bd_holder_ops = hops; mutex_unlock(&bdev->bd_holder_lock); bd_clear_claiming(whole, holder); mutex_unlock(&bdev_lock); } /** * bd_abort_claiming - abort claiming of a block device * @bdev: block device of interest * @holder: holder that has claimed @bdev * * Abort claiming of a block device when the exclusive open failed. This can * also be used when exclusive open is not actually desired and we just needed * to block other exclusive openers for a while. */ void bd_abort_claiming(struct block_device *bdev, void *holder) { mutex_lock(&bdev_lock); bd_clear_claiming(bdev_whole(bdev), holder); mutex_unlock(&bdev_lock); } EXPORT_SYMBOL(bd_abort_claiming); static void bd_end_claim(struct block_device *bdev, void *holder) { struct block_device *whole = bdev_whole(bdev); bool unblock = false; /* * Release a claim on the device. The holder fields are protected with * bdev_lock.
open_mutex is used to synchronize disk_holder unlinking. */ mutex_lock(&bdev_lock); WARN_ON_ONCE(bdev->bd_holder != holder); WARN_ON_ONCE(--bdev->bd_holders < 0); WARN_ON_ONCE(--whole->bd_holders < 0); if (!bdev->bd_holders) { mutex_lock(&bdev->bd_holder_lock); bdev->bd_holder = NULL; bdev->bd_holder_ops = NULL; mutex_unlock(&bdev->bd_holder_lock); if (bdev_test_flag(bdev, BD_WRITE_HOLDER)) unblock = true; } if (!whole->bd_holders) whole->bd_holder = NULL; mutex_unlock(&bdev_lock); /* * If this was the last claim, remove holder link and unblock evpoll if * it was a write holder. */ if (unblock) { disk_unblock_events(bdev->bd_disk); bdev_clear_flag(bdev, BD_WRITE_HOLDER); } } static void blkdev_flush_mapping(struct block_device *bdev) { WARN_ON_ONCE(bdev->bd_holders); sync_blockdev(bdev); kill_bdev(bdev); bdev_write_inode(bdev); } static void blkdev_put_whole(struct block_device *bdev) { if (atomic_dec_and_test(&bdev->bd_openers)) blkdev_flush_mapping(bdev); if (bdev->bd_disk->fops->release) bdev->bd_disk->fops->release(bdev->bd_disk); } static int blkdev_get_whole(struct block_device *bdev, blk_mode_t mode) { struct gendisk *disk = bdev->bd_disk; int ret; if (disk->fops->open) { ret = disk->fops->open(disk, mode); if (ret) { /* avoid ghost partitions on a removed medium */ if (ret == -ENOMEDIUM && test_bit(GD_NEED_PART_SCAN, &disk->state)) bdev_disk_changed(disk, true); return ret; } } if (!atomic_read(&bdev->bd_openers)) set_init_blocksize(bdev); atomic_inc(&bdev->bd_openers); if (test_bit(GD_NEED_PART_SCAN, &disk->state)) { /* * Only return scanning errors if we are called from contexts * that explicitly want them, e.g. the BLKRRPART ioctl. */ ret = bdev_disk_changed(disk, false); if (ret && (mode & BLK_OPEN_STRICT_SCAN)) { blkdev_put_whole(bdev); return ret; } } return 0; } static int blkdev_get_part(struct block_device *part, blk_mode_t mode) { struct gendisk *disk = part->bd_disk; int ret; ret = blkdev_get_whole(bdev_whole(part), mode); if (ret) return ret; ret = -ENXIO; if (!bdev_nr_sectors(part)) goto out_blkdev_put; if (!atomic_read(&part->bd_openers)) { disk->open_partitions++; set_init_blocksize(part); } atomic_inc(&part->bd_openers); return 0; out_blkdev_put: blkdev_put_whole(bdev_whole(part)); return ret; } int bdev_permission(dev_t dev, blk_mode_t mode, void *holder) { int ret; ret = devcgroup_check_permission(DEVCG_DEV_BLOCK, MAJOR(dev), MINOR(dev), ((mode & BLK_OPEN_READ) ? DEVCG_ACC_READ : 0) | ((mode & BLK_OPEN_WRITE) ? DEVCG_ACC_WRITE : 0)); if (ret) return ret; /* Blocking writes requires exclusive opener */ if (mode & BLK_OPEN_RESTRICT_WRITES && !holder) return -EINVAL; /* * We're using error pointers to indicate to ->release() when we * failed to open that block device. Also this doesn't make sense. 
*/ if (WARN_ON_ONCE(IS_ERR(holder))) return -EINVAL; return 0; } static void blkdev_put_part(struct block_device *part) { struct block_device *whole = bdev_whole(part); if (atomic_dec_and_test(&part->bd_openers)) { blkdev_flush_mapping(part); whole->bd_disk->open_partitions--; } blkdev_put_whole(whole); } struct block_device *blkdev_get_no_open(dev_t dev, bool autoload) { struct block_device *bdev; struct inode *inode; inode = ilookup(blockdev_superblock, dev); if (!inode && autoload && IS_ENABLED(CONFIG_BLOCK_LEGACY_AUTOLOAD)) { blk_request_module(dev); inode = ilookup(blockdev_superblock, dev); if (inode) pr_warn_ratelimited( "block device autoloading is deprecated and will be removed.\n"); } if (!inode) return NULL; /* switch from the inode reference to a device mode one: */ bdev = &BDEV_I(inode)->bdev; if (!kobject_get_unless_zero(&bdev->bd_device.kobj)) bdev = NULL; iput(inode); return bdev; } void blkdev_put_no_open(struct block_device *bdev) { put_device(&bdev->bd_device); } static bool bdev_writes_blocked(struct block_device *bdev) { return bdev->bd_writers < 0; } static void bdev_block_writes(struct block_device *bdev) { bdev->bd_writers--; } static void bdev_unblock_writes(struct block_device *bdev) { bdev->bd_writers++; } static bool bdev_may_open(struct block_device *bdev, blk_mode_t mode) { if (bdev_allow_write_mounted) return true; /* Writes blocked? */ if (mode & BLK_OPEN_WRITE && bdev_writes_blocked(bdev)) return false; if (mode & BLK_OPEN_RESTRICT_WRITES && bdev->bd_writers > 0) return false; return true; } static void bdev_claim_write_access(struct block_device *bdev, blk_mode_t mode) { if (bdev_allow_write_mounted) return; /* Claim exclusive or shared write access. */ if (mode & BLK_OPEN_RESTRICT_WRITES) bdev_block_writes(bdev); else if (mode & BLK_OPEN_WRITE) bdev->bd_writers++; } static inline bool bdev_unclaimed(const struct file *bdev_file) { return bdev_file->private_data == BDEV_I(bdev_file->f_mapping->host); } static void bdev_yield_write_access(struct file *bdev_file) { struct block_device *bdev; if (bdev_allow_write_mounted) return; if (bdev_unclaimed(bdev_file)) return; bdev = file_bdev(bdev_file); if (bdev_file->f_mode & FMODE_WRITE_RESTRICTED) bdev_unblock_writes(bdev); else if (bdev_file->f_mode & FMODE_WRITE) bdev->bd_writers--; } /** * bdev_open - open a block device * @bdev: block device to open * @mode: open mode (BLK_OPEN_*) * @holder: exclusive holder identifier * @hops: holder operations * @bdev_file: file for the block device * * Open the block device. If @holder is not %NULL, the block device is opened * with exclusive access. Exclusive opens may nest for the same @holder. * * CONTEXT: * Might sleep. * * RETURNS: * zero on success, -errno on failure. 
*/ int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder, const struct blk_holder_ops *hops, struct file *bdev_file) { bool unblock_events = true; struct gendisk *disk = bdev->bd_disk; int ret; if (holder) { mode |= BLK_OPEN_EXCL; ret = bd_prepare_to_claim(bdev, holder, hops); if (ret) return ret; } else { if (WARN_ON_ONCE(mode & BLK_OPEN_EXCL)) return -EIO; } disk_block_events(disk); mutex_lock(&disk->open_mutex); ret = -ENXIO; if (!disk_live(disk)) goto abort_claiming; if (!try_module_get(disk->fops->owner)) goto abort_claiming; ret = -EBUSY; if (!bdev_may_open(bdev, mode)) goto put_module; if (bdev_is_partition(bdev)) ret = blkdev_get_part(bdev, mode); else ret = blkdev_get_whole(bdev, mode); if (ret) goto put_module; bdev_claim_write_access(bdev, mode); if (holder) { bd_finish_claiming(bdev, holder, hops); /* * Block event polling for write claims if requested. Any write * holder makes the write_holder state stick until all are * released. This is good enough and tracking individual * writeable reference is too fragile given the way @mode is * used in blkdev_get/put(). */ if ((mode & BLK_OPEN_WRITE) && !bdev_test_flag(bdev, BD_WRITE_HOLDER) && (disk->event_flags & DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE)) { bdev_set_flag(bdev, BD_WRITE_HOLDER); unblock_events = false; } } mutex_unlock(&disk->open_mutex); if (unblock_events) disk_unblock_events(disk); bdev_file->f_flags |= O_LARGEFILE; bdev_file->f_mode |= FMODE_CAN_ODIRECT; if (bdev_nowait(bdev)) bdev_file->f_mode |= FMODE_NOWAIT; if (mode & BLK_OPEN_RESTRICT_WRITES) bdev_file->f_mode |= FMODE_WRITE_RESTRICTED; bdev_file->f_mapping = bdev->bd_mapping; bdev_file->f_wb_err = filemap_sample_wb_err(bdev_file->f_mapping); bdev_file->private_data = holder; return 0; put_module: module_put(disk->fops->owner); abort_claiming: if (holder) bd_abort_claiming(bdev, holder); mutex_unlock(&disk->open_mutex); disk_unblock_events(disk); return ret; } /* * If BLK_OPEN_WRITE_IOCTL is set then this is a historical quirk * associated with the floppy driver where it has allowed ioctls if the * file was opened for writing, but does not allow reads or writes. * Make sure that this quirk is reflected in @f_flags. * * It can also happen if a block device is opened as O_RDWR | O_WRONLY. */ static unsigned blk_to_file_flags(blk_mode_t mode) { unsigned int flags = 0; if ((mode & (BLK_OPEN_READ | BLK_OPEN_WRITE)) == (BLK_OPEN_READ | BLK_OPEN_WRITE)) flags |= O_RDWR; else if (mode & BLK_OPEN_WRITE_IOCTL) flags |= O_RDWR | O_WRONLY; else if (mode & BLK_OPEN_WRITE) flags |= O_WRONLY; else if (mode & BLK_OPEN_READ) flags |= O_RDONLY; /* homeopathic, because O_RDONLY is 0 */ else WARN_ON_ONCE(true); if (mode & BLK_OPEN_NDELAY) flags |= O_NDELAY; return flags; } struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder, const struct blk_holder_ops *hops) { struct file *bdev_file; struct block_device *bdev; unsigned int flags; int ret; ret = bdev_permission(dev, mode, holder); if (ret) return ERR_PTR(ret); bdev = blkdev_get_no_open(dev, true); if (!bdev) return ERR_PTR(-ENXIO); flags = blk_to_file_flags(mode); bdev_file = alloc_file_pseudo_noaccount(BD_INODE(bdev), blockdev_mnt, "", flags | O_LARGEFILE, &def_blk_fops); if (IS_ERR(bdev_file)) { blkdev_put_no_open(bdev); return bdev_file; } ihold(BD_INODE(bdev)); ret = bdev_open(bdev, mode, holder, hops, bdev_file); if (ret) { /* We failed to open the block device. Let ->release() know. 
*/ bdev_file->private_data = ERR_PTR(ret); fput(bdev_file); return ERR_PTR(ret); } return bdev_file; } EXPORT_SYMBOL(bdev_file_open_by_dev); struct file *bdev_file_open_by_path(const char *path, blk_mode_t mode, void *holder, const struct blk_holder_ops *hops) { struct file *file; dev_t dev; int error; error = lookup_bdev(path, &dev); if (error) return ERR_PTR(error); file = bdev_file_open_by_dev(dev, mode, holder, hops); if (!IS_ERR(file) && (mode & BLK_OPEN_WRITE)) { if (bdev_read_only(file_bdev(file))) { fput(file); file = ERR_PTR(-EACCES); } } return file; } EXPORT_SYMBOL(bdev_file_open_by_path); static inline void bd_yield_claim(struct file *bdev_file) { struct block_device *bdev = file_bdev(bdev_file); void *holder = bdev_file->private_data; lockdep_assert_held(&bdev->bd_disk->open_mutex); if (WARN_ON_ONCE(IS_ERR_OR_NULL(holder))) return; if (!bdev_unclaimed(bdev_file)) bd_end_claim(bdev, holder); } void bdev_release(struct file *bdev_file) { struct block_device *bdev = file_bdev(bdev_file); void *holder = bdev_file->private_data; struct gendisk *disk = bdev->bd_disk; /* We failed to open that block device. */ if (IS_ERR(holder)) goto put_no_open; /* * Sync early if it looks like we're the last one. If someone else * opens the block device between now and the decrement of bd_openers * then we did a sync that we didn't need to, but that's not the end * of the world and we want to avoid long (could be several minute) * syncs while holding the mutex. */ if (atomic_read(&bdev->bd_openers) == 1) sync_blockdev(bdev); mutex_lock(&disk->open_mutex); bdev_yield_write_access(bdev_file); if (holder) bd_yield_claim(bdev_file); /* * Trigger event checking and tell drivers to flush MEDIA_CHANGE * event. This is to ensure detection of media removal commanded * from userland - e.g. eject(1). */ disk_flush_events(disk, DISK_EVENT_MEDIA_CHANGE); if (bdev_is_partition(bdev)) blkdev_put_part(bdev); else blkdev_put_whole(bdev); mutex_unlock(&disk->open_mutex); module_put(disk->fops->owner); put_no_open: blkdev_put_no_open(bdev); } /** * bdev_fput - yield claim to the block device and put the file * @bdev_file: open block device * * Yield claim on the block device and put the file. Ensure that the * block device can be reclaimed before the file is closed which is a * deferred operation. */ void bdev_fput(struct file *bdev_file) { if (WARN_ON_ONCE(bdev_file->f_op != &def_blk_fops)) return; if (bdev_file->private_data) { struct block_device *bdev = file_bdev(bdev_file); struct gendisk *disk = bdev->bd_disk; mutex_lock(&disk->open_mutex); bdev_yield_write_access(bdev_file); bd_yield_claim(bdev_file); /* * Tell release we already gave up our hold on the * device and if write restrictions are available that * we already gave up write access to the device. */ bdev_file->private_data = BDEV_I(bdev_file->f_mapping->host); mutex_unlock(&disk->open_mutex); } fput(bdev_file); } EXPORT_SYMBOL(bdev_fput); /** * lookup_bdev() - Look up a struct block_device by name. * @pathname: Name of the block device in the filesystem. * @dev: Pointer to the block device's dev_t, if found. * * Lookup the block device's dev_t at @pathname in the current * namespace if possible and return it in @dev. * * Context: May sleep. * Return: 0 if succeeded, negative errno otherwise. 
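 *
 * Illustrative example (not part of the original comment; the device path is
 * hypothetical):
 *
 *	dev_t dev;
 *	int err = lookup_bdev("/dev/vda1", &dev);
 *	if (!err)
 *		pr_info("resolved to %u:%u\n", MAJOR(dev), MINOR(dev));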
*/ int lookup_bdev(const char *pathname, dev_t *dev) { struct inode *inode; struct path path; int error; if (!pathname || !*pathname) return -EINVAL; error = kern_path(pathname, LOOKUP_FOLLOW, &path); if (error) return error; inode = d_backing_inode(path.dentry); error = -ENOTBLK; if (!S_ISBLK(inode->i_mode)) goto out_path_put; error = -EACCES; if (!may_open_dev(&path)) goto out_path_put; *dev = inode->i_rdev; error = 0; out_path_put: path_put(&path); return error; } EXPORT_SYMBOL(lookup_bdev); /** * bdev_mark_dead - mark a block device as dead * @bdev: block device to operate on * @surprise: indicate a surprise removal * * Tell the file system that this device or media is dead. If @surprise is set * to %true the device or media is already gone, if not we are preparing for an * orderly removal. * * This calls into the file system, which then typically syncs out all dirty data * and writes back inodes and then invalidates any cached data in the inodes on * the file system. In addition we also invalidate the block device mapping. */ void bdev_mark_dead(struct block_device *bdev, bool surprise) { mutex_lock(&bdev->bd_holder_lock); if (bdev->bd_holder_ops && bdev->bd_holder_ops->mark_dead) bdev->bd_holder_ops->mark_dead(bdev, surprise); else { mutex_unlock(&bdev->bd_holder_lock); sync_blockdev(bdev); } invalidate_bdev(bdev); } /* * New drivers should not use this directly. There are some drivers however * that need this for historical reasons. For example, the DASD driver has * historically had a shutdown to offline mode that doesn't actually remove the * gendisk that otherwise looks a lot like a safe device removal. */ EXPORT_SYMBOL_GPL(bdev_mark_dead); void sync_bdevs(bool wait) { struct inode *inode, *old_inode = NULL; spin_lock(&blockdev_superblock->s_inode_list_lock); list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) { struct address_space *mapping = inode->i_mapping; struct block_device *bdev; spin_lock(&inode->i_lock); if (inode_state_read(inode) & (I_FREEING | I_WILL_FREE | I_NEW) || mapping->nrpages == 0) { spin_unlock(&inode->i_lock); continue; } __iget(inode); spin_unlock(&inode->i_lock); spin_unlock(&blockdev_superblock->s_inode_list_lock); /* * We hold a reference to 'inode' so it couldn't have been * removed from the s_inodes list while we dropped the * s_inode_list_lock. We cannot iput the inode now as we can * be holding the last reference and we cannot iput it under * s_inode_list_lock. So we keep the reference and iput it * later. */ iput(old_inode); old_inode = inode; bdev = I_BDEV(inode); mutex_lock(&bdev->bd_disk->open_mutex); if (!atomic_read(&bdev->bd_openers)) { ; /* skip */ } else if (wait) { /* * We keep the error status of individual mapping so * that applications can catch the writeback error using * fsync(2). See filemap_fdatawait_keep_errors() for * details. */ filemap_fdatawait_keep_errors(inode->i_mapping); } else { filemap_fdatawrite(inode->i_mapping); } mutex_unlock(&bdev->bd_disk->open_mutex); spin_lock(&blockdev_superblock->s_inode_list_lock); } spin_unlock(&blockdev_superblock->s_inode_list_lock); iput(old_inode); } /* * Handle STATX_{DIOALIGN, WRITE_ATOMIC} for block devices. */ void bdev_statx(const struct path *path, struct kstat *stat, u32 request_mask) { struct block_device *bdev; /* * Note that d_backing_inode() returns the block device node inode, not * the block device's internal inode. Therefore it is *not* valid to * use I_BDEV() here; the block device has to be looked up by i_rdev * instead.
*/ bdev = blkdev_get_no_open(d_backing_inode(path->dentry)->i_rdev, false); if (!bdev) return; if (request_mask & STATX_DIOALIGN) { stat->dio_mem_align = bdev_dma_alignment(bdev) + 1; stat->dio_offset_align = bdev_logical_block_size(bdev); stat->result_mask |= STATX_DIOALIGN; } if (request_mask & STATX_WRITE_ATOMIC && bdev_can_atomic_write(bdev)) { struct request_queue *bd_queue = bdev->bd_queue; generic_fill_statx_atomic_writes(stat, queue_atomic_write_unit_min_bytes(bd_queue), queue_atomic_write_unit_max_bytes(bd_queue), 0); } stat->blksize = bdev_io_min(bdev); blkdev_put_no_open(bdev); } bool disk_live(struct gendisk *disk) { return !inode_unhashed(BD_INODE(disk->part0)); } EXPORT_SYMBOL_GPL(disk_live); unsigned int block_size(struct block_device *bdev) { return 1 << BD_INODE(bdev)->i_blkbits; } EXPORT_SYMBOL_GPL(block_size); static int __init setup_bdev_allow_write_mounted(char *str) { if (kstrtobool(str, &bdev_allow_write_mounted)) pr_warn("Invalid option string for bdev_allow_write_mounted:" " '%s'\n", str); return 1; } __setup("bdev_allow_write_mounted=", setup_bdev_allow_write_mounted);
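/*
 * Illustrative usage sketch (not part of the original file): how a filesystem
 * or driver might open a block device by path with an exclusive claim and
 * release it again. The device path and the demo_* names are hypothetical;
 * bdev_file_open_by_path(), file_bdev() and bdev_fput() are the helpers
 * defined above.
 */
static int demo_open_backing_device(void)
{
	static int demo_holder;	/* any unique address works as the holder */
	struct file *bdev_file;
	struct block_device *bdev;

	bdev_file = bdev_file_open_by_path("/dev/vdb",	/* hypothetical device */
					   BLK_OPEN_READ | BLK_OPEN_WRITE,
					   &demo_holder, NULL);
	if (IS_ERR(bdev_file))
		return PTR_ERR(bdev_file);

	bdev = file_bdev(bdev_file);
	pr_info("demo: opened %pg, %llu sectors\n", bdev,
		(unsigned long long)bdev_nr_sectors(bdev));

	/* Yields the exclusive claim and puts the file. */
	bdev_fput(bdev_file);
	return 0;
}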
// SPDX-License-Identifier: GPL-2.0 #include <linux/ceph/ceph_debug.h> #include <linux/err.h> #include <linux/sched.h> #include <linux/types.h> #include <linux/vmalloc.h> #include <linux/ceph/messenger.h> #include <linux/ceph/msgpool.h> static void *msgpool_alloc(gfp_t gfp_mask, void *arg) { struct ceph_msgpool *pool = arg; struct ceph_msg *msg; msg = ceph_msg_new2(pool->type, pool->front_len, pool->max_data_items, gfp_mask, true); if (!msg) { dout("msgpool_alloc %s failed\n", pool->name); } else { dout("msgpool_alloc %s %p\n", pool->name, msg); msg->pool = pool; } return msg; } static void msgpool_free(void *element, void *arg) { struct ceph_msgpool *pool = arg; struct ceph_msg *msg = element; dout("msgpool_release %s %p\n", pool->name, msg); msg->pool = NULL; ceph_msg_put(msg); } int ceph_msgpool_init(struct ceph_msgpool *pool, int type, int front_len, int max_data_items, int size, const char *name) { dout("msgpool %s init\n", name); pool->type = type; pool->front_len = front_len; pool->max_data_items = max_data_items; pool->pool = mempool_create(size, msgpool_alloc, msgpool_free, pool); if (!pool->pool) return -ENOMEM; pool->name = name; return 0; } void ceph_msgpool_destroy(struct ceph_msgpool *pool) { dout("msgpool %s destroy\n", pool->name); mempool_destroy(pool->pool); } struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len, int max_data_items) { struct ceph_msg *msg; if (front_len > pool->front_len || max_data_items > pool->max_data_items) { pr_warn_ratelimited("%s need %d/%d, pool %s has %d/%d\n", __func__, front_len, max_data_items, pool->name, pool->front_len, pool->max_data_items); WARN_ON_ONCE(1); /* try to alloc a fresh message */ return ceph_msg_new2(pool->type, front_len, max_data_items, GFP_NOFS, false); } msg = mempool_alloc(pool->pool, GFP_NOFS); dout("msgpool_get %s %p\n", pool->name, msg); return msg; } void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg) { dout("msgpool_put %s %p\n", pool->name, msg); /* reset msg front_len; user may have changed it */ msg->front.iov_len = pool->front_len; msg->hdr.front_len = cpu_to_le32(pool->front_len); msg->data_length = 0; msg->num_data_items = 0; kref_init(&msg->kref); /* retake single ref */ mempool_free(msg, pool->pool); }
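/*
 * Illustrative usage sketch (not part of the original file): how a client of
 * the pool API above might preallocate and recycle reply messages. The pool
 * size, front length and the demo_* names are made up; CEPH_MSG_OSD_OPREPLY
 * is used here only as an example message type.
 */
static struct ceph_msgpool demo_reply_pool;

static int demo_pool_setup(void)
{
	/* 10 preallocated messages with a 512-byte front and no data items. */
	return ceph_msgpool_init(&demo_reply_pool, CEPH_MSG_OSD_OPREPLY,
				 512, 0, 10, "demo_reply_pool");
}

static void demo_pool_use(void)
{
	struct ceph_msg *msg;

	msg = ceph_msgpool_get(&demo_reply_pool, 512, 0);
	if (!msg)
		return;
	/* ... fill in and send the message ... */
	ceph_msgpool_put(&demo_reply_pool, msg);
}

static void demo_pool_teardown(void)
{
	ceph_msgpool_destroy(&demo_reply_pool);
}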
// SPDX-License-Identifier: GPL-2.0-or-later /* * Linux driver for TerraTec DMX 6Fire USB * * Firmware loader * * Author: Torsten Schenk <torsten.schenk@zoho.com> * Created: Jan 01, 2011 * Copyright: (C) Torsten Schenk */ #include <linux/firmware.h> #include <linux/module.h> #include <linux/bitrev.h> #include <linux/kernel.h> #include "firmware.h" #include "chip.h" MODULE_FIRMWARE("6fire/dmx6firel2.ihx"); MODULE_FIRMWARE("6fire/dmx6fireap.ihx"); MODULE_FIRMWARE("6fire/dmx6firecf.bin"); enum { FPGA_BUFSIZE = 512, FPGA_EP = 2 }; /* * wMaxPacketSize of pcm endpoints. * keep synced with rates_in_packet_size and rates_out_packet_size in pcm.c * fpp: frames per isopacket * * CAUTION: keep sizeof <= buffer[] in usb6fire_fw_init */ static const u8 ep_w_max_packet_size[] = { 0xe4, 0x00, 0xe4, 0x00, /* alt 1: 228 EP2 and EP6 (7 fpp) */ 0xa4, 0x01, 0xa4, 0x01, /* alt 2: 420 EP2 and EP6 (13 fpp)*/ 0x94, 0x01, 0x5c, 0x02 /* alt 3: 404 EP2 and 604 EP6 (25 fpp) */ }; static const u8 known_fw_versions[][2] = { { 0x03, 0x01 } }; struct ihex_record { u16 address; u8 len; u8 data[256]; char error; /* true if an error occurred parsing this record */ u8 max_len; /* maximum record length in whole ihex */ /* private */ const char *txt_data; unsigned int txt_length; unsigned int txt_offset; /* current position in txt_data */ }; static u8 usb6fire_fw_ihex_hex(const u8 *data, u8 *crc) { u8 val = 0; int hval; hval = hex_to_bin(data[0]); if (hval >= 0) val |= (hval << 4); hval = hex_to_bin(data[1]); if (hval >= 0) val |= hval; *crc += val; return val; } /* * returns true if record is available, false otherwise. * iff an error occurred, false will be returned and record->error will be true.
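 *
 * For background: each Intel HEX record parsed here has the textual form
 * ":LLAAAATT<data>CC", where LL is the data byte count, AAAA the 16-bit
 * address, TT the record type (00 = data, 01 = end of file) and CC a
 * checksum that makes the byte sum of the record zero modulo 256; the
 * parser below reads exactly these fields via usb6fire_fw_ihex_hex().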
*/ static bool usb6fire_fw_ihex_next_record(struct ihex_record *record) { u8 crc = 0; u8 type; int i; record->error = false; /* find begin of record (marked by a colon) */ while (record->txt_offset < record->txt_length && record->txt_data[record->txt_offset] != ':') record->txt_offset++; if (record->txt_offset == record->txt_length) return false; /* number of characters needed for len, addr and type entries */ record->txt_offset++; if (record->txt_offset + 8 > record->txt_length) { record->error = true; return false; } record->len = usb6fire_fw_ihex_hex(record->txt_data + record->txt_offset, &crc); record->txt_offset += 2; record->address = usb6fire_fw_ihex_hex(record->txt_data + record->txt_offset, &crc) << 8; record->txt_offset += 2; record->address |= usb6fire_fw_ihex_hex(record->txt_data + record->txt_offset, &crc); record->txt_offset += 2; type = usb6fire_fw_ihex_hex(record->txt_data + record->txt_offset, &crc); record->txt_offset += 2; /* number of characters needed for data and crc entries */ if (record->txt_offset + 2 * (record->len + 1) > record->txt_length) { record->error = true; return false; } for (i = 0; i < record->len; i++) { record->data[i] = usb6fire_fw_ihex_hex(record->txt_data + record->txt_offset, &crc); record->txt_offset += 2; } usb6fire_fw_ihex_hex(record->txt_data + record->txt_offset, &crc); if (crc) { record->error = true; return false; } if (type == 1 || !record->len) /* eof */ return false; else if (type == 0) return true; else { record->error = true; return false; } } static int usb6fire_fw_ihex_init(const struct firmware *fw, struct ihex_record *record) { record->txt_data = fw->data; record->txt_length = fw->size; record->txt_offset = 0; record->max_len = 0; /* read all records, if loop ends, record->error indicates, * whether ihex is valid. 
*/ while (usb6fire_fw_ihex_next_record(record)) record->max_len = max(record->len, record->max_len); if (record->error) return -EINVAL; record->txt_offset = 0; return 0; } static int usb6fire_fw_ezusb_write(struct usb_device *device, int type, int value, char *data, int len) { return usb_control_msg_send(device, 0, type, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, 0, data, len, 1000, GFP_KERNEL); } static int usb6fire_fw_ezusb_read(struct usb_device *device, int type, int value, char *data, int len) { return usb_control_msg_recv(device, 0, type, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, 0, data, len, 1000, GFP_KERNEL); } static int usb6fire_fw_fpga_write(struct usb_device *device, char *data, int len) { int actual_len; int ret; ret = usb_bulk_msg(device, usb_sndbulkpipe(device, FPGA_EP), data, len, &actual_len, 1000); if (ret < 0) return ret; else if (actual_len != len) return -EIO; return 0; } static int usb6fire_fw_ezusb_upload( struct usb_interface *intf, const char *fwname, unsigned int postaddr, u8 *postdata, unsigned int postlen) { int ret; u8 data; struct usb_device *device = interface_to_usbdev(intf); const struct firmware *fw = NULL; struct ihex_record *rec = kmalloc(sizeof(struct ihex_record), GFP_KERNEL); if (!rec) return -ENOMEM; ret = request_firmware(&fw, fwname, &device->dev); if (ret < 0) { kfree(rec); dev_err(&intf->dev, "error requesting ezusb firmware %s.\n", fwname); return ret; } ret = usb6fire_fw_ihex_init(fw, rec); if (ret < 0) { kfree(rec); release_firmware(fw); dev_err(&intf->dev, "error validating ezusb firmware %s.\n", fwname); return ret; } /* upload firmware image */ data = 0x01; /* stop ezusb cpu */ ret = usb6fire_fw_ezusb_write(device, 0xa0, 0xe600, &data, 1); if (ret) { kfree(rec); release_firmware(fw); dev_err(&intf->dev, "unable to upload ezusb firmware %s: begin message.\n", fwname); return ret; } while (usb6fire_fw_ihex_next_record(rec)) { /* write firmware */ ret = usb6fire_fw_ezusb_write(device, 0xa0, rec->address, rec->data, rec->len); if (ret) { kfree(rec); release_firmware(fw); dev_err(&intf->dev, "unable to upload ezusb firmware %s: data urb.\n", fwname); return ret; } } release_firmware(fw); kfree(rec); if (postdata) { /* write data after firmware has been uploaded */ ret = usb6fire_fw_ezusb_write(device, 0xa0, postaddr, postdata, postlen); if (ret) { dev_err(&intf->dev, "unable to upload ezusb firmware %s: post urb.\n", fwname); return ret; } } data = 0x00; /* resume ezusb cpu */ ret = usb6fire_fw_ezusb_write(device, 0xa0, 0xe600, &data, 1); if (ret) { dev_err(&intf->dev, "unable to upload ezusb firmware %s: end message.\n", fwname); return ret; } return 0; } static int usb6fire_fw_fpga_upload( struct usb_interface *intf, const char *fwname) { int ret; int i; struct usb_device *device = interface_to_usbdev(intf); u8 *buffer = kmalloc(FPGA_BUFSIZE, GFP_KERNEL); const char *c; const char *end; const struct firmware *fw; if (!buffer) return -ENOMEM; ret = request_firmware(&fw, fwname, &device->dev); if (ret < 0) { dev_err(&intf->dev, "unable to get fpga firmware %s.\n", fwname); kfree(buffer); return -EIO; } c = fw->data; end = fw->data + fw->size; ret = usb6fire_fw_ezusb_write(device, 8, 0, NULL, 0); if (ret) { kfree(buffer); release_firmware(fw); dev_err(&intf->dev, "unable to upload fpga firmware: begin urb.\n"); return ret; } while (c != end) { for (i = 0; c != end && i < FPGA_BUFSIZE; i++, c++) buffer[i] = bitrev8((u8)*c); ret = usb6fire_fw_fpga_write(device, buffer, i); if (ret < 0) { release_firmware(fw); 
kfree(buffer); dev_err(&intf->dev, "unable to upload fpga firmware: fw urb.\n"); return ret; } } release_firmware(fw); kfree(buffer); ret = usb6fire_fw_ezusb_write(device, 9, 0, NULL, 0); if (ret) { dev_err(&intf->dev, "unable to upload fpga firmware: end urb.\n"); return ret; } return 0; } /* check, if the firmware version the devices has currently loaded * is known by this driver. 'version' needs to have 4 bytes version * info data. */ static int usb6fire_fw_check(struct usb_interface *intf, const u8 *version) { int i; for (i = 0; i < ARRAY_SIZE(known_fw_versions); i++) if (!memcmp(version, known_fw_versions + i, 2)) return 0; dev_err(&intf->dev, "invalid firmware version in device: %4ph. " "please reconnect to power. if this failure " "still happens, check your firmware installation.", version); return -EINVAL; } int usb6fire_fw_init(struct usb_interface *intf) { int i; int ret; struct usb_device *device = interface_to_usbdev(intf); /* buffer: 8 receiving bytes from device and * sizeof(EP_W_MAX_PACKET_SIZE) bytes for non-const copy */ u8 buffer[12]; ret = usb6fire_fw_ezusb_read(device, 1, 0, buffer, 8); if (ret) { dev_err(&intf->dev, "unable to receive device firmware state.\n"); return ret; } if (buffer[0] != 0xeb || buffer[1] != 0xaa || buffer[2] != 0x55) { dev_err(&intf->dev, "unknown device firmware state received from device:"); for (i = 0; i < 8; i++) printk(KERN_CONT "%02x ", buffer[i]); printk(KERN_CONT "\n"); return -EIO; } /* do we need fpga loader ezusb firmware? */ if (buffer[3] == 0x01) { ret = usb6fire_fw_ezusb_upload(intf, "6fire/dmx6firel2.ihx", 0, NULL, 0); if (ret < 0) return ret; return FW_NOT_READY; } /* do we need fpga firmware and application ezusb firmware? */ else if (buffer[3] == 0x02) { ret = usb6fire_fw_check(intf, buffer + 4); if (ret < 0) return ret; ret = usb6fire_fw_fpga_upload(intf, "6fire/dmx6firecf.bin"); if (ret < 0) return ret; memcpy(buffer, ep_w_max_packet_size, sizeof(ep_w_max_packet_size)); ret = usb6fire_fw_ezusb_upload(intf, "6fire/dmx6fireap.ihx", 0x0003, buffer, sizeof(ep_w_max_packet_size)); if (ret < 0) return ret; return FW_NOT_READY; } /* all fw loaded? */ else if (buffer[3] == 0x03) return usb6fire_fw_check(intf, buffer + 4); /* unknown data? */ else { dev_err(&intf->dev, "unknown device firmware state received from device: "); for (i = 0; i < 8; i++) printk(KERN_CONT "%02x ", buffer[i]); printk(KERN_CONT "\n"); return -EIO; } return 0; }
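The loader above walks the firmware image one Intel HEX record at a time, accumulating a checksum over every byte of the record and rejecting it when the sum is non-zero. Below is a standalone userspace sketch of the same ":LLAAAATTDD..CC" record layout; the record string and helper names are illustrative only and error handling is kept minimal.

/*
 * Parse a single Intel HEX record and verify that all of its bytes,
 * including the trailing checksum byte, sum to zero modulo 256 --
 * the same property usb6fire_fw_ihex_next_record() relies on.
 */
#include <stdio.h>
#include <string.h>

static int hexval(char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	if (c >= 'A' && c <= 'F')
		return c - 'A' + 10;
	return -1;
}

/* Read one byte (two hex digits) and accumulate it into the checksum. */
static int read_byte(const char *p, unsigned char *sum)
{
	int hi = hexval(p[0]), lo = hexval(p[1]);

	if (hi < 0 || lo < 0)
		return -1;
	*sum += (hi << 4) | lo;
	return (hi << 4) | lo;
}

int main(void)
{
	/* A valid record: 3 data bytes (02 33 7A) at address 0x0030, type 0. */
	const char *rec = ":0300300002337A1E";
	unsigned char sum = 0;
	int len, addr_hi, addr_lo, type, i;

	if (rec[0] != ':' || strlen(rec) < 11)
		return 1;
	len = read_byte(rec + 1, &sum);
	addr_hi = read_byte(rec + 3, &sum);
	addr_lo = read_byte(rec + 5, &sum);
	type = read_byte(rec + 7, &sum);
	for (i = 0; i < len; i++)
		read_byte(rec + 9 + 2 * i, &sum);
	read_byte(rec + 9 + 2 * len, &sum);	/* the checksum byte itself */

	printf("len=%d addr=0x%02x%02x type=%d checksum %s\n",
	       len, addr_hi, addr_lo, type, sum == 0 ? "ok" : "BAD");
	return sum == 0 ? 0 : 1;
}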
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_police.c	Input police filter
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		J Hadi Salim (action changes)
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/gso.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_wrapper.h>

/* Each policer is serialized by its individual spinlock */

static struct tc_action_ops act_police_ops;

static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
	[TCA_POLICE_RATE]	= { .len = TC_RTAB_SIZE },
	[TCA_POLICE_PEAKRATE]	= { .len = TC_RTAB_SIZE },
	[TCA_POLICE_AVRATE]	= { .type = NLA_U32 },
	[TCA_POLICE_RESULT]	= { .type = NLA_U32 },
	[TCA_POLICE_RATE64]	= { .type = NLA_U64 },
	[TCA_POLICE_PEAKRATE64]	= { .type = NLA_U64 },
	[TCA_POLICE_PKTRATE64]	= { .type = NLA_U64, .min = 1 },
	[TCA_POLICE_PKTBURST64]	= { .type = NLA_U64, .min = 1 },
};

static int tcf_police_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	int ret = 0, tcfp_result
= TC_ACT_OK, err, size; bool bind = flags & TCA_ACT_FLAGS_BIND; struct nlattr *tb[TCA_POLICE_MAX + 1]; struct tcf_chain *goto_ch = NULL; struct tc_police *parm; struct tcf_police *police; struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL; struct tc_action_net *tn = net_generic(net, act_police_ops.net_id); struct tcf_police_params *new; bool exists = false; u32 index; u64 rate64, prate64; u64 pps, ppsburst; if (nla == NULL) return -EINVAL; err = nla_parse_nested_deprecated(tb, TCA_POLICE_MAX, nla, police_policy, NULL); if (err < 0) return err; if (tb[TCA_POLICE_TBF] == NULL) return -EINVAL; size = nla_len(tb[TCA_POLICE_TBF]); if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat)) return -EINVAL; parm = nla_data(tb[TCA_POLICE_TBF]); index = parm->index; err = tcf_idr_check_alloc(tn, &index, a, bind); if (err < 0) return err; exists = err; if (exists && bind) return ACT_P_BOUND; if (!exists) { ret = tcf_idr_create(tn, index, NULL, a, &act_police_ops, bind, true, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; } ret = ACT_P_CREATED; spin_lock_init(&(to_police(*a)->tcfp_lock)); } else if (!(flags & TCA_ACT_FLAGS_REPLACE)) { tcf_idr_release(*a, bind); return -EEXIST; } err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); if (err < 0) goto release_idr; police = to_police(*a); if (parm->rate.rate) { err = -ENOMEM; R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE], NULL); if (R_tab == NULL) goto failure; if (parm->peakrate.rate) { P_tab = qdisc_get_rtab(&parm->peakrate, tb[TCA_POLICE_PEAKRATE], NULL); if (P_tab == NULL) goto failure; } } if (est) { err = gen_replace_estimator(&police->tcf_bstats, police->common.cpu_bstats, &police->tcf_rate_est, &police->tcf_lock, false, est); if (err) goto failure; } else if (tb[TCA_POLICE_AVRATE] && (ret == ACT_P_CREATED || !gen_estimator_active(&police->tcf_rate_est))) { err = -EINVAL; goto failure; } if (tb[TCA_POLICE_RESULT]) { tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]); if (TC_ACT_EXT_CMP(tcfp_result, TC_ACT_GOTO_CHAIN)) { NL_SET_ERR_MSG(extack, "goto chain not allowed on fallback"); err = -EINVAL; goto failure; } } if ((tb[TCA_POLICE_PKTRATE64] && !tb[TCA_POLICE_PKTBURST64]) || (!tb[TCA_POLICE_PKTRATE64] && tb[TCA_POLICE_PKTBURST64])) { NL_SET_ERR_MSG(extack, "Both or neither packet-per-second burst and rate must be provided"); err = -EINVAL; goto failure; } if (tb[TCA_POLICE_PKTRATE64] && R_tab) { NL_SET_ERR_MSG(extack, "packet-per-second and byte-per-second rate limits not allowed in same action"); err = -EINVAL; goto failure; } new = kzalloc(sizeof(*new), GFP_KERNEL); if (unlikely(!new)) { err = -ENOMEM; goto failure; } /* No failure allowed after this point */ new->tcfp_result = tcfp_result; new->tcfp_mtu = parm->mtu; if (!new->tcfp_mtu) { new->tcfp_mtu = ~0; if (R_tab) new->tcfp_mtu = 255 << R_tab->rate.cell_log; } if (R_tab) { new->rate_present = true; rate64 = nla_get_u64_default(tb[TCA_POLICE_RATE64], 0); psched_ratecfg_precompute(&new->rate, &R_tab->rate, rate64); qdisc_put_rtab(R_tab); } else { new->rate_present = false; } if (P_tab) { new->peak_present = true; prate64 = nla_get_u64_default(tb[TCA_POLICE_PEAKRATE64], 0); psched_ratecfg_precompute(&new->peak, &P_tab->rate, prate64); qdisc_put_rtab(P_tab); } else { new->peak_present = false; } new->tcfp_burst = PSCHED_TICKS2NS(parm->burst); if (new->peak_present) new->tcfp_mtu_ptoks = (s64)psched_l2t_ns(&new->peak, new->tcfp_mtu); if (tb[TCA_POLICE_AVRATE]) new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]); if (tb[TCA_POLICE_PKTRATE64]) { 
pps = nla_get_u64(tb[TCA_POLICE_PKTRATE64]); ppsburst = nla_get_u64(tb[TCA_POLICE_PKTBURST64]); new->pps_present = true; new->tcfp_pkt_burst = PSCHED_TICKS2NS(ppsburst); psched_ppscfg_precompute(&new->ppsrate, pps); } new->action = parm->action; spin_lock_bh(&police->tcf_lock); spin_lock_bh(&police->tcfp_lock); police->tcfp_t_c = ktime_get_ns(); police->tcfp_toks = new->tcfp_burst; if (new->peak_present) police->tcfp_ptoks = new->tcfp_mtu_ptoks; spin_unlock_bh(&police->tcfp_lock); goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); new = rcu_replace_pointer(police->params, new, lockdep_is_held(&police->tcf_lock)); spin_unlock_bh(&police->tcf_lock); if (goto_ch) tcf_chain_put_by_act(goto_ch); if (new) kfree_rcu(new, rcu); return ret; failure: qdisc_put_rtab(P_tab); qdisc_put_rtab(R_tab); if (goto_ch) tcf_chain_put_by_act(goto_ch); release_idr: tcf_idr_release(*a, bind); return err; } static bool tcf_police_mtu_check(struct sk_buff *skb, u32 limit) { u32 len; if (skb_is_gso(skb)) return skb_gso_validate_mac_len(skb, limit); len = qdisc_pkt_len(skb); if (skb_at_tc_ingress(skb)) len += skb->mac_len; return len <= limit; } TC_INDIRECT_SCOPE int tcf_police_act(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { struct tcf_police *police = to_police(a); s64 now, toks, ppstoks = 0, ptoks = 0; struct tcf_police_params *p; int ret; tcf_lastuse_update(&police->tcf_tm); bstats_update(this_cpu_ptr(police->common.cpu_bstats), skb); p = rcu_dereference_bh(police->params); ret = p->action; if (p->tcfp_ewma_rate) { struct gnet_stats_rate_est64 sample; if (!gen_estimator_read(&police->tcf_rate_est, &sample) || sample.bps >= p->tcfp_ewma_rate) goto inc_overlimits; } if (tcf_police_mtu_check(skb, p->tcfp_mtu)) { if (!p->rate_present && !p->pps_present) { ret = p->tcfp_result; goto end; } now = ktime_get_ns(); spin_lock_bh(&police->tcfp_lock); toks = min_t(s64, now - police->tcfp_t_c, p->tcfp_burst); if (p->peak_present) { ptoks = toks + police->tcfp_ptoks; if (ptoks > p->tcfp_mtu_ptoks) ptoks = p->tcfp_mtu_ptoks; ptoks -= (s64)psched_l2t_ns(&p->peak, qdisc_pkt_len(skb)); } if (p->rate_present) { toks += police->tcfp_toks; if (toks > p->tcfp_burst) toks = p->tcfp_burst; toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb)); } else if (p->pps_present) { ppstoks = min_t(s64, now - police->tcfp_t_c, p->tcfp_pkt_burst); ppstoks += police->tcfp_pkttoks; if (ppstoks > p->tcfp_pkt_burst) ppstoks = p->tcfp_pkt_burst; ppstoks -= (s64)psched_pkt2t_ns(&p->ppsrate, 1); } if ((toks | ptoks | ppstoks) >= 0) { police->tcfp_t_c = now; police->tcfp_toks = toks; police->tcfp_ptoks = ptoks; police->tcfp_pkttoks = ppstoks; spin_unlock_bh(&police->tcfp_lock); ret = p->tcfp_result; goto inc_drops; } spin_unlock_bh(&police->tcfp_lock); } inc_overlimits: qstats_overlimit_inc(this_cpu_ptr(police->common.cpu_qstats)); inc_drops: if (ret == TC_ACT_SHOT) qstats_drop_inc(this_cpu_ptr(police->common.cpu_qstats)); end: return ret; } static void tcf_police_cleanup(struct tc_action *a) { struct tcf_police *police = to_police(a); struct tcf_police_params *p; p = rcu_dereference_protected(police->params, 1); if (p) kfree_rcu(p, rcu); } static void tcf_police_stats_update(struct tc_action *a, u64 bytes, u64 packets, u64 drops, u64 lastuse, bool hw) { struct tcf_police *police = to_police(a); struct tcf_t *tm = &police->tcf_tm; tcf_action_update_stats(a, bytes, packets, drops, hw); tm->lastuse = max_t(u64, tm->lastuse, lastuse); } static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int 
ref) { const struct tcf_police *police = to_police(a); unsigned char *b = skb_tail_pointer(skb); const struct tcf_police_params *p; struct tc_police opt = { .index = police->tcf_index, .refcnt = refcount_read(&police->tcf_refcnt) - ref, .bindcnt = atomic_read(&police->tcf_bindcnt) - bind, }; struct tcf_t t; rcu_read_lock(); p = rcu_dereference(police->params); opt.action = p->action; opt.mtu = p->tcfp_mtu; opt.burst = PSCHED_NS2TICKS(p->tcfp_burst); if (p->rate_present) { psched_ratecfg_getrate(&opt.rate, &p->rate); if ((p->rate.rate_bytes_ps >= (1ULL << 32)) && nla_put_u64_64bit(skb, TCA_POLICE_RATE64, p->rate.rate_bytes_ps, TCA_POLICE_PAD)) goto nla_put_failure; } if (p->peak_present) { psched_ratecfg_getrate(&opt.peakrate, &p->peak); if ((p->peak.rate_bytes_ps >= (1ULL << 32)) && nla_put_u64_64bit(skb, TCA_POLICE_PEAKRATE64, p->peak.rate_bytes_ps, TCA_POLICE_PAD)) goto nla_put_failure; } if (p->pps_present) { if (nla_put_u64_64bit(skb, TCA_POLICE_PKTRATE64, p->ppsrate.rate_pkts_ps, TCA_POLICE_PAD)) goto nla_put_failure; if (nla_put_u64_64bit(skb, TCA_POLICE_PKTBURST64, PSCHED_NS2TICKS(p->tcfp_pkt_burst), TCA_POLICE_PAD)) goto nla_put_failure; } if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt)) goto nla_put_failure; if (p->tcfp_result && nla_put_u32(skb, TCA_POLICE_RESULT, p->tcfp_result)) goto nla_put_failure; if (p->tcfp_ewma_rate && nla_put_u32(skb, TCA_POLICE_AVRATE, p->tcfp_ewma_rate)) goto nla_put_failure; tcf_tm_dump(&t, &police->tcf_tm); if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD)) goto nla_put_failure; rcu_read_unlock(); return skb->len; nla_put_failure: rcu_read_unlock(); nlmsg_trim(skb, b); return -1; } static int tcf_police_act_to_flow_act(int tc_act, u32 *extval, struct netlink_ext_ack *extack) { int act_id = -EOPNOTSUPP; if (!TC_ACT_EXT_OPCODE(tc_act)) { if (tc_act == TC_ACT_OK) act_id = FLOW_ACTION_ACCEPT; else if (tc_act == TC_ACT_SHOT) act_id = FLOW_ACTION_DROP; else if (tc_act == TC_ACT_PIPE) act_id = FLOW_ACTION_PIPE; else if (tc_act == TC_ACT_RECLASSIFY) NL_SET_ERR_MSG_MOD(extack, "Offload not supported when conform/exceed action is \"reclassify\""); else NL_SET_ERR_MSG_MOD(extack, "Unsupported conform/exceed action offload"); } else if (TC_ACT_EXT_CMP(tc_act, TC_ACT_GOTO_CHAIN)) { act_id = FLOW_ACTION_GOTO; *extval = tc_act & TC_ACT_EXT_VAL_MASK; } else if (TC_ACT_EXT_CMP(tc_act, TC_ACT_JUMP)) { act_id = FLOW_ACTION_JUMP; *extval = tc_act & TC_ACT_EXT_VAL_MASK; } else if (tc_act == TC_ACT_UNSPEC) { act_id = FLOW_ACTION_CONTINUE; } else { NL_SET_ERR_MSG_MOD(extack, "Unsupported conform/exceed action offload"); } return act_id; } static int tcf_police_offload_act_setup(struct tc_action *act, void *entry_data, u32 *index_inc, bool bind, struct netlink_ext_ack *extack) { if (bind) { struct flow_action_entry *entry = entry_data; struct tcf_police *police = to_police(act); struct tcf_police_params *p; int act_id; p = rcu_dereference_protected(police->params, lockdep_is_held(&police->tcf_lock)); entry->id = FLOW_ACTION_POLICE; entry->police.burst = tcf_police_burst(act); entry->police.rate_bytes_ps = tcf_police_rate_bytes_ps(act); entry->police.peakrate_bytes_ps = tcf_police_peakrate_bytes_ps(act); entry->police.avrate = tcf_police_tcfp_ewma_rate(act); entry->police.overhead = tcf_police_rate_overhead(act); entry->police.burst_pkt = tcf_police_burst_pkt(act); entry->police.rate_pkt_ps = tcf_police_rate_pkt_ps(act); entry->police.mtu = tcf_police_tcfp_mtu(act); act_id = tcf_police_act_to_flow_act(police->tcf_action, &entry->police.exceed.extval, 
extack); if (act_id < 0) return act_id; entry->police.exceed.act_id = act_id; act_id = tcf_police_act_to_flow_act(p->tcfp_result, &entry->police.notexceed.extval, extack); if (act_id < 0) return act_id; entry->police.notexceed.act_id = act_id; *index_inc = 1; } else { struct flow_offload_action *fl_action = entry_data; fl_action->id = FLOW_ACTION_POLICE; } return 0; } MODULE_AUTHOR("Alexey Kuznetsov"); MODULE_DESCRIPTION("Policing actions"); MODULE_LICENSE("GPL"); static struct tc_action_ops act_police_ops = { .kind = "police", .id = TCA_ID_POLICE, .owner = THIS_MODULE, .stats_update = tcf_police_stats_update, .act = tcf_police_act, .dump = tcf_police_dump, .init = tcf_police_init, .cleanup = tcf_police_cleanup, .offload_act_setup = tcf_police_offload_act_setup, .size = sizeof(struct tcf_police), }; MODULE_ALIAS_NET_ACT("police"); static __net_init int police_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, act_police_ops.net_id); return tc_action_net_init(net, tn, &act_police_ops); } static void __net_exit police_exit_net(struct list_head *net_list) { tc_action_net_exit(net_list, act_police_ops.net_id); } static struct pernet_operations police_net_ops = { .init = police_init_net, .exit_batch = police_exit_net, .id = &act_police_ops.net_id, .size = sizeof(struct tc_action_net), }; static int __init police_init_module(void) { return tcf_register_action(&act_police_ops, &police_net_ops); } static void __exit police_cleanup_module(void) { tcf_unregister_action(&act_police_ops, &police_net_ops); } module_init(police_init_module); module_exit(police_cleanup_module);
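tcf_police_act() above is a token bucket measured in nanoseconds: the time elapsed since the last conforming packet is capped at the configured burst, added to the stored balance, and the transmission time of the packet at the configured rate is subtracted; the packet conforms only if the balance stays non-negative. The userspace sketch below reproduces that arithmetic; the rate, burst and packet sizes are illustrative, and the kernel's psched_l2t_ns() length-to-time conversion is approximated with plain division.

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

struct bucket {
	int64_t toks;		/* current token balance, in ns */
	int64_t burst;		/* bucket depth, in ns */
	uint64_t t_c;		/* timestamp of the last update, in ns */
	uint64_t rate_bps;	/* configured rate, bytes per second */
};

/* Time needed to send 'len' bytes at the configured rate. */
static int64_t len_to_ns(const struct bucket *b, unsigned int len)
{
	return (int64_t)(len * NSEC_PER_SEC / b->rate_bps);
}

/* Returns 1 if the packet conforms (and charges it), 0 if it exceeds. */
static int police(struct bucket *b, uint64_t now, unsigned int len)
{
	int64_t toks = now - b->t_c;

	if (toks > b->burst)		/* cap replenishment at the burst */
		toks = b->burst;
	toks += b->toks;
	if (toks > b->burst)
		toks = b->burst;
	toks -= len_to_ns(b, len);	/* charge this packet */
	if (toks < 0)
		return 0;		/* exceed: state is left untouched */
	b->t_c = now;
	b->toks = toks;
	return 1;
}

int main(void)
{
	/* 1 Mbyte/s rate with a 10 kbyte burst, both expressed as time. */
	struct bucket b = {
		.rate_bps = 1000000,
		.burst = 10000ULL * NSEC_PER_SEC / 1000000,
	};
	uint64_t now = 0;
	unsigned int i;

	b.toks = b.burst;
	for (i = 0; i < 12; i++) {
		printf("pkt %2u at t=%llu us: %s\n", i,
		       (unsigned long long)(now / 1000),
		       police(&b, now, 1500) ? "conform" : "exceed");
		now += 500000;	/* one 1500-byte packet every 0.5 ms */
	}
	return 0;
}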
// SPDX-License-Identifier: GPL-2.0-only

#include "netlink.h"
#include "common.h"
#include "bitset.h"

struct debug_req_info {
	struct ethnl_req_info		base;
};

struct debug_reply_data {
	struct ethnl_reply_data		base;
	u32				msg_mask;
};

#define DEBUG_REPDATA(__reply_base) \
	container_of(__reply_base, struct debug_reply_data, base)

const struct nla_policy ethnl_debug_get_policy[] = {
	[ETHTOOL_A_DEBUG_HEADER]	= NLA_POLICY_NESTED(ethnl_header_policy),
};

static int debug_prepare_data(const struct ethnl_req_info *req_base,
			      struct ethnl_reply_data *reply_base,
			      const struct genl_info *info)
{
	struct debug_reply_data *data = DEBUG_REPDATA(reply_base);
	struct net_device *dev = reply_base->dev;
	int ret;

	if (!dev->ethtool_ops->get_msglevel)
		return -EOPNOTSUPP;

	ret = ethnl_ops_begin(dev);
	if (ret < 0)
		return ret;
	data->msg_mask = dev->ethtool_ops->get_msglevel(dev);
	ethnl_ops_complete(dev);

	return 0;
}

static int debug_reply_size(const struct ethnl_req_info *req_base,
			    const struct ethnl_reply_data *reply_base)
{
	const struct debug_reply_data *data = DEBUG_REPDATA(reply_base);
	bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;

	return ethnl_bitset32_size(&data->msg_mask, NULL, NETIF_MSG_CLASS_COUNT,
				   netif_msg_class_names, compact);
}

static int debug_fill_reply(struct sk_buff *skb,
			    const struct ethnl_req_info *req_base,
			    const struct ethnl_reply_data *reply_base)
{
	const struct debug_reply_data *data = DEBUG_REPDATA(reply_base);
	bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;

	return ethnl_put_bitset32(skb, ETHTOOL_A_DEBUG_MSGMASK, &data->msg_mask,
				  NULL, NETIF_MSG_CLASS_COUNT,
				  netif_msg_class_names, compact);
}

/* DEBUG_SET */

const struct nla_policy ethnl_debug_set_policy[] = {
	[ETHTOOL_A_DEBUG_HEADER]	= NLA_POLICY_NESTED(ethnl_header_policy),
	[ETHTOOL_A_DEBUG_MSGMASK]	= { .type = NLA_NESTED },
};

static int
ethnl_set_debug_validate(struct ethnl_req_info *req_info, struct genl_info *info)
{
	const struct ethtool_ops *ops = req_info->dev->ethtool_ops;

	return ops->get_msglevel && ops->set_msglevel ? 1 : -EOPNOTSUPP;
}

static int
ethnl_set_debug(struct ethnl_req_info *req_info, struct genl_info *info)
{
	struct net_device *dev = req_info->dev;
	struct nlattr **tb = info->attrs;
	bool mod = false;
	u32 msg_mask;
	int ret;

	msg_mask = dev->ethtool_ops->get_msglevel(dev);
	ret = ethnl_update_bitset32(&msg_mask, NETIF_MSG_CLASS_COUNT,
				    tb[ETHTOOL_A_DEBUG_MSGMASK],
				    netif_msg_class_names, info->extack, &mod);
	if (ret < 0 || !mod)
		return ret;

	dev->ethtool_ops->set_msglevel(dev, msg_mask);

	return 1;
}

const struct ethnl_request_ops ethnl_debug_request_ops = {
	.request_cmd		= ETHTOOL_MSG_DEBUG_GET,
	.reply_cmd		= ETHTOOL_MSG_DEBUG_GET_REPLY,
	.hdr_attr		= ETHTOOL_A_DEBUG_HEADER,
	.req_info_size		= sizeof(struct debug_req_info),
	.reply_data_size	= sizeof(struct debug_reply_data),

	.prepare_data		= debug_prepare_data,
	.reply_size		= debug_reply_size,
	.fill_reply		= debug_fill_reply,

	.set_validate		= ethnl_set_debug_validate,
	.set			= ethnl_set_debug,
	.set_ntf_cmd		= ETHTOOL_MSG_DEBUG_NTF,
};
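ethnl_set_debug() above only calls the driver's setter when ethnl_update_bitset32() reports that the requested selector/value pair actually changed the message mask. A minimal userspace sketch of that read-modify-write flow follows; the class names, bit values and helper functions are invented for the example and only mimic the shape of the kernel code.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

enum { MSG_DRV = 1 << 0, MSG_PROBE = 1 << 1, MSG_LINK = 1 << 2 };

static uint32_t device_msglevel = MSG_DRV;	/* stands in for get_msglevel() */

static void set_msglevel(uint32_t mask)		/* stands in for set_msglevel() */
{
	device_msglevel = mask;
	printf("driver msglevel set to 0x%x\n", (unsigned int)mask);
}

/* Apply 'value' under 'selector' to *mask and report whether it changed. */
static void update_bits(uint32_t *mask, uint32_t selector, uint32_t value,
			bool *mod)
{
	uint32_t new_mask = (*mask & ~selector) | (value & selector);

	if (new_mask != *mask) {
		*mask = new_mask;
		*mod = true;
	}
}

int main(void)
{
	uint32_t mask = device_msglevel;
	bool mod = false;

	/* Request: enable LINK messages, leave the other classes alone. */
	update_bits(&mask, MSG_LINK, MSG_LINK, &mod);
	/* Request: DRV is already set, so this does not count as a change. */
	update_bits(&mask, MSG_DRV, MSG_DRV, &mod);

	if (mod)
		set_msglevel(mask);
	else
		printf("no change, setter not called\n");
	return 0;
}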
// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPv6 library code, needed by static components when full IPv6 support is
 * not configured or static.
 */

#include <linux/export.h>
#include <net/ipv6.h>

/*
 * find out if nexthdr is a well-known extension header or a protocol
 */
bool ipv6_ext_hdr(u8 nexthdr)
{
	/*
	 * find out if nexthdr is an extension header or a protocol
	 */
	return (nexthdr == NEXTHDR_HOP)		||
	       (nexthdr == NEXTHDR_ROUTING)	||
	       (nexthdr == NEXTHDR_FRAGMENT)	||
	       (nexthdr == NEXTHDR_AUTH)	||
	       (nexthdr == NEXTHDR_NONE)	||
	       (nexthdr == NEXTHDR_DEST);
}
EXPORT_SYMBOL(ipv6_ext_hdr);

/*
 * Skip any extension headers. This is used by the ICMP module.
 *
 * Note that strictly speaking this conflicts with RFC 2460 4.0:
 * ...The contents and semantics of each extension header determine whether
 * or not to proceed to the next header.  Therefore, extension headers must
 * be processed strictly in the order they appear in the packet; a
 * receiver must not, for example, scan through a packet looking for a
 * particular kind of extension header and process that header prior to
 * processing all preceding ones.
 *
 * We do exactly this. This is a protocol bug. We can't decide after seeing
 * an unknown discard-with-error flavour TLV option whether it is an ICMP
 * error message or not (errors should never be sent in reply to ICMP
 * error messages).
 *
 * But I see no other way to do this. This might need to be reexamined
 * when Linux implements ESP (and maybe AUTH) headers.
 * --AK
 *
 * This function parses (probably truncated) exthdr set "hdr".
 * "nexthdrp" initially points to some place,
 * where type of the first header can be found.
 *
 * It skips all well-known exthdrs, and returns pointer to the start
 * of unparsable area i.e. the first header with unknown type.
 * If it is not NULL *nexthdrp is updated by type/protocol of this header.
 *
 * NOTES: - if packet terminated with NEXTHDR_NONE it returns NULL.
 *        - it may return pointer pointing beyond end of packet,
 *          if the last recognized header is truncated in the middle.
 *        - if packet is truncated, so that all parsed headers are skipped,
 *          it returns NULL.
 *        - First fragment header is skipped, not-first ones
 *          are considered as unparsable.
 *        - Reports the offset field of the final fragment header so it is
 *          possible to tell whether this is a first fragment, later fragment,
 *          or not fragmented.
* - ESP is unparsable for now and considered like * normal payload protocol. * - Note also special handling of AUTH header. Thanks to IPsec wizards. * * --ANK (980726) */ int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp, __be16 *frag_offp) { u8 nexthdr = *nexthdrp; *frag_offp = 0; while (ipv6_ext_hdr(nexthdr)) { struct ipv6_opt_hdr _hdr, *hp; int hdrlen; if (nexthdr == NEXTHDR_NONE) return -1; hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); if (!hp) return -1; if (nexthdr == NEXTHDR_FRAGMENT) { __be16 _frag_off, *fp; fp = skb_header_pointer(skb, start+offsetof(struct frag_hdr, frag_off), sizeof(_frag_off), &_frag_off); if (!fp) return -1; *frag_offp = *fp; if (ntohs(*frag_offp) & ~0x7) break; hdrlen = 8; } else if (nexthdr == NEXTHDR_AUTH) hdrlen = ipv6_authlen(hp); else hdrlen = ipv6_optlen(hp); nexthdr = hp->nexthdr; start += hdrlen; } *nexthdrp = nexthdr; return start; } EXPORT_SYMBOL(ipv6_skip_exthdr); int ipv6_find_tlv(const struct sk_buff *skb, int offset, int type) { const unsigned char *nh = skb_network_header(skb); int packet_len = skb_tail_pointer(skb) - skb_network_header(skb); struct ipv6_opt_hdr *hdr; int len; if (offset + 2 > packet_len) goto bad; hdr = (struct ipv6_opt_hdr *)(nh + offset); len = ((hdr->hdrlen + 1) << 3); if (offset + len > packet_len) goto bad; offset += 2; len -= 2; while (len > 0) { int opttype = nh[offset]; int optlen; if (opttype == type) return offset; switch (opttype) { case IPV6_TLV_PAD1: optlen = 1; break; default: if (len < 2) goto bad; optlen = nh[offset + 1] + 2; if (optlen > len) goto bad; break; } offset += optlen; len -= optlen; } /* not_found */ bad: return -1; } EXPORT_SYMBOL_GPL(ipv6_find_tlv); /* * find the offset to specified header or the protocol number of last header * if target < 0. "last header" is transport protocol header, ESP, or * "No next header". * * Note that *offset is used as input/output parameter, and if it is not zero, * then it must be a valid offset to an inner IPv6 header. This can be used * to explore inner IPv6 header, eg. ICMPv6 error messages. * * If target header is found, its offset is set in *offset and return protocol * number. Otherwise, return -1. * * If the first fragment doesn't contain the final protocol header or * NEXTHDR_NONE it is considered invalid. * * Note that non-1st fragment is special case that "the protocol number * of last header" is "next header" field in Fragment header. In this case, * *offset is meaningless and fragment offset is stored in *fragoff if fragoff * isn't NULL. * * if flags is not NULL and it's a fragment, then the frag flag * IP6_FH_F_FRAG will be set. If it's an AH header, the * IP6_FH_F_AUTH flag is set and target < 0, then this function will * stop at the AH header. If IP6_FH_F_SKIP_RH flag was passed, then this * function will skip all those routing headers, where segements_left was 0. 
*/ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, int target, unsigned short *fragoff, int *flags) { unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr); u8 nexthdr = ipv6_hdr(skb)->nexthdr; bool found; if (fragoff) *fragoff = 0; if (*offset) { struct ipv6hdr _ip6, *ip6; ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6); if (!ip6 || (ip6->version != 6)) return -EBADMSG; start = *offset + sizeof(struct ipv6hdr); nexthdr = ip6->nexthdr; } do { struct ipv6_opt_hdr _hdr, *hp; unsigned int hdrlen; found = (nexthdr == target); if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) { if (target < 0 || found) break; return -ENOENT; } hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); if (!hp) return -EBADMSG; if (nexthdr == NEXTHDR_ROUTING) { struct ipv6_rt_hdr _rh, *rh; rh = skb_header_pointer(skb, start, sizeof(_rh), &_rh); if (!rh) return -EBADMSG; if (flags && (*flags & IP6_FH_F_SKIP_RH) && rh->segments_left == 0) found = false; } if (nexthdr == NEXTHDR_FRAGMENT) { unsigned short _frag_off; __be16 *fp; if (flags) /* Indicate that this is a fragment */ *flags |= IP6_FH_F_FRAG; fp = skb_header_pointer(skb, start+offsetof(struct frag_hdr, frag_off), sizeof(_frag_off), &_frag_off); if (!fp) return -EBADMSG; _frag_off = ntohs(*fp) & ~0x7; if (_frag_off) { if (target < 0 && ((!ipv6_ext_hdr(hp->nexthdr)) || hp->nexthdr == NEXTHDR_NONE)) { if (fragoff) *fragoff = _frag_off; return hp->nexthdr; } if (!found) return -ENOENT; if (fragoff) *fragoff = _frag_off; break; } hdrlen = 8; } else if (nexthdr == NEXTHDR_AUTH) { if (flags && (*flags & IP6_FH_F_AUTH) && (target < 0)) break; hdrlen = ipv6_authlen(hp); } else hdrlen = ipv6_optlen(hp); if (!found) { nexthdr = hp->nexthdr; start += hdrlen; } } while (!found); *offset = start; return nexthdr; } EXPORT_SYMBOL(ipv6_find_hdr);
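Both ipv6_skip_exthdr() and ipv6_find_hdr() above step through the extension-header chain with the same length rules: a fragment header is always 8 bytes, an AH header length field counts 32-bit words plus 2, and the other extension headers count 64-bit units plus 1. The userspace sketch below walks a hand-built packet with those rules; it is deliberately simplified (no truncation, NEXTHDR_NONE or non-first-fragment handling) and the packet bytes are illustrative.

#include <stdio.h>
#include <stdint.h>

#define NEXTHDR_HOP		0
#define NEXTHDR_TCP		6
#define NEXTHDR_ROUTING		43
#define NEXTHDR_FRAGMENT	44
#define NEXTHDR_AUTH		51
#define NEXTHDR_DEST		60

static int is_ext_hdr(uint8_t nexthdr)
{
	return nexthdr == NEXTHDR_HOP || nexthdr == NEXTHDR_ROUTING ||
	       nexthdr == NEXTHDR_FRAGMENT || nexthdr == NEXTHDR_AUTH ||
	       nexthdr == NEXTHDR_DEST;
}

int main(void)
{
	/*
	 * Bytes following the fixed IPv6 header: a Hop-by-Hop header
	 * (8 bytes, next header = Fragment, padded with a PadN option),
	 * a Fragment header (8 bytes, next header = TCP, offset 0),
	 * then the first bytes of the TCP header.
	 */
	uint8_t pkt[] = {
		NEXTHDR_FRAGMENT, 0, 1, 4, 0, 0, 0, 0,	/* hop-by-hop */
		NEXTHDR_TCP, 0, 0, 0, 0, 0, 0, 1,	/* fragment header */
		0x1f, 0x90,				/* TCP starts here */
	};
	unsigned int off = 0;
	uint8_t nexthdr = NEXTHDR_HOP;	/* next-header field of the IPv6 header */

	while (is_ext_hdr(nexthdr)) {
		uint8_t hdr_nexthdr = pkt[off];
		uint8_t hdrlen_field = pkt[off + 1];
		unsigned int hdrlen;

		if (nexthdr == NEXTHDR_FRAGMENT)
			hdrlen = 8;
		else if (nexthdr == NEXTHDR_AUTH)
			hdrlen = (hdrlen_field + 2) << 2;
		else
			hdrlen = (hdrlen_field + 1) << 3;

		printf("ext header %u: %u bytes, next %u\n",
		       nexthdr, hdrlen, hdr_nexthdr);
		nexthdr = hdr_nexthdr;
		off += hdrlen;
	}
	printf("transport protocol %u starts at offset %u\n", nexthdr, off);
	return 0;
}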
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Definitions for key type implementations
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#ifndef _LINUX_KEY_TYPE_H
#define _LINUX_KEY_TYPE_H

#include <linux/key.h>
#include <linux/errno.h>

#ifdef CONFIG_KEYS

struct kernel_pkey_query;
struct kernel_pkey_params;

/*
 * Pre-parsed payload, used by key add, update and instantiate.
 *
 * This struct will be cleared and data and datalen will be set with the data
 * and length parameters from the caller and quotalen will be set from
 * def_datalen from the key type.  Then if the preparse() op is provided by the
 * key type, that will be called.  Then the struct will be passed to the
 * instantiate() or the update() op.
 *
 * If the preparse() op is given, the free_preparse() op will be called to
 * clear the contents.
 */
struct key_preparsed_payload {
	const char	*orig_description; /* Actual or proposed description (maybe NULL) */
	char		*description;	/* Proposed key description (or NULL) */
	union key_payload payload;	/* Proposed payload */
	const void	*data;		/* Raw data */
	size_t		datalen;	/* Raw datalen */
	size_t		quotalen;	/* Quota length for proposed payload */
	time64_t	expiry;		/* Expiry time of key */
} __randomize_layout;

typedef int (*request_key_actor_t)(struct key *auth_key, void *aux);

/*
 * Preparsed matching criterion.
 */
struct key_match_data {
	/* Comparison function, defaults to exact description match, but can be
	 * overridden by type->match_preparse().  Should return true if a match
	 * is found and false if not.
	 */
	bool (*cmp)(const struct key *key,
		    const struct key_match_data *match_data);

	const void	*raw_data;	/* Raw match data */
	void		*preparsed;	/* For ->match_preparse() to stash stuff */
	unsigned	lookup_type;	/* Type of lookup for this search. */
#define KEYRING_SEARCH_LOOKUP_DIRECT	0x0000	/* Direct lookup by description. */
#define KEYRING_SEARCH_LOOKUP_ITERATE	0x0001	/* Iterative search. */
};

/*
 * kernel managed key type definition
 */
struct key_type {
	/* name of the type */
	const char *name;

	/* default payload length for quota precalculation (optional)
	 * - this can be used instead of calling key_payload_reserve(), that
	 *   function only needs to be called if the real datalen is different
	 */
	size_t def_datalen;

	unsigned int flags;
#define KEY_TYPE_NET_DOMAIN	0x00000001	/* Keys of this type have a net namespace domain */
#define KEY_TYPE_INSTANT_REAP	0x00000002	/* Keys of this type don't have a delay after expiring */

	/* vet a description */
	int (*vet_description)(const char *description);

	/* Preparse the data blob from userspace that is to be the payload,
	 * generating a proposed description and payload that will be handed to
	 * the instantiate() and update() ops.
	 */
	int (*preparse)(struct key_preparsed_payload *prep);

	/* Free a preparse data structure.
*/ void (*free_preparse)(struct key_preparsed_payload *prep); /* instantiate a key of this type * - this method should call key_payload_reserve() to determine if the * user's quota will hold the payload */ int (*instantiate)(struct key *key, struct key_preparsed_payload *prep); /* update a key of this type (optional) * - this method should call key_payload_reserve() to recalculate the * quota consumption * - the key must be locked against read when modifying */ int (*update)(struct key *key, struct key_preparsed_payload *prep); /* Preparse the data supplied to ->match() (optional). The * data to be preparsed can be found in match_data->raw_data. * The lookup type can also be set by this function. */ int (*match_preparse)(struct key_match_data *match_data); /* * Free preparsed match data (optional). This should be supplied if * ->match_preparse() is supplied. */ void (*match_free)(struct key_match_data *match_data); /* * Clear some of the data from a key on revocation (optional). * - the key's semaphore will be write-locked by the caller */ void (*revoke)(struct key *key); /* clear the data from a key (optional) */ void (*destroy)(struct key *key); /* describe a key */ void (*describe)(const struct key *key, struct seq_file *p); /* read a key's data (optional) * - permission checks will be done by the caller * - the key's semaphore will be readlocked by the caller * - should return the amount of data that could be read, no matter how * much is copied into the buffer * - shouldn't do the copy if the buffer is NULL */ long (*read)(const struct key *key, char *buffer, size_t buflen); /* handle request_key() for this type instead of invoking * /sbin/request-key (optional) * - key is the key to instantiate * - authkey is the authority to assume when instantiating this key * - op is the operation to be done, usually "create" * - the call must not return until the instantiation process has run * its course */ request_key_actor_t request_key; /* Look up a keyring access restriction (optional) * * - NULL is a valid return value (meaning the requested restriction * is known but will never block addition of a key) * - should return -EINVAL if the restriction is unknown */ struct key_restriction *(*lookup_restriction)(const char *params); /* Asymmetric key accessor functions. 
*/ int (*asym_query)(const struct kernel_pkey_params *params, struct kernel_pkey_query *info); int (*asym_eds_op)(struct kernel_pkey_params *params, const void *in, void *out); int (*asym_verify_signature)(struct kernel_pkey_params *params, const void *in, const void *in2); /* internal fields */ struct list_head link; /* link in types list */ struct lock_class_key lock_class; /* key->sem lock class */ } __randomize_layout; extern struct key_type key_type_keyring; extern int register_key_type(struct key_type *ktype); extern void unregister_key_type(struct key_type *ktype); extern int key_payload_reserve(struct key *key, size_t datalen); extern int key_instantiate_and_link(struct key *key, const void *data, size_t datalen, struct key *keyring, struct key *authkey); extern int key_reject_and_link(struct key *key, unsigned timeout, unsigned error, struct key *keyring, struct key *authkey); extern void complete_request_key(struct key *authkey, int error); static inline int key_negate_and_link(struct key *key, unsigned timeout, struct key *keyring, struct key *authkey) { return key_reject_and_link(key, timeout, ENOKEY, keyring, authkey); } extern int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep); #endif /* CONFIG_KEYS */ #endif /* _LINUX_KEY_TYPE_H */
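Putting the interface above together, a key type supplies a struct key_type with at least a name and an instantiate routine and registers it with register_key_type(). Below is a minimal sketch of such a type; the "example" name, the 64-byte size limit and the module wrapping are assumptions for illustration, not an existing kernel key type, and only operations declared in this header are used.

#include <linux/key-type.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static int example_preparse(struct key_preparsed_payload *prep)
{
	/* Require a payload and cap its size; quotalen keeps its default. */
	if (prep->datalen == 0 || prep->datalen > 64)
		return -EINVAL;
	return 0;
}

static void example_free_preparse(struct key_preparsed_payload *prep)
{
	/* Nothing was allocated in preparse, so there is nothing to free. */
}

static void example_describe(const struct key *key, struct seq_file *m)
{
	seq_puts(m, key->description);
}

static struct key_type key_type_example = {
	.name		= "example",
	.preparse	= example_preparse,
	.free_preparse	= example_free_preparse,
	.instantiate	= generic_key_instantiate,
	.describe	= example_describe,
};

static int __init example_key_init(void)
{
	return register_key_type(&key_type_example);
}

static void __exit example_key_exit(void)
{
	unregister_key_type(&key_type_example);
}

module_init(example_key_init);
module_exit(example_key_exit);
MODULE_LICENSE("GPL");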
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * V4L2 controls framework uAPI implementation:
 *
 * Copyright (C) 2010-2021  Hans Verkuil <hverkuil@kernel.org>
 */

#define pr_fmt(fmt) "v4l2-ctrls: " fmt

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>

#include "v4l2-ctrls-priv.h"

/* Internal temporary helper struct, one for each v4l2_ext_control */
struct v4l2_ctrl_helper {
	/* Pointer to the control reference of the master control */
	struct v4l2_ctrl_ref *mref;
	/* The control ref corresponding to the v4l2_ext_control ID field. */
	struct v4l2_ctrl_ref *ref;
	/*
	 * v4l2_ext_control index of the next control belonging to the
	 * same cluster, or 0 if there isn't any.
	 */
	u32 next;
};

/*
 * Helper functions to copy control payload data from kernel space to
 * user space and vice versa.
*/ /* Helper function: copy the given control value back to the caller */ static int ptr_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr ptr) { u32 len; if (ctrl->is_ptr && !ctrl->is_string) return copy_to_user(c->ptr, ptr.p_const, c->size) ? -EFAULT : 0; switch (ctrl->type) { case V4L2_CTRL_TYPE_STRING: len = strlen(ptr.p_char); if (c->size < len + 1) { c->size = ctrl->elem_size; return -ENOSPC; } return copy_to_user(c->string, ptr.p_char, len + 1) ? -EFAULT : 0; case V4L2_CTRL_TYPE_INTEGER64: c->value64 = *ptr.p_s64; break; default: c->value = *ptr.p_s32; break; } return 0; } /* Helper function: copy the current control value back to the caller */ static int cur_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl) { return ptr_to_user(c, ctrl, ctrl->p_cur); } /* Helper function: copy the new control value back to the caller */ static int new_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl) { return ptr_to_user(c, ctrl, ctrl->p_new); } /* Helper function: copy the request value back to the caller */ static int req_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl_ref *ref) { return ptr_to_user(c, ref->ctrl, ref->p_req); } /* Helper function: copy the initial control value back to the caller */ static int def_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl) { ctrl->type_ops->init(ctrl, 0, ctrl->p_new); return ptr_to_user(c, ctrl, ctrl->p_new); } /* Helper function: copy the minimum control value back to the caller */ static int min_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl) { ctrl->type_ops->minimum(ctrl, 0, ctrl->p_new); return ptr_to_user(c, ctrl, ctrl->p_new); } /* Helper function: copy the maximum control value back to the caller */ static int max_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl) { ctrl->type_ops->maximum(ctrl, 0, ctrl->p_new); return ptr_to_user(c, ctrl, ctrl->p_new); } /* Helper function: copy the caller-provider value as the new control value */ static int user_to_new(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl) { int ret; u32 size; ctrl->is_new = 0; if (ctrl->is_dyn_array && c->size > ctrl->p_array_alloc_elems * ctrl->elem_size) { void *old = ctrl->p_array; void *tmp = kvzalloc(2 * c->size, GFP_KERNEL); if (!tmp) return -ENOMEM; memcpy(tmp, ctrl->p_new.p, ctrl->elems * ctrl->elem_size); memcpy(tmp + c->size, ctrl->p_cur.p, ctrl->elems * ctrl->elem_size); ctrl->p_new.p = tmp; ctrl->p_cur.p = tmp + c->size; ctrl->p_array = tmp; ctrl->p_array_alloc_elems = c->size / ctrl->elem_size; kvfree(old); } if (ctrl->is_ptr && !ctrl->is_string) { unsigned int elems = c->size / ctrl->elem_size; if (copy_from_user(ctrl->p_new.p, c->ptr, c->size)) return -EFAULT; ctrl->is_new = 1; if (ctrl->is_dyn_array) ctrl->new_elems = elems; else if (ctrl->is_array) ctrl->type_ops->init(ctrl, elems, ctrl->p_new); return 0; } switch (ctrl->type) { case V4L2_CTRL_TYPE_INTEGER64: *ctrl->p_new.p_s64 = c->value64; break; case V4L2_CTRL_TYPE_STRING: size = c->size; if (size == 0) return -ERANGE; if (size > ctrl->maximum + 1) size = ctrl->maximum + 1; ret = copy_from_user(ctrl->p_new.p_char, c->string, size) ? -EFAULT : 0; if (!ret) { char last = ctrl->p_new.p_char[size - 1]; ctrl->p_new.p_char[size - 1] = 0; /* * If the string was longer than ctrl->maximum, * then return an error. 
*/ if (strlen(ctrl->p_new.p_char) == ctrl->maximum && last) return -ERANGE; ctrl->is_new = 1; } return ret; default: *ctrl->p_new.p_s32 = c->value; break; } ctrl->is_new = 1; return 0; } /* * VIDIOC_G/TRY/S_EXT_CTRLS implementation */ /* * Some general notes on the atomic requirements of VIDIOC_G/TRY/S_EXT_CTRLS: * * It is not a fully atomic operation, just best-effort only. After all, if * multiple controls have to be set through multiple i2c writes (for example) * then some initial writes may succeed while others fail. Thus leaving the * system in an inconsistent state. The question is how much effort you are * willing to spend on trying to make something atomic that really isn't. * * From the point of view of an application the main requirement is that * when you call VIDIOC_S_EXT_CTRLS and some values are invalid then an * error should be returned without actually affecting any controls. * * If all the values are correct, then it is acceptable to just give up * in case of low-level errors. * * It is important though that the application can tell when only a partial * configuration was done. The way we do that is through the error_idx field * of struct v4l2_ext_controls: if that is equal to the count field then no * controls were affected. Otherwise all controls before that index were * successful in performing their 'get' or 'set' operation, the control at * the given index failed, and you don't know what happened with the controls * after the failed one. Since if they were part of a control cluster they * could have been successfully processed (if a cluster member was encountered * at index < error_idx), they could have failed (if a cluster member was at * error_idx), or they may not have been processed yet (if the first cluster * member appeared after error_idx). * * It is all fairly theoretical, though. In practice all you can do is to * bail out. If error_idx == count, then it is an application bug. If * error_idx < count then it is only an application bug if the error code was * EBUSY. That usually means that something started streaming just when you * tried to set the controls. In all other cases it is a driver/hardware * problem and all you can do is to retry or bail out. * * Note that these rules do not apply to VIDIOC_TRY_EXT_CTRLS: since that * never modifies controls the error_idx is just set to whatever control * has an invalid value. */ /* * Prepare for the extended g/s/try functions. * Find the controls in the control array and do some basic checks. */ static int prepare_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs, struct v4l2_ctrl_helper *helpers, struct video_device *vdev, bool get) { struct v4l2_ctrl_helper *h; bool have_clusters = false; u32 i; for (i = 0, h = helpers; i < cs->count; i++, h++) { struct v4l2_ext_control *c = &cs->controls[i]; struct v4l2_ctrl_ref *ref; struct v4l2_ctrl *ctrl; u32 id = c->id & V4L2_CTRL_ID_MASK; cs->error_idx = i; if (cs->which && (cs->which < V4L2_CTRL_WHICH_DEF_VAL || cs->which > V4L2_CTRL_WHICH_MAX_VAL) && V4L2_CTRL_ID2WHICH(id) != cs->which) { dprintk(vdev, "invalid which 0x%x or control id 0x%x\n", cs->which, id); return -EINVAL; } /* * Old-style private controls are not allowed for * extended controls. 
*/ if (id >= V4L2_CID_PRIVATE_BASE) { dprintk(vdev, "old-style private controls not allowed\n"); return -EINVAL; } ref = find_ref_lock(hdl, id); if (!ref) { dprintk(vdev, "cannot find control id 0x%x\n", id); return -EINVAL; } h->ref = ref; ctrl = ref->ctrl; if (ctrl->flags & V4L2_CTRL_FLAG_DISABLED) { dprintk(vdev, "control id 0x%x is disabled\n", id); return -EINVAL; } if (!(ctrl->flags & V4L2_CTRL_FLAG_HAS_WHICH_MIN_MAX) && (cs->which == V4L2_CTRL_WHICH_MIN_VAL || cs->which == V4L2_CTRL_WHICH_MAX_VAL)) { dprintk(vdev, "invalid which 0x%x or control id 0x%x\n", cs->which, id); return -EINVAL; } if (ctrl->cluster[0]->ncontrols > 1) have_clusters = true; if (ctrl->cluster[0] != ctrl) ref = find_ref_lock(hdl, ctrl->cluster[0]->id); if (ctrl->is_dyn_array) { unsigned int max_size = ctrl->dims[0] * ctrl->elem_size; unsigned int tot_size = ctrl->elem_size; if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) tot_size *= ref->p_req_elems; else tot_size *= ctrl->elems; c->size = ctrl->elem_size * (c->size / ctrl->elem_size); if (get) { if (c->size < tot_size) { c->size = tot_size; return -ENOSPC; } c->size = tot_size; } else { if (c->size > max_size) { c->size = max_size; return -ENOSPC; } if (!c->size) return -EFAULT; } } else if (ctrl->is_ptr && !ctrl->is_string) { unsigned int tot_size = ctrl->elems * ctrl->elem_size; if (c->size < tot_size) { /* * In the get case the application first * queries to obtain the size of the control. */ if (get) { c->size = tot_size; return -ENOSPC; } dprintk(vdev, "pointer control id 0x%x size too small, %d bytes but %d bytes needed\n", id, c->size, tot_size); return -EFAULT; } c->size = tot_size; } /* Store the ref to the master control of the cluster */ h->mref = ref; /* * Initially set next to 0, meaning that there is no other * control in this helper array belonging to the same * cluster. */ h->next = 0; } /* * We are done if there were no controls that belong to a multi- * control cluster. */ if (!have_clusters) return 0; /* * The code below figures out in O(n) time which controls in the list * belong to the same cluster. */ /* This has to be done with the handler lock taken. */ mutex_lock(hdl->lock); /* First zero the helper field in the master control references */ for (i = 0; i < cs->count; i++) helpers[i].mref->helper = NULL; for (i = 0, h = helpers; i < cs->count; i++, h++) { struct v4l2_ctrl_ref *mref = h->mref; /* * If the mref->helper is set, then it points to an earlier * helper that belongs to the same cluster. */ if (mref->helper) { /* * Set the next field of mref->helper to the current * index: this means that the earlier helper now * points to the next helper in the same cluster. */ mref->helper->next = i; /* * mref should be set only for the first helper in the * cluster, clear the others. */ h->mref = NULL; } /* Point the mref helper to the current helper struct. */ mref->helper = h; } mutex_unlock(hdl->lock); return 0; } /* * Handles the corner case where cs->count == 0. It checks whether the * specified control class exists. If that class ID is 0, then it checks * whether there are any controls at all. */ static int class_check(struct v4l2_ctrl_handler *hdl, u32 which) { if (which == 0 || (which >= V4L2_CTRL_WHICH_DEF_VAL && which <= V4L2_CTRL_WHICH_MAX_VAL)) return 0; return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL; } /* * Get extended controls. Allocates the helpers array if needed. 
* * Note that v4l2_g_ext_ctrls_common() with 'which' set to * V4L2_CTRL_WHICH_REQUEST_VAL is only called if the request was * completed, and in that case p_req_valid is true for all controls. */ int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs, struct video_device *vdev) { struct v4l2_ctrl_helper helper[4]; struct v4l2_ctrl_helper *helpers = helper; int ret; int i, j; bool is_default, is_request, is_min, is_max; is_default = (cs->which == V4L2_CTRL_WHICH_DEF_VAL); is_request = (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL); is_min = (cs->which == V4L2_CTRL_WHICH_MIN_VAL); is_max = (cs->which == V4L2_CTRL_WHICH_MAX_VAL); cs->error_idx = cs->count; cs->which = V4L2_CTRL_ID2WHICH(cs->which); if (!hdl) return -EINVAL; if (cs->count == 0) return class_check(hdl, cs->which); if (cs->count > ARRAY_SIZE(helper)) { helpers = kvmalloc_array(cs->count, sizeof(helper[0]), GFP_KERNEL); if (!helpers) return -ENOMEM; } ret = prepare_ext_ctrls(hdl, cs, helpers, vdev, true); cs->error_idx = cs->count; for (i = 0; !ret && i < cs->count; i++) if (helpers[i].ref->ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY) ret = -EACCES; for (i = 0; !ret && i < cs->count; i++) { struct v4l2_ctrl *master; bool is_volatile = false; u32 idx = i; if (!helpers[i].mref) continue; master = helpers[i].mref->ctrl; cs->error_idx = i; v4l2_ctrl_lock(master); /* * g_volatile_ctrl will update the new control values. * This makes no sense for V4L2_CTRL_WHICH_DEF_VAL, * V4L2_CTRL_WHICH_MIN_VAL, V4L2_CTRL_WHICH_MAX_VAL and * V4L2_CTRL_WHICH_REQUEST_VAL. In the case of requests * it is v4l2_ctrl_request_complete() that copies the * volatile controls at the time of request completion * to the request, so you don't want to do that again. */ if (!is_default && !is_request && !is_min && !is_max && ((master->flags & V4L2_CTRL_FLAG_VOLATILE) || (master->has_volatiles && !is_cur_manual(master)))) { for (j = 0; j < master->ncontrols; j++) cur_to_new(master->cluster[j]); ret = call_op(master, g_volatile_ctrl); is_volatile = true; } if (ret) { v4l2_ctrl_unlock(master); break; } /* * Copy the default value (if is_default is true), the * request value (if is_request is true and p_req is valid), * the new volatile value (if is_volatile is true) or the * current value. */ do { struct v4l2_ctrl_ref *ref = helpers[idx].ref; if (is_default) ret = def_to_user(cs->controls + idx, ref->ctrl); else if (is_request && ref->p_req_array_enomem) ret = -ENOMEM; else if (is_request && ref->p_req_valid) ret = req_to_user(cs->controls + idx, ref); else if (is_min) ret = min_to_user(cs->controls + idx, ref->ctrl); else if (is_max) ret = max_to_user(cs->controls + idx, ref->ctrl); else if (is_volatile) ret = new_to_user(cs->controls + idx, ref->ctrl); else ret = cur_to_user(cs->controls + idx, ref->ctrl); idx = helpers[idx].next; } while (!ret && idx); v4l2_ctrl_unlock(master); } if (cs->count > ARRAY_SIZE(helper)) kvfree(helpers); return ret; } int v4l2_g_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct video_device *vdev, struct media_device *mdev, struct v4l2_ext_controls *cs) { if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) return v4l2_g_ext_ctrls_request(hdl, vdev, mdev, cs); return v4l2_g_ext_ctrls_common(hdl, cs, vdev); } EXPORT_SYMBOL(v4l2_g_ext_ctrls); /* Validate a new control */ static int validate_new(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr p_new) { return ctrl->type_ops->validate(ctrl, p_new); } /* Validate controls. 
*/ static int validate_ctrls(struct v4l2_ext_controls *cs, struct v4l2_ctrl_helper *helpers, struct video_device *vdev, bool set) { unsigned int i; int ret = 0; cs->error_idx = cs->count; for (i = 0; i < cs->count; i++) { struct v4l2_ctrl *ctrl = helpers[i].ref->ctrl; union v4l2_ctrl_ptr p_new; cs->error_idx = i; if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY) { dprintk(vdev, "control id 0x%x is read-only\n", ctrl->id); return -EACCES; } /* * This test is also done in try_set_control_cluster() which * is called in atomic context, so that has the final say, * but it makes sense to do an up-front check as well. Once * an error occurs in try_set_control_cluster() some other * controls may have been set already and we want to do a * best-effort to avoid that. */ if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED)) { dprintk(vdev, "control id 0x%x is grabbed, cannot set\n", ctrl->id); return -EBUSY; } /* * Skip validation for now if the payload needs to be copied * from userspace into kernelspace. We'll validate those later. */ if (ctrl->is_ptr) continue; if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64) p_new.p_s64 = &cs->controls[i].value64; else p_new.p_s32 = &cs->controls[i].value; ret = validate_new(ctrl, p_new); if (ret) return ret; } return 0; } /* Try or try-and-set controls */ int try_set_ext_ctrls_common(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, struct v4l2_ext_controls *cs, struct video_device *vdev, bool set) { struct v4l2_ctrl_helper helper[4]; struct v4l2_ctrl_helper *helpers = helper; unsigned int i, j; int ret; cs->error_idx = cs->count; /* Default/minimum/maximum values cannot be changed */ if (cs->which == V4L2_CTRL_WHICH_DEF_VAL || cs->which == V4L2_CTRL_WHICH_MIN_VAL || cs->which == V4L2_CTRL_WHICH_MAX_VAL) { dprintk(vdev, "%s: cannot change default/min/max value\n", video_device_node_name(vdev)); return -EINVAL; } cs->which = V4L2_CTRL_ID2WHICH(cs->which); if (!hdl) { dprintk(vdev, "%s: invalid null control handler\n", video_device_node_name(vdev)); return -EINVAL; } if (cs->count == 0) return class_check(hdl, cs->which); if (cs->count > ARRAY_SIZE(helper)) { helpers = kvmalloc_array(cs->count, sizeof(helper[0]), GFP_KERNEL); if (!helpers) return -ENOMEM; } ret = prepare_ext_ctrls(hdl, cs, helpers, vdev, false); if (!ret) ret = validate_ctrls(cs, helpers, vdev, set); if (ret && set) cs->error_idx = cs->count; for (i = 0; !ret && i < cs->count; i++) { struct v4l2_ctrl *master; u32 idx = i; if (!helpers[i].mref) continue; cs->error_idx = i; master = helpers[i].mref->ctrl; v4l2_ctrl_lock(master); /* Reset the 'is_new' flags of the cluster */ for (j = 0; j < master->ncontrols; j++) if (master->cluster[j]) master->cluster[j]->is_new = 0; /* * For volatile autoclusters that are currently in auto mode * we need to discover if it will be set to manual mode. * If so, then we have to copy the current volatile values * first since those will become the new manual values (which * may be overwritten by explicit new values from this set * of controls). */ if (master->is_auto && master->has_volatiles && !is_cur_manual(master)) { /* Pick an initial non-manual value */ s32 new_auto_val = master->manual_mode_value + 1; u32 tmp_idx = idx; do { /* * Check if the auto control is part of the * list, and remember the new value. */ if (helpers[tmp_idx].ref->ctrl == master) new_auto_val = cs->controls[tmp_idx].value; tmp_idx = helpers[tmp_idx].next; } while (tmp_idx); /* * If the new value == the manual value, then copy * the current volatile values. 
*/ if (new_auto_val == master->manual_mode_value) update_from_auto_cluster(master); } /* * Copy the new caller-supplied control values. * user_to_new() sets 'is_new' to 1. */ do { struct v4l2_ctrl *ctrl = helpers[idx].ref->ctrl; ret = user_to_new(cs->controls + idx, ctrl); if (!ret && ctrl->is_ptr) { ret = validate_new(ctrl, ctrl->p_new); if (ret) dprintk(vdev, "failed to validate control %s (%d)\n", v4l2_ctrl_get_name(ctrl->id), ret); } idx = helpers[idx].next; } while (!ret && idx); if (!ret) ret = try_or_set_cluster(fh, master, !hdl->req_obj.req && set, 0); if (!ret && hdl->req_obj.req && set) { for (j = 0; j < master->ncontrols; j++) { struct v4l2_ctrl_ref *ref = find_ref(hdl, master->cluster[j]->id); new_to_req(ref); } } /* Copy the new values back to userspace. */ if (!ret) { idx = i; do { ret = new_to_user(cs->controls + idx, helpers[idx].ref->ctrl); idx = helpers[idx].next; } while (!ret && idx); } v4l2_ctrl_unlock(master); } if (cs->count > ARRAY_SIZE(helper)) kvfree(helpers); return ret; } static int try_set_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, struct video_device *vdev, struct media_device *mdev, struct v4l2_ext_controls *cs, bool set) { int ret; if (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL) return try_set_ext_ctrls_request(fh, hdl, vdev, mdev, cs, set); ret = try_set_ext_ctrls_common(fh, hdl, cs, vdev, set); if (ret) dprintk(vdev, "%s: try_set_ext_ctrls_common failed (%d)\n", video_device_node_name(vdev), ret); return ret; } int v4l2_try_ext_ctrls(struct v4l2_ctrl_handler *hdl, struct video_device *vdev, struct media_device *mdev, struct v4l2_ext_controls *cs) { return try_set_ext_ctrls(NULL, hdl, vdev, mdev, cs, false); } EXPORT_SYMBOL(v4l2_try_ext_ctrls); int v4l2_s_ext_ctrls(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, struct video_device *vdev, struct media_device *mdev, struct v4l2_ext_controls *cs) { return try_set_ext_ctrls(fh, hdl, vdev, mdev, cs, true); } EXPORT_SYMBOL(v4l2_s_ext_ctrls); /* * VIDIOC_G/S_CTRL implementation */ /* Helper function to get a single control */ static int get_ctrl(struct v4l2_ctrl *ctrl, struct v4l2_ext_control *c) { struct v4l2_ctrl *master = ctrl->cluster[0]; int ret = 0; int i; /* Compound controls are not supported. The new_to_user() and * cur_to_user() calls below would need to be modified not to access * userspace memory when called from get_ctrl(). 
*/ if (!ctrl->is_int && ctrl->type != V4L2_CTRL_TYPE_INTEGER64) return -EINVAL; if (ctrl->flags & V4L2_CTRL_FLAG_WRITE_ONLY) return -EACCES; v4l2_ctrl_lock(master); /* g_volatile_ctrl will update the current control values */ if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) { for (i = 0; i < master->ncontrols; i++) cur_to_new(master->cluster[i]); ret = call_op(master, g_volatile_ctrl); if (!ret) ret = new_to_user(c, ctrl); } else { ret = cur_to_user(c, ctrl); } v4l2_ctrl_unlock(master); return ret; } int v4l2_g_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_control *control) { struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id); struct v4l2_ext_control c; int ret; if (!ctrl || !ctrl->is_int) return -EINVAL; ret = get_ctrl(ctrl, &c); if (!ret) control->value = c.value; return ret; } EXPORT_SYMBOL(v4l2_g_ctrl); /* Helper function for VIDIOC_S_CTRL compatibility */ static int set_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 ch_flags) { struct v4l2_ctrl *master = ctrl->cluster[0]; int ret; int i; /* Reset the 'is_new' flags of the cluster */ for (i = 0; i < master->ncontrols; i++) if (master->cluster[i]) master->cluster[i]->is_new = 0; ret = validate_new(ctrl, ctrl->p_new); if (ret) return ret; /* * For autoclusters with volatiles that are switched from auto to * manual mode we have to update the current volatile values since * those will become the initial manual values after such a switch. */ if (master->is_auto && master->has_volatiles && ctrl == master && !is_cur_manual(master) && ctrl->val == master->manual_mode_value) update_from_auto_cluster(master); ctrl->is_new = 1; return try_or_set_cluster(fh, master, true, ch_flags); } /* Helper function for VIDIOC_S_CTRL compatibility */ static int set_ctrl_lock(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, struct v4l2_ext_control *c) { int ret; v4l2_ctrl_lock(ctrl); ret = user_to_new(c, ctrl); if (!ret) ret = set_ctrl(fh, ctrl, 0); if (!ret) ret = cur_to_user(c, ctrl); v4l2_ctrl_unlock(ctrl); return ret; } int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, struct v4l2_control *control) { struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id); struct v4l2_ext_control c = { control->id }; int ret; if (!ctrl || !ctrl->is_int) return -EINVAL; if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY) return -EACCES; c.value = control->value; ret = set_ctrl_lock(fh, ctrl, &c); control->value = c.value; return ret; } EXPORT_SYMBOL(v4l2_s_ctrl); /* * Helper functions for drivers to get/set controls. */ s32 v4l2_ctrl_g_ctrl(struct v4l2_ctrl *ctrl) { struct v4l2_ext_control c; /* It's a driver bug if this happens. */ if (WARN_ON(!ctrl->is_int)) return 0; c.value = 0; get_ctrl(ctrl, &c); return c.value; } EXPORT_SYMBOL(v4l2_ctrl_g_ctrl); s64 v4l2_ctrl_g_ctrl_int64(struct v4l2_ctrl *ctrl) { struct v4l2_ext_control c; /* It's a driver bug if this happens. */ if (WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64)) return 0; c.value64 = 0; get_ctrl(ctrl, &c); return c.value64; } EXPORT_SYMBOL(v4l2_ctrl_g_ctrl_int64); int __v4l2_ctrl_s_ctrl(struct v4l2_ctrl *ctrl, s32 val) { lockdep_assert_held(ctrl->handler->lock); /* It's a driver bug if this happens. */ if (WARN_ON(!ctrl->is_int)) return -EINVAL; ctrl->val = val; return set_ctrl(NULL, ctrl, 0); } EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl); int __v4l2_ctrl_s_ctrl_int64(struct v4l2_ctrl *ctrl, s64 val) { lockdep_assert_held(ctrl->handler->lock); /* It's a driver bug if this happens. 
*/ if (WARN_ON(ctrl->is_ptr || ctrl->type != V4L2_CTRL_TYPE_INTEGER64)) return -EINVAL; *ctrl->p_new.p_s64 = val; return set_ctrl(NULL, ctrl, 0); } EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_int64); int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s) { lockdep_assert_held(ctrl->handler->lock); /* It's a driver bug if this happens. */ if (WARN_ON(ctrl->type != V4L2_CTRL_TYPE_STRING)) return -EINVAL; strscpy(ctrl->p_new.p_char, s, ctrl->maximum + 1); return set_ctrl(NULL, ctrl, 0); } EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_string); int __v4l2_ctrl_s_ctrl_compound(struct v4l2_ctrl *ctrl, enum v4l2_ctrl_type type, const void *p) { lockdep_assert_held(ctrl->handler->lock); /* It's a driver bug if this happens. */ if (WARN_ON(ctrl->type != type)) return -EINVAL; /* Setting dynamic arrays is not (yet?) supported. */ if (WARN_ON(ctrl->is_dyn_array)) return -EINVAL; memcpy(ctrl->p_new.p, p, ctrl->elems * ctrl->elem_size); return set_ctrl(NULL, ctrl, 0); } EXPORT_SYMBOL(__v4l2_ctrl_s_ctrl_compound); /* * Modify the range of a control. */ int __v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl, s64 min, s64 max, u64 step, s64 def) { bool value_changed; bool range_changed = false; int ret; lockdep_assert_held(ctrl->handler->lock); switch (ctrl->type) { case V4L2_CTRL_TYPE_INTEGER: case V4L2_CTRL_TYPE_INTEGER64: case V4L2_CTRL_TYPE_BOOLEAN: case V4L2_CTRL_TYPE_MENU: case V4L2_CTRL_TYPE_INTEGER_MENU: case V4L2_CTRL_TYPE_BITMASK: case V4L2_CTRL_TYPE_U8: case V4L2_CTRL_TYPE_U16: case V4L2_CTRL_TYPE_U32: if (ctrl->is_array) return -EINVAL; ret = check_range(ctrl->type, min, max, step, def); if (ret) return ret; break; default: return -EINVAL; } if (ctrl->minimum != min || ctrl->maximum != max || ctrl->step != step || ctrl->default_value != def) { range_changed = true; ctrl->minimum = min; ctrl->maximum = max; ctrl->step = step; ctrl->default_value = def; } cur_to_new(ctrl); if (validate_new(ctrl, ctrl->p_new)) { if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64) *ctrl->p_new.p_s64 = def; else *ctrl->p_new.p_s32 = def; } if (ctrl->type == V4L2_CTRL_TYPE_INTEGER64) value_changed = *ctrl->p_new.p_s64 != *ctrl->p_cur.p_s64; else value_changed = *ctrl->p_new.p_s32 != *ctrl->p_cur.p_s32; if (value_changed) ret = set_ctrl(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE); else if (range_changed) send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_RANGE); return ret; } EXPORT_SYMBOL(__v4l2_ctrl_modify_range); int __v4l2_ctrl_modify_dimensions(struct v4l2_ctrl *ctrl, u32 dims[V4L2_CTRL_MAX_DIMS]) { unsigned int elems = 1; unsigned int i; void *p_array; lockdep_assert_held(ctrl->handler->lock); if (!ctrl->is_array || ctrl->is_dyn_array) return -EINVAL; for (i = 0; i < ctrl->nr_of_dims; i++) elems *= dims[i]; if (elems == 0) return -EINVAL; p_array = kvzalloc(2 * elems * ctrl->elem_size, GFP_KERNEL); if (!p_array) return -ENOMEM; kvfree(ctrl->p_array); ctrl->p_array_alloc_elems = elems; ctrl->elems = elems; ctrl->new_elems = elems; ctrl->p_array = p_array; ctrl->p_new.p = p_array; ctrl->p_cur.p = p_array + elems * ctrl->elem_size; for (i = 0; i < ctrl->nr_of_dims; i++) ctrl->dims[i] = dims[i]; ctrl->type_ops->init(ctrl, 0, ctrl->p_cur); cur_to_new(ctrl); send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_VALUE | V4L2_EVENT_CTRL_CH_DIMENSIONS); return 0; } EXPORT_SYMBOL(__v4l2_ctrl_modify_dimensions); /* Implement VIDIOC_QUERY_EXT_CTRL */ int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctrl *qc) { const unsigned int next_flags = V4L2_CTRL_FLAG_NEXT_CTRL | V4L2_CTRL_FLAG_NEXT_COMPOUND; u32 id = qc->id & V4L2_CTRL_ID_MASK; struct 
v4l2_ctrl_ref *ref; struct v4l2_ctrl *ctrl; if (!hdl) return -EINVAL; mutex_lock(hdl->lock); /* Try to find it */ ref = find_ref(hdl, id); if ((qc->id & next_flags) && !list_empty(&hdl->ctrl_refs)) { bool is_compound; /* Match any control that is not hidden */ unsigned int mask = 1; bool match = false; if ((qc->id & next_flags) == V4L2_CTRL_FLAG_NEXT_COMPOUND) { /* Match any hidden control */ match = true; } else if ((qc->id & next_flags) == next_flags) { /* Match any control, compound or not */ mask = 0; } /* Find the next control with ID > qc->id */ /* Did we reach the end of the control list? */ if (id >= node2id(hdl->ctrl_refs.prev)) { ref = NULL; /* Yes, so there is no next control */ } else if (ref) { struct v4l2_ctrl_ref *pos = ref; /* * We found a control with the given ID, so just get * the next valid one in the list. */ ref = NULL; list_for_each_entry_continue(pos, &hdl->ctrl_refs, node) { is_compound = pos->ctrl->is_array || pos->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES; if (id < pos->ctrl->id && (is_compound & mask) == match) { ref = pos; break; } } } else { struct v4l2_ctrl_ref *pos; /* * No control with the given ID exists, so start * searching for the next largest ID. We know there * is one, otherwise the first 'if' above would have * been true. */ list_for_each_entry(pos, &hdl->ctrl_refs, node) { is_compound = pos->ctrl->is_array || pos->ctrl->type >= V4L2_CTRL_COMPOUND_TYPES; if (id < pos->ctrl->id && (is_compound & mask) == match) { ref = pos; break; } } } } mutex_unlock(hdl->lock); if (!ref) return -EINVAL; ctrl = ref->ctrl; memset(qc, 0, sizeof(*qc)); if (id >= V4L2_CID_PRIVATE_BASE) qc->id = id; else qc->id = ctrl->id; strscpy(qc->name, ctrl->name, sizeof(qc->name)); qc->flags = user_flags(ctrl); qc->type = ctrl->type; qc->elem_size = ctrl->elem_size; qc->elems = ctrl->elems; qc->nr_of_dims = ctrl->nr_of_dims; memcpy(qc->dims, ctrl->dims, qc->nr_of_dims * sizeof(qc->dims[0])); qc->minimum = ctrl->minimum; qc->maximum = ctrl->maximum; qc->default_value = ctrl->default_value; if (ctrl->type == V4L2_CTRL_TYPE_MENU || ctrl->type == V4L2_CTRL_TYPE_INTEGER_MENU) qc->step = 1; else qc->step = ctrl->step; return 0; } EXPORT_SYMBOL(v4l2_query_ext_ctrl); void v4l2_query_ext_ctrl_to_v4l2_queryctrl(struct v4l2_queryctrl *to, const struct v4l2_query_ext_ctrl *from) { to->id = from->id; to->type = from->type; to->flags = from->flags; strscpy(to->name, from->name, sizeof(to->name)); switch (from->type) { case V4L2_CTRL_TYPE_INTEGER: case V4L2_CTRL_TYPE_BOOLEAN: case V4L2_CTRL_TYPE_MENU: case V4L2_CTRL_TYPE_INTEGER_MENU: case V4L2_CTRL_TYPE_STRING: case V4L2_CTRL_TYPE_BITMASK: to->minimum = from->minimum; to->maximum = from->maximum; to->step = from->step; to->default_value = from->default_value; break; default: to->minimum = 0; to->maximum = 0; to->step = 0; to->default_value = 0; break; } } EXPORT_SYMBOL(v4l2_query_ext_ctrl_to_v4l2_queryctrl); /* Implement VIDIOC_QUERYCTRL */ int v4l2_queryctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_queryctrl *qc) { struct v4l2_query_ext_ctrl qec = { qc->id }; int rc; rc = v4l2_query_ext_ctrl(hdl, &qec); if (rc) return rc; v4l2_query_ext_ctrl_to_v4l2_queryctrl(qc, &qec); return 0; } EXPORT_SYMBOL(v4l2_queryctrl); /* Implement VIDIOC_QUERYMENU */ int v4l2_querymenu(struct v4l2_ctrl_handler *hdl, struct v4l2_querymenu *qm) { struct v4l2_ctrl *ctrl; u32 i = qm->index; ctrl = v4l2_ctrl_find(hdl, qm->id); if (!ctrl) return -EINVAL; qm->reserved = 0; /* Sanity checks */ switch (ctrl->type) { case V4L2_CTRL_TYPE_MENU: if (!ctrl->qmenu) return -EINVAL; 
break; case V4L2_CTRL_TYPE_INTEGER_MENU: if (!ctrl->qmenu_int) return -EINVAL; break; default: return -EINVAL; } if (i < ctrl->minimum || i > ctrl->maximum) return -EINVAL; /* Use mask to see if this menu item should be skipped */ if (i < BITS_PER_LONG_LONG && (ctrl->menu_skip_mask & BIT_ULL(i))) return -EINVAL; /* Empty menu items should also be skipped */ if (ctrl->type == V4L2_CTRL_TYPE_MENU) { if (!ctrl->qmenu[i] || ctrl->qmenu[i][0] == '\0') return -EINVAL; strscpy(qm->name, ctrl->qmenu[i], sizeof(qm->name)); } else { qm->value = ctrl->qmenu_int[i]; } return 0; } EXPORT_SYMBOL(v4l2_querymenu); /* * VIDIOC_LOG_STATUS helpers */ int v4l2_ctrl_log_status(struct file *file, void *priv) { struct video_device *vfd = video_devdata(file); if (vfd->v4l2_dev) { struct v4l2_fh *vfh = file_to_v4l2_fh(file); v4l2_ctrl_handler_log_status(vfh->ctrl_handler, vfd->v4l2_dev->name); } return 0; } EXPORT_SYMBOL(v4l2_ctrl_log_status); int v4l2_ctrl_subdev_log_status(struct v4l2_subdev *sd) { v4l2_ctrl_handler_log_status(sd->ctrl_handler, sd->name); return 0; } EXPORT_SYMBOL(v4l2_ctrl_subdev_log_status); /* * VIDIOC_(UN)SUBSCRIBE_EVENT implementation */ static int v4l2_ctrl_add_event(struct v4l2_subscribed_event *sev, unsigned int elems) { struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id); if (!ctrl) return -EINVAL; v4l2_ctrl_lock(ctrl); list_add_tail(&sev->node, &ctrl->ev_subs); if (ctrl->type != V4L2_CTRL_TYPE_CTRL_CLASS && (sev->flags & V4L2_EVENT_SUB_FL_SEND_INITIAL)) send_initial_event(sev->fh, ctrl); v4l2_ctrl_unlock(ctrl); return 0; } static void v4l2_ctrl_del_event(struct v4l2_subscribed_event *sev) { struct v4l2_ctrl *ctrl = v4l2_ctrl_find(sev->fh->ctrl_handler, sev->id); if (!ctrl) return; v4l2_ctrl_lock(ctrl); list_del(&sev->node); v4l2_ctrl_unlock(ctrl); } void v4l2_ctrl_replace(struct v4l2_event *old, const struct v4l2_event *new) { u32 old_changes = old->u.ctrl.changes; old->u.ctrl = new->u.ctrl; old->u.ctrl.changes |= old_changes; } EXPORT_SYMBOL(v4l2_ctrl_replace); void v4l2_ctrl_merge(const struct v4l2_event *old, struct v4l2_event *new) { new->u.ctrl.changes |= old->u.ctrl.changes; } EXPORT_SYMBOL(v4l2_ctrl_merge); const struct v4l2_subscribed_event_ops v4l2_ctrl_sub_ev_ops = { .add = v4l2_ctrl_add_event, .del = v4l2_ctrl_del_event, .replace = v4l2_ctrl_replace, .merge = v4l2_ctrl_merge, }; EXPORT_SYMBOL(v4l2_ctrl_sub_ev_ops); int v4l2_ctrl_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub) { if (sub->type == V4L2_EVENT_CTRL) return v4l2_event_subscribe(fh, sub, 0, &v4l2_ctrl_sub_ev_ops); return -EINVAL; } EXPORT_SYMBOL(v4l2_ctrl_subscribe_event); int v4l2_ctrl_subdev_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh, struct v4l2_event_subscription *sub) { if (!sd->ctrl_handler) return -EINVAL; return v4l2_ctrl_subscribe_event(fh, sub); } EXPORT_SYMBOL(v4l2_ctrl_subdev_subscribe_event); /* * poll helper */ __poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait) { struct v4l2_fh *fh = file_to_v4l2_fh(file); poll_wait(file, &fh->wait, wait); if (v4l2_event_pending(fh)) return EPOLLPRI; return 0; } EXPORT_SYMBOL(v4l2_ctrl_poll);
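The comment block above prepare_ext_ctrls() spells out how an application should interpret error_idx after a failed VIDIOC_S_EXT_CTRLS call. As a hedged illustration (not part of the file above), the userspace sketch below shows one way to apply those rules; the file descriptor, control IDs and values are arbitrary placeholders.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Try to set two controls at once and report how far the kernel got. */
static int example_set_two_controls(int fd)
{
	struct v4l2_ext_control ctrl[2] = {
		{ .id = V4L2_CID_BRIGHTNESS, .value = 128 },	/* placeholder */
		{ .id = V4L2_CID_CONTRAST,   .value = 32 },	/* placeholder */
	};
	struct v4l2_ext_controls ctrls = {
		.which = V4L2_CTRL_WHICH_CUR_VAL,
		.count = 2,
		.controls = ctrl,
	};

	if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls) == 0)
		return 0;

	if (ctrls.error_idx == ctrls.count) {
		/* Validation failed up front: no control was modified. */
		fprintf(stderr, "no controls changed: %s\n", strerror(errno));
	} else {
		/*
		 * Partial set: controls before error_idx were applied, the
		 * control at error_idx failed, later ones are undefined.
		 */
		fprintf(stderr, "failed at index %u: %s\n",
			ctrls.error_idx, strerror(errno));
	}
	return -1;
}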
// SPDX-License-Identifier: GPL-2.0-only /* * Link physical devices with ACPI devices support * * Copyright (c) 2005 David Shaohua Li <shaohua.li@intel.com> * Copyright (c) 2005 Intel Corp.
*/ #define pr_fmt(fmt) "ACPI: " fmt #include <linux/acpi_iort.h> #include <linux/export.h> #include <linux/init.h> #include <linux/list.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/rwsem.h> #include <linux/acpi.h> #include <linux/dma-mapping.h> #include <linux/pci.h> #include <linux/pci-acpi.h> #include <linux/platform_device.h> #include "internal.h" static LIST_HEAD(bus_type_list); static DECLARE_RWSEM(bus_type_sem); #define PHYSICAL_NODE_STRING "physical_node" #define PHYSICAL_NODE_NAME_SIZE (sizeof(PHYSICAL_NODE_STRING) + 10) int register_acpi_bus_type(struct acpi_bus_type *type) { if (acpi_disabled) return -ENODEV; if (type && type->match && type->find_companion) { down_write(&bus_type_sem); list_add_tail(&type->list, &bus_type_list); up_write(&bus_type_sem); pr_info("bus type %s registered\n", type->name); return 0; } return -ENODEV; } EXPORT_SYMBOL_GPL(register_acpi_bus_type); int unregister_acpi_bus_type(struct acpi_bus_type *type) { if (acpi_disabled) return 0; if (type) { down_write(&bus_type_sem); list_del_init(&type->list); up_write(&bus_type_sem); pr_info("bus type %s unregistered\n", type->name); return 0; } return -ENODEV; } EXPORT_SYMBOL_GPL(unregister_acpi_bus_type); static struct acpi_bus_type *acpi_get_bus_type(struct device *dev) { struct acpi_bus_type *tmp, *ret = NULL; down_read(&bus_type_sem); list_for_each_entry(tmp, &bus_type_list, list) { if (tmp->match(dev)) { ret = tmp; break; } } up_read(&bus_type_sem); return ret; } #define FIND_CHILD_MIN_SCORE 1 #define FIND_CHILD_MID_SCORE 2 #define FIND_CHILD_MAX_SCORE 3 static int match_any(struct acpi_device *adev, void *not_used) { return 1; } static bool acpi_dev_has_children(struct acpi_device *adev) { return acpi_dev_for_each_child(adev, match_any, NULL) > 0; } static int find_child_checks(struct acpi_device *adev, bool check_children) { unsigned long long sta; acpi_status status; if (check_children && !acpi_dev_has_children(adev)) return -ENODEV; status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta); if (status == AE_NOT_FOUND) { /* * Special case: backlight device objects without _STA are * preferred to other objects with the same _ADR value, because * it is more likely that they are actually useful. */ if (adev->pnp.type.backlight) return FIND_CHILD_MID_SCORE; return FIND_CHILD_MIN_SCORE; } if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED)) return -ENODEV; /* * If the device has a _HID returning a valid ACPI/PNP device ID, it is * better to make it look less attractive here, so that the other device * with the same _ADR value (that may not have a valid device ID) can be * matched going forward. [This means a second spec violation in a row, * so whatever we do here is best effort anyway.] */ if (adev->pnp.type.platform_id) return FIND_CHILD_MIN_SCORE; return FIND_CHILD_MAX_SCORE; } struct find_child_walk_data { struct acpi_device *adev; u64 address; int score; bool check_sta; bool check_children; }; static int check_one_child(struct acpi_device *adev, void *data) { struct find_child_walk_data *wd = data; int score; if (!adev->pnp.type.bus_address || acpi_device_adr(adev) != wd->address) return 0; if (!wd->adev) { /* * This is the first matching object, so save it. If it is not * necessary to look for any other matching objects, stop the * search. */ wd->adev = adev; return !(wd->check_sta || wd->check_children); } /* * There is more than one matching device object with the same _ADR * value. That really is unexpected, so we are kind of beyond the scope * of the spec here. 
We have to choose which one to return, though. * * First, get the score for the previously found object and terminate * the walk if it is maximum. */ if (!wd->score) { score = find_child_checks(wd->adev, wd->check_children); if (score == FIND_CHILD_MAX_SCORE) return 1; wd->score = score; } /* * Second, if the object that has just been found has a better score, * replace the previously found one with it and terminate the walk if * the new score is maximum. */ score = find_child_checks(adev, wd->check_children); if (score > wd->score) { wd->adev = adev; if (score == FIND_CHILD_MAX_SCORE) return 1; wd->score = score; } /* Continue, because there may be better matches. */ return 0; } static struct acpi_device *acpi_find_child(struct acpi_device *parent, u64 address, bool check_children, bool check_sta) { struct find_child_walk_data wd = { .address = address, .check_children = check_children, .check_sta = check_sta, .adev = NULL, .score = 0, }; if (parent) acpi_dev_for_each_child(parent, check_one_child, &wd); return wd.adev; } struct acpi_device *acpi_find_child_device(struct acpi_device *parent, u64 address, bool check_children) { return acpi_find_child(parent, address, check_children, true); } EXPORT_SYMBOL_GPL(acpi_find_child_device); struct acpi_device *acpi_find_child_by_adr(struct acpi_device *adev, acpi_bus_address adr) { return acpi_find_child(adev, adr, false, false); } EXPORT_SYMBOL_GPL(acpi_find_child_by_adr); static void acpi_physnode_link_name(char *buf, unsigned int node_id) { if (node_id > 0) snprintf(buf, PHYSICAL_NODE_NAME_SIZE, PHYSICAL_NODE_STRING "%u", node_id); else strcpy(buf, PHYSICAL_NODE_STRING); } int acpi_bind_one(struct device *dev, struct acpi_device *acpi_dev) { struct acpi_device_physical_node *physical_node, *pn; char physical_node_name[PHYSICAL_NODE_NAME_SIZE]; struct list_head *physnode_list; unsigned int node_id; int retval = -EINVAL; if (has_acpi_companion(dev)) { if (acpi_dev) { dev_warn(dev, "ACPI companion already set\n"); return -EINVAL; } else { acpi_dev = ACPI_COMPANION(dev); } } if (!acpi_dev) return -EINVAL; acpi_dev_get(acpi_dev); get_device(dev); physical_node = kzalloc(sizeof(*physical_node), GFP_KERNEL); if (!physical_node) { retval = -ENOMEM; goto err; } mutex_lock(&acpi_dev->physical_node_lock); /* * Keep the list sorted by node_id so that the IDs of removed nodes can * be recycled easily. */ physnode_list = &acpi_dev->physical_node_list; node_id = 0; list_for_each_entry(pn, &acpi_dev->physical_node_list, node) { /* Sanity check. 
*/ if (pn->dev == dev) { mutex_unlock(&acpi_dev->physical_node_lock); dev_warn(dev, "Already associated with ACPI node\n"); kfree(physical_node); if (ACPI_COMPANION(dev) != acpi_dev) goto err; put_device(dev); acpi_dev_put(acpi_dev); return 0; } if (pn->node_id == node_id) { physnode_list = &pn->node; node_id++; } } physical_node->node_id = node_id; physical_node->dev = dev; list_add(&physical_node->node, physnode_list); acpi_dev->physical_node_count++; if (!has_acpi_companion(dev)) ACPI_COMPANION_SET(dev, acpi_dev); acpi_physnode_link_name(physical_node_name, node_id); retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, physical_node_name); if (retval) dev_err(&acpi_dev->dev, "Failed to create link %s (%d)\n", physical_node_name, retval); retval = sysfs_create_link(&dev->kobj, &acpi_dev->dev.kobj, "firmware_node"); if (retval) dev_err(dev, "Failed to create link firmware_node (%d)\n", retval); mutex_unlock(&acpi_dev->physical_node_lock); if (acpi_dev->wakeup.flags.valid) device_set_wakeup_capable(dev, true); return 0; err: ACPI_COMPANION_SET(dev, NULL); put_device(dev); acpi_dev_put(acpi_dev); return retval; } EXPORT_SYMBOL_GPL(acpi_bind_one); int acpi_unbind_one(struct device *dev) { struct acpi_device *acpi_dev = ACPI_COMPANION(dev); struct acpi_device_physical_node *entry; if (!acpi_dev) return 0; mutex_lock(&acpi_dev->physical_node_lock); list_for_each_entry(entry, &acpi_dev->physical_node_list, node) if (entry->dev == dev) { char physnode_name[PHYSICAL_NODE_NAME_SIZE]; list_del(&entry->node); acpi_dev->physical_node_count--; acpi_physnode_link_name(physnode_name, entry->node_id); sysfs_remove_link(&acpi_dev->dev.kobj, physnode_name); sysfs_remove_link(&dev->kobj, "firmware_node"); ACPI_COMPANION_SET(dev, NULL); /* Drop references taken by acpi_bind_one(). */ put_device(dev); acpi_dev_put(acpi_dev); kfree(entry); break; } mutex_unlock(&acpi_dev->physical_node_lock); return 0; } EXPORT_SYMBOL_GPL(acpi_unbind_one); void acpi_device_notify(struct device *dev) { struct acpi_device *adev; int ret; ret = acpi_bind_one(dev, NULL); if (ret) { struct acpi_bus_type *type = acpi_get_bus_type(dev); if (!type) goto err; adev = type->find_companion(dev); if (!adev) { dev_dbg(dev, "ACPI companion not found\n"); goto err; } ret = acpi_bind_one(dev, adev); if (ret) goto err; if (type->setup) { type->setup(dev); goto done; } } else { adev = ACPI_COMPANION(dev); if (dev_is_pci(dev)) { pci_acpi_setup(dev, adev); goto done; } else if (dev_is_platform(dev)) { acpi_configure_pmsi_domain(dev); } } if (adev->handler && adev->handler->bind) adev->handler->bind(dev); done: acpi_handle_debug(ACPI_HANDLE(dev), "Bound to device %s\n", dev_name(dev)); return; err: dev_dbg(dev, "No ACPI support\n"); } void acpi_device_notify_remove(struct device *dev) { struct acpi_device *adev = ACPI_COMPANION(dev); if (!adev) return; if (dev_is_pci(dev)) pci_acpi_cleanup(dev, adev); else if (adev->handler && adev->handler->unbind) adev->handler->unbind(dev); acpi_unbind_one(dev); }
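As a hedged illustration (not part of glue.c), the sketch below shows how bus-glue code might use the helpers defined above to look up an ACPI companion by _ADR and bind it to a physical device; the function name and the caller-supplied parent/address are placeholders, and the snippet assumes the same includes as the file above.

/*
 * Find the best ACPI child of 'parent' whose _ADR matches 'adr' and
 * attach it to 'dev' (creates the physical_node/firmware_node links
 * and sets the ACPI companion).
 */
static int example_bind_by_adr(struct device *dev,
			       struct acpi_device *parent, u64 adr)
{
	struct acpi_device *adev;

	adev = acpi_find_child_device(parent, adr, false);
	if (!adev)
		return -ENODEV;

	return acpi_bind_one(dev, adev);
}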
// SPDX-License-Identifier: GPL-2.0-or-later /* * userdlm.c * * Code which implements the kernel side of a minimal userspace * interface to our DLM. * * Many of the functions here are pared down versions of dlmglue.c * functions. * * Copyright (C) 2003, 2004 Oracle. All rights reserved.
*/ #include <linux/signal.h> #include <linux/sched/signal.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/types.h> #include <linux/crc32.h> #include "../ocfs2_lockingver.h" #include "../stackglue.h" #include "userdlm.h" #define MLOG_MASK_PREFIX ML_DLMFS #include "../cluster/masklog.h" static inline struct user_lock_res *user_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb) { return container_of(lksb, struct user_lock_res, l_lksb); } static inline int user_check_wait_flag(struct user_lock_res *lockres, int flag) { int ret; spin_lock(&lockres->l_lock); ret = lockres->l_flags & flag; spin_unlock(&lockres->l_lock); return ret; } static inline void user_wait_on_busy_lock(struct user_lock_res *lockres) { wait_event(lockres->l_event, !user_check_wait_flag(lockres, USER_LOCK_BUSY)); } static inline void user_wait_on_blocked_lock(struct user_lock_res *lockres) { wait_event(lockres->l_event, !user_check_wait_flag(lockres, USER_LOCK_BLOCKED)); } /* I heart container_of... */ static inline struct ocfs2_cluster_connection * cluster_connection_from_user_lockres(struct user_lock_res *lockres) { struct dlmfs_inode_private *ip; ip = container_of(lockres, struct dlmfs_inode_private, ip_lockres); return ip->ip_conn; } static struct inode * user_dlm_inode_from_user_lockres(struct user_lock_res *lockres) { struct dlmfs_inode_private *ip; ip = container_of(lockres, struct dlmfs_inode_private, ip_lockres); return &ip->ip_vfs_inode; } static inline void user_recover_from_dlm_error(struct user_lock_res *lockres) { spin_lock(&lockres->l_lock); lockres->l_flags &= ~USER_LOCK_BUSY; spin_unlock(&lockres->l_lock); } #define user_log_dlm_error(_func, _stat, _lockres) do { \ mlog(ML_ERROR, "Dlm error %d while calling %s on " \ "resource %.*s\n", _stat, _func, \ _lockres->l_namelen, _lockres->l_name); \ } while (0) /* WARNING: This function lives in a world where the only three lock * levels are EX, PR, and NL. It *will* have to be adjusted when more * lock types are added. */ static inline int user_highest_compat_lock_level(int level) { int new_level = DLM_LOCK_EX; if (level == DLM_LOCK_EX) new_level = DLM_LOCK_NL; else if (level == DLM_LOCK_PR) new_level = DLM_LOCK_PR; return new_level; } static void user_ast(struct ocfs2_dlm_lksb *lksb) { struct user_lock_res *lockres = user_lksb_to_lock_res(lksb); int status; mlog(ML_BASTS, "AST fired for lockres %.*s, level %d => %d\n", lockres->l_namelen, lockres->l_name, lockres->l_level, lockres->l_requested); spin_lock(&lockres->l_lock); status = ocfs2_dlm_lock_status(&lockres->l_lksb); if (status) { mlog(ML_ERROR, "lksb status value of %u on lockres %.*s\n", status, lockres->l_namelen, lockres->l_name); spin_unlock(&lockres->l_lock); return; } mlog_bug_on_msg(lockres->l_requested == DLM_LOCK_IV, "Lockres %.*s, requested ivmode. flags 0x%x\n", lockres->l_namelen, lockres->l_name, lockres->l_flags); /* we're downconverting. 
*/ if (lockres->l_requested < lockres->l_level) { if (lockres->l_requested <= user_highest_compat_lock_level(lockres->l_blocking)) { lockres->l_blocking = DLM_LOCK_NL; lockres->l_flags &= ~USER_LOCK_BLOCKED; } } lockres->l_level = lockres->l_requested; lockres->l_requested = DLM_LOCK_IV; lockres->l_flags |= USER_LOCK_ATTACHED; lockres->l_flags &= ~USER_LOCK_BUSY; spin_unlock(&lockres->l_lock); wake_up(&lockres->l_event); } static inline void user_dlm_grab_inode_ref(struct user_lock_res *lockres) { struct inode *inode; inode = user_dlm_inode_from_user_lockres(lockres); if (!igrab(inode)) BUG(); } static void user_dlm_unblock_lock(struct work_struct *work); static void __user_dlm_queue_lockres(struct user_lock_res *lockres) { if (!(lockres->l_flags & USER_LOCK_QUEUED)) { user_dlm_grab_inode_ref(lockres); INIT_WORK(&lockres->l_work, user_dlm_unblock_lock); queue_work(user_dlm_worker, &lockres->l_work); lockres->l_flags |= USER_LOCK_QUEUED; } } static void __user_dlm_cond_queue_lockres(struct user_lock_res *lockres) { int queue = 0; if (!(lockres->l_flags & USER_LOCK_BLOCKED)) return; switch (lockres->l_blocking) { case DLM_LOCK_EX: if (!lockres->l_ex_holders && !lockres->l_ro_holders) queue = 1; break; case DLM_LOCK_PR: if (!lockres->l_ex_holders) queue = 1; break; default: BUG(); } if (queue) __user_dlm_queue_lockres(lockres); } static void user_bast(struct ocfs2_dlm_lksb *lksb, int level) { struct user_lock_res *lockres = user_lksb_to_lock_res(lksb); mlog(ML_BASTS, "BAST fired for lockres %.*s, blocking %d, level %d\n", lockres->l_namelen, lockres->l_name, level, lockres->l_level); spin_lock(&lockres->l_lock); lockres->l_flags |= USER_LOCK_BLOCKED; if (level > lockres->l_blocking) lockres->l_blocking = level; __user_dlm_queue_lockres(lockres); spin_unlock(&lockres->l_lock); wake_up(&lockres->l_event); } static void user_unlock_ast(struct ocfs2_dlm_lksb *lksb, int status) { struct user_lock_res *lockres = user_lksb_to_lock_res(lksb); mlog(ML_BASTS, "UNLOCK AST fired for lockres %.*s, flags 0x%x\n", lockres->l_namelen, lockres->l_name, lockres->l_flags); if (status) mlog(ML_ERROR, "dlm returns status %d\n", status); spin_lock(&lockres->l_lock); /* The teardown flag gets set early during the unlock process, * so test the cancel flag to make sure that this ast isn't * for a concurrent cancel. */ if (lockres->l_flags & USER_LOCK_IN_TEARDOWN && !(lockres->l_flags & USER_LOCK_IN_CANCEL)) { lockres->l_level = DLM_LOCK_IV; } else if (status == DLM_CANCELGRANT) { /* We tried to cancel a convert request, but it was * already granted. Don't clear the busy flag - the * ast should've done this already. */ BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL)); lockres->l_flags &= ~USER_LOCK_IN_CANCEL; goto out_noclear; } else { BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL)); /* Cancel succeeded, we want to re-queue */ lockres->l_requested = DLM_LOCK_IV; /* cancel an * upconvert * request. */ lockres->l_flags &= ~USER_LOCK_IN_CANCEL; /* we want the unblock thread to look at it again * now. */ if (lockres->l_flags & USER_LOCK_BLOCKED) __user_dlm_queue_lockres(lockres); } lockres->l_flags &= ~USER_LOCK_BUSY; out_noclear: spin_unlock(&lockres->l_lock); wake_up(&lockres->l_event); } /* * This is the userdlmfs locking protocol version. * * See fs/ocfs2/dlmglue.c for more details on locking versions. 
*/ static struct ocfs2_locking_protocol user_dlm_lproto = { .lp_max_version = { .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR, .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR, }, .lp_lock_ast = user_ast, .lp_blocking_ast = user_bast, .lp_unlock_ast = user_unlock_ast, }; static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres) { struct inode *inode; inode = user_dlm_inode_from_user_lockres(lockres); iput(inode); } static void user_dlm_unblock_lock(struct work_struct *work) { int new_level, status; struct user_lock_res *lockres = container_of(work, struct user_lock_res, l_work); struct ocfs2_cluster_connection *conn = cluster_connection_from_user_lockres(lockres); mlog(0, "lockres %.*s\n", lockres->l_namelen, lockres->l_name); spin_lock(&lockres->l_lock); mlog_bug_on_msg(!(lockres->l_flags & USER_LOCK_QUEUED), "Lockres %.*s, flags 0x%x\n", lockres->l_namelen, lockres->l_name, lockres->l_flags); /* notice that we don't clear USER_LOCK_BLOCKED here. If it's * set, we want user_ast clear it. */ lockres->l_flags &= ~USER_LOCK_QUEUED; /* It's valid to get here and no longer be blocked - if we get * several basts in a row, we might be queued by the first * one, the unblock thread might run and clear the queued * flag, and finally we might get another bast which re-queues * us before our ast for the downconvert is called. */ if (!(lockres->l_flags & USER_LOCK_BLOCKED)) { mlog(ML_BASTS, "lockres %.*s USER_LOCK_BLOCKED\n", lockres->l_namelen, lockres->l_name); spin_unlock(&lockres->l_lock); goto drop_ref; } if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) { mlog(ML_BASTS, "lockres %.*s USER_LOCK_IN_TEARDOWN\n", lockres->l_namelen, lockres->l_name); spin_unlock(&lockres->l_lock); goto drop_ref; } if (lockres->l_flags & USER_LOCK_BUSY) { if (lockres->l_flags & USER_LOCK_IN_CANCEL) { mlog(ML_BASTS, "lockres %.*s USER_LOCK_IN_CANCEL\n", lockres->l_namelen, lockres->l_name); spin_unlock(&lockres->l_lock); goto drop_ref; } lockres->l_flags |= USER_LOCK_IN_CANCEL; spin_unlock(&lockres->l_lock); status = ocfs2_dlm_unlock(conn, &lockres->l_lksb, DLM_LKF_CANCEL); if (status) user_log_dlm_error("ocfs2_dlm_unlock", status, lockres); goto drop_ref; } /* If there are still incompat holders, we can exit safely * without worrying about re-queueing this lock as that will * happen on the last call to user_cluster_unlock. */ if ((lockres->l_blocking == DLM_LOCK_EX) && (lockres->l_ex_holders || lockres->l_ro_holders)) { spin_unlock(&lockres->l_lock); mlog(ML_BASTS, "lockres %.*s, EX/PR Holders %u,%u\n", lockres->l_namelen, lockres->l_name, lockres->l_ex_holders, lockres->l_ro_holders); goto drop_ref; } if ((lockres->l_blocking == DLM_LOCK_PR) && lockres->l_ex_holders) { spin_unlock(&lockres->l_lock); mlog(ML_BASTS, "lockres %.*s, EX Holders %u\n", lockres->l_namelen, lockres->l_name, lockres->l_ex_holders); goto drop_ref; } /* yay, we can downconvert now. */ new_level = user_highest_compat_lock_level(lockres->l_blocking); lockres->l_requested = new_level; lockres->l_flags |= USER_LOCK_BUSY; mlog(ML_BASTS, "lockres %.*s, downconvert %d => %d\n", lockres->l_namelen, lockres->l_name, lockres->l_level, new_level); spin_unlock(&lockres->l_lock); /* need lock downconvert request now... 
*/ status = ocfs2_dlm_lock(conn, new_level, &lockres->l_lksb, DLM_LKF_CONVERT|DLM_LKF_VALBLK, lockres->l_name, lockres->l_namelen); if (status) { user_log_dlm_error("ocfs2_dlm_lock", status, lockres); user_recover_from_dlm_error(lockres); } drop_ref: user_dlm_drop_inode_ref(lockres); } static inline void user_dlm_inc_holders(struct user_lock_res *lockres, int level) { switch(level) { case DLM_LOCK_EX: lockres->l_ex_holders++; break; case DLM_LOCK_PR: lockres->l_ro_holders++; break; default: BUG(); } } /* predict what lock level we'll be dropping down to on behalf * of another node, and return true if the currently wanted * level will be compatible with it. */ static inline int user_may_continue_on_blocked_lock(struct user_lock_res *lockres, int wanted) { BUG_ON(!(lockres->l_flags & USER_LOCK_BLOCKED)); return wanted <= user_highest_compat_lock_level(lockres->l_blocking); } int user_dlm_cluster_lock(struct user_lock_res *lockres, int level, int lkm_flags) { int status, local_flags; struct ocfs2_cluster_connection *conn = cluster_connection_from_user_lockres(lockres); if (level != DLM_LOCK_EX && level != DLM_LOCK_PR) { mlog(ML_ERROR, "lockres %.*s: invalid request!\n", lockres->l_namelen, lockres->l_name); status = -EINVAL; goto bail; } mlog(ML_BASTS, "lockres %.*s, level %d, flags = 0x%x\n", lockres->l_namelen, lockres->l_name, level, lkm_flags); again: if (signal_pending(current)) { status = -ERESTARTSYS; goto bail; } spin_lock(&lockres->l_lock); if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) { spin_unlock(&lockres->l_lock); status = -EAGAIN; goto bail; } /* We only compare against the currently granted level * here. If the lock is blocked waiting on a downconvert, * we'll get caught below. */ if ((lockres->l_flags & USER_LOCK_BUSY) && (level > lockres->l_level)) { /* is someone sitting in dlm_lock? If so, wait on * them. 
*/ spin_unlock(&lockres->l_lock); user_wait_on_busy_lock(lockres); goto again; } if ((lockres->l_flags & USER_LOCK_BLOCKED) && (!user_may_continue_on_blocked_lock(lockres, level))) { /* is the lock is currently blocked on behalf of * another node */ spin_unlock(&lockres->l_lock); user_wait_on_blocked_lock(lockres); goto again; } if (level > lockres->l_level) { local_flags = lkm_flags | DLM_LKF_VALBLK; if (lockres->l_level != DLM_LOCK_IV) local_flags |= DLM_LKF_CONVERT; lockres->l_requested = level; lockres->l_flags |= USER_LOCK_BUSY; spin_unlock(&lockres->l_lock); BUG_ON(level == DLM_LOCK_IV); BUG_ON(level == DLM_LOCK_NL); /* call dlm_lock to upgrade lock now */ status = ocfs2_dlm_lock(conn, level, &lockres->l_lksb, local_flags, lockres->l_name, lockres->l_namelen); if (status) { if ((lkm_flags & DLM_LKF_NOQUEUE) && (status != -EAGAIN)) user_log_dlm_error("ocfs2_dlm_lock", status, lockres); user_recover_from_dlm_error(lockres); goto bail; } user_wait_on_busy_lock(lockres); goto again; } user_dlm_inc_holders(lockres, level); spin_unlock(&lockres->l_lock); status = 0; bail: return status; } static inline void user_dlm_dec_holders(struct user_lock_res *lockres, int level) { switch(level) { case DLM_LOCK_EX: BUG_ON(!lockres->l_ex_holders); lockres->l_ex_holders--; break; case DLM_LOCK_PR: BUG_ON(!lockres->l_ro_holders); lockres->l_ro_holders--; break; default: BUG(); } } void user_dlm_cluster_unlock(struct user_lock_res *lockres, int level) { if (level != DLM_LOCK_EX && level != DLM_LOCK_PR) { mlog(ML_ERROR, "lockres %.*s: invalid request!\n", lockres->l_namelen, lockres->l_name); return; } spin_lock(&lockres->l_lock); user_dlm_dec_holders(lockres, level); __user_dlm_cond_queue_lockres(lockres); spin_unlock(&lockres->l_lock); } void user_dlm_write_lvb(struct inode *inode, const char *val, unsigned int len) { struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres; char *lvb; BUG_ON(len > DLM_LVB_LEN); spin_lock(&lockres->l_lock); BUG_ON(lockres->l_level < DLM_LOCK_EX); lvb = ocfs2_dlm_lvb(&lockres->l_lksb); memcpy(lvb, val, len); spin_unlock(&lockres->l_lock); } bool user_dlm_read_lvb(struct inode *inode, char *val) { struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres; char *lvb; bool ret = true; spin_lock(&lockres->l_lock); BUG_ON(lockres->l_level < DLM_LOCK_PR); if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)) { lvb = ocfs2_dlm_lvb(&lockres->l_lksb); memcpy(val, lvb, DLM_LVB_LEN); } else ret = false; spin_unlock(&lockres->l_lock); return ret; } void user_dlm_lock_res_init(struct user_lock_res *lockres, struct dentry *dentry) { memset(lockres, 0, sizeof(*lockres)); spin_lock_init(&lockres->l_lock); init_waitqueue_head(&lockres->l_event); lockres->l_level = DLM_LOCK_IV; lockres->l_requested = DLM_LOCK_IV; lockres->l_blocking = DLM_LOCK_IV; /* should have been checked before getting here. 
*/ BUG_ON(dentry->d_name.len >= USER_DLM_LOCK_ID_MAX_LEN); memcpy(lockres->l_name, dentry->d_name.name, dentry->d_name.len); lockres->l_namelen = dentry->d_name.len; } int user_dlm_destroy_lock(struct user_lock_res *lockres) { int status = -EBUSY; struct ocfs2_cluster_connection *conn = cluster_connection_from_user_lockres(lockres); mlog(ML_BASTS, "lockres %.*s\n", lockres->l_namelen, lockres->l_name); spin_lock(&lockres->l_lock); if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) { spin_unlock(&lockres->l_lock); goto bail; } lockres->l_flags |= USER_LOCK_IN_TEARDOWN; while (lockres->l_flags & USER_LOCK_BUSY) { spin_unlock(&lockres->l_lock); user_wait_on_busy_lock(lockres); spin_lock(&lockres->l_lock); } if (lockres->l_ro_holders || lockres->l_ex_holders) { lockres->l_flags &= ~USER_LOCK_IN_TEARDOWN; spin_unlock(&lockres->l_lock); goto bail; } status = 0; if (!(lockres->l_flags & USER_LOCK_ATTACHED)) { /* * lock is never requested, leave USER_LOCK_IN_TEARDOWN set * to avoid new lock request coming in. */ spin_unlock(&lockres->l_lock); goto bail; } lockres->l_flags |= USER_LOCK_BUSY; spin_unlock(&lockres->l_lock); status = ocfs2_dlm_unlock(conn, &lockres->l_lksb, DLM_LKF_VALBLK); if (status) { spin_lock(&lockres->l_lock); lockres->l_flags &= ~USER_LOCK_IN_TEARDOWN; lockres->l_flags &= ~USER_LOCK_BUSY; spin_unlock(&lockres->l_lock); user_log_dlm_error("ocfs2_dlm_unlock", status, lockres); goto bail; } user_wait_on_busy_lock(lockres); status = 0; bail: return status; } static void user_dlm_recovery_handler_noop(int node_num, void *recovery_data) { /* We ignore recovery events */ return; } void user_dlm_set_locking_protocol(void) { ocfs2_stack_glue_set_max_proto_version(&user_dlm_lproto.lp_max_version); } struct ocfs2_cluster_connection *user_dlm_register(const struct qstr *name) { int rc; struct ocfs2_cluster_connection *conn; rc = ocfs2_cluster_connect_agnostic(name->name, name->len, &user_dlm_lproto, user_dlm_recovery_handler_noop, NULL, &conn); if (rc) mlog_errno(rc); return rc ? ERR_PTR(rc) : conn; } void user_dlm_unregister(struct ocfs2_cluster_connection *conn) { ocfs2_cluster_disconnect(conn, 0); }
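As a hedged illustration (not part of userdlm.c), the sketch below shows how a caller such as dlmfs might pair user_dlm_cluster_lock()/user_dlm_cluster_unlock() around a read of the lock value block, using only helpers defined above; the function name and the -ENODATA error choice are placeholders.

/* Read the DLM_LVB_LEN-byte value block under a shared (PR) cluster lock. */
static int example_read_lvb(struct inode *inode, char *buf)
{
	struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
	int status;

	/* May block waiting for other nodes, or return -ERESTARTSYS. */
	status = user_dlm_cluster_lock(lockres, DLM_LOCK_PR, 0);
	if (status)
		return status;

	/* Copy the value block out while the PR lock is still held. */
	if (!user_dlm_read_lvb(inode, buf))
		status = -ENODATA;

	user_dlm_cluster_unlock(lockres, DLM_LOCK_PR);
	return status;
}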
// SPDX-License-Identifier: GPL-2.0 /* * linux/drivers/base/map.c * * (C) Copyright Al Viro 2002,2003 * * NOTE: data structure needs to be changed. It works, but for large dev_t * it will be too slow. It is isolated, though, so these changes will be * local to that file. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/kdev_t.h> #include <linux/kobject.h> #include <linux/kobj_map.h> struct kobj_map { struct probe { struct probe *next; dev_t dev; unsigned long range; struct module *owner; kobj_probe_t *get; int (*lock)(dev_t, void *); void *data; } *probes[255]; struct mutex *lock; }; int kobj_map(struct kobj_map *domain, dev_t dev, unsigned long range, struct module *module, kobj_probe_t *probe, int (*lock)(dev_t, void *), void *data) { unsigned int n = MAJOR(dev + range - 1) - MAJOR(dev) + 1; unsigned int index = MAJOR(dev); unsigned int i; struct probe *p; if (n > 255) n = 255; p = kmalloc_array(n, sizeof(struct probe), GFP_KERNEL); if (p == NULL) return -ENOMEM; for (i = 0; i < n; i++, p++) { p->owner = module; p->get = probe; p->lock = lock; p->dev = dev; p->range = range; p->data = data; } mutex_lock(domain->lock); for (i = 0, p -= n; i < n; i++, p++, index++) { struct probe **s = &domain->probes[index % 255]; while (*s && (*s)->range < range) s = &(*s)->next; p->next = *s; *s = p; } mutex_unlock(domain->lock); return 0; } void kobj_unmap(struct kobj_map *domain, dev_t dev, unsigned long range) { unsigned int n = MAJOR(dev + range - 1) - MAJOR(dev) + 1; unsigned int index = MAJOR(dev); unsigned int i; struct probe *found = NULL; if (n > 255) n = 255; mutex_lock(domain->lock); for (i = 0; i < n; i++, index++) { struct probe **s; for (s = &domain->probes[index % 255]; *s; s = &(*s)->next) { struct probe *p = *s; if (p->dev == dev && p->range == range) { *s = p->next; if (!found) found = p; break; } } } mutex_unlock(domain->lock); kfree(found); } struct kobject *kobj_lookup(struct kobj_map *domain, dev_t dev, int *index) { struct kobject *kobj; struct probe *p; unsigned long best = ~0UL; retry: mutex_lock(domain->lock); for (p = domain->probes[MAJOR(dev) % 255]; p; p = p->next) { struct kobject *(*probe)(dev_t, int *, void *); struct module *owner; void *data; if (p->dev > dev || p->dev + p->range - 1 < dev) continue; if (p->range - 1 >= best) break; if (!try_module_get(p->owner)) continue; owner = p->owner; data = p->data; probe = p->get; best = p->range - 1; *index = dev - p->dev; if (p->lock && p->lock(dev, data) < 0) { module_put(owner); continue; } mutex_unlock(domain->lock); kobj = probe(dev, index, data); /* Currently ->owner protects _only_ ->probe() itself.
*/ module_put(owner); if (kobj) return kobj; goto retry; } mutex_unlock(domain->lock); return NULL; } struct kobj_map *kobj_map_init(kobj_probe_t *base_probe, struct mutex *lock) { struct kobj_map *p = kmalloc(sizeof(struct kobj_map), GFP_KERNEL); struct probe *base = kzalloc(sizeof(*base), GFP_KERNEL); int i; if ((p == NULL) || (base == NULL)) { kfree(p); kfree(base); return NULL; } base->dev = 1; base->range = ~0; base->get = base_probe; for (i = 0; i < 255; i++) p->probes[i] = base; p->lock = lock; return p; }
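As a hedged illustration (not part of map.c), the sketch below shows the intended use of the kobj_map API defined above, in the style of the char/block device-number code that consumes it; the probe callbacks, the major/minor numbers and the example kobject are placeholders, and the snippet assumes the same includes as the file above.

static DEFINE_MUTEX(example_map_lock);
static struct kobj_map *example_map;
static struct kobject example_kobj;	/* assume kobject_init() was done elsewhere */

/* Fallback probe used when no registered range matches the requested dev_t. */
static struct kobject *example_base_probe(dev_t dev, int *index, void *data)
{
	return NULL;
}

/* Probe for the registered range: 'data' is the cookie passed to kobj_map(). */
static struct kobject *example_probe(dev_t dev, int *index, void *data)
{
	return data;
}

static int example_map_setup(void)
{
	struct kobject *kobj;
	int index;

	example_map = kobj_map_init(example_base_probe, &example_map_lock);
	if (!example_map)
		return -ENOMEM;

	/* Claim 16 minors starting at major 42, minor 0 (placeholder numbers). */
	if (kobj_map(example_map, MKDEV(42, 0), 16, THIS_MODULE,
		     example_probe, NULL, &example_kobj))
		return -ENOMEM;

	/* Look up minor 3: 'index' is set to the offset within the range (3). */
	kobj = kobj_lookup(example_map, MKDEV(42, 3), &index);
	return kobj ? 0 : -ENODEV;
}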
// SPDX-License-Identifier: GPL-2.0 // rc-ir-raw.c - handle IR pulse/space events // // Copyright (C) 2010 by Mauro Carvalho Chehab #include <linux/export.h> #include <linux/kthread.h> #include <linux/mutex.h> #include <linux/kmod.h> #include <linux/sched.h> #include "rc-core-priv.h" /* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */ static LIST_HEAD(ir_raw_client_list); /* Used to handle IR raw handler extensions */ DEFINE_MUTEX(ir_raw_handler_lock); static LIST_HEAD(ir_raw_handler_list); static atomic64_t available_protocols = ATOMIC64_INIT(0); static int ir_raw_event_thread(void *data) { struct ir_raw_event ev; struct ir_raw_handler *handler; struct
ir_raw_event_ctrl *raw = data; struct rc_dev *dev = raw->dev; while (1) { mutex_lock(&ir_raw_handler_lock); while (kfifo_out(&raw->kfifo, &ev, 1)) { if (is_timing_event(ev)) { if (ev.duration == 0) dev_warn_once(&dev->dev, "nonsensical timing event of duration 0"); if (is_timing_event(raw->prev_ev) && !is_transition(&ev, &raw->prev_ev)) dev_warn_once(&dev->dev, "two consecutive events of type %s", TO_STR(ev.pulse)); } list_for_each_entry(handler, &ir_raw_handler_list, list) if (dev->enabled_protocols & handler->protocols || !handler->protocols) handler->decode(dev, ev); lirc_raw_event(dev, ev); raw->prev_ev = ev; } mutex_unlock(&ir_raw_handler_lock); set_current_state(TASK_INTERRUPTIBLE); if (kthread_should_stop()) { __set_current_state(TASK_RUNNING); break; } else if (!kfifo_is_empty(&raw->kfifo)) set_current_state(TASK_RUNNING); schedule(); } return 0; } /** * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders * @dev: the struct rc_dev device descriptor * @ev: the struct ir_raw_event descriptor of the pulse/space * * This routine (which may be called from an interrupt context) stores a * pulse/space duration for the raw ir decoding state machines. Pulses are * signalled as positive values and spaces as negative values. A zero value * will reset the decoding state machines. */ int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev) { if (!dev->raw) return -EINVAL; dev_dbg(&dev->dev, "sample: (%05dus %s)\n", ev->duration, TO_STR(ev->pulse)); if (!kfifo_put(&dev->raw->kfifo, *ev)) { dev_err(&dev->dev, "IR event FIFO is full!\n"); return -ENOSPC; } return 0; } EXPORT_SYMBOL_GPL(ir_raw_event_store); /** * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space * @dev: the struct rc_dev device descriptor * @pulse: true for pulse, false for space * * This routine (which may be called from an interrupt context) is used to * store the beginning of an ir pulse or space (or the start/end of ir * reception) for the raw ir decoding state machines. This is used by * hardware which does not provide durations directly but only interrupts * (or similar events) on state change. */ int ir_raw_event_store_edge(struct rc_dev *dev, bool pulse) { ktime_t now; struct ir_raw_event ev = {}; if (!dev->raw) return -EINVAL; now = ktime_get(); ev.duration = ktime_to_us(ktime_sub(now, dev->raw->last_event)); ev.pulse = !pulse; return ir_raw_event_store_with_timeout(dev, &ev); } EXPORT_SYMBOL_GPL(ir_raw_event_store_edge); /* * ir_raw_event_store_with_timeout() - pass a pulse/space duration to the raw * ir decoders, schedule decoding and * timeout * @dev: the struct rc_dev device descriptor * @ev: the struct ir_raw_event descriptor of the pulse/space * * This routine (which may be called from an interrupt context) stores a * pulse/space duration for the raw ir decoding state machines, schedules * decoding and generates a timeout. 
*/ int ir_raw_event_store_with_timeout(struct rc_dev *dev, struct ir_raw_event *ev) { ktime_t now; int rc = 0; if (!dev->raw) return -EINVAL; now = ktime_get(); spin_lock(&dev->raw->edge_spinlock); rc = ir_raw_event_store(dev, ev); dev->raw->last_event = now; /* timer could be set to timeout (125ms by default) */ if (!timer_pending(&dev->raw->edge_handle) || time_after(dev->raw->edge_handle.expires, jiffies + msecs_to_jiffies(15))) { mod_timer(&dev->raw->edge_handle, jiffies + msecs_to_jiffies(15)); } spin_unlock(&dev->raw->edge_spinlock); return rc; } EXPORT_SYMBOL_GPL(ir_raw_event_store_with_timeout); /** * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing * @dev: the struct rc_dev device descriptor * @ev: the event that has occurred * * This routine (which may be called from an interrupt context) works * in similar manner to ir_raw_event_store_edge. * This routine is intended for devices with limited internal buffer * It automerges samples of same type, and handles timeouts. Returns non-zero * if the event was added, and zero if the event was ignored due to idle * processing. */ int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev) { if (!dev->raw) return -EINVAL; /* Ignore spaces in idle mode */ if (dev->idle && !ev->pulse) return 0; else if (dev->idle) ir_raw_event_set_idle(dev, false); if (!dev->raw->this_ev.duration) dev->raw->this_ev = *ev; else if (ev->pulse == dev->raw->this_ev.pulse) dev->raw->this_ev.duration += ev->duration; else { ir_raw_event_store(dev, &dev->raw->this_ev); dev->raw->this_ev = *ev; } /* Enter idle mode if necessary */ if (!ev->pulse && dev->timeout && dev->raw->this_ev.duration >= dev->timeout) ir_raw_event_set_idle(dev, true); return 1; } EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter); /** * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not * @dev: the struct rc_dev device descriptor * @idle: whether the device is idle or not */ void ir_raw_event_set_idle(struct rc_dev *dev, bool idle) { if (!dev->raw) return; dev_dbg(&dev->dev, "%s idle mode\n", idle ? "enter" : "leave"); if (idle) { dev->raw->this_ev.timeout = true; ir_raw_event_store(dev, &dev->raw->this_ev); dev->raw->this_ev = (struct ir_raw_event) {}; } if (dev->s_idle) dev->s_idle(dev, idle); dev->idle = idle; } EXPORT_SYMBOL_GPL(ir_raw_event_set_idle); /** * ir_raw_event_handle() - schedules the decoding of stored ir data * @dev: the struct rc_dev device descriptor * * This routine will tell rc-core to start decoding stored ir data. 
*/ void ir_raw_event_handle(struct rc_dev *dev) { if (!dev->raw || !dev->raw->thread) return; wake_up_process(dev->raw->thread); } EXPORT_SYMBOL_GPL(ir_raw_event_handle); /* used internally by the sysfs interface */ u64 ir_raw_get_allowed_protocols(void) { return atomic64_read(&available_protocols); } static int change_protocol(struct rc_dev *dev, u64 *rc_proto) { struct ir_raw_handler *handler; u32 timeout = 0; mutex_lock(&ir_raw_handler_lock); list_for_each_entry(handler, &ir_raw_handler_list, list) { if (!(dev->enabled_protocols & handler->protocols) && (*rc_proto & handler->protocols) && handler->raw_register) handler->raw_register(dev); if ((dev->enabled_protocols & handler->protocols) && !(*rc_proto & handler->protocols) && handler->raw_unregister) handler->raw_unregister(dev); } mutex_unlock(&ir_raw_handler_lock); if (!dev->max_timeout) return 0; mutex_lock(&ir_raw_handler_lock); list_for_each_entry(handler, &ir_raw_handler_list, list) { if (handler->protocols & *rc_proto) { if (timeout < handler->min_timeout) timeout = handler->min_timeout; } } mutex_unlock(&ir_raw_handler_lock); if (timeout == 0) timeout = IR_DEFAULT_TIMEOUT; else timeout += MS_TO_US(10); if (timeout < dev->min_timeout) timeout = dev->min_timeout; else if (timeout > dev->max_timeout) timeout = dev->max_timeout; if (dev->s_timeout) dev->s_timeout(dev, timeout); else dev->timeout = timeout; return 0; } static void ir_raw_disable_protocols(struct rc_dev *dev, u64 protocols) { mutex_lock(&dev->lock); dev->enabled_protocols &= ~protocols; mutex_unlock(&dev->lock); } /** * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation. * @ev: Pointer to pointer to next free event. *@ev is incremented for * each raw event filled. * @max: Maximum number of raw events to fill. * @timings: Manchester modulation timings. * @n: Number of bits of data. * @data: Data bits to encode. * * Encodes the @n least significant bits of @data using Manchester (bi-phase) * modulation with the timing characteristics described by @timings, writing up * to @max raw IR events using the *@ev pointer. * * Returns: 0 on success. * -ENOBUFS if there isn't enough space in the array to fit the * full encoded data. In this case all @max events will have been * written. 
*/ int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max, const struct ir_raw_timings_manchester *timings, unsigned int n, u64 data) { bool need_pulse; u64 i; int ret = -ENOBUFS; i = BIT_ULL(n - 1); if (timings->leader_pulse) { if (!max--) return ret; init_ir_raw_event_duration((*ev), 1, timings->leader_pulse); if (timings->leader_space) { if (!max--) return ret; init_ir_raw_event_duration(++(*ev), 0, timings->leader_space); } } else { /* continue existing signal */ --(*ev); } /* from here on *ev will point to the last event rather than the next */ while (n && i > 0) { need_pulse = !(data & i); if (timings->invert) need_pulse = !need_pulse; if (need_pulse == !!(*ev)->pulse) { (*ev)->duration += timings->clock; } else { if (!max--) goto nobufs; init_ir_raw_event_duration(++(*ev), need_pulse, timings->clock); } if (!max--) goto nobufs; init_ir_raw_event_duration(++(*ev), !need_pulse, timings->clock); i >>= 1; } if (timings->trailer_space) { if (!(*ev)->pulse) (*ev)->duration += timings->trailer_space; else if (!max--) goto nobufs; else init_ir_raw_event_duration(++(*ev), 0, timings->trailer_space); } ret = 0; nobufs: /* point to the next event rather than last event before returning */ ++(*ev); return ret; } EXPORT_SYMBOL(ir_raw_gen_manchester); /** * ir_raw_gen_pd() - Encode data to raw events with pulse-distance modulation. * @ev: Pointer to pointer to next free event. *@ev is incremented for * each raw event filled. * @max: Maximum number of raw events to fill. * @timings: Pulse distance modulation timings. * @n: Number of bits of data. * @data: Data bits to encode. * * Encodes the @n least significant bits of @data using pulse-distance * modulation with the timing characteristics described by @timings, writing up * to @max raw IR events using the *@ev pointer. * * Returns: 0 on success. * -ENOBUFS if there isn't enough space in the array to fit the * full encoded data. In this case all @max events will have been * written. */ int ir_raw_gen_pd(struct ir_raw_event **ev, unsigned int max, const struct ir_raw_timings_pd *timings, unsigned int n, u64 data) { int i; int ret; unsigned int space; if (timings->header_pulse) { ret = ir_raw_gen_pulse_space(ev, &max, timings->header_pulse, timings->header_space); if (ret) return ret; } if (timings->msb_first) { for (i = n - 1; i >= 0; --i) { space = timings->bit_space[(data >> i) & 1]; ret = ir_raw_gen_pulse_space(ev, &max, timings->bit_pulse, space); if (ret) return ret; } } else { for (i = 0; i < n; ++i, data >>= 1) { space = timings->bit_space[data & 1]; ret = ir_raw_gen_pulse_space(ev, &max, timings->bit_pulse, space); if (ret) return ret; } } ret = ir_raw_gen_pulse_space(ev, &max, timings->trailer_pulse, timings->trailer_space); return ret; } EXPORT_SYMBOL(ir_raw_gen_pd); /** * ir_raw_gen_pl() - Encode data to raw events with pulse-length modulation. * @ev: Pointer to pointer to next free event. *@ev is incremented for * each raw event filled. * @max: Maximum number of raw events to fill. * @timings: Pulse distance modulation timings. * @n: Number of bits of data. * @data: Data bits to encode. * * Encodes the @n least significant bits of @data using space-distance * modulation with the timing characteristics described by @timings, writing up * to @max raw IR events using the *@ev pointer. * * Returns: 0 on success. * -ENOBUFS if there isn't enough space in the array to fit the * full encoded data. In this case all @max events will have been * written. 
*/ int ir_raw_gen_pl(struct ir_raw_event **ev, unsigned int max, const struct ir_raw_timings_pl *timings, unsigned int n, u64 data) { int i; int ret = -ENOBUFS; unsigned int pulse; if (!max--) return ret; init_ir_raw_event_duration((*ev)++, 1, timings->header_pulse); if (timings->msb_first) { for (i = n - 1; i >= 0; --i) { if (!max--) return ret; init_ir_raw_event_duration((*ev)++, 0, timings->bit_space); if (!max--) return ret; pulse = timings->bit_pulse[(data >> i) & 1]; init_ir_raw_event_duration((*ev)++, 1, pulse); } } else { for (i = 0; i < n; ++i, data >>= 1) { if (!max--) return ret; init_ir_raw_event_duration((*ev)++, 0, timings->bit_space); if (!max--) return ret; pulse = timings->bit_pulse[data & 1]; init_ir_raw_event_duration((*ev)++, 1, pulse); } } if (!max--) return ret; init_ir_raw_event_duration((*ev)++, 0, timings->trailer_space); return 0; } EXPORT_SYMBOL(ir_raw_gen_pl); /** * ir_raw_encode_scancode() - Encode a scancode as raw events * * @protocol: protocol * @scancode: scancode filter describing a single scancode * @events: array of raw events to write into * @max: max number of raw events * * Attempts to encode the scancode as raw events. * * Returns: The number of events written. * -ENOBUFS if there isn't enough space in the array to fit the * encoding. In this case all @max events will have been written. * -EINVAL if the scancode is ambiguous or invalid, or if no * compatible encoder was found. */ int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode, struct ir_raw_event *events, unsigned int max) { struct ir_raw_handler *handler; int ret = -EINVAL; u64 mask = 1ULL << protocol; ir_raw_load_modules(&mask); mutex_lock(&ir_raw_handler_lock); list_for_each_entry(handler, &ir_raw_handler_list, list) { if (handler->protocols & mask && handler->encode) { ret = handler->encode(protocol, scancode, events, max); if (ret >= 0 || ret == -ENOBUFS) break; } } mutex_unlock(&ir_raw_handler_lock); return ret; } EXPORT_SYMBOL(ir_raw_encode_scancode); /** * ir_raw_edge_handle() - Handle ir_raw_event_store_edge() processing * * @t: timer_list * * This callback is armed by ir_raw_event_store_edge(). It does two things: * first of all, rather than calling ir_raw_event_handle() for each * edge and waking up the rc thread, 15 ms after the first edge * ir_raw_event_handle() is called. Secondly, generate a timeout event * no more IR is received after the rc_dev timeout. */ static void ir_raw_edge_handle(struct timer_list *t) { struct ir_raw_event_ctrl *raw = timer_container_of(raw, t, edge_handle); struct rc_dev *dev = raw->dev; unsigned long flags; ktime_t interval; spin_lock_irqsave(&dev->raw->edge_spinlock, flags); interval = ktime_sub(ktime_get(), dev->raw->last_event); if (ktime_to_us(interval) >= dev->timeout) { struct ir_raw_event ev = { .timeout = true, .duration = ktime_to_us(interval) }; ir_raw_event_store(dev, &ev); } else { mod_timer(&dev->raw->edge_handle, jiffies + usecs_to_jiffies(dev->timeout - ktime_to_us(interval))); } spin_unlock_irqrestore(&dev->raw->edge_spinlock, flags); ir_raw_event_handle(dev); } /** * ir_raw_encode_carrier() - Get carrier used for protocol * * @protocol: protocol * * Attempts to find the carrier for the specified protocol * * Returns: The carrier in Hz * -EINVAL if the protocol is invalid, or if no * compatible encoder was found. 
*/ int ir_raw_encode_carrier(enum rc_proto protocol) { struct ir_raw_handler *handler; int ret = -EINVAL; u64 mask = BIT_ULL(protocol); mutex_lock(&ir_raw_handler_lock); list_for_each_entry(handler, &ir_raw_handler_list, list) { if (handler->protocols & mask && handler->encode) { ret = handler->carrier; break; } } mutex_unlock(&ir_raw_handler_lock); return ret; } EXPORT_SYMBOL(ir_raw_encode_carrier); /* * Used to (un)register raw event clients */ int ir_raw_event_prepare(struct rc_dev *dev) { if (!dev) return -EINVAL; dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL); if (!dev->raw) return -ENOMEM; dev->raw->dev = dev; dev->change_protocol = change_protocol; dev->idle = true; spin_lock_init(&dev->raw->edge_spinlock); timer_setup(&dev->raw->edge_handle, ir_raw_edge_handle, 0); INIT_KFIFO(dev->raw->kfifo); return 0; } int ir_raw_event_register(struct rc_dev *dev) { struct task_struct *thread; thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u", dev->minor); if (IS_ERR(thread)) return PTR_ERR(thread); dev->raw->thread = thread; mutex_lock(&ir_raw_handler_lock); list_add_tail(&dev->raw->list, &ir_raw_client_list); mutex_unlock(&ir_raw_handler_lock); return 0; } void ir_raw_event_free(struct rc_dev *dev) { if (!dev) return; kfree(dev->raw); dev->raw = NULL; } void ir_raw_event_unregister(struct rc_dev *dev) { struct ir_raw_handler *handler; if (!dev || !dev->raw) return; kthread_stop(dev->raw->thread); timer_delete_sync(&dev->raw->edge_handle); mutex_lock(&ir_raw_handler_lock); list_del(&dev->raw->list); list_for_each_entry(handler, &ir_raw_handler_list, list) if (handler->raw_unregister && (handler->protocols & dev->enabled_protocols)) handler->raw_unregister(dev); lirc_bpf_free(dev); ir_raw_event_free(dev); /* * A user can be calling bpf(BPF_PROG_{QUERY|ATTACH|DETACH}), so * ensure that the raw member is null on unlock; this is how * "device gone" is checked. */ mutex_unlock(&ir_raw_handler_lock); } /* * Extension interface - used to register the IR decoders */ int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler) { mutex_lock(&ir_raw_handler_lock); list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list); atomic64_or(ir_raw_handler->protocols, &available_protocols); mutex_unlock(&ir_raw_handler_lock); return 0; } EXPORT_SYMBOL(ir_raw_handler_register); void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler) { struct ir_raw_event_ctrl *raw; u64 protocols = ir_raw_handler->protocols; mutex_lock(&ir_raw_handler_lock); list_del(&ir_raw_handler->list); list_for_each_entry(raw, &ir_raw_client_list, list) { if (ir_raw_handler->raw_unregister && (raw->dev->enabled_protocols & protocols)) ir_raw_handler->raw_unregister(raw->dev); ir_raw_disable_protocols(raw->dev, protocols); } atomic64_andnot(protocols, &available_protocols); mutex_unlock(&ir_raw_handler_lock); } EXPORT_SYMBOL(ir_raw_handler_unregister);
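ir_raw_handler_register() and ir_raw_handler_unregister() above form the extension interface through which protocol decoders plug into the raw IR core. A minimal sketch of a decoder module using it, relying only on the struct ir_raw_handler fields visible in this file (.protocols and .decode); the example_* names and the choice of RC_PROTO_BIT_NEC are purely illustrative:

/* Hypothetical decoder skeleton; not part of rc-ir-raw.c. */
static int example_decode(struct rc_dev *dev, struct ir_raw_event ev)
{
	/* Consume one pulse/space sample; report a key via rc_keydown() on a full match. */
	return 0;
}

static struct ir_raw_handler example_handler = {
	.protocols = RC_PROTO_BIT_NEC,	/* protocols this decoder claims */
	.decode    = example_decode,
};

static int __init example_decode_init(void)
{
	return ir_raw_handler_register(&example_handler);
}

static void __exit example_decode_exit(void)
{
	ir_raw_handler_unregister(&example_handler);
}

module_init(example_decode_init);
module_exit(example_decode_exit);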
// SPDX-License-Identifier: GPL-2.0-only /* * 9P entry point * * Copyright (C) 2007 by Latchesar Ionkov <lucho@ionkov.net> * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com> * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/kmod.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/moduleparam.h> #include <net/9p/9p.h> #include <linux/fs.h> #include <net/9p/client.h> #include <net/9p/transport.h> #include <linux/list.h> #include <linux/spinlock.h> #ifdef CONFIG_NET_9P_DEBUG unsigned int p9_debug_level; /* feature-rific global debug level */ EXPORT_SYMBOL(p9_debug_level); module_param_named(debug, p9_debug_level, uint, 0); MODULE_PARM_DESC(debug, "9P debugging level"); void _p9_debug(enum p9_debug_flags level, const char *func, const char *fmt, ...) { struct va_format vaf; va_list args; if ((p9_debug_level & level) != level) return; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; if (level == P9_DEBUG_9P) pr_notice("(%8.8d) %pV", task_pid_nr(current), &vaf); else pr_notice("-- %s (%d): %pV", func, task_pid_nr(current), &vaf); va_end(args); } EXPORT_SYMBOL(_p9_debug); #endif /* Dynamic Transport Registration Routines */ static DEFINE_SPINLOCK(v9fs_trans_lock); static LIST_HEAD(v9fs_trans_list); /** * v9fs_register_trans - register a new transport with 9p * @m: structure describing the transport module and entry points * */ void v9fs_register_trans(struct p9_trans_module *m) { spin_lock(&v9fs_trans_lock); list_add_tail(&m->list, &v9fs_trans_list); spin_unlock(&v9fs_trans_lock); } EXPORT_SYMBOL(v9fs_register_trans); /** * v9fs_unregister_trans - unregister a 9p transport * @m: the transport to remove * */ void v9fs_unregister_trans(struct p9_trans_module *m) { spin_lock(&v9fs_trans_lock); list_del_init(&m->list); spin_unlock(&v9fs_trans_lock); } EXPORT_SYMBOL(v9fs_unregister_trans); static struct p9_trans_module *_p9_get_trans_by_name(const char *s) { struct p9_trans_module *t, *found = NULL; spin_lock(&v9fs_trans_lock); list_for_each_entry(t, &v9fs_trans_list, list) if (strcmp(t->name, s) == 0 && try_module_get(t->owner)) { found = t; break; } spin_unlock(&v9fs_trans_lock); return found; } /** * v9fs_get_trans_by_name - get transport with the matching name * @s: string identifying transport * */ struct p9_trans_module *v9fs_get_trans_by_name(const char *s) { struct p9_trans_module *found = NULL; found = _p9_get_trans_by_name(s); #ifdef CONFIG_MODULES if (!found) { request_module("9p-%s", s); found = _p9_get_trans_by_name(s); } #endif return found; } EXPORT_SYMBOL(v9fs_get_trans_by_name); static const char * const v9fs_default_transports[] = { "virtio", "tcp", "fd", "unix", "xen", "rdma", }; /** *
v9fs_get_default_trans - get the default transport * */ struct p9_trans_module *v9fs_get_default_trans(void) { struct p9_trans_module *t, *found = NULL; int i; spin_lock(&v9fs_trans_lock); list_for_each_entry(t, &v9fs_trans_list, list) if (t->def && try_module_get(t->owner)) { found = t; break; } if (!found) list_for_each_entry(t, &v9fs_trans_list, list) if (try_module_get(t->owner)) { found = t; break; } spin_unlock(&v9fs_trans_lock); for (i = 0; !found && i < ARRAY_SIZE(v9fs_default_transports); i++) found = v9fs_get_trans_by_name(v9fs_default_transports[i]); return found; } EXPORT_SYMBOL(v9fs_get_default_trans); /** * v9fs_put_trans - put trans * @m: transport to put * */ void v9fs_put_trans(struct p9_trans_module *m) { if (m) module_put(m->owner); } EXPORT_SYMBOL(v9fs_put_trans); /** * init_p9 - Initialize module * */ static int __init init_p9(void) { int ret; ret = p9_client_init(); if (ret) return ret; p9_error_init(); pr_info("Installing 9P2000 support\n"); return ret; } /** * exit_p9 - shutdown module * */ static void __exit exit_p9(void) { pr_info("Unloading 9P2000 support\n"); p9_client_exit(); } module_init(init_p9) module_exit(exit_p9) MODULE_AUTHOR("Latchesar Ionkov <lucho@ionkov.net>"); MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>"); MODULE_AUTHOR("Ron Minnich <rminnich@lanl.gov>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Plan 9 Resource Sharing Support (9P2000)");
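v9fs_register_trans(), v9fs_get_trans_by_name() and v9fs_get_default_trans() above are the hooks through which transport modules advertise themselves to the 9p client code. A minimal sketch of a transport registering itself, assuming the conventional struct p9_trans_module layout from <net/9p/transport.h>; the "example" transport and example_create() are hypothetical, and a real transport fills in many more callbacks:

/* Hypothetical transport skeleton; not part of net/9p/mod.c. */
static int example_create(struct p9_client *client, const char *addr, char *args)
{
	/* Parse @args, connect to @addr and attach the channel to @client. */
	return 0;
}

static struct p9_trans_module p9_example_trans = {
	.name   = "example",	/* matched by v9fs_get_trans_by_name("example") */
	.owner  = THIS_MODULE,
	.create = example_create,
};

static int __init p9_trans_example_init(void)
{
	v9fs_register_trans(&p9_example_trans);
	return 0;
}

static void __exit p9_trans_example_exit(void)
{
	v9fs_unregister_trans(&p9_example_trans);
}

module_init(p9_trans_example_init);
module_exit(p9_trans_example_exit);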
// SPDX-License-Identifier: GPL-2.0-only /* * * general timer device for using in ISDN stacks * * Author Karsten Keil <kkeil@novell.com> * * Copyright 2008 by Karsten Keil <kkeil@novell.com> */ #include <linux/poll.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/timer.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/mISDNif.h> #include <linux/mutex.h> #include <linux/sched/signal.h> #include "core.h" static DEFINE_MUTEX(mISDN_mutex); static u_int *debug; struct mISDNtimerdev { int next_id; struct list_head pending; struct list_head expired; wait_queue_head_t wait; u_int work; spinlock_t lock; /* protect lists */ }; struct mISDNtimer { struct list_head list; struct mISDNtimerdev *dev; struct timer_list tl; int id; }; static int mISDN_open(struct inode *ino, struct file *filep) { struct mISDNtimerdev *dev; if (*debug & DEBUG_TIMER) printk(KERN_DEBUG "%s(%p,%p)\n", __func__, ino, filep); dev = kmalloc(sizeof(struct mISDNtimerdev) , GFP_KERNEL); if (!dev) return -ENOMEM; dev->next_id = 1; INIT_LIST_HEAD(&dev->pending); INIT_LIST_HEAD(&dev->expired); spin_lock_init(&dev->lock); dev->work = 0; init_waitqueue_head(&dev->wait); filep->private_data = dev; return nonseekable_open(ino, filep); } static int mISDN_close(struct inode *ino, struct file *filep) { struct mISDNtimerdev *dev = filep->private_data; struct list_head *list = &dev->pending; struct mISDNtimer *timer, *next; if (*debug & DEBUG_TIMER) printk(KERN_DEBUG "%s(%p,%p)\n", __func__, ino, filep); spin_lock_irq(&dev->lock); while (!list_empty(list)) { timer = list_first_entry(list, struct mISDNtimer, list); spin_unlock_irq(&dev->lock); timer_shutdown_sync(&timer->tl); spin_lock_irq(&dev->lock); /* it might have been moved to ->expired */ list_del(&timer->list); kfree(timer); } spin_unlock_irq(&dev->lock); list_for_each_entry_safe(timer, next, &dev->expired, list) { kfree(timer); } kfree(dev); return 0; } static ssize_t mISDN_read(struct file *filep, char __user *buf, size_t count, loff_t *off) { struct mISDNtimerdev *dev = filep->private_data; struct list_head *list = &dev->expired; struct mISDNtimer *timer; int ret = 0; if (*debug & DEBUG_TIMER) printk(KERN_DEBUG "%s(%p, %p, %d, %p)\n", __func__, filep, buf, (int)count, off); if (count < sizeof(int)) return -ENOSPC; spin_lock_irq(&dev->lock); while (list_empty(list) && (dev->work == 0)) { spin_unlock_irq(&dev->lock); if
(filep->f_flags & O_NONBLOCK) return -EAGAIN; wait_event_interruptible(dev->wait, (dev->work || !list_empty(list))); if (signal_pending(current)) return -ERESTARTSYS; spin_lock_irq(&dev->lock); } if (dev->work) dev->work = 0; if (!list_empty(list)) { timer = list_first_entry(list, struct mISDNtimer, list); list_del(&timer->list); spin_unlock_irq(&dev->lock); if (put_user(timer->id, (int __user *)buf)) ret = -EFAULT; else ret = sizeof(int); kfree(timer); } else { spin_unlock_irq(&dev->lock); } return ret; } static __poll_t mISDN_poll(struct file *filep, poll_table *wait) { struct mISDNtimerdev *dev = filep->private_data; __poll_t mask = EPOLLERR; if (*debug & DEBUG_TIMER) printk(KERN_DEBUG "%s(%p, %p)\n", __func__, filep, wait); if (dev) { poll_wait(filep, &dev->wait, wait); mask = 0; if (dev->work || !list_empty(&dev->expired)) mask |= (EPOLLIN | EPOLLRDNORM); if (*debug & DEBUG_TIMER) printk(KERN_DEBUG "%s work(%d) empty(%d)\n", __func__, dev->work, list_empty(&dev->expired)); } return mask; } static void dev_expire_timer(struct timer_list *t) { struct mISDNtimer *timer = timer_container_of(timer, t, tl); u_long flags; spin_lock_irqsave(&timer->dev->lock, flags); if (timer->id >= 0) list_move_tail(&timer->list, &timer->dev->expired); wake_up_interruptible(&timer->dev->wait); spin_unlock_irqrestore(&timer->dev->lock, flags); } static int misdn_add_timer(struct mISDNtimerdev *dev, int timeout) { int id; struct mISDNtimer *timer; if (!timeout) { dev->work = 1; wake_up_interruptible(&dev->wait); id = 0; } else { timer = kzalloc(sizeof(struct mISDNtimer), GFP_KERNEL); if (!timer) return -ENOMEM; timer->dev = dev; timer_setup(&timer->tl, dev_expire_timer, 0); spin_lock_irq(&dev->lock); id = timer->id = dev->next_id++; if (dev->next_id < 0) dev->next_id = 1; list_add_tail(&timer->list, &dev->pending); timer->tl.expires = jiffies + ((HZ * (u_long)timeout) / 1000); add_timer(&timer->tl); spin_unlock_irq(&dev->lock); } return id; } static int misdn_del_timer(struct mISDNtimerdev *dev, int id) { struct mISDNtimer *timer; spin_lock_irq(&dev->lock); list_for_each_entry(timer, &dev->pending, list) { if (timer->id == id) { list_del_init(&timer->list); timer->id = -1; spin_unlock_irq(&dev->lock); timer_shutdown_sync(&timer->tl); kfree(timer); return id; } } spin_unlock_irq(&dev->lock); return 0; } static long mISDN_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { struct mISDNtimerdev *dev = filep->private_data; int id, tout, ret = 0; if (*debug & DEBUG_TIMER) printk(KERN_DEBUG "%s(%p, %x, %lx)\n", __func__, filep, cmd, arg); mutex_lock(&mISDN_mutex); switch (cmd) { case IMADDTIMER: if (get_user(tout, (int __user *)arg)) { ret = -EFAULT; break; } id = misdn_add_timer(dev, tout); if (*debug & DEBUG_TIMER) printk(KERN_DEBUG "%s add %d id %d\n", __func__, tout, id); if (id < 0) { ret = id; break; } if (put_user(id, (int __user *)arg)) ret = -EFAULT; break; case IMDELTIMER: if (get_user(id, (int __user *)arg)) { ret = -EFAULT; break; } if (*debug & DEBUG_TIMER) printk(KERN_DEBUG "%s del id %d\n", __func__, id); id = misdn_del_timer(dev, id); if (put_user(id, (int __user *)arg)) ret = -EFAULT; break; default: ret = -EINVAL; } mutex_unlock(&mISDN_mutex); return ret; } static const struct file_operations mISDN_fops = { .owner = THIS_MODULE, .read = mISDN_read, .poll = mISDN_poll, .unlocked_ioctl = mISDN_ioctl, .open = mISDN_open, .release = mISDN_close, }; static struct miscdevice mISDNtimer = { .minor = MISC_DYNAMIC_MINOR, .name = "mISDNtimer", .fops = &mISDN_fops, }; int mISDN_inittimer(u_int 
*deb) { int err; debug = deb; err = misc_register(&mISDNtimer); if (err) printk(KERN_WARNING "mISDN: Could not register timer device\n"); return err; } void mISDN_timer_cleanup(void) { misc_deregister(&mISDNtimer); }
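From userspace the driver above is reached through the /dev/mISDNtimer misc device: IMADDTIMER takes a timeout in milliseconds and writes the newly assigned timer id back through the same pointer, IMDELTIMER cancels a pending timer, and a read() of at least sizeof(int) bytes blocks until a timer expires and then returns its id. A minimal userspace sketch, assuming IMADDTIMER is available from the installed mISDN headers (header path is an assumption):

/* Hypothetical userspace consumer of /dev/mISDNtimer. */
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <mISDN/mISDNif.h>	/* assumed location of IMADDTIMER */

int main(void)
{
	int fd = open("/dev/mISDNtimer", O_RDONLY);
	int val = 500;	/* request a timer that fires after 500 ms */
	int id;

	if (fd < 0 || ioctl(fd, IMADDTIMER, &val) < 0)
		return 1;
	/* val now holds the timer id assigned by misdn_add_timer() */
	if (read(fd, &id, sizeof(id)) == (ssize_t)sizeof(id))
		printf("timer %d expired\n", id);
	close(fd);
	return 0;
}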
// SPDX-License-Identifier: GPL-2.0-only /* * virtio transport for vsock * * Copyright (C) 2013-2015 Red Hat, Inc. * Author: Asias He <asias@redhat.com> * Stefan Hajnoczi <stefanha@redhat.com> * * Some of the code is taken from Gerd Hoffmann <kraxel@redhat.com>'s * early virtio-vsock proof-of-concept bits. */ #include <linux/spinlock.h> #include <linux/module.h> #include <linux/list.h> #include <linux/atomic.h> #include <linux/virtio.h> #include <linux/virtio_ids.h> #include <linux/virtio_config.h> #include <linux/virtio_vsock.h> #include <net/sock.h> #include <linux/mutex.h> #include <net/af_vsock.h> static struct workqueue_struct *virtio_vsock_workqueue; static struct virtio_vsock __rcu *the_virtio_vsock; static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */ static struct virtio_transport virtio_transport; /* forward declaration */ struct virtio_vsock { struct virtio_device *vdev; struct virtqueue *vqs[VSOCK_VQ_MAX]; /* Virtqueue processing is deferred to a workqueue */ struct work_struct tx_work; struct work_struct rx_work; struct work_struct event_work; /* The following fields are protected by tx_lock. vqs[VSOCK_VQ_TX] * must be accessed with tx_lock held. */ struct mutex tx_lock; bool tx_run; struct work_struct send_pkt_work; struct sk_buff_head send_pkt_queue; atomic_t queued_replies; /* The following fields are protected by rx_lock. vqs[VSOCK_VQ_RX] * must be accessed with rx_lock held. */ struct mutex rx_lock; bool rx_run; int rx_buf_nr; int rx_buf_max_nr; /* The following fields are protected by event_lock. * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held. */ struct mutex event_lock; bool event_run; struct virtio_vsock_event event_list[8]; u32 guest_cid; bool seqpacket_allow; /* These fields are used only in tx path in function * 'virtio_transport_send_pkt_work()', so to save * stack space in it, place both of them here. Each * pointer from 'out_sgs' points to the corresponding * element in 'out_bufs' - this is initialized in * 'virtio_vsock_probe()'. Both fields are protected * by 'tx_lock'. +1 is needed for packet header. */ struct scatterlist *out_sgs[MAX_SKB_FRAGS + 1]; struct scatterlist out_bufs[MAX_SKB_FRAGS + 1]; }; static u32 virtio_transport_get_local_cid(void) { struct virtio_vsock *vsock; u32 ret; rcu_read_lock(); vsock = rcu_dereference(the_virtio_vsock); if (!vsock) { ret = VMADDR_CID_ANY; goto out_rcu; } ret = vsock->guest_cid; out_rcu: rcu_read_unlock(); return ret; } /* Caller needs to hold vsock->tx_lock on vq */ static int virtio_transport_send_skb(struct sk_buff *skb, struct virtqueue *vq, struct virtio_vsock *vsock, gfp_t gfp) { int ret, in_sg = 0, out_sg = 0; struct scatterlist **sgs; sgs = vsock->out_sgs; sg_init_one(sgs[out_sg], virtio_vsock_hdr(skb), sizeof(*virtio_vsock_hdr(skb))); out_sg++; if (!skb_is_nonlinear(skb)) { if (skb->len > 0) { sg_init_one(sgs[out_sg], skb->data, skb->len); out_sg++; } } else { struct skb_shared_info *si; int i; /* If skb is nonlinear, then its buffer must contain * only header and nothing more. Data is stored in * the fragged part.
*/ WARN_ON_ONCE(skb_headroom(skb) != sizeof(*virtio_vsock_hdr(skb))); si = skb_shinfo(skb); for (i = 0; i < si->nr_frags; i++) { skb_frag_t *skb_frag = &si->frags[i]; void *va; /* We will use 'page_to_virt()' for the userspace page * here, because virtio or dma-mapping layers will call * 'virt_to_phys()' later to fill the buffer descriptor. * We don't touch memory at "virtual" address of this page. */ va = page_to_virt(skb_frag_page(skb_frag)); sg_init_one(sgs[out_sg], va + skb_frag_off(skb_frag), skb_frag_size(skb_frag)); out_sg++; } } ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, gfp); /* Usually this means that there is no more space available in * the vq */ if (ret < 0) return ret; virtio_transport_deliver_tap_pkt(skb); return 0; } static void virtio_transport_send_pkt_work(struct work_struct *work) { struct virtio_vsock *vsock = container_of(work, struct virtio_vsock, send_pkt_work); struct virtqueue *vq; bool added = false; bool restart_rx = false; mutex_lock(&vsock->tx_lock); if (!vsock->tx_run) goto out; vq = vsock->vqs[VSOCK_VQ_TX]; for (;;) { struct sk_buff *skb; bool reply; int ret; skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue); if (!skb) break; reply = virtio_vsock_skb_reply(skb); ret = virtio_transport_send_skb(skb, vq, vsock, GFP_KERNEL); if (ret < 0) { virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb); break; } if (reply) { struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX]; int val; val = atomic_dec_return(&vsock->queued_replies); /* Do we now have resources to resume rx processing? */ if (val + 1 == virtqueue_get_vring_size(rx_vq)) restart_rx = true; } added = true; } if (added) virtqueue_kick(vq); out: mutex_unlock(&vsock->tx_lock); if (restart_rx) queue_work(virtio_vsock_workqueue, &vsock->rx_work); } /* Caller need to hold RCU for vsock. * Returns 0 if the packet is successfully put on the vq. */ static int virtio_transport_send_skb_fast_path(struct virtio_vsock *vsock, struct sk_buff *skb) { struct virtqueue *vq = vsock->vqs[VSOCK_VQ_TX]; int ret; /* Inside RCU, can't sleep! */ ret = mutex_trylock(&vsock->tx_lock); if (unlikely(ret == 0)) return -EBUSY; ret = virtio_transport_send_skb(skb, vq, vsock, GFP_ATOMIC); if (ret == 0) virtqueue_kick(vq); mutex_unlock(&vsock->tx_lock); return ret; } static int virtio_transport_send_pkt(struct sk_buff *skb) { struct virtio_vsock_hdr *hdr; struct virtio_vsock *vsock; int len = skb->len; hdr = virtio_vsock_hdr(skb); rcu_read_lock(); vsock = rcu_dereference(the_virtio_vsock); if (!vsock) { kfree_skb(skb); len = -ENODEV; goto out_rcu; } if (le64_to_cpu(hdr->dst_cid) == vsock->guest_cid) { kfree_skb(skb); len = -ENODEV; goto out_rcu; } /* If send_pkt_queue is empty, we can safely bypass this queue * because packet order is maintained and (try) to put the packet * on the virtqueue using virtio_transport_send_skb_fast_path. * If this fails we simply put the packet on the intermediate * queue and schedule the worker. 
*/ if (!skb_queue_empty_lockless(&vsock->send_pkt_queue) || virtio_transport_send_skb_fast_path(vsock, skb)) { if (virtio_vsock_skb_reply(skb)) atomic_inc(&vsock->queued_replies); virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb); queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work); } out_rcu: rcu_read_unlock(); return len; } static int virtio_transport_cancel_pkt(struct vsock_sock *vsk) { struct virtio_vsock *vsock; int cnt = 0, ret; rcu_read_lock(); vsock = rcu_dereference(the_virtio_vsock); if (!vsock) { ret = -ENODEV; goto out_rcu; } cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue); if (cnt) { struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX]; int new_cnt; new_cnt = atomic_sub_return(cnt, &vsock->queued_replies); if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) && new_cnt < virtqueue_get_vring_size(rx_vq)) queue_work(virtio_vsock_workqueue, &vsock->rx_work); } ret = 0; out_rcu: rcu_read_unlock(); return ret; } static void virtio_vsock_rx_fill(struct virtio_vsock *vsock) { int total_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE; struct scatterlist pkt, *p; struct virtqueue *vq; struct sk_buff *skb; int ret; vq = vsock->vqs[VSOCK_VQ_RX]; do { skb = virtio_vsock_alloc_linear_skb(total_len, GFP_KERNEL); if (!skb) break; memset(skb->head, 0, VIRTIO_VSOCK_SKB_HEADROOM); sg_init_one(&pkt, virtio_vsock_hdr(skb), total_len); p = &pkt; ret = virtqueue_add_sgs(vq, &p, 0, 1, skb, GFP_KERNEL); if (ret < 0) { kfree_skb(skb); break; } vsock->rx_buf_nr++; } while (vq->num_free); if (vsock->rx_buf_nr > vsock->rx_buf_max_nr) vsock->rx_buf_max_nr = vsock->rx_buf_nr; virtqueue_kick(vq); } static void virtio_transport_tx_work(struct work_struct *work) { struct virtio_vsock *vsock = container_of(work, struct virtio_vsock, tx_work); struct virtqueue *vq; bool added = false; vq = vsock->vqs[VSOCK_VQ_TX]; mutex_lock(&vsock->tx_lock); if (!vsock->tx_run) goto out; do { struct sk_buff *skb; unsigned int len; virtqueue_disable_cb(vq); while ((skb = virtqueue_get_buf(vq, &len)) != NULL) { virtio_transport_consume_skb_sent(skb, true); added = true; } } while (!virtqueue_enable_cb(vq)); out: mutex_unlock(&vsock->tx_lock); if (added) queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work); } /* Is there space left for replies to rx packets? */ static bool virtio_transport_more_replies(struct virtio_vsock *vsock) { struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX]; int val; smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */ val = atomic_read(&vsock->queued_replies); return val < virtqueue_get_vring_size(vq); } /* event_lock must be held */ static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock, struct virtio_vsock_event *event) { struct scatterlist sg; struct virtqueue *vq; vq = vsock->vqs[VSOCK_VQ_EVENT]; sg_init_one(&sg, event, sizeof(*event)); return virtqueue_add_inbuf(vq, &sg, 1, event, GFP_KERNEL); } /* event_lock must be held */ static void virtio_vsock_event_fill(struct virtio_vsock *vsock) { size_t i; for (i = 0; i < ARRAY_SIZE(vsock->event_list); i++) { struct virtio_vsock_event *event = &vsock->event_list[i]; virtio_vsock_event_fill_one(vsock, event); } virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]); } static void virtio_vsock_reset_sock(struct sock *sk) { /* vmci_transport.c doesn't take sk_lock here either. At least we're * under vsock_table_lock so the sock cannot disappear while we're * executing. 
*/ sk->sk_state = TCP_CLOSE; sk->sk_err = ECONNRESET; sk_error_report(sk); } static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock) { struct virtio_device *vdev = vsock->vdev; __le64 guest_cid; vdev->config->get(vdev, offsetof(struct virtio_vsock_config, guest_cid), &guest_cid, sizeof(guest_cid)); vsock->guest_cid = le64_to_cpu(guest_cid); } /* event_lock must be held */ static void virtio_vsock_event_handle(struct virtio_vsock *vsock, struct virtio_vsock_event *event) { switch (le32_to_cpu(event->id)) { case VIRTIO_VSOCK_EVENT_TRANSPORT_RESET: virtio_vsock_update_guest_cid(vsock); vsock_for_each_connected_socket(&virtio_transport.transport, virtio_vsock_reset_sock); break; } } static void virtio_transport_event_work(struct work_struct *work) { struct virtio_vsock *vsock = container_of(work, struct virtio_vsock, event_work); struct virtqueue *vq; vq = vsock->vqs[VSOCK_VQ_EVENT]; mutex_lock(&vsock->event_lock); if (!vsock->event_run) goto out; do { struct virtio_vsock_event *event; unsigned int len; virtqueue_disable_cb(vq); while ((event = virtqueue_get_buf(vq, &len)) != NULL) { if (len == sizeof(*event)) virtio_vsock_event_handle(vsock, event); virtio_vsock_event_fill_one(vsock, event); } } while (!virtqueue_enable_cb(vq)); virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]); out: mutex_unlock(&vsock->event_lock); } static void virtio_vsock_event_done(struct virtqueue *vq) { struct virtio_vsock *vsock = vq->vdev->priv; if (!vsock) return; queue_work(virtio_vsock_workqueue, &vsock->event_work); } static void virtio_vsock_tx_done(struct virtqueue *vq) { struct virtio_vsock *vsock = vq->vdev->priv; if (!vsock) return; queue_work(virtio_vsock_workqueue, &vsock->tx_work); } static void virtio_vsock_rx_done(struct virtqueue *vq) { struct virtio_vsock *vsock = vq->vdev->priv; if (!vsock) return; queue_work(virtio_vsock_workqueue, &vsock->rx_work); } static bool virtio_transport_can_msgzerocopy(int bufs_num) { struct virtio_vsock *vsock; bool res = false; rcu_read_lock(); vsock = rcu_dereference(the_virtio_vsock); if (vsock) { struct virtqueue *vq = vsock->vqs[VSOCK_VQ_TX]; /* Check that tx queue is large enough to keep whole * data to send. This is needed, because when there is * not enough free space in the queue, current skb to * send will be reinserted to the head of tx list of * the socket to retry transmission later, so if skb * is bigger than whole queue, it will be reinserted * again and again, thus blocking other skbs to be sent. * Each page of the user provided buffer will be added * as a single buffer to the tx virtqueue, so compare * number of pages against maximum capacity of the queue. 
*/ if (bufs_num <= vq->num_max) res = true; } rcu_read_unlock(); return res; } static bool virtio_transport_msgzerocopy_allow(void) { return true; } static bool virtio_transport_seqpacket_allow(u32 remote_cid); static struct virtio_transport virtio_transport = { .transport = { .module = THIS_MODULE, .get_local_cid = virtio_transport_get_local_cid, .init = virtio_transport_do_socket_init, .destruct = virtio_transport_destruct, .release = virtio_transport_release, .connect = virtio_transport_connect, .shutdown = virtio_transport_shutdown, .cancel_pkt = virtio_transport_cancel_pkt, .dgram_bind = virtio_transport_dgram_bind, .dgram_dequeue = virtio_transport_dgram_dequeue, .dgram_enqueue = virtio_transport_dgram_enqueue, .dgram_allow = virtio_transport_dgram_allow, .stream_dequeue = virtio_transport_stream_dequeue, .stream_enqueue = virtio_transport_stream_enqueue, .stream_has_data = virtio_transport_stream_has_data, .stream_has_space = virtio_transport_stream_has_space, .stream_rcvhiwat = virtio_transport_stream_rcvhiwat, .stream_is_active = virtio_transport_stream_is_active, .stream_allow = virtio_transport_stream_allow, .seqpacket_dequeue = virtio_transport_seqpacket_dequeue, .seqpacket_enqueue = virtio_transport_seqpacket_enqueue, .seqpacket_allow = virtio_transport_seqpacket_allow, .seqpacket_has_data = virtio_transport_seqpacket_has_data, .msgzerocopy_allow = virtio_transport_msgzerocopy_allow, .notify_poll_in = virtio_transport_notify_poll_in, .notify_poll_out = virtio_transport_notify_poll_out, .notify_recv_init = virtio_transport_notify_recv_init, .notify_recv_pre_block = virtio_transport_notify_recv_pre_block, .notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue, .notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue, .notify_send_init = virtio_transport_notify_send_init, .notify_send_pre_block = virtio_transport_notify_send_pre_block, .notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue, .notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue, .notify_buffer_size = virtio_transport_notify_buffer_size, .notify_set_rcvlowat = virtio_transport_notify_set_rcvlowat, .unsent_bytes = virtio_transport_unsent_bytes, .read_skb = virtio_transport_read_skb, }, .send_pkt = virtio_transport_send_pkt, .can_msgzerocopy = virtio_transport_can_msgzerocopy, }; static bool virtio_transport_seqpacket_allow(u32 remote_cid) { struct virtio_vsock *vsock; bool seqpacket_allow; seqpacket_allow = false; rcu_read_lock(); vsock = rcu_dereference(the_virtio_vsock); if (vsock) seqpacket_allow = vsock->seqpacket_allow; rcu_read_unlock(); return seqpacket_allow; } static void virtio_transport_rx_work(struct work_struct *work) { struct virtio_vsock *vsock = container_of(work, struct virtio_vsock, rx_work); struct virtqueue *vq; vq = vsock->vqs[VSOCK_VQ_RX]; mutex_lock(&vsock->rx_lock); if (!vsock->rx_run) goto out; do { virtqueue_disable_cb(vq); for (;;) { unsigned int len, payload_len; struct virtio_vsock_hdr *hdr; struct sk_buff *skb; if (!virtio_transport_more_replies(vsock)) { /* Stop rx until the device processes already * pending replies. Leave rx virtqueue * callbacks disabled. 
*/ goto out; } skb = virtqueue_get_buf(vq, &len); if (!skb) break; vsock->rx_buf_nr--; /* Drop short/long packets */ if (unlikely(len < sizeof(*hdr) || len > virtio_vsock_skb_len(skb))) { kfree_skb(skb); continue; } hdr = virtio_vsock_hdr(skb); payload_len = le32_to_cpu(hdr->len); if (unlikely(payload_len > len - sizeof(*hdr))) { kfree_skb(skb); continue; } if (payload_len) virtio_vsock_skb_put(skb, payload_len); virtio_transport_deliver_tap_pkt(skb); virtio_transport_recv_pkt(&virtio_transport, skb); } } while (!virtqueue_enable_cb(vq)); out: if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2) virtio_vsock_rx_fill(vsock); mutex_unlock(&vsock->rx_lock); } static int virtio_vsock_vqs_init(struct virtio_vsock *vsock) { struct virtio_device *vdev = vsock->vdev; struct virtqueue_info vqs_info[] = { { "rx", virtio_vsock_rx_done }, { "tx", virtio_vsock_tx_done }, { "event", virtio_vsock_event_done }, }; int ret; mutex_lock(&vsock->rx_lock); vsock->rx_buf_nr = 0; vsock->rx_buf_max_nr = 0; mutex_unlock(&vsock->rx_lock); atomic_set(&vsock->queued_replies, 0); ret = virtio_find_vqs(vdev, VSOCK_VQ_MAX, vsock->vqs, vqs_info, NULL); if (ret < 0) return ret; virtio_vsock_update_guest_cid(vsock); virtio_device_ready(vdev); return 0; } static void virtio_vsock_vqs_start(struct virtio_vsock *vsock) { mutex_lock(&vsock->tx_lock); vsock->tx_run = true; mutex_unlock(&vsock->tx_lock); mutex_lock(&vsock->rx_lock); virtio_vsock_rx_fill(vsock); vsock->rx_run = true; mutex_unlock(&vsock->rx_lock); mutex_lock(&vsock->event_lock); virtio_vsock_event_fill(vsock); vsock->event_run = true; mutex_unlock(&vsock->event_lock); /* virtio_transport_send_pkt() can queue packets once * the_virtio_vsock is set, but they won't be processed until * vsock->tx_run is set to true. We queue vsock->send_pkt_work * when initialization finishes to send those packets queued * earlier. * We don't need to queue the other workers (rx, event) because * as long as we don't fill the queues with empty buffers, the * host can't send us any notification. */ queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work); } static void virtio_vsock_vqs_del(struct virtio_vsock *vsock) { struct virtio_device *vdev = vsock->vdev; struct sk_buff *skb; /* Reset all connected sockets when the VQs disappear */ vsock_for_each_connected_socket(&virtio_transport.transport, virtio_vsock_reset_sock); /* Stop all work handlers to make sure no one is accessing the device, * so we can safely call virtio_reset_device(). */ mutex_lock(&vsock->rx_lock); vsock->rx_run = false; mutex_unlock(&vsock->rx_lock); mutex_lock(&vsock->tx_lock); vsock->tx_run = false; mutex_unlock(&vsock->tx_lock); mutex_lock(&vsock->event_lock); vsock->event_run = false; mutex_unlock(&vsock->event_lock); /* Flush all device writes and interrupts, device will not use any * more buffers. 
*/ virtio_reset_device(vdev); mutex_lock(&vsock->rx_lock); while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX]))) kfree_skb(skb); mutex_unlock(&vsock->rx_lock); mutex_lock(&vsock->tx_lock); while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX]))) kfree_skb(skb); mutex_unlock(&vsock->tx_lock); virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue); /* Delete virtqueues and flush outstanding callbacks if any */ vdev->config->del_vqs(vdev); } static int virtio_vsock_probe(struct virtio_device *vdev) { struct virtio_vsock *vsock = NULL; int ret; int i; ret = mutex_lock_interruptible(&the_virtio_vsock_mutex); if (ret) return ret; /* Only one virtio-vsock device per guest is supported */ if (rcu_dereference_protected(the_virtio_vsock, lockdep_is_held(&the_virtio_vsock_mutex))) { ret = -EBUSY; goto out; } vsock = kzalloc(sizeof(*vsock), GFP_KERNEL); if (!vsock) { ret = -ENOMEM; goto out; } vsock->vdev = vdev; mutex_init(&vsock->tx_lock); mutex_init(&vsock->rx_lock); mutex_init(&vsock->event_lock); skb_queue_head_init(&vsock->send_pkt_queue); INIT_WORK(&vsock->rx_work, virtio_transport_rx_work); INIT_WORK(&vsock->tx_work, virtio_transport_tx_work); INIT_WORK(&vsock->event_work, virtio_transport_event_work); INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work); if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_SEQPACKET)) vsock->seqpacket_allow = true; vdev->priv = vsock; ret = virtio_vsock_vqs_init(vsock); if (ret < 0) goto out; for (i = 0; i < ARRAY_SIZE(vsock->out_sgs); i++) vsock->out_sgs[i] = &vsock->out_bufs[i]; rcu_assign_pointer(the_virtio_vsock, vsock); virtio_vsock_vqs_start(vsock); mutex_unlock(&the_virtio_vsock_mutex); return 0; out: kfree(vsock); mutex_unlock(&the_virtio_vsock_mutex); return ret; } static void virtio_vsock_remove(struct virtio_device *vdev) { struct virtio_vsock *vsock = vdev->priv; mutex_lock(&the_virtio_vsock_mutex); vdev->priv = NULL; rcu_assign_pointer(the_virtio_vsock, NULL); synchronize_rcu(); virtio_vsock_vqs_del(vsock); /* Other works can be queued before 'config->del_vqs()', so we flush * all works before to free the vsock object to avoid use after free. 
*/ flush_work(&vsock->rx_work); flush_work(&vsock->tx_work); flush_work(&vsock->event_work); flush_work(&vsock->send_pkt_work); mutex_unlock(&the_virtio_vsock_mutex); kfree(vsock); } #ifdef CONFIG_PM_SLEEP static int virtio_vsock_freeze(struct virtio_device *vdev) { struct virtio_vsock *vsock = vdev->priv; mutex_lock(&the_virtio_vsock_mutex); rcu_assign_pointer(the_virtio_vsock, NULL); synchronize_rcu(); virtio_vsock_vqs_del(vsock); mutex_unlock(&the_virtio_vsock_mutex); return 0; } static int virtio_vsock_restore(struct virtio_device *vdev) { struct virtio_vsock *vsock = vdev->priv; int ret; mutex_lock(&the_virtio_vsock_mutex); /* Only one virtio-vsock device per guest is supported */ if (rcu_dereference_protected(the_virtio_vsock, lockdep_is_held(&the_virtio_vsock_mutex))) { ret = -EBUSY; goto out; } ret = virtio_vsock_vqs_init(vsock); if (ret < 0) goto out; rcu_assign_pointer(the_virtio_vsock, vsock); virtio_vsock_vqs_start(vsock); out: mutex_unlock(&the_virtio_vsock_mutex); return ret; } #endif /* CONFIG_PM_SLEEP */ static struct virtio_device_id id_table[] = { { VIRTIO_ID_VSOCK, VIRTIO_DEV_ANY_ID }, { 0 }, }; static unsigned int features[] = { VIRTIO_VSOCK_F_SEQPACKET }; static struct virtio_driver virtio_vsock_driver = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, .id_table = id_table, .probe = virtio_vsock_probe, .remove = virtio_vsock_remove, #ifdef CONFIG_PM_SLEEP .freeze = virtio_vsock_freeze, .restore = virtio_vsock_restore, #endif }; static int __init virtio_vsock_init(void) { int ret; virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", WQ_PERCPU, 0); if (!virtio_vsock_workqueue) return -ENOMEM; ret = vsock_core_register(&virtio_transport.transport, VSOCK_TRANSPORT_F_G2H); if (ret) goto out_wq; ret = register_virtio_driver(&virtio_vsock_driver); if (ret) goto out_vci; return 0; out_vci: vsock_core_unregister(&virtio_transport.transport); out_wq: destroy_workqueue(virtio_vsock_workqueue); return ret; } static void __exit virtio_vsock_exit(void) { unregister_virtio_driver(&virtio_vsock_driver); vsock_core_unregister(&virtio_transport.transport); destroy_workqueue(virtio_vsock_workqueue); } module_init(virtio_vsock_init); module_exit(virtio_vsock_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Asias He"); MODULE_DESCRIPTION("virtio transport for vsock"); MODULE_DEVICE_TABLE(virtio, id_table);
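Applications never call this transport directly; it is selected by the AF_VSOCK core when the guest runs on a virtio-vsock capable hypervisor. A minimal guest-side usage sketch in userspace C (the port number 1234 is an arbitrary example):

/* Userspace sketch: connect from the guest to a host listener. */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/vm_sockets.h>

int main(void)
{
	struct sockaddr_vm addr = {
		.svm_family = AF_VSOCK,
		.svm_cid    = VMADDR_CID_HOST,	/* CID 2: the hypervisor host */
		.svm_port   = 1234,		/* arbitrary example port */
	};
	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);

	if (fd < 0 || connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;
	write(fd, "hello\n", 6);
	close(fd);
	return 0;
}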
/* * Copyright (c) 2006-2008 Intel Corporation * Copyright (c) 2007 Dave Airlie <airlied@linux.ie> * Copyright (c) 2008 Red Hat Inc. * Copyright (c) 2016 Intel Corporation * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that copyright * notice and this permission notice appear in supporting documentation, and * that the name of the copyright holders not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. The copyright holders make no representations * about the suitability of this software for any purpose. It is provided "as * is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. */ #include <drm/drm_device.h> #include <drm/drm_drv.h> #include <drm/drm_dumb_buffers.h> #include <drm/drm_fourcc.h> #include <drm/drm_gem.h> #include <drm/drm_mode.h> #include <drm/drm_print.h> #include "drm_crtc_internal.h" #include "drm_internal.h" /** * DOC: overview * * The KMS API doesn't standardize backing storage object creation and leaves it * to driver-specific ioctls. Furthermore actually creating a buffer object even * for GEM-based drivers is done through a driver-specific ioctl - GEM only has * a common userspace interface for sharing and destroying objects. While not an * issue for full-fledged graphics stacks that include device-specific userspace * components (in libdrm for instance), this limit makes DRM-based early boot * graphics unnecessarily complex. * * Dumb objects partly alleviate the problem by providing a standard API to * create dumb buffers suitable for scanout, which can then be used to create * KMS frame buffers.
* * To support dumb objects drivers must implement the &drm_driver.dumb_create * and &drm_driver.dumb_map_offset operations (the latter defaults to * drm_gem_dumb_map_offset() if not set). Drivers that don't use GEM handles * additionally need to implement the &drm_driver.dumb_destroy operation. See * the callbacks for further details. * * Note that dumb objects may not be used for gpu acceleration, as has been * attempted on some ARM embedded platforms. Such drivers really must have * a hardware-specific ioctl to allocate suitable buffer objects. */ static int drm_mode_align_dumb(struct drm_mode_create_dumb *args, unsigned long hw_pitch_align, unsigned long hw_size_align) { u32 pitch = args->pitch; u32 size; if (!pitch) return -EINVAL; if (hw_pitch_align) pitch = roundup(pitch, hw_pitch_align); if (!hw_size_align) hw_size_align = PAGE_SIZE; else if (!IS_ALIGNED(hw_size_align, PAGE_SIZE)) return -EINVAL; /* TODO: handle this if necessary */ if (check_mul_overflow(args->height, pitch, &size)) return -EINVAL; size = ALIGN(size, hw_size_align); if (!size) return -EINVAL; args->pitch = pitch; args->size = size; return 0; } /** * drm_mode_size_dumb - Calculates the scanline and buffer sizes for dumb buffers * @dev: DRM device * @args: Parameters for the dumb buffer * @hw_pitch_align: Hardware scanline alignment in bytes * @hw_size_align: Hardware buffer-size alignment in bytes * * The helper drm_mode_size_dumb() calculates the size of the buffer * allocation and the scanline size for a dumb buffer. Callers have to * set the buffer's width, height and color mode in the argument @args. * The helper validates the correctness of the input and tests for * possible overflows. If successful, it returns the dumb buffer's * required scanline pitch and size in @args. * * The parameter @hw_pitch_align allows the driver to specify an * alignment for the scanline pitch, if the hardware requires any. The * calculated pitch will be a multiple of the alignment. The parameter * @hw_size_align allows the driver to specify an alignment for buffer sizes. The * provided alignment should represent requirements of the graphics * hardware. drm_mode_size_dumb() handles GEM-related constraints * automatically across all drivers and hardware. For example, the * returned buffer size is always a multiple of PAGE_SIZE, which is * required by mmap(). * * Returns: * Zero on success, or a negative error code otherwise. */ int drm_mode_size_dumb(struct drm_device *dev, struct drm_mode_create_dumb *args, unsigned long hw_pitch_align, unsigned long hw_size_align) { u64 pitch = 0; u32 fourcc; /* * The scanline pitch depends on the buffer width and the color * format. The latter is specified as a color-mode constant for * which we first have to find the corresponding color format. * * Different color formats can have the same color-mode constant. * For example XRGB8888 and BGRX8888 both have a color mode of 32. * It is possible to use different formats for dumb-buffer allocation * and rendering as long as all involved formats share the same * color-mode constant. */ fourcc = drm_driver_color_mode_format(dev, args->bpp); if (fourcc != DRM_FORMAT_INVALID) { const struct drm_format_info *info = drm_format_info(fourcc); if (!info) return -EINVAL; pitch = drm_format_info_min_pitch(info, 0, args->width); } else if (args->bpp) { /* * Some userspace throws in arbitrary values for bpp and * relies on the kernel to figure it out. In this case we * fall back to the old method of using bpp directly.
The * over-commitment of memory from the rounding is acceptable * for compatibility with legacy userspace. We have a number * of deprecated legacy values that are explicitly supported. */ switch (args->bpp) { default: drm_warn_once(dev, "Unknown color mode %u; guessing buffer size.\n", args->bpp); fallthrough; /* * These constants represent various YUV formats supported by * drm_gem_afbc_get_bpp(). */ case 12: // DRM_FORMAT_YUV420_8BIT case 15: // DRM_FORMAT_YUV420_10BIT case 30: // DRM_FORMAT_VUY101010 fallthrough; /* * Used by Mesa and Gstreamer to allocate NV formats and others * as RGB buffers. Technically, XRGB16161616F formats are RGB, * but the dumb buffers are not supposed to be used for anything * beyond 32 bits per pixels. */ case 10: // DRM_FORMAT_NV{15,20,30}, DRM_FORMAT_P010 case 64: // DRM_FORMAT_{XRGB,XBGR,ARGB,ABGR}16161616F pitch = args->width * DIV_ROUND_UP(args->bpp, SZ_8); break; } } if (!pitch || pitch > U32_MAX) return -EINVAL; args->pitch = pitch; return drm_mode_align_dumb(args, hw_pitch_align, hw_size_align); } EXPORT_SYMBOL(drm_mode_size_dumb); int drm_mode_create_dumb(struct drm_device *dev, struct drm_mode_create_dumb *args, struct drm_file *file_priv) { u32 cpp, stride, size; if (!dev->driver->dumb_create) return -ENOSYS; if (!args->width || !args->height || !args->bpp) return -EINVAL; /* overflow checks for 32bit size calculations */ if (args->bpp > U32_MAX - 8) return -EINVAL; cpp = DIV_ROUND_UP(args->bpp, 8); if (cpp > U32_MAX / args->width) return -EINVAL; stride = cpp * args->width; if (args->height > U32_MAX / stride) return -EINVAL; /* test for wrap-around */ size = args->height * stride; if (PAGE_ALIGN(size) == 0) return -EINVAL; /* * handle, pitch and size are output parameters. Zero them out to * prevent drivers from accidentally using uninitialized data. Since * not all existing userspace is clearing these fields properly we * cannot reject IOCTL with garbage in them. */ args->handle = 0; args->pitch = 0; args->size = 0; return dev->driver->dumb_create(file_priv, dev, args); } int drm_mode_create_dumb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_create_dumb *args = data; int err; err = drm_mode_create_dumb(dev, args, file_priv); if (err) { args->handle = 0; args->pitch = 0; args->size = 0; } return err; } static int drm_mode_mmap_dumb(struct drm_device *dev, struct drm_mode_map_dumb *args, struct drm_file *file_priv) { if (!dev->driver->dumb_create) return -ENOSYS; if (dev->driver->dumb_map_offset) return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset); else return drm_gem_dumb_map_offset(file_priv, dev, args->handle, &args->offset); } /** * drm_mode_mmap_dumb_ioctl - create an mmap offset for a dumb backing storage buffer * @dev: DRM device * @data: ioctl data * @file_priv: DRM file info * * Allocate an offset in the drm device node's address space to be able to * memory map a dumb buffer. * * Called by the user via ioctl. * * Returns: * Zero on success, negative errno on failure. 
*/ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_map_dumb *args = data; int err; err = drm_mode_mmap_dumb(dev, args, file_priv); if (err) args->offset = 0; return err; } int drm_mode_destroy_dumb(struct drm_device *dev, u32 handle, struct drm_file *file_priv) { if (!dev->driver->dumb_create) return -ENOSYS; return drm_gem_handle_delete(file_priv, handle); } int drm_mode_destroy_dumb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_destroy_dumb *args = data; return drm_mode_destroy_dumb(dev, args->handle, file_priv); }
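For orientation, here is a hedged userspace sketch of the ioctl flow these handlers implement: create a dumb buffer, map it, then destroy it. The device path, the 640x480 at 32 bpp parameters and the trimmed error handling are illustrative choices only; the include path assumes the libdrm development headers (build with the libdrm cflags and -ldrm).

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xf86drm.h>

int main(void)
{
	struct drm_mode_create_dumb create = {
		.width = 640, .height = 480, .bpp = 32,
	};
	struct drm_mode_map_dumb map = { 0 };
	struct drm_mode_destroy_dumb destroy = { 0 };
	int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
	void *fb;

	if (fd < 0 || drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create)) {
		perror("create dumb");
		return 1;
	}
	/*
	 * For 32 bpp the kernel typically reports pitch = 640 * 4 = 2560
	 * bytes and size = PAGE_ALIGN(480 * 2560) = 1228800 bytes; drivers
	 * may apply additional hardware alignment on top of that.
	 */
	map.handle = create.handle;
	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);

	fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		  fd, map.offset);
	if (fb != MAP_FAILED) {
		memset(fb, 0, create.size);	/* e.g. clear the scanout buffer */
		munmap(fb, create.size);
	}

	destroy.handle = create.handle;
	drmIoctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy);
	close(fd);
	return 0;
}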
// SPDX-License-Identifier: GPL-2.0-only /* * Sync File validation framework and debug information * * Copyright (C) 2012 Google, Inc. */ #include <linux/debugfs.h> #include "sync_debug.h" static struct dentry *dbgfs; static LIST_HEAD(sync_timeline_list_head); static DEFINE_SPINLOCK(sync_timeline_list_lock); void sync_timeline_debug_add(struct sync_timeline *obj) { unsigned long flags; spin_lock_irqsave(&sync_timeline_list_lock, flags); list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head); spin_unlock_irqrestore(&sync_timeline_list_lock, flags); } void sync_timeline_debug_remove(struct sync_timeline *obj) { unsigned long flags; spin_lock_irqsave(&sync_timeline_list_lock, flags); list_del(&obj->sync_timeline_list); spin_unlock_irqrestore(&sync_timeline_list_lock, flags); } static const char *sync_status_str(int status) { if (status < 0) return "error"; if (status > 0) return "signaled"; return "active"; } static void sync_print_fence(struct seq_file *s, struct dma_fence *fence, bool show) { struct sync_timeline *parent = dma_fence_parent(fence); int status; status = dma_fence_get_status_locked(fence); seq_printf(s, " %s%sfence %s", show ? parent->name : "", show ? "_" : "", sync_status_str(status)); if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags)) { struct timespec64 ts64 = ktime_to_timespec64(fence->timestamp); seq_printf(s, "@%ptSp", &ts64); } seq_printf(s, ": %lld", fence->seqno); seq_printf(s, " / %d", parent->value); seq_putc(s, '\n'); } static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj) { struct list_head *pos; seq_printf(s, "%s: %d\n", obj->name, obj->value); spin_lock(&obj->lock); /* Caller already disabled IRQ. */ list_for_each(pos, &obj->pt_list) { struct sync_pt *pt = container_of(pos, struct sync_pt, link); sync_print_fence(s, &pt->base, false); } spin_unlock(&obj->lock); } static int sync_info_debugfs_show(struct seq_file *s, void *unused) { struct list_head *pos; seq_puts(s, "objs:\n--------------\n"); spin_lock_irq(&sync_timeline_list_lock); list_for_each(pos, &sync_timeline_list_head) { struct sync_timeline *obj = container_of(pos, struct sync_timeline, sync_timeline_list); sync_print_obj(s, obj); seq_putc(s, '\n'); } spin_unlock_irq(&sync_timeline_list_lock); seq_puts(s, "fences:\n--------------\n"); return 0; } DEFINE_SHOW_ATTRIBUTE(sync_info_debugfs); static __init int sync_debugfs_init(void) { dbgfs = debugfs_create_dir("sync", NULL); /* * The debugfs files won't ever get removed and thus, there is * no need to protect it against removal races. The use of * debugfs_create_file_unsafe() is actually safe here. */ debugfs_create_file_unsafe("info", 0444, dbgfs, NULL, &sync_info_debugfs_fops); debugfs_create_file_unsafe("sw_sync", 0644, dbgfs, NULL, &sw_sync_debugfs_fops); return 0; } late_initcall(sync_debugfs_init);
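The active/signaled/error convention used by sync_status_str() above is the same one userspace sees through the sync_file uapi. A minimal sketch, assuming the caller already holds a valid sync_file fd (for example an explicit-fencing out-fence handed out by a driver); the helper name is invented for illustration.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/sync_file.h>

/* Query and print the overall state of a sync_file fd. */
void print_sync_file_status(int fd)
{
	struct sync_file_info info;

	memset(&info, 0, sizeof(info));	/* num_fences == 0: summary query only */
	if (ioctl(fd, SYNC_IOC_FILE_INFO, &info) < 0) {
		perror("SYNC_IOC_FILE_INFO");
		return;
	}
	/* info.status: 0 = active, 1 = signaled, < 0 = error (as above) */
	printf("%s: status %d, %u fences\n", info.name, info.status,
	       info.num_fences);
}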
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/init.h> #include <linux/module.h> #include <linux/netfilter.h> #include <net/flow_offload.h> #include <net/netfilter/nf_tables.h> #include <net/netfilter/nf_tables_offload.h> #include <net/pkt_cls.h> static struct nft_flow_rule *nft_flow_rule_alloc(int num_actions) { struct nft_flow_rule *flow; flow = kzalloc(sizeof(struct nft_flow_rule), GFP_KERNEL); if (!flow) return NULL; flow->rule = flow_rule_alloc(num_actions); if (!flow->rule) { kfree(flow); return NULL; } flow->rule->match.dissector = &flow->match.dissector; flow->rule->match.mask = &flow->match.mask; flow->rule->match.key = &flow->match.key; return flow; } void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow, enum flow_dissector_key_id
addr_type) { struct nft_flow_match *match = &flow->match; struct nft_flow_key *mask = &match->mask; struct nft_flow_key *key = &match->key; if (match->dissector.used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL)) return; key->control.addr_type = addr_type; mask->control.addr_type = 0xffff; match->dissector.used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL); match->dissector.offset[FLOW_DISSECTOR_KEY_CONTROL] = offsetof(struct nft_flow_key, control); } struct nft_offload_ethertype { __be16 value; __be16 mask; }; static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx *ctx, struct nft_flow_rule *flow) { struct nft_flow_match *match = &flow->match; struct nft_offload_ethertype ethertype = { .value = match->key.basic.n_proto, .mask = match->mask.basic.n_proto, }; if (match->dissector.used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) && (match->key.vlan.vlan_tpid == htons(ETH_P_8021Q) || match->key.vlan.vlan_tpid == htons(ETH_P_8021AD))) { match->key.basic.n_proto = match->key.cvlan.vlan_tpid; match->mask.basic.n_proto = match->mask.cvlan.vlan_tpid; match->key.cvlan.vlan_tpid = match->key.vlan.vlan_tpid; match->mask.cvlan.vlan_tpid = match->mask.vlan.vlan_tpid; match->key.vlan.vlan_tpid = ethertype.value; match->mask.vlan.vlan_tpid = ethertype.mask; match->dissector.offset[FLOW_DISSECTOR_KEY_CVLAN] = offsetof(struct nft_flow_key, cvlan); match->dissector.used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN); } else if (match->dissector.used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) && (match->key.basic.n_proto == htons(ETH_P_8021Q) || match->key.basic.n_proto == htons(ETH_P_8021AD))) { match->key.basic.n_proto = match->key.vlan.vlan_tpid; match->mask.basic.n_proto = match->mask.vlan.vlan_tpid; match->key.vlan.vlan_tpid = ethertype.value; match->mask.vlan.vlan_tpid = ethertype.mask; match->dissector.offset[FLOW_DISSECTOR_KEY_VLAN] = offsetof(struct nft_flow_key, vlan); match->dissector.used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_VLAN); } } struct nft_flow_rule *nft_flow_rule_create(struct net *net, const struct nft_rule *rule) { struct nft_offload_ctx *ctx; struct nft_flow_rule *flow; int num_actions = 0, err; struct nft_expr *expr; expr = nft_expr_first(rule); while (nft_expr_more(rule, expr)) { if (expr->ops->offload_action && expr->ops->offload_action(expr)) num_actions++; expr = nft_expr_next(expr); } if (num_actions == 0) return ERR_PTR(-EOPNOTSUPP); flow = nft_flow_rule_alloc(num_actions); if (!flow) return ERR_PTR(-ENOMEM); expr = nft_expr_first(rule); ctx = kzalloc(sizeof(struct nft_offload_ctx), GFP_KERNEL); if (!ctx) { err = -ENOMEM; goto err_out; } ctx->net = net; ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC; while (nft_expr_more(rule, expr)) { if (!expr->ops->offload) { err = -EOPNOTSUPP; goto err_out; } err = expr->ops->offload(ctx, flow, expr); if (err < 0) goto err_out; expr = nft_expr_next(expr); } nft_flow_rule_transfer_vlan(ctx, flow); flow->proto = ctx->dep.l3num; kfree(ctx); return flow; err_out: kfree(ctx); nft_flow_rule_destroy(flow); return ERR_PTR(err); } void nft_flow_rule_destroy(struct nft_flow_rule *flow) { struct flow_action_entry *entry; int i; flow_action_for_each(i, entry, &flow->rule->action) { switch (entry->id) { case FLOW_ACTION_REDIRECT: case FLOW_ACTION_MIRRED: dev_put(entry->dev); break; default: break; } } kfree(flow->rule); kfree(flow); } void nft_offload_set_dependency(struct nft_offload_ctx *ctx, enum nft_offload_dep_type type) { ctx->dep.type = type; } void nft_offload_update_dependency(struct nft_offload_ctx *ctx, const void *data, u32 len) { switch (ctx->dep.type) { case 
NFT_OFFLOAD_DEP_NETWORK: WARN_ON(len != sizeof(__u16)); memcpy(&ctx->dep.l3num, data, sizeof(__u16)); break; case NFT_OFFLOAD_DEP_TRANSPORT: WARN_ON(len != sizeof(__u8)); memcpy(&ctx->dep.protonum, data, sizeof(__u8)); break; default: break; } ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC; } static void nft_flow_offload_common_init(struct flow_cls_common_offload *common, __be16 proto, int priority, struct netlink_ext_ack *extack) { common->protocol = proto; common->prio = priority; common->extack = extack; } static int nft_setup_cb_call(enum tc_setup_type type, void *type_data, struct list_head *cb_list) { struct flow_block_cb *block_cb; int err; list_for_each_entry(block_cb, cb_list, list) { err = block_cb->cb(type, type_data, block_cb->cb_priv); if (err < 0) return err; } return 0; } static int nft_chain_offload_priority(const struct nft_base_chain *basechain) { if (basechain->ops.priority <= 0 || basechain->ops.priority > USHRT_MAX) return -1; return 0; } bool nft_chain_offload_support(const struct nft_base_chain *basechain) { struct nf_hook_ops *ops; struct net_device *dev; struct nft_hook *hook; if (nft_chain_offload_priority(basechain) < 0) return false; list_for_each_entry(hook, &basechain->hook_list, list) { list_for_each_entry(ops, &hook->ops_list, list) { if (ops->pf != NFPROTO_NETDEV || ops->hooknum != NF_NETDEV_INGRESS) return false; dev = ops->dev; if (!dev->netdev_ops->ndo_setup_tc && !flow_indr_dev_exists()) return false; } } return true; } static void nft_flow_cls_offload_setup(struct flow_cls_offload *cls_flow, const struct nft_base_chain *basechain, const struct nft_rule *rule, const struct nft_flow_rule *flow, struct netlink_ext_ack *extack, enum flow_cls_command command) { __be16 proto = ETH_P_ALL; memset(cls_flow, 0, sizeof(*cls_flow)); if (flow) proto = flow->proto; nft_flow_offload_common_init(&cls_flow->common, proto, basechain->ops.priority, extack); cls_flow->command = command; cls_flow->cookie = (unsigned long) rule; if (flow) cls_flow->rule = flow->rule; } static int nft_flow_offload_cmd(const struct nft_chain *chain, const struct nft_rule *rule, struct nft_flow_rule *flow, enum flow_cls_command command, struct flow_cls_offload *cls_flow) { struct netlink_ext_ack extack = {}; struct nft_base_chain *basechain; if (!nft_is_base_chain(chain)) return -EOPNOTSUPP; basechain = nft_base_chain(chain); nft_flow_cls_offload_setup(cls_flow, basechain, rule, flow, &extack, command); return nft_setup_cb_call(TC_SETUP_CLSFLOWER, cls_flow, &basechain->flow_block.cb_list); } static int nft_flow_offload_rule(const struct nft_chain *chain, struct nft_rule *rule, struct nft_flow_rule *flow, enum flow_cls_command command) { struct flow_cls_offload cls_flow; return nft_flow_offload_cmd(chain, rule, flow, command, &cls_flow); } int nft_flow_rule_stats(const struct nft_chain *chain, const struct nft_rule *rule) { struct flow_cls_offload cls_flow = {}; struct nft_expr *expr, *next; int err; err = nft_flow_offload_cmd(chain, rule, NULL, FLOW_CLS_STATS, &cls_flow); if (err < 0) return err; nft_rule_for_each_expr(expr, next, rule) { if (expr->ops->offload_stats) expr->ops->offload_stats(expr, &cls_flow.stats); } return 0; } static int nft_flow_offload_bind(struct flow_block_offload *bo, struct nft_base_chain *basechain) { list_splice(&bo->cb_list, &basechain->flow_block.cb_list); return 0; } static int nft_flow_offload_unbind(struct flow_block_offload *bo, struct nft_base_chain *basechain) { struct flow_block_cb *block_cb, *next; struct flow_cls_offload cls_flow; struct netlink_ext_ack extack; 
struct nft_chain *chain; struct nft_rule *rule; chain = &basechain->chain; list_for_each_entry(rule, &chain->rules, list) { memset(&extack, 0, sizeof(extack)); nft_flow_cls_offload_setup(&cls_flow, basechain, rule, NULL, &extack, FLOW_CLS_DESTROY); nft_setup_cb_call(TC_SETUP_CLSFLOWER, &cls_flow, &bo->cb_list); } list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) { list_del(&block_cb->list); flow_block_cb_free(block_cb); } return 0; } static int nft_block_setup(struct nft_base_chain *basechain, struct flow_block_offload *bo, enum flow_block_command cmd) { int err; switch (cmd) { case FLOW_BLOCK_BIND: err = nft_flow_offload_bind(bo, basechain); break; case FLOW_BLOCK_UNBIND: err = nft_flow_offload_unbind(bo, basechain); break; default: WARN_ON_ONCE(1); err = -EOPNOTSUPP; } return err; } static void nft_flow_block_offload_init(struct flow_block_offload *bo, struct net *net, enum flow_block_command cmd, struct nft_base_chain *basechain, struct netlink_ext_ack *extack) { memset(bo, 0, sizeof(*bo)); bo->net = net; bo->block = &basechain->flow_block; bo->command = cmd; bo->binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS; bo->extack = extack; bo->cb_list_head = &basechain->flow_block.cb_list; INIT_LIST_HEAD(&bo->cb_list); } static int nft_block_offload_cmd(struct nft_base_chain *chain, struct net_device *dev, enum flow_block_command cmd) { struct netlink_ext_ack extack = {}; struct flow_block_offload bo; int err; nft_flow_block_offload_init(&bo, dev_net(dev), cmd, chain, &extack); err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo); if (err < 0) return err; return nft_block_setup(chain, &bo, cmd); } static void nft_indr_block_cleanup(struct flow_block_cb *block_cb) { struct nft_base_chain *basechain = block_cb->indr.data; struct net_device *dev = block_cb->indr.dev; struct netlink_ext_ack extack = {}; struct nftables_pernet *nft_net; struct net *net = dev_net(dev); struct flow_block_offload bo; nft_flow_block_offload_init(&bo, dev_net(dev), FLOW_BLOCK_UNBIND, basechain, &extack); nft_net = nft_pernet(net); mutex_lock(&nft_net->commit_mutex); list_del(&block_cb->driver_list); list_move(&block_cb->list, &bo.cb_list); nft_flow_offload_unbind(&bo, basechain); mutex_unlock(&nft_net->commit_mutex); } static int nft_indr_block_offload_cmd(struct nft_base_chain *basechain, struct net_device *dev, enum flow_block_command cmd) { struct netlink_ext_ack extack = {}; struct flow_block_offload bo; int err; nft_flow_block_offload_init(&bo, dev_net(dev), cmd, basechain, &extack); err = flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_BLOCK, basechain, &bo, nft_indr_block_cleanup); if (err < 0) return err; if (list_empty(&bo.cb_list)) return -EOPNOTSUPP; return nft_block_setup(basechain, &bo, cmd); } static int nft_chain_offload_cmd(struct nft_base_chain *basechain, struct net_device *dev, enum flow_block_command cmd) { int err; if (dev->netdev_ops->ndo_setup_tc) err = nft_block_offload_cmd(basechain, dev, cmd); else err = nft_indr_block_offload_cmd(basechain, dev, cmd); return err; } static int nft_flow_block_chain(struct nft_base_chain *basechain, const struct net_device *this_dev, enum flow_block_command cmd) { struct nf_hook_ops *ops; struct nft_hook *hook; int err, i = 0; list_for_each_entry(hook, &basechain->hook_list, list) { list_for_each_entry(ops, &hook->ops_list, list) { if (this_dev && this_dev != ops->dev) continue; err = nft_chain_offload_cmd(basechain, ops->dev, cmd); if (err < 0 && cmd == FLOW_BLOCK_BIND) { if (!this_dev) goto err_flow_block; return err; } i++; } } return 0; 
err_flow_block: list_for_each_entry(hook, &basechain->hook_list, list) { list_for_each_entry(ops, &hook->ops_list, list) { if (i-- <= 0) break; nft_chain_offload_cmd(basechain, ops->dev, FLOW_BLOCK_UNBIND); } } return err; } static int nft_flow_offload_chain(struct nft_chain *chain, u8 *ppolicy, enum flow_block_command cmd) { struct nft_base_chain *basechain; u8 policy; if (!nft_is_base_chain(chain)) return -EOPNOTSUPP; basechain = nft_base_chain(chain); policy = ppolicy ? *ppolicy : basechain->policy; /* Only default policy to accept is supported for now. */ if (cmd == FLOW_BLOCK_BIND && policy == NF_DROP) return -EOPNOTSUPP; return nft_flow_block_chain(basechain, NULL, cmd); } static void nft_flow_rule_offload_abort(struct net *net, struct nft_trans *trans) { struct nftables_pernet *nft_net = nft_pernet(net); int err = 0; list_for_each_entry_continue_reverse(trans, &nft_net->commit_list, list) { if (trans->table->family != NFPROTO_NETDEV) continue; switch (trans->msg_type) { case NFT_MSG_NEWCHAIN: if (!(nft_trans_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD) || nft_trans_chain_update(trans)) continue; err = nft_flow_offload_chain(nft_trans_chain(trans), NULL, FLOW_BLOCK_UNBIND); break; case NFT_MSG_DELCHAIN: if (!(nft_trans_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD)) continue; err = nft_flow_offload_chain(nft_trans_chain(trans), NULL, FLOW_BLOCK_BIND); break; case NFT_MSG_NEWRULE: if (!(nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD)) continue; err = nft_flow_offload_rule(nft_trans_rule_chain(trans), nft_trans_rule(trans), NULL, FLOW_CLS_DESTROY); break; case NFT_MSG_DELRULE: if (!(nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD)) continue; err = nft_flow_offload_rule(nft_trans_rule_chain(trans), nft_trans_rule(trans), nft_trans_flow_rule(trans), FLOW_CLS_REPLACE); break; } if (WARN_ON_ONCE(err)) break; } } int nft_flow_rule_offload_commit(struct net *net) { struct nftables_pernet *nft_net = nft_pernet(net); struct nft_trans *trans; int err = 0; u8 policy; list_for_each_entry(trans, &nft_net->commit_list, list) { if (trans->table->family != NFPROTO_NETDEV) continue; switch (trans->msg_type) { case NFT_MSG_NEWCHAIN: if (!(nft_trans_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD) || nft_trans_chain_update(trans)) continue; policy = nft_trans_chain_policy(trans); err = nft_flow_offload_chain(nft_trans_chain(trans), &policy, FLOW_BLOCK_BIND); break; case NFT_MSG_DELCHAIN: if (!(nft_trans_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD)) continue; policy = nft_trans_chain_policy(trans); err = nft_flow_offload_chain(nft_trans_chain(trans), &policy, FLOW_BLOCK_UNBIND); break; case NFT_MSG_NEWRULE: if (!(nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD)) continue; if (trans->flags & NLM_F_REPLACE || !(trans->flags & NLM_F_APPEND)) { err = -EOPNOTSUPP; break; } err = nft_flow_offload_rule(nft_trans_rule_chain(trans), nft_trans_rule(trans), nft_trans_flow_rule(trans), FLOW_CLS_REPLACE); break; case NFT_MSG_DELRULE: if (!(nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD)) continue; err = nft_flow_offload_rule(nft_trans_rule_chain(trans), nft_trans_rule(trans), NULL, FLOW_CLS_DESTROY); break; } if (err) { nft_flow_rule_offload_abort(net, trans); break; } } return err; } static struct nft_chain *__nft_offload_get_chain(const struct nftables_pernet *nft_net, struct net_device *dev) { struct nft_base_chain *basechain; struct nft_hook *hook, *found; const struct nft_table *table; struct nft_chain *chain; list_for_each_entry(table, &nft_net->tables, list) { if (table->family 
!= NFPROTO_NETDEV) continue; list_for_each_entry(chain, &table->chains, list) { if (!nft_is_base_chain(chain) || !(chain->flags & NFT_CHAIN_HW_OFFLOAD)) continue; found = NULL; basechain = nft_base_chain(chain); list_for_each_entry(hook, &basechain->hook_list, list) { if (!nft_hook_find_ops(hook, dev)) continue; found = hook; break; } if (!found) continue; return chain; } } return NULL; } static int nft_offload_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct nftables_pernet *nft_net; struct net *net = dev_net(dev); struct nft_chain *chain; if (event != NETDEV_UNREGISTER) return NOTIFY_DONE; nft_net = nft_pernet(net); mutex_lock(&nft_net->commit_mutex); chain = __nft_offload_get_chain(nft_net, dev); if (chain) nft_flow_block_chain(nft_base_chain(chain), dev, FLOW_BLOCK_UNBIND); mutex_unlock(&nft_net->commit_mutex); return NOTIFY_DONE; } static struct notifier_block nft_offload_netdev_notifier = { .notifier_call = nft_offload_netdev_event, }; int nft_offload_init(void) { return register_netdevice_notifier(&nft_offload_netdev_notifier); } void nft_offload_exit(void) { unregister_netdevice_notifier(&nft_offload_netdev_notifier); }
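To make the callback contract above concrete, here is a hedged, driver-side sketch of the kind of flow block callback that nft_setup_cb_call() ends up invoking with TC_SETUP_CLSFLOWER. The function name and the hardware-programming steps are invented for illustration and are not part of nf_tables; only the flow_cls_offload/flow_rule accessors shown are real kernel API.

#include <linux/errno.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>

/* Hypothetical driver callback, as would be registered through a
 * flow_block_cb and reached via nft_setup_cb_call() above.
 */
static int example_flow_block_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	struct flow_cls_offload *f = type_data;
	struct flow_rule *rule;
	struct flow_match_basic basic;

	if (type != TC_SETUP_CLSFLOWER)
		return -EOPNOTSUPP;

	switch (f->command) {
	case FLOW_CLS_REPLACE:
		rule = flow_cls_offload_flow_rule(f);
		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
			flow_rule_match_basic(rule, &basic);
			/* ... program hardware from basic.key / basic.mask ... */
		}
		/* f->cookie identifies the nft rule for later destroy/stats */
		return 0;
	case FLOW_CLS_DESTROY:
		/* ... remove the hardware entry keyed by f->cookie ... */
		return 0;
	case FLOW_CLS_STATS:
		/* ... fill f->stats so ->offload_stats can read it back ... */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}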
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Robert Olsson <robert.olsson@its.uu.se> Uppsala Universitet * & Swedish University of Agricultural Sciences. * * Jens Laas <jens.laas@data.slu.se> Swedish University of * Agricultural Sciences. * * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet * * This work is based on the LPC-trie which is originally described in: * * An experimental study of compression methods for dynamic tries * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002. * https://www.csc.kth.se/~snilsson/software/dyntrie2/ * * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999 * * Code from fib_hash has been reused which includes the following header: * * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * IPv4 FIB: lookup engine and maintenance routines. * * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> * * Substantial contributions to this work comes from: * * David S. Miller, <davem@davemloft.net> * Stephen Hemminger <shemminger@osdl.org> * Paul E.
McKenney <paulmck@us.ibm.com> * Patrick McHardy <kaber@trash.net> */ #include <linux/cache.h> #include <linux/uaccess.h> #include <linux/bitops.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/errno.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/inetdevice.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/proc_fs.h> #include <linux/rcupdate.h> #include <linux/rcupdate_wait.h> #include <linux/skbuff.h> #include <linux/netlink.h> #include <linux/init.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/vmalloc.h> #include <linux/notifier.h> #include <net/net_namespace.h> #include <net/inet_dscp.h> #include <net/ip.h> #include <net/protocol.h> #include <net/route.h> #include <net/tcp.h> #include <net/sock.h> #include <net/ip_fib.h> #include <net/fib_notifier.h> #include <trace/events/fib.h> #include "fib_lookup.h" static int call_fib_entry_notifier(struct notifier_block *nb, enum fib_event_type event_type, u32 dst, int dst_len, struct fib_alias *fa, struct netlink_ext_ack *extack) { struct fib_entry_notifier_info info = { .info.extack = extack, .dst = dst, .dst_len = dst_len, .fi = fa->fa_info, .dscp = fa->fa_dscp, .type = fa->fa_type, .tb_id = fa->tb_id, }; return call_fib4_notifier(nb, event_type, &info.info); } static int call_fib_entry_notifiers(struct net *net, enum fib_event_type event_type, u32 dst, int dst_len, struct fib_alias *fa, struct netlink_ext_ack *extack) { struct fib_entry_notifier_info info = { .info.extack = extack, .dst = dst, .dst_len = dst_len, .fi = fa->fa_info, .dscp = fa->fa_dscp, .type = fa->fa_type, .tb_id = fa->tb_id, }; return call_fib4_notifiers(net, event_type, &info.info); } #define MAX_STAT_DEPTH 32 #define KEYLENGTH (8*sizeof(t_key)) #define KEY_MAX ((t_key)~0) typedef unsigned int t_key; #define IS_TRIE(n) ((n)->pos >= KEYLENGTH) #define IS_TNODE(n) ((n)->bits) #define IS_LEAF(n) (!(n)->bits) struct key_vector { t_key key; unsigned char pos; /* 2log(KEYLENGTH) bits needed */ unsigned char bits; /* 2log(KEYLENGTH) bits needed */ unsigned char slen; union { /* This list pointer if valid if (pos | bits) == 0 (LEAF) */ struct hlist_head leaf; /* This array is valid if (pos | bits) > 0 (TNODE) */ DECLARE_FLEX_ARRAY(struct key_vector __rcu *, tnode); }; }; struct tnode { struct rcu_head rcu; t_key empty_children; /* KEYLENGTH bits needed */ t_key full_children; /* KEYLENGTH bits needed */ struct key_vector __rcu *parent; struct key_vector kv[1]; #define tn_bits kv[0].bits }; #define TNODE_SIZE(n) offsetof(struct tnode, kv[0].tnode[n]) #define LEAF_SIZE TNODE_SIZE(1) #ifdef CONFIG_IP_FIB_TRIE_STATS struct trie_use_stats { unsigned int gets; unsigned int backtrack; unsigned int semantic_match_passed; unsigned int semantic_match_miss; unsigned int null_node_hit; unsigned int resize_node_skipped; }; #endif struct trie_stat { unsigned int totdepth; unsigned int maxdepth; unsigned int tnodes; unsigned int leaves; unsigned int nullpointers; unsigned int prefixes; unsigned int nodesizes[MAX_STAT_DEPTH]; }; struct trie { struct key_vector kv[1]; #ifdef CONFIG_IP_FIB_TRIE_STATS struct trie_use_stats __percpu *stats; #endif }; static struct key_vector *resize(struct trie *t, struct key_vector *tn); static unsigned int tnode_free_size; /* * synchronize_rcu after call_rcu for outstanding dirty memory; it should be * especially useful before resizing the root node with 
PREEMPT_NONE configs; * the value was obtained experimentally, aiming to avoid visible slowdown. */ unsigned int sysctl_fib_sync_mem = 512 * 1024; unsigned int sysctl_fib_sync_mem_min = 64 * 1024; unsigned int sysctl_fib_sync_mem_max = 64 * 1024 * 1024; static struct kmem_cache *fn_alias_kmem __ro_after_init; static struct kmem_cache *trie_leaf_kmem __ro_after_init; static inline struct tnode *tn_info(struct key_vector *kv) { return container_of(kv, struct tnode, kv[0]); } /* caller must hold RTNL */ #define node_parent(tn) rtnl_dereference(tn_info(tn)->parent) #define get_child(tn, i) rtnl_dereference((tn)->tnode[i]) /* caller must hold RCU read lock or RTNL */ #define node_parent_rcu(tn) rcu_dereference_rtnl(tn_info(tn)->parent) #define get_child_rcu(tn, i) rcu_dereference_rtnl((tn)->tnode[i]) /* wrapper for rcu_assign_pointer */ static inline void node_set_parent(struct key_vector *n, struct key_vector *tp) { if (n) rcu_assign_pointer(tn_info(n)->parent, tp); } #define NODE_INIT_PARENT(n, p) RCU_INIT_POINTER(tn_info(n)->parent, p) /* This provides us with the number of children in this node, in the case of a * leaf this will return 0 meaning none of the children are accessible. */ static inline unsigned long child_length(const struct key_vector *tn) { return (1ul << tn->bits) & ~(1ul); } #define get_cindex(key, kv) (((key) ^ (kv)->key) >> (kv)->pos) static inline unsigned long get_index(t_key key, struct key_vector *kv) { unsigned long index = key ^ kv->key; if ((BITS_PER_LONG <= KEYLENGTH) && (KEYLENGTH == kv->pos)) return 0; return index >> kv->pos; } /* To understand this stuff, an understanding of keys and all their bits is * necessary. Every node in the trie has a key associated with it, but not * all of the bits in that key are significant. * * Consider a node 'n' and its parent 'tp'. * * If n is a leaf, every bit in its key is significant. Its presence is * necessitated by path compression, since during a tree traversal (when * searching for a leaf - unless we are doing an insertion) we will completely * ignore all skipped bits we encounter. Thus we need to verify, at the end of * a potentially successful search, that we have indeed been walking the * correct key path. * * Note that we can never "miss" the correct key in the tree if present by * following the wrong path. Path compression ensures that segments of the key * that are the same for all keys with a given prefix are skipped, but the * skipped part *is* identical for each node in the subtrie below the skipped * bit! trie_insert() in this implementation takes care of that. * * if n is an internal node - a 'tnode' here, the various parts of its key * have many different meanings. * * Example: * _________________________________________________________________ * | i | i | i | i | i | i | i | N | N | N | S | S | S | S | S | C | * ----------------------------------------------------------------- * 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 * * _________________________________________________________________ * | C | C | C | u | u | u | u | u | u | u | u | u | u | u | u | u | * ----------------------------------------------------------------- * 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0 * * tp->pos = 22 * tp->bits = 3 * n->pos = 13 * n->bits = 4 * * First, let's just ignore the bits that come before the parent tp, that is * the bits from (tp->pos + tp->bits) to 31. They are *known* but at this * point we do not use them for anything. 
* * The bits from (tp->pos) to (tp->pos + tp->bits - 1) - "N", above - are the * index into the parent's child array. That is, they will be used to find * 'n' among tp's children. * * The bits from (n->pos + n->bits) to (tp->pos - 1) - "S" - are skipped bits * for the node n. * * All the bits we have seen so far are significant to the node n. The rest * of the bits are really not needed or indeed known in n->key. * * The bits from (n->pos) to (n->pos + n->bits - 1) - "C" - are the index into * n's child array, and will of course be different for each child. * * The rest of the bits, from 0 to (n->pos -1) - "u" - are completely unknown * at this point. */ static const int halve_threshold = 25; static const int inflate_threshold = 50; static const int halve_threshold_root = 15; static const int inflate_threshold_root = 30; static inline void alias_free_mem_rcu(struct fib_alias *fa) { kfree_rcu(fa, rcu); } #define TNODE_VMALLOC_MAX \ ilog2((SIZE_MAX - TNODE_SIZE(0)) / sizeof(struct key_vector *)) static void __node_free_rcu(struct rcu_head *head) { struct tnode *n = container_of(head, struct tnode, rcu); if (!n->tn_bits) kmem_cache_free(trie_leaf_kmem, n); else kvfree(n); } #define node_free(n) call_rcu(&tn_info(n)->rcu, __node_free_rcu) static struct tnode *tnode_alloc(int bits) { size_t size; /* verify bits is within bounds */ if (bits > TNODE_VMALLOC_MAX) return NULL; /* determine size and verify it is non-zero and didn't overflow */ size = TNODE_SIZE(1ul << bits); if (size <= PAGE_SIZE) return kzalloc(size, GFP_KERNEL); else return vzalloc(size); } static inline void empty_child_inc(struct key_vector *n) { tn_info(n)->empty_children++; if (!tn_info(n)->empty_children) tn_info(n)->full_children++; } static inline void empty_child_dec(struct key_vector *n) { if (!tn_info(n)->empty_children) tn_info(n)->full_children--; tn_info(n)->empty_children--; } static struct key_vector *leaf_new(t_key key, struct fib_alias *fa) { struct key_vector *l; struct tnode *kv; kv = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL); if (!kv) return NULL; /* initialize key vector */ l = kv->kv; l->key = key; l->pos = 0; l->bits = 0; l->slen = fa->fa_slen; /* link leaf to fib alias */ INIT_HLIST_HEAD(&l->leaf); hlist_add_head(&fa->fa_list, &l->leaf); return l; } static struct key_vector *tnode_new(t_key key, int pos, int bits) { unsigned int shift = pos + bits; struct key_vector *tn; struct tnode *tnode; /* verify bits and pos their msb bits clear and values are valid */ BUG_ON(!bits || (shift > KEYLENGTH)); tnode = tnode_alloc(bits); if (!tnode) return NULL; pr_debug("AT %p s=%zu %zu\n", tnode, TNODE_SIZE(0), sizeof(struct key_vector *) << bits); if (bits == KEYLENGTH) tnode->full_children = 1; else tnode->empty_children = 1ul << bits; tn = tnode->kv; tn->key = (shift < KEYLENGTH) ? (key >> shift) << shift : 0; tn->pos = pos; tn->bits = bits; tn->slen = pos; return tn; } /* Check whether a tnode 'n' is "full", i.e. it is an internal node * and no bits are skipped. See discussion in dyntree paper p. 6 */ static inline int tnode_full(struct key_vector *tn, struct key_vector *n) { return n && ((n->pos + n->bits) == tn->pos) && IS_TNODE(n); } /* Add a child at position i overwriting the old value. * Update the value of full_children and empty_children. 
*/ static void put_child(struct key_vector *tn, unsigned long i, struct key_vector *n) { struct key_vector *chi = get_child(tn, i); int isfull, wasfull; BUG_ON(i >= child_length(tn)); /* update emptyChildren, overflow into fullChildren */ if (!n && chi) empty_child_inc(tn); if (n && !chi) empty_child_dec(tn); /* update fullChildren */ wasfull = tnode_full(tn, chi); isfull = tnode_full(tn, n); if (wasfull && !isfull) tn_info(tn)->full_children--; else if (!wasfull && isfull) tn_info(tn)->full_children++; if (n && (tn->slen < n->slen)) tn->slen = n->slen; rcu_assign_pointer(tn->tnode[i], n); } static void update_children(struct key_vector *tn) { unsigned long i; /* update all of the child parent pointers */ for (i = child_length(tn); i;) { struct key_vector *inode = get_child(tn, --i); if (!inode) continue; /* Either update the children of a tnode that * already belongs to us or update the child * to point to ourselves. */ if (node_parent(inode) == tn) update_children(inode); else node_set_parent(inode, tn); } } static inline void put_child_root(struct key_vector *tp, t_key key, struct key_vector *n) { if (IS_TRIE(tp)) rcu_assign_pointer(tp->tnode[0], n); else put_child(tp, get_index(key, tp), n); } static inline void tnode_free_init(struct key_vector *tn) { tn_info(tn)->rcu.next = NULL; } static inline void tnode_free_append(struct key_vector *tn, struct key_vector *n) { tn_info(n)->rcu.next = tn_info(tn)->rcu.next; tn_info(tn)->rcu.next = &tn_info(n)->rcu; } static void tnode_free(struct key_vector *tn) { struct callback_head *head = &tn_info(tn)->rcu; while (head) { head = head->next; tnode_free_size += TNODE_SIZE(1ul << tn->bits); node_free(tn); tn = container_of(head, struct tnode, rcu)->kv; } if (tnode_free_size >= READ_ONCE(sysctl_fib_sync_mem)) { tnode_free_size = 0; synchronize_net(); } } static struct key_vector *replace(struct trie *t, struct key_vector *oldtnode, struct key_vector *tn) { struct key_vector *tp = node_parent(oldtnode); unsigned long i; /* setup the parent pointer out of and back into this node */ NODE_INIT_PARENT(tn, tp); put_child_root(tp, tn->key, tn); /* update all of the child parent pointers */ update_children(tn); /* all pointers should be clean so we are done */ tnode_free(oldtnode); /* resize children now that oldtnode is freed */ for (i = child_length(tn); i;) { struct key_vector *inode = get_child(tn, --i); /* resize child node */ if (tnode_full(tn, inode)) tn = resize(t, inode); } return tp; } static struct key_vector *inflate(struct trie *t, struct key_vector *oldtnode) { struct key_vector *tn; unsigned long i; t_key m; pr_debug("In inflate\n"); tn = tnode_new(oldtnode->key, oldtnode->pos - 1, oldtnode->bits + 1); if (!tn) goto notnode; /* prepare oldtnode to be freed */ tnode_free_init(oldtnode); /* Assemble all of the pointers in our cluster, in this case that * represents all of the pointers out of our allocated nodes that * point to existing tnodes and the links between our allocated * nodes. 
*/ for (i = child_length(oldtnode), m = 1u << tn->pos; i;) { struct key_vector *inode = get_child(oldtnode, --i); struct key_vector *node0, *node1; unsigned long j, k; /* An empty child */ if (!inode) continue; /* A leaf or an internal node with skipped bits */ if (!tnode_full(oldtnode, inode)) { put_child(tn, get_index(inode->key, tn), inode); continue; } /* drop the node in the old tnode free list */ tnode_free_append(oldtnode, inode); /* An internal node with two children */ if (inode->bits == 1) { put_child(tn, 2 * i + 1, get_child(inode, 1)); put_child(tn, 2 * i, get_child(inode, 0)); continue; } /* We will replace this node 'inode' with two new * ones, 'node0' and 'node1', each with half of the * original children. The two new nodes will have * a position one bit further down the key and this * means that the "significant" part of their keys * (see the discussion near the top of this file) * will differ by one bit, which will be "0" in * node0's key and "1" in node1's key. Since we are * moving the key position by one step, the bit that * we are moving away from - the bit at position * (tn->pos) - is the one that will differ between * node0 and node1. So... we synthesize that bit in the * two new keys. */ node1 = tnode_new(inode->key | m, inode->pos, inode->bits - 1); if (!node1) goto nomem; node0 = tnode_new(inode->key, inode->pos, inode->bits - 1); tnode_free_append(tn, node1); if (!node0) goto nomem; tnode_free_append(tn, node0); /* populate child pointers in new nodes */ for (k = child_length(inode), j = k / 2; j;) { put_child(node1, --j, get_child(inode, --k)); put_child(node0, j, get_child(inode, j)); put_child(node1, --j, get_child(inode, --k)); put_child(node0, j, get_child(inode, j)); } /* link new nodes to parent */ NODE_INIT_PARENT(node1, tn); NODE_INIT_PARENT(node0, tn); /* link parent to nodes */ put_child(tn, 2 * i + 1, node1); put_child(tn, 2 * i, node0); } /* setup the parent pointers into and out of this node */ return replace(t, oldtnode, tn); nomem: /* all pointers should be clean so we are done */ tnode_free(tn); notnode: return NULL; } static struct key_vector *halve(struct trie *t, struct key_vector *oldtnode) { struct key_vector *tn; unsigned long i; pr_debug("In halve\n"); tn = tnode_new(oldtnode->key, oldtnode->pos + 1, oldtnode->bits - 1); if (!tn) goto notnode; /* prepare oldtnode to be freed */ tnode_free_init(oldtnode); /* Assemble all of the pointers in our cluster, in this case that * represents all of the pointers out of our allocated nodes that * point to existing tnodes and the links between our allocated * nodes. */ for (i = child_length(oldtnode); i;) { struct key_vector *node1 = get_child(oldtnode, --i); struct key_vector *node0 = get_child(oldtnode, --i); struct key_vector *inode; /* At least one of the children is empty */ if (!node1 || !node0) { put_child(tn, i / 2, node1 ? 
: node0); continue; } /* Two nonempty children */ inode = tnode_new(node0->key, oldtnode->pos, 1); if (!inode) goto nomem; tnode_free_append(tn, inode); /* initialize pointers out of node */ put_child(inode, 1, node1); put_child(inode, 0, node0); NODE_INIT_PARENT(inode, tn); /* link parent to node */ put_child(tn, i / 2, inode); } /* setup the parent pointers into and out of this node */ return replace(t, oldtnode, tn); nomem: /* all pointers should be clean so we are done */ tnode_free(tn); notnode: return NULL; } static struct key_vector *collapse(struct trie *t, struct key_vector *oldtnode) { struct key_vector *n, *tp; unsigned long i; /* scan the tnode looking for that one child that might still exist */ for (n = NULL, i = child_length(oldtnode); !n && i;) n = get_child(oldtnode, --i); /* compress one level */ tp = node_parent(oldtnode); put_child_root(tp, oldtnode->key, n); node_set_parent(n, tp); /* drop dead node */ node_free(oldtnode); return tp; } static unsigned char update_suffix(struct key_vector *tn) { unsigned char slen = tn->pos; unsigned long stride, i; unsigned char slen_max; /* only vector 0 can have a suffix length greater than or equal to * tn->pos + tn->bits, the second highest node will have a suffix * length at most of tn->pos + tn->bits - 1 */ slen_max = min_t(unsigned char, tn->pos + tn->bits - 1, tn->slen); /* search though the list of children looking for nodes that might * have a suffix greater than the one we currently have. This is * why we start with a stride of 2 since a stride of 1 would * represent the nodes with suffix length equal to tn->pos */ for (i = 0, stride = 0x2ul ; i < child_length(tn); i += stride) { struct key_vector *n = get_child(tn, i); if (!n || (n->slen <= slen)) continue; /* update stride and slen based on new value */ stride <<= (n->slen - slen); slen = n->slen; i &= ~(stride - 1); /* stop searching if we have hit the maximum possible value */ if (slen >= slen_max) break; } tn->slen = slen; return slen; } /* From "Implementing a dynamic compressed trie" by Stefan Nilsson of * the Helsinki University of Technology and Matti Tikkanen of Nokia * Telecommunications, page 6: * "A node is doubled if the ratio of non-empty children to all * children in the *doubled* node is at least 'high'." * * 'high' in this instance is the variable 'inflate_threshold'. It * is expressed as a percentage, so we multiply it with * child_length() and instead of multiplying by 2 (since the * child array will be doubled by inflate()) and multiplying * the left-hand side by 100 (to handle the percentage thing) we * multiply the left-hand side by 50. * * The left-hand side may look a bit weird: child_length(tn) * - tn->empty_children is of course the number of non-null children * in the current node. tn->full_children is the number of "full" * children, that is non-null tnodes with a skip value of 0. * All of those will be doubled in the resulting inflated tnode, so * we just count them one extra time here. * * A clearer way to write this would be: * * to_be_doubled = tn->full_children; * not_to_be_doubled = child_length(tn) - tn->empty_children - * tn->full_children; * * new_child_length = child_length(tn) * 2; * * new_fill_factor = 100 * (not_to_be_doubled + 2*to_be_doubled) / * new_child_length; * if (new_fill_factor >= inflate_threshold) * * ...and so on, tho it would mess up the while () loop. 
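 * A worked example with made-up numbers: for a non-root tnode with
 * child_length() == 16, 6 empty children and 4 full children,
 * to_be_doubled = 4 and not_to_be_doubled = 16 - 6 - 4 = 6, so
 * new_fill_factor = 100 * (6 + 2*4) / 32 = 43, which is below the
 * inflate_threshold of 50 used for non-root nodes and the node is
 * not doubled.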
* * anyway, * 100 * (not_to_be_doubled + 2*to_be_doubled) / new_child_length >= * inflate_threshold * * avoid a division: * 100 * (not_to_be_doubled + 2*to_be_doubled) >= * inflate_threshold * new_child_length * * expand not_to_be_doubled and to_be_doubled, and shorten: * 100 * (child_length(tn) - tn->empty_children + * tn->full_children) >= inflate_threshold * new_child_length * * expand new_child_length: * 100 * (child_length(tn) - tn->empty_children + * tn->full_children) >= * inflate_threshold * child_length(tn) * 2 * * shorten again: * 50 * (tn->full_children + child_length(tn) - * tn->empty_children) >= inflate_threshold * * child_length(tn) * */ static inline bool should_inflate(struct key_vector *tp, struct key_vector *tn) { unsigned long used = child_length(tn); unsigned long threshold = used; /* Keep root node larger */ threshold *= IS_TRIE(tp) ? inflate_threshold_root : inflate_threshold; used -= tn_info(tn)->empty_children; used += tn_info(tn)->full_children; /* if bits == KEYLENGTH then pos = 0, and will fail below */ return (used > 1) && tn->pos && ((50 * used) >= threshold); } static inline bool should_halve(struct key_vector *tp, struct key_vector *tn) { unsigned long used = child_length(tn); unsigned long threshold = used; /* Keep root node larger */ threshold *= IS_TRIE(tp) ? halve_threshold_root : halve_threshold; used -= tn_info(tn)->empty_children; /* if bits == KEYLENGTH then used = 100% on wrap, and will fail below */ return (used > 1) && (tn->bits > 1) && ((100 * used) < threshold); } static inline bool should_collapse(struct key_vector *tn) { unsigned long used = child_length(tn); used -= tn_info(tn)->empty_children; /* account for bits == KEYLENGTH case */ if ((tn->bits == KEYLENGTH) && tn_info(tn)->full_children) used -= KEY_MAX; /* One child or none, time to drop us from the trie */ return used < 2; } #define MAX_WORK 10 static struct key_vector *resize(struct trie *t, struct key_vector *tn) { #ifdef CONFIG_IP_FIB_TRIE_STATS struct trie_use_stats __percpu *stats = t->stats; #endif struct key_vector *tp = node_parent(tn); unsigned long cindex = get_index(tn->key, tp); int max_work = MAX_WORK; pr_debug("In tnode_resize %p inflate_threshold=%d threshold=%d\n", tn, inflate_threshold, halve_threshold); /* track the tnode via the pointer from the parent instead of * doing it ourselves. This way we can let RCU fully do its * thing without us interfering */ BUG_ON(tn != get_child(tp, cindex)); /* Double as long as the resulting node has a number of * nonempty nodes that are above the threshold. */ while (should_inflate(tp, tn) && max_work) { tp = inflate(t, tn); if (!tp) { #ifdef CONFIG_IP_FIB_TRIE_STATS this_cpu_inc(stats->resize_node_skipped); #endif break; } max_work--; tn = get_child(tp, cindex); } /* update parent in case inflate failed */ tp = node_parent(tn); /* Return if at least one inflate is run */ if (max_work != MAX_WORK) return tp; /* Halve as long as the number of empty children in this * node is above threshold. 
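 * As a rough illustration (made-up numbers): a non-root tnode with
 * child_length() == 16 and 13 empty children has used = 3 and
 * threshold = 16 * 25 = 400, so 100 * 3 = 300 is below the threshold and
 * should_halve() asks for a halve; with only 10 empty children,
 * 100 * 6 = 600 is not below 400 and the node is kept as is.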
*/ while (should_halve(tp, tn) && max_work) { tp = halve(t, tn); if (!tp) { #ifdef CONFIG_IP_FIB_TRIE_STATS this_cpu_inc(stats->resize_node_skipped); #endif break; } max_work--; tn = get_child(tp, cindex); } /* Only one child remains */ if (should_collapse(tn)) return collapse(t, tn); /* update parent in case halve failed */ return node_parent(tn); } static void node_pull_suffix(struct key_vector *tn, unsigned char slen) { unsigned char node_slen = tn->slen; while ((node_slen > tn->pos) && (node_slen > slen)) { slen = update_suffix(tn); if (node_slen == slen) break; tn = node_parent(tn); node_slen = tn->slen; } } static void node_push_suffix(struct key_vector *tn, unsigned char slen) { while (tn->slen < slen) { tn->slen = slen; tn = node_parent(tn); } } /* rcu_read_lock needs to be hold by caller from readside */ static struct key_vector *fib_find_node(struct trie *t, struct key_vector **tp, u32 key) { struct key_vector *pn, *n = t->kv; unsigned long index = 0; do { pn = n; n = get_child_rcu(n, index); if (!n) break; index = get_cindex(key, n); /* This bit of code is a bit tricky but it combines multiple * checks into a single check. The prefix consists of the * prefix plus zeros for the bits in the cindex. The index * is the difference between the key and this value. From * this we can actually derive several pieces of data. * if (index >= (1ul << bits)) * we have a mismatch in skip bits and failed * else * we know the value is cindex * * This check is safe even if bits == KEYLENGTH due to the * fact that we can only allocate a node with 32 bits if a * long is greater than 32 bits. */ if (index >= (1ul << n->bits)) { n = NULL; break; } /* keep searching until we find a perfect match leaf or NULL */ } while (IS_TNODE(n)); *tp = pn; return n; } /* Return the first fib alias matching DSCP with * priority less than or equal to PRIO. * If 'find_first' is set, return the first matching * fib alias, regardless of DSCP and priority. */ static struct fib_alias *fib_find_alias(struct hlist_head *fah, u8 slen, dscp_t dscp, u32 prio, u32 tb_id, bool find_first) { struct fib_alias *fa; if (!fah) return NULL; hlist_for_each_entry(fa, fah, fa_list) { /* Avoid Sparse warning when using dscp_t in inequalities */ u8 __fa_dscp = inet_dscp_to_dsfield(fa->fa_dscp); u8 __dscp = inet_dscp_to_dsfield(dscp); if (fa->fa_slen < slen) continue; if (fa->fa_slen != slen) break; if (fa->tb_id > tb_id) continue; if (fa->tb_id != tb_id) break; if (find_first) return fa; if (__fa_dscp > __dscp) continue; if (fa->fa_info->fib_priority >= prio || __fa_dscp < __dscp) return fa; } return NULL; } static struct fib_alias * fib_find_matching_alias(struct net *net, const struct fib_rt_info *fri) { u8 slen = KEYLENGTH - fri->dst_len; struct key_vector *l, *tp; struct fib_table *tb; struct fib_alias *fa; struct trie *t; tb = fib_get_table(net, fri->tb_id); if (!tb) return NULL; t = (struct trie *)tb->tb_data; l = fib_find_node(t, &tp, be32_to_cpu(fri->dst)); if (!l) return NULL; hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { if (fa->fa_slen == slen && fa->tb_id == fri->tb_id && fa->fa_dscp == fri->dscp && fa->fa_info == fri->fi && fa->fa_type == fri->type) return fa; } return NULL; } void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri) { u8 fib_notify_on_flag_change; struct fib_alias *fa_match; struct sk_buff *skb; int err; rcu_read_lock(); fa_match = fib_find_matching_alias(net, fri); if (!fa_match) goto out; /* These are paired with the WRITE_ONCE() happening in this function. 
* The reason is that we are only protected by RCU at this point. */ if (READ_ONCE(fa_match->offload) == fri->offload && READ_ONCE(fa_match->trap) == fri->trap && READ_ONCE(fa_match->offload_failed) == fri->offload_failed) goto out; WRITE_ONCE(fa_match->offload, fri->offload); WRITE_ONCE(fa_match->trap, fri->trap); fib_notify_on_flag_change = READ_ONCE(net->ipv4.sysctl_fib_notify_on_flag_change); /* 2 means send notifications only if offload_failed was changed. */ if (fib_notify_on_flag_change == 2 && READ_ONCE(fa_match->offload_failed) == fri->offload_failed) goto out; WRITE_ONCE(fa_match->offload_failed, fri->offload_failed); if (!fib_notify_on_flag_change) goto out; skb = nlmsg_new(fib_nlmsg_size(fa_match->fa_info), GFP_ATOMIC); if (!skb) { err = -ENOBUFS; goto errout; } err = fib_dump_info(skb, 0, 0, RTM_NEWROUTE, fri, 0); if (err < 0) { /* -EMSGSIZE implies BUG in fib_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } rtnl_notify(skb, net, 0, RTNLGRP_IPV4_ROUTE, NULL, GFP_ATOMIC); goto out; errout: rtnl_set_sk_err(net, RTNLGRP_IPV4_ROUTE, err); out: rcu_read_unlock(); } EXPORT_SYMBOL_GPL(fib_alias_hw_flags_set); static void trie_rebalance(struct trie *t, struct key_vector *tn) { while (!IS_TRIE(tn)) tn = resize(t, tn); } static int fib_insert_node(struct trie *t, struct key_vector *tp, struct fib_alias *new, t_key key) { struct key_vector *n, *l; l = leaf_new(key, new); if (!l) goto noleaf; /* retrieve child from parent node */ n = get_child(tp, get_index(key, tp)); /* Case 2: n is a LEAF or a TNODE and the key doesn't match. * * Add a new tnode here * first tnode need some special handling * leaves us in position for handling as case 3 */ if (n) { struct key_vector *tn; tn = tnode_new(key, __fls(key ^ n->key), 1); if (!tn) goto notnode; /* initialize routes out of node */ NODE_INIT_PARENT(tn, tp); put_child(tn, get_index(key, tn) ^ 1, n); /* start adding routes into the node */ put_child_root(tp, key, tn); node_set_parent(n, tn); /* parent now has a NULL spot where the leaf can go */ tp = tn; } /* Case 3: n is NULL, and will just insert a new leaf */ node_push_suffix(tp, new->fa_slen); NODE_INIT_PARENT(l, tp); put_child_root(tp, key, l); trie_rebalance(t, tp); return 0; notnode: node_free(l); noleaf: return -ENOMEM; } static int fib_insert_alias(struct trie *t, struct key_vector *tp, struct key_vector *l, struct fib_alias *new, struct fib_alias *fa, t_key key) { if (!l) return fib_insert_node(t, tp, new, key); if (fa) { hlist_add_before_rcu(&new->fa_list, &fa->fa_list); } else { struct fib_alias *last; hlist_for_each_entry(last, &l->leaf, fa_list) { if (new->fa_slen < last->fa_slen) break; if ((new->fa_slen == last->fa_slen) && (new->tb_id > last->tb_id)) break; fa = last; } if (fa) hlist_add_behind_rcu(&new->fa_list, &fa->fa_list); else hlist_add_head_rcu(&new->fa_list, &l->leaf); } /* if we added to the tail node then we need to update slen */ if (l->slen < new->fa_slen) { l->slen = new->fa_slen; node_push_suffix(tp, new->fa_slen); } return 0; } static void fib_remove_alias(struct trie *t, struct key_vector *tp, struct key_vector *l, struct fib_alias *old); /* Caller must hold RTNL. 
*/ int fib_table_insert(struct net *net, struct fib_table *tb, struct fib_config *cfg, struct netlink_ext_ack *extack) { struct trie *t = (struct trie *)tb->tb_data; struct fib_alias *fa, *new_fa; struct key_vector *l, *tp; u16 nlflags = NLM_F_EXCL; struct fib_info *fi; u8 plen = cfg->fc_dst_len; u8 slen = KEYLENGTH - plen; dscp_t dscp; u32 key; int err; key = ntohl(cfg->fc_dst); pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen); fi = fib_create_info(cfg, extack); if (IS_ERR(fi)) { err = PTR_ERR(fi); goto err; } dscp = cfg->fc_dscp; l = fib_find_node(t, &tp, key); fa = l ? fib_find_alias(&l->leaf, slen, dscp, fi->fib_priority, tb->tb_id, false) : NULL; /* Now fa, if non-NULL, points to the first fib alias * with the same keys [prefix,dscp,priority], if such key already * exists or to the node before which we will insert new one. * * If fa is NULL, we will need to allocate a new one and * insert to the tail of the section matching the suffix length * of the new alias. */ if (fa && fa->fa_dscp == dscp && fa->fa_info->fib_priority == fi->fib_priority) { struct fib_alias *fa_first, *fa_match; err = -EEXIST; if (cfg->fc_nlflags & NLM_F_EXCL) goto out; nlflags &= ~NLM_F_EXCL; /* We have 2 goals: * 1. Find exact match for type, scope, fib_info to avoid * duplicate routes * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it */ fa_match = NULL; fa_first = fa; hlist_for_each_entry_from(fa, fa_list) { if ((fa->fa_slen != slen) || (fa->tb_id != tb->tb_id) || (fa->fa_dscp != dscp)) break; if (fa->fa_info->fib_priority != fi->fib_priority) break; if (fa->fa_type == cfg->fc_type && fa->fa_info == fi) { fa_match = fa; break; } } if (cfg->fc_nlflags & NLM_F_REPLACE) { struct fib_info *fi_drop; u8 state; nlflags |= NLM_F_REPLACE; fa = fa_first; if (fa_match) { if (fa == fa_match) err = 0; goto out; } err = -ENOBUFS; new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL); if (!new_fa) goto out; fi_drop = fa->fa_info; new_fa->fa_dscp = fa->fa_dscp; new_fa->fa_info = fi; new_fa->fa_type = cfg->fc_type; state = fa->fa_state; new_fa->fa_state = state & ~FA_S_ACCESSED; new_fa->fa_slen = fa->fa_slen; new_fa->tb_id = tb->tb_id; new_fa->fa_default = -1; new_fa->offload = 0; new_fa->trap = 0; new_fa->offload_failed = 0; hlist_replace_rcu(&fa->fa_list, &new_fa->fa_list); if (fib_find_alias(&l->leaf, fa->fa_slen, 0, 0, tb->tb_id, true) == new_fa) { enum fib_event_type fib_event; fib_event = FIB_EVENT_ENTRY_REPLACE; err = call_fib_entry_notifiers(net, fib_event, key, plen, new_fa, extack); if (err) { hlist_replace_rcu(&new_fa->fa_list, &fa->fa_list); goto out_free_new_fa; } } rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id, &cfg->fc_nlinfo, nlflags); alias_free_mem_rcu(fa); fib_release_info(fi_drop); if (state & FA_S_ACCESSED) rt_cache_flush(cfg->fc_nlinfo.nl_net); goto succeeded; } /* Error if we find a perfect match which * uses the same scope, type, and nexthop * information. */ if (fa_match) goto out; if (cfg->fc_nlflags & NLM_F_APPEND) nlflags |= NLM_F_APPEND; else fa = fa_first; } err = -ENOENT; if (!(cfg->fc_nlflags & NLM_F_CREATE)) goto out; nlflags |= NLM_F_CREATE; err = -ENOBUFS; new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL); if (!new_fa) goto out; new_fa->fa_info = fi; new_fa->fa_dscp = dscp; new_fa->fa_type = cfg->fc_type; new_fa->fa_state = 0; new_fa->fa_slen = slen; new_fa->tb_id = tb->tb_id; new_fa->fa_default = -1; new_fa->offload = 0; new_fa->trap = 0; new_fa->offload_failed = 0; /* Insert new entry to the list. 
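 * Aliases hanging off a leaf are kept ordered primarily by increasing
 * fa_slen, i.e. by decreasing prefix length, with entries for the same
 * table kept together; as an illustrative example, adding a /24
 * (fa_slen 8) to a leaf that already holds a /32 (fa_slen 0) and a /16
 * (fa_slen 16) for the same table places the new alias between the two
 * existing ones.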
*/ err = fib_insert_alias(t, tp, l, new_fa, fa, key); if (err) goto out_free_new_fa; /* The alias was already inserted, so the node must exist. */ l = l ? l : fib_find_node(t, &tp, key); if (WARN_ON_ONCE(!l)) { err = -ENOENT; goto out_free_new_fa; } if (fib_find_alias(&l->leaf, new_fa->fa_slen, 0, 0, tb->tb_id, true) == new_fa) { enum fib_event_type fib_event; fib_event = FIB_EVENT_ENTRY_REPLACE; err = call_fib_entry_notifiers(net, fib_event, key, plen, new_fa, extack); if (err) goto out_remove_new_fa; } if (!plen) tb->tb_num_default++; rt_cache_flush(cfg->fc_nlinfo.nl_net); rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, new_fa->tb_id, &cfg->fc_nlinfo, nlflags); succeeded: return 0; out_remove_new_fa: fib_remove_alias(t, tp, l, new_fa); out_free_new_fa: kmem_cache_free(fn_alias_kmem, new_fa); out: fib_release_info(fi); err: return err; } static inline t_key prefix_mismatch(t_key key, struct key_vector *n) { t_key prefix = n->key; return (key ^ prefix) & (prefix | -prefix); } bool fib_lookup_good_nhc(const struct fib_nh_common *nhc, int fib_flags, const struct flowi4 *flp) { if (nhc->nhc_flags & RTNH_F_DEAD) return false; if (ip_ignore_linkdown(nhc->nhc_dev) && nhc->nhc_flags & RTNH_F_LINKDOWN && !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE)) return false; if (flp->flowi4_oif && flp->flowi4_oif != nhc->nhc_oif) return false; return true; } /* should be called with rcu_read_lock */ int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp, struct fib_result *res, int fib_flags) { struct trie *t = (struct trie *) tb->tb_data; #ifdef CONFIG_IP_FIB_TRIE_STATS struct trie_use_stats __percpu *stats = t->stats; #endif const t_key key = ntohl(flp->daddr); struct key_vector *n, *pn; struct fib_alias *fa; unsigned long index; t_key cindex; pn = t->kv; cindex = 0; n = get_child_rcu(pn, cindex); if (!n) { trace_fib_table_lookup(tb->tb_id, flp, NULL, -EAGAIN); return -EAGAIN; } #ifdef CONFIG_IP_FIB_TRIE_STATS this_cpu_inc(stats->gets); #endif /* Step 1: Travel to the longest prefix match in the trie */ for (;;) { index = get_cindex(key, n); /* This bit of code is a bit tricky but it combines multiple * checks into a single check. The prefix consists of the * prefix plus zeros for the "bits" in the prefix. The index * is the difference between the key and this value. From * this we can actually derive several pieces of data. * if (index >= (1ul << bits)) * we have a mismatch in skip bits and failed * else * we know the value is cindex * * This check is safe even if bits == KEYLENGTH due to the * fact that we can only allocate a node with 32 bits if a * long is greater than 32 bits. */ if (index >= (1ul << n->bits)) break; /* we have found a leaf. Prefixes have already been compared */ if (IS_LEAF(n)) goto found; /* only record pn and cindex if we are going to be chopping * bits later. Otherwise we are just wasting cycles. */ if (n->slen > n->pos) { pn = n; cindex = index; } n = get_child_rcu(n, index); if (unlikely(!n)) goto backtrace; } /* Step 2: Sort out leaves and begin backtracing for longest prefix */ for (;;) { /* record the pointer where our next node pointer is stored */ struct key_vector __rcu **cptr = n->tnode; /* This test verifies that none of the bits that differ * between the key and the prefix exist in the region of * the lsb and higher in the prefix. */ if (unlikely(prefix_mismatch(key, n)) || (n->slen == n->pos)) goto backtrace; /* exit out and process leaf */ if (unlikely(IS_LEAF(n))) break; /* Don't bother recording parent info. 
Since we are in * prefix match mode we will have to come back to wherever * we started this traversal anyway */ while ((n = rcu_dereference(*cptr)) == NULL) { backtrace: #ifdef CONFIG_IP_FIB_TRIE_STATS if (!n) this_cpu_inc(stats->null_node_hit); #endif /* If we are at cindex 0 there are no more bits for * us to strip at this level so we must ascend back * up one level to see if there are any more bits to * be stripped there. */ while (!cindex) { t_key pkey = pn->key; /* If we don't have a parent then there is * nothing for us to do as we do not have any * further nodes to parse. */ if (IS_TRIE(pn)) { trace_fib_table_lookup(tb->tb_id, flp, NULL, -EAGAIN); return -EAGAIN; } #ifdef CONFIG_IP_FIB_TRIE_STATS this_cpu_inc(stats->backtrack); #endif /* Get Child's index */ pn = node_parent_rcu(pn); cindex = get_index(pkey, pn); } /* strip the least significant bit from the cindex */ cindex &= cindex - 1; /* grab pointer for next child node */ cptr = &pn->tnode[cindex]; } } found: /* this line carries forward the xor from earlier in the function */ index = key ^ n->key; /* Step 3: Process the leaf, if that fails fall back to backtracing */ hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) { struct fib_info *fi = fa->fa_info; struct fib_nh_common *nhc; int nhsel, err; if ((BITS_PER_LONG > KEYLENGTH) || (fa->fa_slen < KEYLENGTH)) { if (index >= (1ul << fa->fa_slen)) continue; } if (fa->fa_dscp && !fib_dscp_masked_match(fa->fa_dscp, flp)) continue; /* Paired with WRITE_ONCE() in fib_release_info() */ if (READ_ONCE(fi->fib_dead)) continue; if (fa->fa_info->fib_scope < flp->flowi4_scope) continue; fib_alias_accessed(fa); err = fib_props[fa->fa_type].error; if (unlikely(err < 0)) { out_reject: #ifdef CONFIG_IP_FIB_TRIE_STATS this_cpu_inc(stats->semantic_match_passed); #endif trace_fib_table_lookup(tb->tb_id, flp, NULL, err); return err; } if (fi->fib_flags & RTNH_F_DEAD) continue; if (unlikely(fi->nh)) { if (nexthop_is_blackhole(fi->nh)) { err = fib_props[RTN_BLACKHOLE].error; goto out_reject; } nhc = nexthop_get_nhc_lookup(fi->nh, fib_flags, flp, &nhsel); if (nhc) goto set_result; goto miss; } for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) { nhc = fib_info_nhc(fi, nhsel); if (!fib_lookup_good_nhc(nhc, fib_flags, flp)) continue; set_result: if (!(fib_flags & FIB_LOOKUP_NOREF)) refcount_inc(&fi->fib_clntref); res->prefix = htonl(n->key); res->prefixlen = KEYLENGTH - fa->fa_slen; res->nh_sel = nhsel; res->nhc = nhc; res->type = fa->fa_type; res->scope = fi->fib_scope; res->dscp = fa->fa_dscp; res->fi = fi; res->table = tb; res->fa_head = &n->leaf; #ifdef CONFIG_IP_FIB_TRIE_STATS this_cpu_inc(stats->semantic_match_passed); #endif trace_fib_table_lookup(tb->tb_id, flp, nhc, err); return err; } } miss: #ifdef CONFIG_IP_FIB_TRIE_STATS this_cpu_inc(stats->semantic_match_miss); #endif goto backtrace; } EXPORT_SYMBOL_GPL(fib_table_lookup); static void fib_remove_alias(struct trie *t, struct key_vector *tp, struct key_vector *l, struct fib_alias *old) { /* record the location of the previous list_info entry */ struct hlist_node **pprev = old->fa_list.pprev; struct fib_alias *fa = hlist_entry(pprev, typeof(*fa), fa_list.next); /* remove the fib_alias from the list */ hlist_del_rcu(&old->fa_list); /* if we emptied the list this leaf will be freed and we can sort * out parent suffix lengths as a part of trie_rebalance */ if (hlist_empty(&l->leaf)) { if (tp->slen == l->slen) node_pull_suffix(tp, tp->pos); put_child_root(tp, l->key, NULL); node_free(l); trie_rebalance(t, tp); return; } /* only access fa if it is 
pointing at the last valid hlist_node */ if (*pprev) return; /* update the trie with the latest suffix length */ l->slen = fa->fa_slen; node_pull_suffix(tp, fa->fa_slen); } static void fib_notify_alias_delete(struct net *net, u32 key, struct hlist_head *fah, struct fib_alias *fa_to_delete, struct netlink_ext_ack *extack) { struct fib_alias *fa_next, *fa_to_notify; u32 tb_id = fa_to_delete->tb_id; u8 slen = fa_to_delete->fa_slen; enum fib_event_type fib_event; /* Do not notify if we do not care about the route. */ if (fib_find_alias(fah, slen, 0, 0, tb_id, true) != fa_to_delete) return; /* Determine if the route should be replaced by the next route in the * list. */ fa_next = hlist_entry_safe(fa_to_delete->fa_list.next, struct fib_alias, fa_list); if (fa_next && fa_next->fa_slen == slen && fa_next->tb_id == tb_id) { fib_event = FIB_EVENT_ENTRY_REPLACE; fa_to_notify = fa_next; } else { fib_event = FIB_EVENT_ENTRY_DEL; fa_to_notify = fa_to_delete; } call_fib_entry_notifiers(net, fib_event, key, KEYLENGTH - slen, fa_to_notify, extack); } /* Caller must hold RTNL. */ int fib_table_delete(struct net *net, struct fib_table *tb, struct fib_config *cfg, struct netlink_ext_ack *extack) { struct trie *t = (struct trie *) tb->tb_data; struct fib_alias *fa, *fa_to_delete; struct key_vector *l, *tp; u8 plen = cfg->fc_dst_len; u8 slen = KEYLENGTH - plen; dscp_t dscp; u32 key; key = ntohl(cfg->fc_dst); l = fib_find_node(t, &tp, key); if (!l) return -ESRCH; dscp = cfg->fc_dscp; fa = fib_find_alias(&l->leaf, slen, dscp, 0, tb->tb_id, false); if (!fa) return -ESRCH; pr_debug("Deleting %08x/%d dsfield=0x%02x t=%p\n", key, plen, inet_dscp_to_dsfield(dscp), t); fa_to_delete = NULL; hlist_for_each_entry_from(fa, fa_list) { struct fib_info *fi = fa->fa_info; if ((fa->fa_slen != slen) || (fa->tb_id != tb->tb_id) || (fa->fa_dscp != dscp)) break; if ((!cfg->fc_type || fa->fa_type == cfg->fc_type) && (cfg->fc_scope == RT_SCOPE_NOWHERE || fa->fa_info->fib_scope == cfg->fc_scope) && (!cfg->fc_prefsrc || fi->fib_prefsrc == cfg->fc_prefsrc) && (!cfg->fc_protocol || fi->fib_protocol == cfg->fc_protocol) && fib_nh_match(net, cfg, fi, extack) == 0 && fib_metrics_match(cfg, fi)) { fa_to_delete = fa; break; } } if (!fa_to_delete) return -ESRCH; fib_notify_alias_delete(net, key, &l->leaf, fa_to_delete, extack); rtmsg_fib(RTM_DELROUTE, htonl(key), fa_to_delete, plen, tb->tb_id, &cfg->fc_nlinfo, 0); if (!plen) tb->tb_num_default--; fib_remove_alias(t, tp, l, fa_to_delete); if (fa_to_delete->fa_state & FA_S_ACCESSED) rt_cache_flush(cfg->fc_nlinfo.nl_net); fib_release_info(fa_to_delete->fa_info); alias_free_mem_rcu(fa_to_delete); return 0; } /* Scan for the next leaf starting at the provided key value */ static struct key_vector *leaf_walk_rcu(struct key_vector **tn, t_key key) { struct key_vector *pn, *n = *tn; unsigned long cindex; /* this loop is meant to try and find the key in the trie */ do { /* record parent and next child index */ pn = n; cindex = (key > pn->key) ? 
get_index(key, pn) : 0; if (cindex >> pn->bits) break; /* descend into the next child */ n = get_child_rcu(pn, cindex++); if (!n) break; /* guarantee forward progress on the keys */ if (IS_LEAF(n) && (n->key >= key)) goto found; } while (IS_TNODE(n)); /* this loop will search for the next leaf with a greater key */ while (!IS_TRIE(pn)) { /* if we exhausted the parent node we will need to climb */ if (cindex >= (1ul << pn->bits)) { t_key pkey = pn->key; pn = node_parent_rcu(pn); cindex = get_index(pkey, pn) + 1; continue; } /* grab the next available node */ n = get_child_rcu(pn, cindex++); if (!n) continue; /* no need to compare keys since we bumped the index */ if (IS_LEAF(n)) goto found; /* Rescan start scanning in new node */ pn = n; cindex = 0; } *tn = pn; return NULL; /* Root of trie */ found: /* if we are at the limit for keys just return NULL for the tnode */ *tn = pn; return n; } static void fib_trie_free(struct fib_table *tb) { struct trie *t = (struct trie *)tb->tb_data; struct key_vector *pn = t->kv; unsigned long cindex = 1; struct hlist_node *tmp; struct fib_alias *fa; /* walk trie in reverse order and free everything */ for (;;) { struct key_vector *n; if (!(cindex--)) { t_key pkey = pn->key; if (IS_TRIE(pn)) break; n = pn; pn = node_parent(pn); /* drop emptied tnode */ put_child_root(pn, n->key, NULL); node_free(n); cindex = get_index(pkey, pn); continue; } /* grab the next available node */ n = get_child(pn, cindex); if (!n) continue; if (IS_TNODE(n)) { /* record pn and cindex for leaf walking */ pn = n; cindex = 1ul << n->bits; continue; } hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { hlist_del_rcu(&fa->fa_list); alias_free_mem_rcu(fa); } put_child_root(pn, n->key, NULL); node_free(n); } #ifdef CONFIG_IP_FIB_TRIE_STATS free_percpu(t->stats); #endif kfree(tb); } struct fib_table *fib_trie_unmerge(struct fib_table *oldtb) { struct trie *ot = (struct trie *)oldtb->tb_data; struct key_vector *l, *tp = ot->kv; struct fib_table *local_tb; struct fib_alias *fa; struct trie *lt; t_key key = 0; if (oldtb->tb_data == oldtb->__data) return oldtb; local_tb = fib_trie_table(RT_TABLE_LOCAL, NULL); if (!local_tb) return NULL; lt = (struct trie *)local_tb->tb_data; while ((l = leaf_walk_rcu(&tp, key)) != NULL) { struct key_vector *local_l = NULL, *local_tp; hlist_for_each_entry(fa, &l->leaf, fa_list) { struct fib_alias *new_fa; if (local_tb->tb_id != fa->tb_id) continue; /* clone fa for new local table */ new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL); if (!new_fa) goto out; memcpy(new_fa, fa, sizeof(*fa)); /* insert clone into table */ if (!local_l) local_l = fib_find_node(lt, &local_tp, l->key); if (fib_insert_alias(lt, local_tp, local_l, new_fa, NULL, l->key)) { kmem_cache_free(fn_alias_kmem, new_fa); goto out; } } /* stop loop if key wrapped back to 0 */ key = l->key + 1; if (key < l->key) break; } return local_tb; out: fib_trie_free(local_tb); return NULL; } /* Caller must hold RTNL */ void fib_table_flush_external(struct fib_table *tb) { struct trie *t = (struct trie *)tb->tb_data; struct key_vector *pn = t->kv; unsigned long cindex = 1; struct hlist_node *tmp; struct fib_alias *fa; /* walk trie in reverse order */ for (;;) { unsigned char slen = 0; struct key_vector *n; if (!(cindex--)) { t_key pkey = pn->key; /* cannot resize the trie vector */ if (IS_TRIE(pn)) break; /* update the suffix to address pulled leaves */ if (pn->slen > pn->pos) update_suffix(pn); /* resize completed node */ pn = resize(t, pn); cindex = get_index(pkey, pn); continue; } /* grab the next 
available node */ n = get_child(pn, cindex); if (!n) continue; if (IS_TNODE(n)) { /* record pn and cindex for leaf walking */ pn = n; cindex = 1ul << n->bits; continue; } hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { /* if alias was cloned to local then we just * need to remove the local copy from main */ if (tb->tb_id != fa->tb_id) { hlist_del_rcu(&fa->fa_list); alias_free_mem_rcu(fa); continue; } /* record local slen */ slen = fa->fa_slen; } /* update leaf slen */ n->slen = slen; if (hlist_empty(&n->leaf)) { put_child_root(pn, n->key, NULL); node_free(n); } } } /* Caller must hold RTNL. */ int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all) { struct trie *t = (struct trie *)tb->tb_data; struct nl_info info = { .nl_net = net }; struct key_vector *pn = t->kv; unsigned long cindex = 1; struct hlist_node *tmp; struct fib_alias *fa; int found = 0; /* walk trie in reverse order */ for (;;) { unsigned char slen = 0; struct key_vector *n; if (!(cindex--)) { t_key pkey = pn->key; /* cannot resize the trie vector */ if (IS_TRIE(pn)) break; /* update the suffix to address pulled leaves */ if (pn->slen > pn->pos) update_suffix(pn); /* resize completed node */ pn = resize(t, pn); cindex = get_index(pkey, pn); continue; } /* grab the next available node */ n = get_child(pn, cindex); if (!n) continue; if (IS_TNODE(n)) { /* record pn and cindex for leaf walking */ pn = n; cindex = 1ul << n->bits; continue; } hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { struct fib_info *fi = fa->fa_info; if (!fi || tb->tb_id != fa->tb_id || (!(fi->fib_flags & RTNH_F_DEAD) && !fib_props[fa->fa_type].error)) { slen = fa->fa_slen; continue; } /* When not flushing the entire table, skip error * routes that are not marked for deletion. */ if (!flush_all && fib_props[fa->fa_type].error && !(fi->fib_flags & RTNH_F_DEAD)) { slen = fa->fa_slen; continue; } fib_notify_alias_delete(net, n->key, &n->leaf, fa, NULL); if (fi->pfsrc_removed) rtmsg_fib(RTM_DELROUTE, htonl(n->key), fa, KEYLENGTH - fa->fa_slen, tb->tb_id, &info, 0); hlist_del_rcu(&fa->fa_list); fib_release_info(fa->fa_info); alias_free_mem_rcu(fa); found++; } /* update leaf slen */ n->slen = slen; if (hlist_empty(&n->leaf)) { put_child_root(pn, n->key, NULL); node_free(n); } } pr_debug("trie_flush found=%d\n", found); return found; } /* derived from fib_trie_free */ static void __fib_info_notify_update(struct net *net, struct fib_table *tb, struct nl_info *info) { struct trie *t = (struct trie *)tb->tb_data; struct key_vector *pn = t->kv; unsigned long cindex = 1; struct fib_alias *fa; for (;;) { struct key_vector *n; if (!(cindex--)) { t_key pkey = pn->key; if (IS_TRIE(pn)) break; pn = node_parent(pn); cindex = get_index(pkey, pn); continue; } /* grab the next available node */ n = get_child(pn, cindex); if (!n) continue; if (IS_TNODE(n)) { /* record pn and cindex for leaf walking */ pn = n; cindex = 1ul << n->bits; continue; } hlist_for_each_entry(fa, &n->leaf, fa_list) { struct fib_info *fi = fa->fa_info; if (!fi || !fi->nh_updated || fa->tb_id != tb->tb_id) continue; rtmsg_fib(RTM_NEWROUTE, htonl(n->key), fa, KEYLENGTH - fa->fa_slen, tb->tb_id, info, NLM_F_REPLACE); } } } void fib_info_notify_update(struct net *net, struct nl_info *info) { unsigned int h; for (h = 0; h < FIB_TABLE_HASHSZ; h++) { struct hlist_head *head = &net->ipv4.fib_table_hash[h]; struct fib_table *tb; hlist_for_each_entry_rcu(tb, head, tb_hlist, lockdep_rtnl_is_held()) __fib_info_notify_update(net, tb, info); } } static int fib_leaf_notify(struct 
key_vector *l, struct fib_table *tb, struct notifier_block *nb, struct netlink_ext_ack *extack) { struct fib_alias *fa; int last_slen = -1; int err; hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { struct fib_info *fi = fa->fa_info; if (!fi) continue; /* local and main table can share the same trie, * so don't notify twice for the same entry. */ if (tb->tb_id != fa->tb_id) continue; if (fa->fa_slen == last_slen) continue; last_slen = fa->fa_slen; err = call_fib_entry_notifier(nb, FIB_EVENT_ENTRY_REPLACE, l->key, KEYLENGTH - fa->fa_slen, fa, extack); if (err) return err; } return 0; } static int fib_table_notify(struct fib_table *tb, struct notifier_block *nb, struct netlink_ext_ack *extack) { struct trie *t = (struct trie *)tb->tb_data; struct key_vector *l, *tp = t->kv; t_key key = 0; int err; while ((l = leaf_walk_rcu(&tp, key)) != NULL) { err = fib_leaf_notify(l, tb, nb, extack); if (err) return err; key = l->key + 1; /* stop in case of wrap around */ if (key < l->key) break; } return 0; } int fib_notify(struct net *net, struct notifier_block *nb, struct netlink_ext_ack *extack) { unsigned int h; int err; for (h = 0; h < FIB_TABLE_HASHSZ; h++) { struct hlist_head *head = &net->ipv4.fib_table_hash[h]; struct fib_table *tb; hlist_for_each_entry_rcu(tb, head, tb_hlist) { err = fib_table_notify(tb, nb, extack); if (err) return err; } } return 0; } static void __trie_free_rcu(struct rcu_head *head) { struct fib_table *tb = container_of(head, struct fib_table, rcu); #ifdef CONFIG_IP_FIB_TRIE_STATS struct trie *t = (struct trie *)tb->tb_data; if (tb->tb_data == tb->__data) free_percpu(t->stats); #endif /* CONFIG_IP_FIB_TRIE_STATS */ kfree(tb); } void fib_free_table(struct fib_table *tb) { call_rcu(&tb->rcu, __trie_free_rcu); } static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb, struct sk_buff *skb, struct netlink_callback *cb, struct fib_dump_filter *filter) { unsigned int flags = NLM_F_MULTI; __be32 xkey = htonl(l->key); int i, s_i, i_fa, s_fa, err; struct fib_alias *fa; if (filter->filter_set || !filter->dump_exceptions || !filter->dump_routes) flags |= NLM_F_DUMP_FILTERED; s_i = cb->args[4]; s_fa = cb->args[5]; i = 0; /* rcu_read_lock is hold by caller */ hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { struct fib_info *fi = fa->fa_info; if (i < s_i) goto next; i_fa = 0; if (tb->tb_id != fa->tb_id) goto next; if (filter->filter_set) { if (filter->rt_type && fa->fa_type != filter->rt_type) goto next; if ((filter->protocol && fi->fib_protocol != filter->protocol)) goto next; if (filter->dev && !fib_info_nh_uses_dev(fi, filter->dev)) goto next; } if (filter->dump_routes) { if (!s_fa) { struct fib_rt_info fri; fri.fi = fi; fri.tb_id = tb->tb_id; fri.dst = xkey; fri.dst_len = KEYLENGTH - fa->fa_slen; fri.dscp = fa->fa_dscp; fri.type = fa->fa_type; fri.offload = READ_ONCE(fa->offload); fri.trap = READ_ONCE(fa->trap); fri.offload_failed = READ_ONCE(fa->offload_failed); err = fib_dump_info(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, RTM_NEWROUTE, &fri, flags); if (err < 0) goto stop; } i_fa++; } if (filter->dump_exceptions) { err = fib_dump_info_fnhe(skb, cb, tb->tb_id, fi, &i_fa, s_fa, flags); if (err < 0) goto stop; } next: i++; } cb->args[4] = i; return skb->len; stop: cb->args[4] = i; cb->args[5] = i_fa; return err; } /* rcu_read_lock needs to be hold by caller from readside */ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb, struct netlink_callback *cb, struct fib_dump_filter *filter) { struct trie *t = (struct trie *)tb->tb_data; struct key_vector 
*l, *tp = t->kv; /* Dump starting at last key. * Note: 0.0.0.0/0 (ie default) is first key. */ int count = cb->args[2]; t_key key = cb->args[3]; /* First time here, count and key are both always 0. Count > 0 * and key == 0 means the dump has wrapped around and we are done. */ if (count && !key) return 0; while ((l = leaf_walk_rcu(&tp, key)) != NULL) { int err; err = fn_trie_dump_leaf(l, tb, skb, cb, filter); if (err < 0) { cb->args[3] = key; cb->args[2] = count; return err; } ++count; key = l->key + 1; memset(&cb->args[4], 0, sizeof(cb->args) - 4*sizeof(cb->args[0])); /* stop loop if key wrapped back to 0 */ if (key < l->key) break; } cb->args[3] = key; cb->args[2] = count; return 0; } void __init fib_trie_init(void) { fn_alias_kmem = kmem_cache_create("ip_fib_alias", sizeof(struct fib_alias), 0, SLAB_PANIC | SLAB_ACCOUNT, NULL); trie_leaf_kmem = kmem_cache_create("ip_fib_trie", LEAF_SIZE, 0, SLAB_PANIC | SLAB_ACCOUNT, NULL); } struct fib_table *fib_trie_table(u32 id, struct fib_table *alias) { struct fib_table *tb; struct trie *t; size_t sz = sizeof(*tb); if (!alias) sz += sizeof(struct trie); tb = kzalloc(sz, GFP_KERNEL); if (!tb) return NULL; tb->tb_id = id; tb->tb_num_default = 0; tb->tb_data = (alias ? alias->__data : tb->__data); if (alias) return tb; t = (struct trie *) tb->tb_data; t->kv[0].pos = KEYLENGTH; t->kv[0].slen = KEYLENGTH; #ifdef CONFIG_IP_FIB_TRIE_STATS t->stats = alloc_percpu(struct trie_use_stats); if (!t->stats) { kfree(tb); tb = NULL; } #endif return tb; } #ifdef CONFIG_PROC_FS /* Depth first Trie walk iterator */ struct fib_trie_iter { struct seq_net_private p; struct fib_table *tb; struct key_vector *tnode; unsigned int index; unsigned int depth; }; static struct key_vector *fib_trie_get_next(struct fib_trie_iter *iter) { unsigned long cindex = iter->index; struct key_vector *pn = iter->tnode; t_key pkey; pr_debug("get_next iter={node=%p index=%d depth=%d}\n", iter->tnode, iter->index, iter->depth); while (!IS_TRIE(pn)) { while (cindex < child_length(pn)) { struct key_vector *n = get_child_rcu(pn, cindex++); if (!n) continue; if (IS_LEAF(n)) { iter->tnode = pn; iter->index = cindex; } else { /* push down one level */ iter->tnode = n; iter->index = 0; ++iter->depth; } return n; } /* Current node exhausted, pop back up */ pkey = pn->key; pn = node_parent_rcu(pn); cindex = get_index(pkey, pn) + 1; --iter->depth; } /* record root node so further searches know we are done */ iter->tnode = pn; iter->index = 0; return NULL; } static struct key_vector *fib_trie_get_first(struct fib_trie_iter *iter, struct trie *t) { struct key_vector *n, *pn; if (!t) return NULL; pn = t->kv; n = rcu_dereference(pn->tnode[0]); if (!n) return NULL; if (IS_TNODE(n)) { iter->tnode = n; iter->index = 0; iter->depth = 1; } else { iter->tnode = pn; iter->index = 0; iter->depth = 0; } return n; } static void trie_collect_stats(struct trie *t, struct trie_stat *s) { struct key_vector *n; struct fib_trie_iter iter; memset(s, 0, sizeof(*s)); rcu_read_lock(); for (n = fib_trie_get_first(&iter, t); n; n = fib_trie_get_next(&iter)) { if (IS_LEAF(n)) { struct fib_alias *fa; s->leaves++; s->totdepth += iter.depth; if (iter.depth > s->maxdepth) s->maxdepth = iter.depth; hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) ++s->prefixes; } else { s->tnodes++; if (n->bits < MAX_STAT_DEPTH) s->nodesizes[n->bits]++; s->nullpointers += tn_info(n)->empty_children; } } rcu_read_unlock(); } /* * This outputs /proc/net/fib_triestats */ static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat) { unsigned 
int i, max, pointers, bytes, avdepth; if (stat->leaves) avdepth = stat->totdepth*100 / stat->leaves; else avdepth = 0; seq_printf(seq, "\tAver depth: %u.%02d\n", avdepth / 100, avdepth % 100); seq_printf(seq, "\tMax depth: %u\n", stat->maxdepth); seq_printf(seq, "\tLeaves: %u\n", stat->leaves); bytes = LEAF_SIZE * stat->leaves; seq_printf(seq, "\tPrefixes: %u\n", stat->prefixes); bytes += sizeof(struct fib_alias) * stat->prefixes; seq_printf(seq, "\tInternal nodes: %u\n\t", stat->tnodes); bytes += TNODE_SIZE(0) * stat->tnodes; max = MAX_STAT_DEPTH; while (max > 0 && stat->nodesizes[max-1] == 0) max--; pointers = 0; for (i = 1; i < max; i++) if (stat->nodesizes[i] != 0) { seq_printf(seq, " %u: %u", i, stat->nodesizes[i]); pointers += (1<<i) * stat->nodesizes[i]; } seq_putc(seq, '\n'); seq_printf(seq, "\tPointers: %u\n", pointers); bytes += sizeof(struct key_vector *) * pointers; seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers); seq_printf(seq, "Total size: %u kB\n", (bytes + 1023) / 1024); } #ifdef CONFIG_IP_FIB_TRIE_STATS static void trie_show_usage(struct seq_file *seq, const struct trie_use_stats __percpu *stats) { struct trie_use_stats s = { 0 }; int cpu; /* loop through all of the CPUs and gather up the stats */ for_each_possible_cpu(cpu) { const struct trie_use_stats *pcpu = per_cpu_ptr(stats, cpu); s.gets += pcpu->gets; s.backtrack += pcpu->backtrack; s.semantic_match_passed += pcpu->semantic_match_passed; s.semantic_match_miss += pcpu->semantic_match_miss; s.null_node_hit += pcpu->null_node_hit; s.resize_node_skipped += pcpu->resize_node_skipped; } seq_printf(seq, "\nCounters:\n---------\n"); seq_printf(seq, "gets = %u\n", s.gets); seq_printf(seq, "backtracks = %u\n", s.backtrack); seq_printf(seq, "semantic match passed = %u\n", s.semantic_match_passed); seq_printf(seq, "semantic match miss = %u\n", s.semantic_match_miss); seq_printf(seq, "null node hit= %u\n", s.null_node_hit); seq_printf(seq, "skipped node resize = %u\n\n", s.resize_node_skipped); } #endif /* CONFIG_IP_FIB_TRIE_STATS */ static void fib_table_print(struct seq_file *seq, struct fib_table *tb) { if (tb->tb_id == RT_TABLE_LOCAL) seq_puts(seq, "Local:\n"); else if (tb->tb_id == RT_TABLE_MAIN) seq_puts(seq, "Main:\n"); else seq_printf(seq, "Id %d:\n", tb->tb_id); } static int fib_triestat_seq_show(struct seq_file *seq, void *v) { struct net *net = seq->private; unsigned int h; seq_printf(seq, "Basic info: size of leaf:" " %zd bytes, size of tnode: %zd bytes.\n", LEAF_SIZE, TNODE_SIZE(0)); rcu_read_lock(); for (h = 0; h < FIB_TABLE_HASHSZ; h++) { struct hlist_head *head = &net->ipv4.fib_table_hash[h]; struct fib_table *tb; hlist_for_each_entry_rcu(tb, head, tb_hlist) { struct trie *t = (struct trie *) tb->tb_data; struct trie_stat stat; if (!t) continue; fib_table_print(seq, tb); trie_collect_stats(t, &stat); trie_show_stats(seq, &stat); #ifdef CONFIG_IP_FIB_TRIE_STATS trie_show_usage(seq, t->stats); #endif } cond_resched_rcu(); } rcu_read_unlock(); return 0; } static struct key_vector *fib_trie_get_idx(struct seq_file *seq, loff_t pos) { struct fib_trie_iter *iter = seq->private; struct net *net = seq_file_net(seq); loff_t idx = 0; unsigned int h; for (h = 0; h < FIB_TABLE_HASHSZ; h++) { struct hlist_head *head = &net->ipv4.fib_table_hash[h]; struct fib_table *tb; hlist_for_each_entry_rcu(tb, head, tb_hlist) { struct key_vector *n; for (n = fib_trie_get_first(iter, (struct trie *) tb->tb_data); n; n = fib_trie_get_next(iter)) if (pos == idx++) { iter->tb = tb; return n; } } } return NULL; } static void 
*fib_trie_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { rcu_read_lock(); return fib_trie_get_idx(seq, *pos); } static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct fib_trie_iter *iter = seq->private; struct net *net = seq_file_net(seq); struct fib_table *tb = iter->tb; struct hlist_node *tb_node; unsigned int h; struct key_vector *n; ++*pos; /* next node in same table */ n = fib_trie_get_next(iter); if (n) return n; /* walk rest of this hash chain */ h = tb->tb_id & (FIB_TABLE_HASHSZ - 1); while ((tb_node = rcu_dereference(hlist_next_rcu(&tb->tb_hlist)))) { tb = hlist_entry(tb_node, struct fib_table, tb_hlist); n = fib_trie_get_first(iter, (struct trie *) tb->tb_data); if (n) goto found; } /* new hash chain */ while (++h < FIB_TABLE_HASHSZ) { struct hlist_head *head = &net->ipv4.fib_table_hash[h]; hlist_for_each_entry_rcu(tb, head, tb_hlist) { n = fib_trie_get_first(iter, (struct trie *) tb->tb_data); if (n) goto found; } } return NULL; found: iter->tb = tb; return n; } static void fib_trie_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { rcu_read_unlock(); } static void seq_indent(struct seq_file *seq, int n) { while (n-- > 0) seq_puts(seq, " "); } static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s) { switch (s) { case RT_SCOPE_UNIVERSE: return "universe"; case RT_SCOPE_SITE: return "site"; case RT_SCOPE_LINK: return "link"; case RT_SCOPE_HOST: return "host"; case RT_SCOPE_NOWHERE: return "nowhere"; default: snprintf(buf, len, "scope=%d", s); return buf; } } static const char *const rtn_type_names[__RTN_MAX] = { [RTN_UNSPEC] = "UNSPEC", [RTN_UNICAST] = "UNICAST", [RTN_LOCAL] = "LOCAL", [RTN_BROADCAST] = "BROADCAST", [RTN_ANYCAST] = "ANYCAST", [RTN_MULTICAST] = "MULTICAST", [RTN_BLACKHOLE] = "BLACKHOLE", [RTN_UNREACHABLE] = "UNREACHABLE", [RTN_PROHIBIT] = "PROHIBIT", [RTN_THROW] = "THROW", [RTN_NAT] = "NAT", [RTN_XRESOLVE] = "XRESOLVE", }; static inline const char *rtn_type(char *buf, size_t len, unsigned int t) { if (t < __RTN_MAX && rtn_type_names[t]) return rtn_type_names[t]; snprintf(buf, len, "type %u", t); return buf; } /* Pretty print the trie */ static int fib_trie_seq_show(struct seq_file *seq, void *v) { const struct fib_trie_iter *iter = seq->private; struct key_vector *n = v; if (IS_TRIE(node_parent_rcu(n))) fib_table_print(seq, iter->tb); if (IS_TNODE(n)) { __be32 prf = htonl(n->key); seq_indent(seq, iter->depth-1); seq_printf(seq, " +-- %pI4/%zu %u %u %u\n", &prf, KEYLENGTH - n->pos - n->bits, n->bits, tn_info(n)->full_children, tn_info(n)->empty_children); } else { __be32 val = htonl(n->key); struct fib_alias *fa; seq_indent(seq, iter->depth); seq_printf(seq, " |-- %pI4\n", &val); hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) { char buf1[32], buf2[32]; seq_indent(seq, iter->depth + 1); seq_printf(seq, " /%zu %s %s", KEYLENGTH - fa->fa_slen, rtn_scope(buf1, sizeof(buf1), fa->fa_info->fib_scope), rtn_type(buf2, sizeof(buf2), fa->fa_type)); if (fa->fa_dscp) seq_printf(seq, " tos=%d", inet_dscp_to_dsfield(fa->fa_dscp)); seq_putc(seq, '\n'); } } return 0; } static const struct seq_operations fib_trie_seq_ops = { .start = fib_trie_seq_start, .next = fib_trie_seq_next, .stop = fib_trie_seq_stop, .show = fib_trie_seq_show, }; struct fib_route_iter { struct seq_net_private p; struct fib_table *main_tb; struct key_vector *tnode; loff_t pos; t_key key; }; static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter, loff_t pos) { struct key_vector *l, **tp = &iter->tnode; t_key key; /* use 
cached location of previously found key */ if (iter->pos > 0 && pos >= iter->pos) { key = iter->key; } else { iter->pos = 1; key = 0; } pos -= iter->pos; while ((l = leaf_walk_rcu(tp, key)) && (pos-- > 0)) { key = l->key + 1; iter->pos++; l = NULL; /* handle unlikely case of a key wrap */ if (!key) break; } if (l) iter->key = l->key; /* remember it */ else iter->pos = 0; /* forget it */ return l; } static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { struct fib_route_iter *iter = seq->private; struct fib_table *tb; struct trie *t; rcu_read_lock(); tb = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN); if (!tb) return NULL; iter->main_tb = tb; t = (struct trie *)tb->tb_data; iter->tnode = t->kv; if (*pos != 0) return fib_route_get_idx(iter, *pos); iter->pos = 0; iter->key = KEY_MAX; return SEQ_START_TOKEN; } static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct fib_route_iter *iter = seq->private; struct key_vector *l = NULL; t_key key = iter->key + 1; ++*pos; /* only allow key of 0 for start of sequence */ if ((v == SEQ_START_TOKEN) || key) l = leaf_walk_rcu(&iter->tnode, key); if (l) { iter->key = l->key; iter->pos++; } else { iter->pos = 0; } return l; } static void fib_route_seq_stop(struct seq_file *seq, void *v) __releases(RCU) { rcu_read_unlock(); } static unsigned int fib_flag_trans(int type, __be32 mask, struct fib_info *fi) { unsigned int flags = 0; if (type == RTN_UNREACHABLE || type == RTN_PROHIBIT) flags = RTF_REJECT; if (fi) { const struct fib_nh_common *nhc = fib_info_nhc(fi, 0); if (nhc->nhc_gw.ipv4) flags |= RTF_GATEWAY; } if (mask == htonl(0xFFFFFFFF)) flags |= RTF_HOST; flags |= RTF_UP; return flags; } /* * This outputs /proc/net/route. * The format of the file is not supposed to be changed * and needs to be same as fib_hash output to avoid breaking * legacy utilities */ static int fib_route_seq_show(struct seq_file *seq, void *v) { struct fib_route_iter *iter = seq->private; struct fib_table *tb = iter->main_tb; struct fib_alias *fa; struct key_vector *l = v; __be32 prefix; if (v == SEQ_START_TOKEN) { seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway " "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU" "\tWindow\tIRTT"); return 0; } prefix = htonl(l->key); hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { struct fib_info *fi = fa->fa_info; __be32 mask = inet_make_mask(KEYLENGTH - fa->fa_slen); unsigned int flags = fib_flag_trans(fa->fa_type, mask, fi); if ((fa->fa_type == RTN_BROADCAST) || (fa->fa_type == RTN_MULTICAST)) continue; if (fa->tb_id != tb->tb_id) continue; seq_setwidth(seq, 127); if (fi) { struct fib_nh_common *nhc = fib_info_nhc(fi, 0); __be32 gw = 0; if (nhc->nhc_gw_family == AF_INET) gw = nhc->nhc_gw.ipv4; seq_printf(seq, "%s\t%08X\t%08X\t%04X\t%d\t%u\t" "%u\t%08X\t%d\t%u\t%u", nhc->nhc_dev ? nhc->nhc_dev->name : "*", prefix, gw, flags, 0, 0, fi->fib_priority, mask, (fi->fib_advmss ? 
fi->fib_advmss + 40 : 0), fi->fib_window, fi->fib_rtt >> 3); } else { seq_printf(seq, "*\t%08X\t%08X\t%04X\t%d\t%u\t" "%u\t%08X\t%d\t%u\t%u", prefix, 0, flags, 0, 0, 0, mask, 0, 0, 0); } seq_pad(seq, '\n'); } return 0; } static const struct seq_operations fib_route_seq_ops = { .start = fib_route_seq_start, .next = fib_route_seq_next, .stop = fib_route_seq_stop, .show = fib_route_seq_show, }; int __net_init fib_proc_init(struct net *net) { if (!proc_create_net("fib_trie", 0444, net->proc_net, &fib_trie_seq_ops, sizeof(struct fib_trie_iter))) goto out1; if (!proc_create_net_single("fib_triestat", 0444, net->proc_net, fib_triestat_seq_show, NULL)) goto out2; if (!proc_create_net("route", 0444, net->proc_net, &fib_route_seq_ops, sizeof(struct fib_route_iter))) goto out3; return 0; out3: remove_proc_entry("fib_triestat", net->proc_net); out2: remove_proc_entry("fib_trie", net->proc_net); out1: return -ENOMEM; } void __net_exit fib_proc_exit(struct net *net) { remove_proc_entry("fib_trie", net->proc_net); remove_proc_entry("fib_triestat", net->proc_net); remove_proc_entry("route", net->proc_net); } #endif /* CONFIG_PROC_FS */
4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 4264 4265 4266 4267 4268 4269 4270 4271 4272 4273 4274 4275 4276 4277 4278 4279 4280 4281 4282 4283 4284 4285 4286 4287 4288 4289 4290 4291 4292 4293 4294 4295 4296 4297 4298 4299 4300 4301 4302 4303 4304 4305 4306 4307 4308 4309 4310 4311 4312 4313 4314 4315 4316 4317 4318 4319 4320 4321 4322 4323 4324 4325 4326 4327 4328 4329 4330 4331 4332 4333 4334 4335 4336 4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353 4354 4355 4356 4357 4358 4359 4360 4361 4362 4363 4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4378 4379 4380 4381 4382 4383 4384 4385 4386 4387 4388 4389 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420 4421 4422 4423 4424 4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449 4450 4451 4452 4453 4454 4455 4456 4457 4458 4459 4460 4461 4462 4463 4464 4465 4466 4467 4468 4469 4470 // SPDX-License-Identifier: GPL-2.0 /* Multipath TCP * * Copyright (c) 2017 - 2019, Intel Corporation. */ #define pr_fmt(fmt) "MPTCP: " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/sched/signal.h> #include <linux/atomic.h> #include <net/aligned_data.h> #include <net/rps.h> #include <net/sock.h> #include <net/inet_common.h> #include <net/inet_hashtables.h> #include <net/protocol.h> #include <net/tcp_states.h> #if IS_ENABLED(CONFIG_MPTCP_IPV6) #include <net/transp_v6.h> #endif #include <net/mptcp.h> #include <net/hotdata.h> #include <net/xfrm.h> #include <asm/ioctls.h> #include "protocol.h" #include "mib.h" #define CREATE_TRACE_POINTS #include <trace/events/mptcp.h> #if IS_ENABLED(CONFIG_MPTCP_IPV6) struct mptcp6_sock { struct mptcp_sock msk; struct ipv6_pinfo np; }; #endif enum { MPTCP_CMSG_TS = BIT(0), MPTCP_CMSG_INQ = BIT(1), }; static struct percpu_counter mptcp_sockets_allocated ____cacheline_aligned_in_smp; static void __mptcp_destroy_sock(struct sock *sk); static void mptcp_check_send_data_fin(struct sock *sk); DEFINE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions) = { .bh_lock = INIT_LOCAL_LOCK(bh_lock), }; static struct net_device *mptcp_napi_dev; /* Returns end sequence number of the receiver's advertised window */ static u64 mptcp_wnd_end(const struct mptcp_sock *msk) { return READ_ONCE(msk->wnd_end); } static const struct proto_ops *mptcp_fallback_tcp_ops(const struct sock *sk) { unsigned short family = READ_ONCE(sk->sk_family); #if IS_ENABLED(CONFIG_MPTCP_IPV6) if (family == AF_INET6) return &inet6_stream_ops; #endif WARN_ON_ONCE(family != AF_INET); return &inet_stream_ops; } bool __mptcp_try_fallback(struct mptcp_sock *msk, int fb_mib) { struct net *net = sock_net((struct sock *)msk); if (__mptcp_check_fallback(msk)) return true; /* The caller possibly is not holding the msk socket lock, but * in the fallback case only the current subflow is touching * the OoO queue. 
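__mptcp_try_fallback() amounts to a small guarded state change: fall back only while the out-of-order queue is empty and infinite fallback has not been disallowed, with the flag updates done under one lock so racing subflows see a consistent answer. A minimal userspace sketch under those assumptions, with a hypothetical struct msk_state holding just the fields involved:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct msk_state {
	pthread_mutex_t fallback_lock;
	bool ooo_queue_empty;		/* no reordered data parked at msk level */
	bool allow_infinite_fallback;	/* cleared once another subflow has joined */
	bool allow_subflows;
	bool fallback_done;
};

static bool try_fallback(struct msk_state *msk)
{
	if (msk->fallback_done)			/* already fell back earlier */
		return true;
	if (!msk->ooo_queue_empty)		/* reordered data would be lost */
		return false;

	pthread_mutex_lock(&msk->fallback_lock);
	if (!msk->allow_infinite_fallback) {
		pthread_mutex_unlock(&msk->fallback_lock);
		return false;
	}
	msk->allow_subflows = false;		/* no new subflows after fallback */
	msk->fallback_done = true;
	pthread_mutex_unlock(&msk->fallback_lock);
	return true;
}

int main(void)
{
	struct msk_state msk = {
		.fallback_lock = PTHREAD_MUTEX_INITIALIZER,
		.ooo_queue_empty = true,
		.allow_infinite_fallback = true,
		.allow_subflows = true,
	};

	printf("fallback: %s\n", try_fallback(&msk) ? "done" : "refused");
	return 0;
}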
*/ if (!RB_EMPTY_ROOT(&msk->out_of_order_queue)) return false; spin_lock_bh(&msk->fallback_lock); if (!msk->allow_infinite_fallback) { spin_unlock_bh(&msk->fallback_lock); return false; } msk->allow_subflows = false; set_bit(MPTCP_FALLBACK_DONE, &msk->flags); __MPTCP_INC_STATS(net, fb_mib); spin_unlock_bh(&msk->fallback_lock); return true; } static int __mptcp_socket_create(struct mptcp_sock *msk) { struct mptcp_subflow_context *subflow; struct sock *sk = (struct sock *)msk; struct socket *ssock; int err; err = mptcp_subflow_create_socket(sk, sk->sk_family, &ssock); if (err) return err; msk->scaling_ratio = tcp_sk(ssock->sk)->scaling_ratio; WRITE_ONCE(msk->first, ssock->sk); subflow = mptcp_subflow_ctx(ssock->sk); list_add(&subflow->node, &msk->conn_list); sock_hold(ssock->sk); subflow->request_mptcp = 1; subflow->subflow_id = msk->subflow_id++; /* This is the first subflow, always with id 0 */ WRITE_ONCE(subflow->local_id, 0); mptcp_sock_graft(msk->first, sk->sk_socket); iput(SOCK_INODE(ssock)); return 0; } /* If the MPC handshake is not started, returns the first subflow, * eventually allocating it. */ struct sock *__mptcp_nmpc_sk(struct mptcp_sock *msk) { struct sock *sk = (struct sock *)msk; int ret; if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) return ERR_PTR(-EINVAL); if (!msk->first) { ret = __mptcp_socket_create(msk); if (ret) return ERR_PTR(ret); } return msk->first; } static void mptcp_drop(struct sock *sk, struct sk_buff *skb) { sk_drops_skbadd(sk, skb); __kfree_skb(skb); } static bool __mptcp_try_coalesce(struct sock *sk, struct sk_buff *to, struct sk_buff *from, bool *fragstolen, int *delta) { int limit = READ_ONCE(sk->sk_rcvbuf); if (unlikely(MPTCP_SKB_CB(to)->cant_coalesce) || MPTCP_SKB_CB(from)->offset || ((to->len + from->len) > (limit >> 3)) || !skb_try_coalesce(to, from, fragstolen, delta)) return false; pr_debug("colesced seq %llx into %llx new len %d new end seq %llx\n", MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq, to->len, MPTCP_SKB_CB(from)->end_seq); MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq; return true; } static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to, struct sk_buff *from) { bool fragstolen; int delta; if (!__mptcp_try_coalesce(sk, to, from, &fragstolen, &delta)) return false; /* note the fwd memory can reach a negative value after accounting * for the delta, but the later skb free will restore a non * negative one */ atomic_add(delta, &sk->sk_rmem_alloc); sk_mem_charge(sk, delta); kfree_skb_partial(from, fragstolen); return true; } static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to, struct sk_buff *from) { if (MPTCP_SKB_CB(from)->map_seq != MPTCP_SKB_CB(to)->end_seq) return false; return mptcp_try_coalesce((struct sock *)msk, to, from); } /* "inspired" by tcp_rcvbuf_grow(), main difference: * - mptcp does not maintain a msk-level window clamp * - returns true when the receive buffer is actually updated */ static bool mptcp_rcvbuf_grow(struct sock *sk, u32 newval) { struct mptcp_sock *msk = mptcp_sk(sk); const struct net *net = sock_net(sk); u32 rcvwin, rcvbuf, cap, oldval; u64 grow; oldval = msk->rcvq_space.space; msk->rcvq_space.space = newval; if (!READ_ONCE(net->ipv4.sysctl_tcp_moderate_rcvbuf) || (sk->sk_userlocks & SOCK_RCVBUF_LOCK)) return false; /* DRS is always one RTT late. */ rcvwin = newval << 1; /* slow start: allow the sender to double its rate. 
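The buffer sizing in mptcp_rcvbuf_grow() is plain arithmetic: start from twice the newly measured per-RTT amount (DRS runs one RTT late), add growth proportional to the increase so a sender in slow start can keep doubling, convert the window to buffer space and clamp to the sysctl limit. A minimal userspace sketch of that arithmetic; space_from_win() is a hypothetical stand-in for mptcp_space_from_win() that assumes a fixed 4/3 truesize overhead:

#include <stdint.h>
#include <stdio.h>

static uint64_t space_from_win(uint64_t win)
{
	return win + win / 3;			/* assumed skb truesize overhead */
}

static uint32_t rcvbuf_grow(uint32_t oldval, uint32_t newval,
			    uint32_t cur_rcvbuf, uint32_t cap)
{
	uint64_t rcvwin, grow, rcvbuf;

	if (oldval == 0 || newval <= oldval)	/* sketch-only guard */
		return cur_rcvbuf;

	rcvwin = (uint64_t)newval << 1;		/* DRS is one RTT late */
	grow = rcvwin * (newval - oldval);
	grow /= oldval;				/* growth proportional to the increase */
	rcvwin += grow << 1;			/* slow start: sender may double its rate */

	rcvbuf = space_from_win(rcvwin);
	if (rcvbuf > cap)
		rcvbuf = cap;
	return rcvbuf > cur_rcvbuf ? (uint32_t)rcvbuf : cur_rcvbuf;
}

int main(void)
{
	/* 1 MB delivered last RTT, 1.5 MB this RTT, 2 MB current buffer, 4 MB cap */
	printf("new rcvbuf: %u\n",
	       (unsigned)rcvbuf_grow(1 << 20, 3 << 19, 2 << 20, 4 << 20));
	return 0;
}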
*/ grow = (u64)rcvwin * (newval - oldval); do_div(grow, oldval); rcvwin += grow << 1; if (!RB_EMPTY_ROOT(&msk->out_of_order_queue)) rcvwin += MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq - msk->ack_seq; cap = READ_ONCE(net->ipv4.sysctl_tcp_rmem[2]); rcvbuf = min_t(u32, mptcp_space_from_win(sk, rcvwin), cap); if (rcvbuf > sk->sk_rcvbuf) { WRITE_ONCE(sk->sk_rcvbuf, rcvbuf); return true; } return false; } /* "inspired" by tcp_data_queue_ofo(), main differences: * - use mptcp seqs * - don't cope with sacks */ static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb) { struct sock *sk = (struct sock *)msk; struct rb_node **p, *parent; u64 seq, end_seq, max_seq; struct sk_buff *skb1; seq = MPTCP_SKB_CB(skb)->map_seq; end_seq = MPTCP_SKB_CB(skb)->end_seq; max_seq = atomic64_read(&msk->rcv_wnd_sent); pr_debug("msk=%p seq=%llx limit=%llx empty=%d\n", msk, seq, max_seq, RB_EMPTY_ROOT(&msk->out_of_order_queue)); if (after64(end_seq, max_seq)) { /* out of window */ mptcp_drop(sk, skb); pr_debug("oow by %lld, rcv_wnd_sent %llu\n", (unsigned long long)end_seq - (unsigned long)max_seq, (unsigned long long)atomic64_read(&msk->rcv_wnd_sent)); MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_NODSSWINDOW); return; } p = &msk->out_of_order_queue.rb_node; MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUE); if (RB_EMPTY_ROOT(&msk->out_of_order_queue)) { rb_link_node(&skb->rbnode, NULL, p); rb_insert_color(&skb->rbnode, &msk->out_of_order_queue); msk->ooo_last_skb = skb; goto end; } /* with 2 subflows, adding at end of ooo queue is quite likely * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup. */ if (mptcp_ooo_try_coalesce(msk, msk->ooo_last_skb, skb)) { MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE); MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL); return; } /* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */ if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) { MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL); parent = &msk->ooo_last_skb->rbnode; p = &parent->rb_right; goto insert; } /* Find place to insert this segment. Handle overlaps on the way. */ parent = NULL; while (*p) { parent = *p; skb1 = rb_to_skb(parent); if (before64(seq, MPTCP_SKB_CB(skb1)->map_seq)) { p = &parent->rb_left; continue; } if (before64(seq, MPTCP_SKB_CB(skb1)->end_seq)) { if (!after64(end_seq, MPTCP_SKB_CB(skb1)->end_seq)) { /* All the bits are present. Drop. */ mptcp_drop(sk, skb); MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); return; } if (after64(seq, MPTCP_SKB_CB(skb1)->map_seq)) { /* partial overlap: * | skb | * | skb1 | * continue traversing */ } else { /* skb's seq == skb1's seq and skb covers skb1. * Replace skb1 with skb. */ rb_replace_node(&skb1->rbnode, &skb->rbnode, &msk->out_of_order_queue); mptcp_drop(sk, skb1); MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); goto merge_right; } } else if (mptcp_ooo_try_coalesce(msk, skb1, skb)) { MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE); return; } p = &parent->rb_right; } insert: /* Insert segment into RB tree. */ rb_link_node(&skb->rbnode, parent, p); rb_insert_color(&skb->rbnode, &msk->out_of_order_queue); merge_right: /* Remove other segments covered by skb. */ while ((skb1 = skb_rb_next(skb)) != NULL) { if (before64(end_seq, MPTCP_SKB_CB(skb1)->end_seq)) break; rb_erase(&skb1->rbnode, &msk->out_of_order_queue); mptcp_drop(sk, skb1); MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); } /* If there is no skb after us, we are the last_skb ! 
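The out-of-order path above leans on a few simple tests: wraparound-safe 64-bit sequence comparisons, a tail fast path that coalesces a segment starting exactly where the last queued one ends, and dropping segments entirely covered by data already queued. A minimal userspace sketch of those tests, with a plain struct seg standing in for the sk_buff and its MPTCP control block:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct seg { uint64_t map_seq, end_seq; };

static bool before64(uint64_t a, uint64_t b) { return (int64_t)(a - b) < 0; }
static bool after64(uint64_t a, uint64_t b)  { return before64(b, a); }

/* contiguous with the queue tail: extend it instead of inserting */
static bool try_coalesce_tail(struct seg *tail, const struct seg *skb)
{
	if (skb->map_seq != tail->end_seq)
		return false;
	tail->end_seq = skb->end_seq;
	return true;
}

/* fully covered by an already queued segment: duplicate data, drop it */
static bool is_duplicate(const struct seg *queued, const struct seg *skb)
{
	return !before64(skb->map_seq, queued->map_seq) &&
	       !after64(skb->end_seq, queued->end_seq);
}

int main(void)
{
	struct seg tail = { 1000, 2000 };
	struct seg next = { 2000, 2500 };	/* contiguous: coalesced */
	struct seg dup  = { 1200, 1800 };	/* inside tail: dropped */
	bool ok = try_coalesce_tail(&tail, &next);

	printf("coalesced: %d, tail end now %llu\n",
	       ok, (unsigned long long)tail.end_seq);
	printf("duplicate: %d\n", is_duplicate(&tail, &dup));
	return 0;
}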
*/ if (!skb1) msk->ooo_last_skb = skb; end: skb_condense(skb); skb_set_owner_r(skb, sk); /* do not grow rcvbuf for not-yet-accepted or orphaned sockets. */ if (sk->sk_socket) mptcp_rcvbuf_grow(sk, msk->rcvq_space.space); } static void mptcp_init_skb(struct sock *ssk, struct sk_buff *skb, int offset, int copy_len) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); bool has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp; /* the skb map_seq accounts for the skb offset: * mptcp_subflow_get_mapped_dsn() is based on the current tp->copied_seq * value */ MPTCP_SKB_CB(skb)->map_seq = mptcp_subflow_get_mapped_dsn(subflow); MPTCP_SKB_CB(skb)->end_seq = MPTCP_SKB_CB(skb)->map_seq + copy_len; MPTCP_SKB_CB(skb)->offset = offset; MPTCP_SKB_CB(skb)->has_rxtstamp = has_rxtstamp; MPTCP_SKB_CB(skb)->cant_coalesce = 0; __skb_unlink(skb, &ssk->sk_receive_queue); skb_ext_reset(skb); skb_dst_drop(skb); } static bool __mptcp_move_skb(struct sock *sk, struct sk_buff *skb) { u64 copy_len = MPTCP_SKB_CB(skb)->end_seq - MPTCP_SKB_CB(skb)->map_seq; struct mptcp_sock *msk = mptcp_sk(sk); struct sk_buff *tail; mptcp_borrow_fwdmem(sk, skb); if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) { /* in sequence */ msk->bytes_received += copy_len; WRITE_ONCE(msk->ack_seq, msk->ack_seq + copy_len); tail = skb_peek_tail(&sk->sk_receive_queue); if (tail && mptcp_try_coalesce(sk, tail, skb)) return true; skb_set_owner_r(skb, sk); __skb_queue_tail(&sk->sk_receive_queue, skb); return true; } else if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) { mptcp_data_queue_ofo(msk, skb); return false; } /* old data, keep it simple and drop the whole pkt, sender * will retransmit as needed, if needed. */ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); mptcp_drop(sk, skb); return false; } static void mptcp_stop_rtx_timer(struct sock *sk) { sk_stop_timer(sk, &sk->mptcp_retransmit_timer); mptcp_sk(sk)->timer_ival = 0; } static void mptcp_close_wake_up(struct sock *sk) { if (sock_flag(sk, SOCK_DEAD)) return; sk->sk_state_change(sk); if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE) sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); else sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); } static void mptcp_shutdown_subflows(struct mptcp_sock *msk) { struct mptcp_subflow_context *subflow; mptcp_for_each_subflow(msk, subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); bool slow; slow = lock_sock_fast(ssk); tcp_shutdown(ssk, SEND_SHUTDOWN); unlock_sock_fast(ssk, slow); } } /* called under the msk socket lock */ static bool mptcp_pending_data_fin_ack(struct sock *sk) { struct mptcp_sock *msk = mptcp_sk(sk); return ((1 << sk->sk_state) & (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK)) && msk->write_seq == READ_ONCE(msk->snd_una); } static void mptcp_check_data_fin_ack(struct sock *sk) { struct mptcp_sock *msk = mptcp_sk(sk); /* Look for an acknowledged DATA_FIN */ if (mptcp_pending_data_fin_ack(sk)) { WRITE_ONCE(msk->snd_data_fin_enable, 0); switch (sk->sk_state) { case TCP_FIN_WAIT1: mptcp_set_state(sk, TCP_FIN_WAIT2); break; case TCP_CLOSING: case TCP_LAST_ACK: mptcp_shutdown_subflows(msk); mptcp_set_state(sk, TCP_CLOSE); break; } mptcp_close_wake_up(sk); } } /* can be called with no lock acquired */ static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq) { struct mptcp_sock *msk = mptcp_sk(sk); if (READ_ONCE(msk->rcv_data_fin) && ((1 << inet_sk_state_load(sk)) & (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) { u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq); if (READ_ONCE(msk->ack_seq) == 
rcv_data_fin_seq) { if (seq) *seq = rcv_data_fin_seq; return true; } } return false; } static void mptcp_set_datafin_timeout(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); u32 retransmits; retransmits = min_t(u32, icsk->icsk_retransmits, ilog2(TCP_RTO_MAX / TCP_RTO_MIN)); mptcp_sk(sk)->timer_ival = TCP_RTO_MIN << retransmits; } static void __mptcp_set_timeout(struct sock *sk, long tout) { mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN; } static long mptcp_timeout_from_subflow(const struct mptcp_subflow_context *subflow) { const struct sock *ssk = mptcp_subflow_tcp_sock(subflow); return inet_csk(ssk)->icsk_pending && !subflow->stale_count ? tcp_timeout_expires(ssk) - jiffies : 0; } static void mptcp_set_timeout(struct sock *sk) { struct mptcp_subflow_context *subflow; long tout = 0; mptcp_for_each_subflow(mptcp_sk(sk), subflow) tout = max(tout, mptcp_timeout_from_subflow(subflow)); __mptcp_set_timeout(sk, tout); } static inline bool tcp_can_send_ack(const struct sock *ssk) { return !((1 << inet_sk_state_load(ssk)) & (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT | TCPF_CLOSE | TCPF_LISTEN)); } void __mptcp_subflow_send_ack(struct sock *ssk) { if (tcp_can_send_ack(ssk)) tcp_send_ack(ssk); } static void mptcp_subflow_send_ack(struct sock *ssk) { bool slow; slow = lock_sock_fast(ssk); __mptcp_subflow_send_ack(ssk); unlock_sock_fast(ssk, slow); } static void mptcp_send_ack(struct mptcp_sock *msk) { struct mptcp_subflow_context *subflow; mptcp_for_each_subflow(msk, subflow) mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow)); } static void mptcp_subflow_cleanup_rbuf(struct sock *ssk, int copied) { bool slow; slow = lock_sock_fast(ssk); if (tcp_can_send_ack(ssk)) tcp_cleanup_rbuf(ssk, copied); unlock_sock_fast(ssk, slow); } static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty) { const struct inet_connection_sock *icsk = inet_csk(ssk); u8 ack_pending = READ_ONCE(icsk->icsk_ack.pending); const struct tcp_sock *tp = tcp_sk(ssk); return (ack_pending & ICSK_ACK_SCHED) && ((READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->rcv_wup) > READ_ONCE(icsk->icsk_ack.rcv_mss)) || (rx_empty && ack_pending & (ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED))); } static void mptcp_cleanup_rbuf(struct mptcp_sock *msk, int copied) { int old_space = READ_ONCE(msk->old_wspace); struct mptcp_subflow_context *subflow; struct sock *sk = (struct sock *)msk; int space = __mptcp_space(sk); bool cleanup, rx_empty; cleanup = (space > 0) && (space >= (old_space << 1)) && copied; rx_empty = !sk_rmem_alloc_get(sk) && copied; mptcp_for_each_subflow(msk, subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty)) mptcp_subflow_cleanup_rbuf(ssk, copied); } } static void mptcp_check_data_fin(struct sock *sk) { struct mptcp_sock *msk = mptcp_sk(sk); u64 rcv_data_fin_seq; /* Need to ack a DATA_FIN received from a peer while this side * of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2. * msk->rcv_data_fin was set when parsing the incoming options * at the subflow level and the msk lock was not held, so this * is the first opportunity to act on the DATA_FIN and change * the msk state. * * If we are caught up to the sequence number of the incoming * DATA_FIN, send the DATA_ACK now and do state transition. If * not caught up, do nothing and let the recv code send DATA_ACK * when catching up. 
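The state transitions that follow from an in-order DATA_FIN are easiest to see in isolation: the peer has closed its write side, so ESTABLISHED moves to CLOSE_WAIT, FIN_WAIT1 to CLOSING, and FIN_WAIT2 to CLOSE (the real code also bumps ack_seq by one and sets RCV_SHUTDOWN). A minimal userspace sketch of just that switch:

#include <stdio.h>

enum state { ESTABLISHED, FIN_WAIT1, FIN_WAIT2, CLOSE_WAIT, CLOSING, CLOSE };

static const char *name(enum state s)
{
	static const char *n[] = { "ESTABLISHED", "FIN_WAIT1", "FIN_WAIT2",
				   "CLOSE_WAIT", "CLOSING", "CLOSE" };
	return n[s];
}

static enum state on_data_fin(enum state s)
{
	switch (s) {
	case ESTABLISHED:	return CLOSE_WAIT;	/* peer closed first */
	case FIN_WAIT1:		return CLOSING;		/* simultaneous close */
	case FIN_WAIT2:		return CLOSE;		/* our FIN already acked */
	default:		return s;		/* not expected here */
	}
}

int main(void)
{
	enum state from[] = { ESTABLISHED, FIN_WAIT1, FIN_WAIT2 };

	for (unsigned i = 0; i < sizeof(from) / sizeof(from[0]); i++)
		printf("%s -> %s\n", name(from[i]), name(on_data_fin(from[i])));
	return 0;
}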
*/ if (mptcp_pending_data_fin(sk, &rcv_data_fin_seq)) { WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1); WRITE_ONCE(msk->rcv_data_fin, 0); WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | RCV_SHUTDOWN); smp_mb__before_atomic(); /* SHUTDOWN must be visible first */ switch (sk->sk_state) { case TCP_ESTABLISHED: mptcp_set_state(sk, TCP_CLOSE_WAIT); break; case TCP_FIN_WAIT1: mptcp_set_state(sk, TCP_CLOSING); break; case TCP_FIN_WAIT2: mptcp_shutdown_subflows(msk); mptcp_set_state(sk, TCP_CLOSE); break; default: /* Other states not expected */ WARN_ON_ONCE(1); break; } if (!__mptcp_check_fallback(msk)) mptcp_send_ack(msk); mptcp_close_wake_up(sk); } } static void mptcp_dss_corruption(struct mptcp_sock *msk, struct sock *ssk) { if (!mptcp_try_fallback(ssk, MPTCP_MIB_DSSCORRUPTIONFALLBACK)) { MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSCORRUPTIONRESET); mptcp_subflow_reset(ssk); } } static void __mptcp_add_backlog(struct sock *sk, struct mptcp_subflow_context *subflow, struct sk_buff *skb) { struct mptcp_sock *msk = mptcp_sk(sk); struct sk_buff *tail = NULL; struct sock *ssk = skb->sk; bool fragstolen; int delta; if (unlikely(sk->sk_state == TCP_CLOSE)) { kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE); return; } /* Try to coalesce with the last skb in our backlog */ if (!list_empty(&msk->backlog_list)) tail = list_last_entry(&msk->backlog_list, struct sk_buff, list); if (tail && MPTCP_SKB_CB(skb)->map_seq == MPTCP_SKB_CB(tail)->end_seq && ssk == tail->sk && __mptcp_try_coalesce(sk, tail, skb, &fragstolen, &delta)) { skb->truesize -= delta; kfree_skb_partial(skb, fragstolen); __mptcp_subflow_lend_fwdmem(subflow, delta); goto account; } list_add_tail(&skb->list, &msk->backlog_list); mptcp_subflow_lend_fwdmem(subflow, skb); delta = skb->truesize; account: WRITE_ONCE(msk->backlog_len, msk->backlog_len + delta); /* Possibly not accept()ed yet, keep track of memory not CG * accounted, mptcp_graft_subflows() will handle it. */ if (!mem_cgroup_from_sk(ssk)) msk->backlog_unaccounted += delta; } static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk, struct sock *ssk, bool own_msk) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); struct sock *sk = (struct sock *)msk; bool more_data_avail; struct tcp_sock *tp; bool ret = false; pr_debug("msk=%p ssk=%p\n", msk, ssk); tp = tcp_sk(ssk); do { u32 map_remaining, offset; u32 seq = tp->copied_seq; struct sk_buff *skb; bool fin; /* try to move as much data as available */ map_remaining = subflow->map_data_len - mptcp_subflow_get_map_offset(subflow); skb = skb_peek(&ssk->sk_receive_queue); if (unlikely(!skb)) break; if (__mptcp_check_fallback(msk)) { /* Under fallback skbs have no MPTCP extension and TCP could * collapse them between the dummy map creation and the * current dequeue. Be sure to adjust the map size. 
*/ map_remaining = skb->len; subflow->map_data_len = skb->len; } offset = seq - TCP_SKB_CB(skb)->seq; fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN; if (fin) seq++; if (offset < skb->len) { size_t len = skb->len - offset; mptcp_init_skb(ssk, skb, offset, len); if (own_msk && sk_rmem_alloc_get(sk) < sk->sk_rcvbuf) { mptcp_subflow_lend_fwdmem(subflow, skb); ret |= __mptcp_move_skb(sk, skb); } else { __mptcp_add_backlog(sk, subflow, skb); } seq += len; if (unlikely(map_remaining < len)) { DEBUG_NET_WARN_ON_ONCE(1); mptcp_dss_corruption(msk, ssk); } } else { if (unlikely(!fin)) { DEBUG_NET_WARN_ON_ONCE(1); mptcp_dss_corruption(msk, ssk); } sk_eat_skb(ssk, skb); } WRITE_ONCE(tp->copied_seq, seq); more_data_avail = mptcp_subflow_data_available(ssk); } while (more_data_avail); if (ret) msk->last_data_recv = tcp_jiffies32; return ret; } static bool __mptcp_ofo_queue(struct mptcp_sock *msk) { struct sock *sk = (struct sock *)msk; struct sk_buff *skb, *tail; bool moved = false; struct rb_node *p; u64 end_seq; p = rb_first(&msk->out_of_order_queue); pr_debug("msk=%p empty=%d\n", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue)); while (p) { skb = rb_to_skb(p); if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) break; p = rb_next(p); rb_erase(&skb->rbnode, &msk->out_of_order_queue); if (unlikely(!after64(MPTCP_SKB_CB(skb)->end_seq, msk->ack_seq))) { mptcp_drop(sk, skb); MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); continue; } end_seq = MPTCP_SKB_CB(skb)->end_seq; tail = skb_peek_tail(&sk->sk_receive_queue); if (!tail || !mptcp_ooo_try_coalesce(msk, tail, skb)) { int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq; /* skip overlapping data, if any */ pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d\n", MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq, delta); MPTCP_SKB_CB(skb)->offset += delta; MPTCP_SKB_CB(skb)->map_seq += delta; __skb_queue_tail(&sk->sk_receive_queue, skb); } msk->bytes_received += end_seq - msk->ack_seq; WRITE_ONCE(msk->ack_seq, end_seq); moved = true; } return moved; } static bool __mptcp_subflow_error_report(struct sock *sk, struct sock *ssk) { int err = sock_error(ssk); int ssk_state; if (!err) return false; /* only propagate errors on fallen-back sockets or * on MPC connect */ if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(mptcp_sk(sk))) return false; /* We need to propagate only transition to CLOSE state. * Orphaned socket will see such state change via * subflow_sched_work_if_closed() and that path will properly * destroy the msk as needed. */ ssk_state = inet_sk_state_load(ssk); if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD)) mptcp_set_state(sk, ssk_state); WRITE_ONCE(sk->sk_err, -err); /* This barrier is coupled with smp_rmb() in mptcp_poll() */ smp_wmb(); sk_error_report(sk); return true; } void __mptcp_error_report(struct sock *sk) { struct mptcp_subflow_context *subflow; struct mptcp_sock *msk = mptcp_sk(sk); mptcp_for_each_subflow(msk, subflow) if (__mptcp_subflow_error_report(sk, mptcp_subflow_tcp_sock(subflow))) break; } /* In most cases we will be able to lock the mptcp socket. If its already * owned, we need to defer to the work queue to avoid ABBA deadlock. 
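The locking rule described here is a generic one: when the socket is already owned, do not process inline (that is what risks the ABBA inversion), just park the work for whoever holds the lock. A minimal userspace sketch of the pattern, using pthread_mutex_trylock() and a counter as stand-ins for the ownership check and the deferred-work list; note the kernel code tests sock_owned_by_user() rather than using a trylock:

#include <pthread.h>
#include <stdio.h>

struct owner {
	pthread_mutex_t lock;
	int deferred;			/* work items parked for later */
	int processed;			/* work items handled inline */
};

static void handle_event(struct owner *o)
{
	if (pthread_mutex_trylock(&o->lock) != 0) {
		o->deferred++;		/* owned elsewhere: defer, never block */
		return;
	}
	o->processed++;			/* we own it: process in place */
	pthread_mutex_unlock(&o->lock);
}

int main(void)
{
	struct owner o = { .lock = PTHREAD_MUTEX_INITIALIZER };

	handle_event(&o);			/* lock free: handled inline */

	pthread_mutex_lock(&o.lock);		/* simulate a busy owner */
	handle_event(&o);			/* must be deferred */
	pthread_mutex_unlock(&o.lock);

	printf("processed=%d deferred=%d\n", o.processed, o.deferred);
	return 0;
}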
*/ static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk) { struct sock *sk = (struct sock *)msk; bool moved; moved = __mptcp_move_skbs_from_subflow(msk, ssk, true); __mptcp_ofo_queue(msk); if (unlikely(ssk->sk_err)) __mptcp_subflow_error_report(sk, ssk); /* If the moves have caught up with the DATA_FIN sequence number * it's time to ack the DATA_FIN and change socket state, but * this is not a good place to change state. Let the workqueue * do it. */ if (mptcp_pending_data_fin(sk, NULL)) mptcp_schedule_work(sk); return moved; } void mptcp_data_ready(struct sock *sk, struct sock *ssk) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); struct mptcp_sock *msk = mptcp_sk(sk); /* The peer can send data while we are shutting down this * subflow at subflow destruction time, but we must avoid enqueuing * more data to the msk receive queue */ if (unlikely(subflow->closing)) return; mptcp_data_lock(sk); if (!sock_owned_by_user(sk)) { /* Wake-up the reader only for in-sequence data */ if (move_skbs_to_msk(msk, ssk) && mptcp_epollin_ready(sk)) sk->sk_data_ready(sk); } else { __mptcp_move_skbs_from_subflow(msk, ssk, false); } mptcp_data_unlock(sk); } static void mptcp_subflow_joined(struct mptcp_sock *msk, struct sock *ssk) { mptcp_subflow_ctx(ssk)->map_seq = READ_ONCE(msk->ack_seq); msk->allow_infinite_fallback = false; mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC); } static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk) { struct sock *sk = (struct sock *)msk; if (sk->sk_state != TCP_ESTABLISHED) return false; spin_lock_bh(&msk->fallback_lock); if (!msk->allow_subflows) { spin_unlock_bh(&msk->fallback_lock); return false; } mptcp_subflow_joined(msk, ssk); spin_unlock_bh(&msk->fallback_lock); mptcp_subflow_ctx(ssk)->subflow_id = msk->subflow_id++; mptcp_sockopt_sync_locked(msk, ssk); mptcp_stop_tout_timer(sk); __mptcp_propagate_sndbuf(sk, ssk); return true; } static void __mptcp_flush_join_list(struct sock *sk, struct list_head *join_list) { struct mptcp_subflow_context *tmp, *subflow; struct mptcp_sock *msk = mptcp_sk(sk); list_for_each_entry_safe(subflow, tmp, join_list, node) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); bool slow = lock_sock_fast(ssk); list_move_tail(&subflow->node, &msk->conn_list); if (!__mptcp_finish_join(msk, ssk)) mptcp_subflow_reset(ssk); unlock_sock_fast(ssk, slow); } } static bool mptcp_rtx_timer_pending(struct sock *sk) { return timer_pending(&sk->mptcp_retransmit_timer); } static void mptcp_reset_rtx_timer(struct sock *sk) { unsigned long tout; /* prevent rescheduling on close */ if (unlikely(inet_sk_state_load(sk) == TCP_CLOSE)) return; tout = mptcp_sk(sk)->timer_ival; sk_reset_timer(sk, &sk->mptcp_retransmit_timer, jiffies + tout); } bool mptcp_schedule_work(struct sock *sk) { if (inet_sk_state_load(sk) == TCP_CLOSE) return false; /* Get a reference on this socket, mptcp_worker() will release it. * As mptcp_worker() might complete before us, we can not avoid * a sock_hold()/sock_put() if schedule_work() returns false. 
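The sock_hold()/sock_put() dance can be stated as a small refcount rule: take a reference before queueing the work because the worker drops one when it runs; if the work was already queued nothing new will run, so the reference just taken must be dropped again. A minimal userspace sketch, with a hypothetical queue_once() standing in for schedule_work():

#include <stdbool.h>
#include <stdio.h>

struct obj {
	int refcnt;
	bool work_queued;
};

static void hold(struct obj *o) { o->refcnt++; }
static void put(struct obj *o)  { o->refcnt--; }

static bool queue_once(struct obj *o)
{
	if (o->work_queued)
		return false;		/* already pending: no new execution */
	o->work_queued = true;
	return true;
}

static bool schedule_obj_work(struct obj *o)
{
	hold(o);			/* reference for the future worker */
	if (queue_once(o))
		return true;
	put(o);				/* not queued: balance the hold */
	return false;
}

static void worker(struct obj *o)
{
	o->work_queued = false;
	/* ... do the actual work ... */
	put(o);				/* drop the scheduling reference */
}

int main(void)
{
	struct obj o = { .refcnt = 1 };

	schedule_obj_work(&o);		/* queued, refcnt is now 2 */
	schedule_obj_work(&o);		/* already queued, refcnt back to 2 */
	worker(&o);			/* runs once, refcnt back to 1 */
	printf("refcnt=%d\n", o.refcnt);
	return 0;
}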
*/ sock_hold(sk); if (schedule_work(&mptcp_sk(sk)->work)) return true; sock_put(sk); return false; } static bool mptcp_skb_can_collapse_to(u64 write_seq, const struct sk_buff *skb, const struct mptcp_ext *mpext) { if (!tcp_skb_can_collapse_to(skb)) return false; /* can collapse only if MPTCP level sequence is in order and this * mapping has not been xmitted yet */ return mpext && mpext->data_seq + mpext->data_len == write_seq && !mpext->frozen; } /* we can append data to the given data frag if: * - there is space available in the backing page_frag * - the data frag tail matches the current page_frag free offset * - the data frag end sequence number matches the current write seq */ static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk, const struct page_frag *pfrag, const struct mptcp_data_frag *df) { return df && pfrag->page == df->page && pfrag->size - pfrag->offset > 0 && pfrag->offset == (df->offset + df->data_len) && df->data_seq + df->data_len == msk->write_seq; } static void dfrag_uncharge(struct sock *sk, int len) { sk_mem_uncharge(sk, len); sk_wmem_queued_add(sk, -len); } static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag) { int len = dfrag->data_len + dfrag->overhead; list_del(&dfrag->list); dfrag_uncharge(sk, len); put_page(dfrag->page); } /* called under both the msk socket lock and the data lock */ static void __mptcp_clean_una(struct sock *sk) { struct mptcp_sock *msk = mptcp_sk(sk); struct mptcp_data_frag *dtmp, *dfrag; u64 snd_una; snd_una = msk->snd_una; list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) { if (after64(dfrag->data_seq + dfrag->data_len, snd_una)) break; if (unlikely(dfrag == msk->first_pending)) { /* in recovery mode can see ack after the current snd head */ if (WARN_ON_ONCE(!msk->recovery)) break; msk->first_pending = mptcp_send_next(sk); } dfrag_clear(sk, dfrag); } dfrag = mptcp_rtx_head(sk); if (dfrag && after64(snd_una, dfrag->data_seq)) { u64 delta = snd_una - dfrag->data_seq; /* prevent wrap around in recovery mode */ if (unlikely(delta > dfrag->already_sent)) { if (WARN_ON_ONCE(!msk->recovery)) goto out; if (WARN_ON_ONCE(delta > dfrag->data_len)) goto out; dfrag->already_sent += delta - dfrag->already_sent; } dfrag->data_seq += delta; dfrag->offset += delta; dfrag->data_len -= delta; dfrag->already_sent -= delta; dfrag_uncharge(sk, delta); } /* all retransmitted data acked, recovery completed */ if (unlikely(msk->recovery) && after64(msk->snd_una, msk->recovery_snd_nxt)) msk->recovery = false; out: if (snd_una == msk->snd_nxt && snd_una == msk->write_seq) { if (mptcp_rtx_timer_pending(sk) && !mptcp_data_fin_enabled(msk)) mptcp_stop_rtx_timer(sk); } else { mptcp_reset_rtx_timer(sk); } if (mptcp_pending_data_fin_ack(sk)) mptcp_schedule_work(sk); } static void __mptcp_clean_una_wakeup(struct sock *sk) { lockdep_assert_held_once(&sk->sk_lock.slock); __mptcp_clean_una(sk); mptcp_write_space(sk); } static void mptcp_clean_una_wakeup(struct sock *sk) { mptcp_data_lock(sk); __mptcp_clean_una_wakeup(sk); mptcp_data_unlock(sk); } static void mptcp_enter_memory_pressure(struct sock *sk) { struct mptcp_subflow_context *subflow; struct mptcp_sock *msk = mptcp_sk(sk); bool first = true; mptcp_for_each_subflow(msk, subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); if (first && !ssk->sk_bypass_prot_mem) { tcp_enter_memory_pressure(ssk); first = false; } sk_stream_moderate_sndbuf(ssk); } __mptcp_sync_sndbuf(sk); } /* ensure we get enough memory for the frag hdr, beyond some minimal amount of * data */ static 
bool mptcp_page_frag_refill(struct sock *sk, struct page_frag *pfrag) { if (likely(skb_page_frag_refill(32U + sizeof(struct mptcp_data_frag), pfrag, sk->sk_allocation))) return true; mptcp_enter_memory_pressure(sk); return false; } static struct mptcp_data_frag * mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag, int orig_offset) { int offset = ALIGN(orig_offset, sizeof(long)); struct mptcp_data_frag *dfrag; dfrag = (struct mptcp_data_frag *)(page_to_virt(pfrag->page) + offset); dfrag->data_len = 0; dfrag->data_seq = msk->write_seq; dfrag->overhead = offset - orig_offset + sizeof(struct mptcp_data_frag); dfrag->offset = offset + sizeof(struct mptcp_data_frag); dfrag->already_sent = 0; dfrag->page = pfrag->page; return dfrag; } struct mptcp_sendmsg_info { int mss_now; int size_goal; u16 limit; u16 sent; unsigned int flags; bool data_lock_held; }; static int mptcp_check_allowed_size(const struct mptcp_sock *msk, struct sock *ssk, u64 data_seq, int avail_size) { u64 window_end = mptcp_wnd_end(msk); u64 mptcp_snd_wnd; if (__mptcp_check_fallback(msk)) return avail_size; mptcp_snd_wnd = window_end - data_seq; avail_size = min_t(unsigned int, mptcp_snd_wnd, avail_size); if (unlikely(tcp_sk(ssk)->snd_wnd < mptcp_snd_wnd)) { tcp_sk(ssk)->snd_wnd = min_t(u64, U32_MAX, mptcp_snd_wnd); MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_SNDWNDSHARED); } return avail_size; } static bool __mptcp_add_ext(struct sk_buff *skb, gfp_t gfp) { struct skb_ext *mpext = __skb_ext_alloc(gfp); if (!mpext) return false; __skb_ext_set(skb, SKB_EXT_MPTCP, mpext); return true; } static struct sk_buff *__mptcp_do_alloc_tx_skb(struct sock *sk, gfp_t gfp) { struct sk_buff *skb; skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp); if (likely(skb)) { if (likely(__mptcp_add_ext(skb, gfp))) { skb_reserve(skb, MAX_TCP_HEADER); skb->ip_summed = CHECKSUM_PARTIAL; INIT_LIST_HEAD(&skb->tcp_tsorted_anchor); return skb; } __kfree_skb(skb); } else { mptcp_enter_memory_pressure(sk); } return NULL; } static struct sk_buff *__mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp) { struct sk_buff *skb; skb = __mptcp_do_alloc_tx_skb(sk, gfp); if (!skb) return NULL; if (likely(sk_wmem_schedule(ssk, skb->truesize))) { tcp_skb_entail(ssk, skb); return skb; } tcp_skb_tsorted_anchor_cleanup(skb); kfree_skb(skb); return NULL; } static struct sk_buff *mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, bool data_lock_held) { gfp_t gfp = data_lock_held ? GFP_ATOMIC : sk->sk_allocation; return __mptcp_alloc_tx_skb(sk, ssk, gfp); } /* note: this always recompute the csum on the whole skb, even * if we just appended a single frag. 
More status info needed */ static void mptcp_update_data_checksum(struct sk_buff *skb, int added) { struct mptcp_ext *mpext = mptcp_get_ext(skb); __wsum csum = ~csum_unfold(mpext->csum); int offset = skb->len - added; mpext->csum = csum_fold(csum_block_add(csum, skb_checksum(skb, offset, added, 0), offset)); } static void mptcp_update_infinite_map(struct mptcp_sock *msk, struct sock *ssk, struct mptcp_ext *mpext) { if (!mpext) return; mpext->infinite_map = 1; mpext->data_len = 0; if (!mptcp_try_fallback(ssk, MPTCP_MIB_INFINITEMAPTX)) { MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_FALLBACKFAILED); mptcp_subflow_reset(ssk); return; } mptcp_subflow_ctx(ssk)->send_infinite_map = 0; } #define MPTCP_MAX_GSO_SIZE (GSO_LEGACY_MAX_SIZE - (MAX_TCP_HEADER + 1)) static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk, struct mptcp_data_frag *dfrag, struct mptcp_sendmsg_info *info) { u64 data_seq = dfrag->data_seq + info->sent; int offset = dfrag->offset + info->sent; struct mptcp_sock *msk = mptcp_sk(sk); bool zero_window_probe = false; struct mptcp_ext *mpext = NULL; bool can_coalesce = false; bool reuse_skb = true; struct sk_buff *skb; size_t copy; int i; pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u\n", msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent); if (WARN_ON_ONCE(info->sent > info->limit || info->limit > dfrag->data_len)) return 0; if (unlikely(!__tcp_can_send(ssk))) return -EAGAIN; /* compute send limit */ if (unlikely(ssk->sk_gso_max_size > MPTCP_MAX_GSO_SIZE)) ssk->sk_gso_max_size = MPTCP_MAX_GSO_SIZE; info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags); copy = info->size_goal; skb = tcp_write_queue_tail(ssk); if (skb && copy > skb->len) { /* Limit the write to the size available in the * current skb, if any, so that we create at most a new skb. * Explicitly tells TCP internals to avoid collapsing on later * queue management operation, to avoid breaking the ext <-> * SSN association set here */ mpext = mptcp_get_ext(skb); if (!mptcp_skb_can_collapse_to(data_seq, skb, mpext)) { TCP_SKB_CB(skb)->eor = 1; tcp_mark_push(tcp_sk(ssk), skb); goto alloc_skb; } i = skb_shinfo(skb)->nr_frags; can_coalesce = skb_can_coalesce(skb, i, dfrag->page, offset); if (!can_coalesce && i >= READ_ONCE(net_hotdata.sysctl_max_skb_frags)) { tcp_mark_push(tcp_sk(ssk), skb); goto alloc_skb; } copy -= skb->len; } else { alloc_skb: skb = mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held); if (!skb) return -ENOMEM; i = skb_shinfo(skb)->nr_frags; reuse_skb = false; mpext = mptcp_get_ext(skb); } /* Zero window and all data acked? Probe. */ copy = mptcp_check_allowed_size(msk, ssk, data_seq, copy); if (copy == 0) { u64 snd_una = READ_ONCE(msk->snd_una); /* No need for zero probe if there are any data pending * either at the msk or ssk level; skb is the current write * queue tail and can be empty at this point. 
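The probe decision boils down to two checks: clamp the amount to send to what still fits in the peer's announced window, and if nothing fits while every previously sent byte has been acked, send a single byte just below snd_una to keep probing the window. A simplified userspace sketch of that decision; it ignores the extra checks on the subflow write queue done by the real code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct tx_state {
	uint64_t wnd_end;	/* right edge of the peer's announced window */
	uint64_t snd_una;	/* oldest unacknowledged sequence */
	uint64_t snd_nxt;	/* next sequence to send */
};

static uint64_t allowed_size(const struct tx_state *tx, uint64_t data_seq,
			     uint64_t want)
{
	uint64_t wnd = tx->wnd_end > data_seq ? tx->wnd_end - data_seq : 0;

	return want < wnd ? want : wnd;
}

static uint64_t next_copy(const struct tx_state *tx, uint64_t *data_seq,
			  uint64_t want, bool *probe)
{
	uint64_t copy = allowed_size(tx, *data_seq, want);

	*probe = false;
	if (copy == 0 && tx->snd_una == tx->snd_nxt) {
		*probe = true;			/* zero window, all data acked */
		*data_seq = tx->snd_una - 1;	/* probe just below the window */
		copy = 1;
	}
	return copy;
}

int main(void)
{
	struct tx_state tx = { .wnd_end = 5000, .snd_una = 5000, .snd_nxt = 5000 };
	uint64_t seq = 5000;
	bool probe;
	uint64_t copy = next_copy(&tx, &seq, 1400, &probe);

	printf("copy=%llu probe=%d seq=%llu\n",
	       (unsigned long long)copy, probe, (unsigned long long)seq);
	return 0;
}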
*/ if (snd_una != msk->snd_nxt || skb->len || skb != tcp_send_head(ssk)) { tcp_remove_empty_skb(ssk); return 0; } zero_window_probe = true; data_seq = snd_una - 1; copy = 1; } copy = min_t(size_t, copy, info->limit - info->sent); if (!sk_wmem_schedule(ssk, copy)) { tcp_remove_empty_skb(ssk); return -ENOMEM; } if (can_coalesce) { skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); } else { get_page(dfrag->page); skb_fill_page_desc(skb, i, dfrag->page, offset, copy); } skb->len += copy; skb->data_len += copy; skb->truesize += copy; sk_wmem_queued_add(ssk, copy); sk_mem_charge(ssk, copy); WRITE_ONCE(tcp_sk(ssk)->write_seq, tcp_sk(ssk)->write_seq + copy); TCP_SKB_CB(skb)->end_seq += copy; tcp_skb_pcount_set(skb, 0); /* on skb reuse we just need to update the DSS len */ if (reuse_skb) { TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH; mpext->data_len += copy; goto out; } memset(mpext, 0, sizeof(*mpext)); mpext->data_seq = data_seq; mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq; mpext->data_len = copy; mpext->use_map = 1; mpext->dsn64 = 1; pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d\n", mpext->data_seq, mpext->subflow_seq, mpext->data_len, mpext->dsn64); if (zero_window_probe) { MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_WINPROBE); mptcp_subflow_ctx(ssk)->rel_write_seq += copy; mpext->frozen = 1; if (READ_ONCE(msk->csum_enabled)) mptcp_update_data_checksum(skb, copy); tcp_push_pending_frames(ssk); return 0; } out: if (READ_ONCE(msk->csum_enabled)) mptcp_update_data_checksum(skb, copy); if (mptcp_subflow_ctx(ssk)->send_infinite_map) mptcp_update_infinite_map(msk, ssk, mpext); trace_mptcp_sendmsg_frag(mpext); mptcp_subflow_ctx(ssk)->rel_write_seq += copy; return copy; } #define MPTCP_SEND_BURST_SIZE ((1 << 16) - \ sizeof(struct tcphdr) - \ MAX_TCP_OPTION_SPACE - \ sizeof(struct ipv6hdr) - \ sizeof(struct frag_hdr)) struct subflow_send_info { struct sock *ssk; u64 linger_time; }; void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow) { if (!subflow->stale) return; subflow->stale = 0; MPTCP_INC_STATS(sock_net(mptcp_subflow_tcp_sock(subflow)), MPTCP_MIB_SUBFLOWRECOVER); } bool mptcp_subflow_active(struct mptcp_subflow_context *subflow) { if (unlikely(subflow->stale)) { u32 rcv_tstamp = READ_ONCE(tcp_sk(mptcp_subflow_tcp_sock(subflow))->rcv_tstamp); if (subflow->stale_rcv_tstamp == rcv_tstamp) return false; mptcp_subflow_set_active(subflow); } return __mptcp_subflow_active(subflow); } #define SSK_MODE_ACTIVE 0 #define SSK_MODE_BACKUP 1 #define SSK_MODE_MAX 2 /* implement the mptcp packet scheduler; * returns the subflow that will transmit the next DSS * additionally updates the rtx timeout */ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk) { struct subflow_send_info send_info[SSK_MODE_MAX]; struct mptcp_subflow_context *subflow; struct sock *sk = (struct sock *)msk; u32 pace, burst, wmem; int i, nr_active = 0; struct sock *ssk; u64 linger_time; long tout = 0; /* pick the subflow with the lower wmem/wspace ratio */ for (i = 0; i < SSK_MODE_MAX; ++i) { send_info[i].ssk = NULL; send_info[i].linger_time = -1; } mptcp_for_each_subflow(msk, subflow) { bool backup = subflow->backup || subflow->request_bkup; trace_mptcp_subflow_get_send(subflow); ssk = mptcp_subflow_tcp_sock(subflow); if (!mptcp_subflow_active(subflow)) continue; tout = max(tout, mptcp_timeout_from_subflow(subflow)); nr_active += !backup; pace = subflow->avg_pacing_rate; if (unlikely(!pace)) { /* init pacing rate from socket */ subflow->avg_pacing_rate = READ_ONCE(ssk->sk_pacing_rate); pace 
= subflow->avg_pacing_rate; if (!pace) continue; } linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace); if (linger_time < send_info[backup].linger_time) { send_info[backup].ssk = ssk; send_info[backup].linger_time = linger_time; } } __mptcp_set_timeout(sk, tout); /* pick the best backup if no other subflow is active */ if (!nr_active) send_info[SSK_MODE_ACTIVE].ssk = send_info[SSK_MODE_BACKUP].ssk; /* According to the blest algorithm, to avoid HoL blocking for the * faster flow, we need to: * - estimate the faster flow linger time * - use the above to estimate the amount of byte transferred * by the faster flow * - check that the amount of queued data is greater than the above, * otherwise do not use the picked, slower, subflow * We select the subflow with the shorter estimated time to flush * the queued mem, which basically ensure the above. We just need * to check that subflow has a non empty cwin. */ ssk = send_info[SSK_MODE_ACTIVE].ssk; if (!ssk || !sk_stream_memory_free(ssk)) return NULL; burst = min_t(int, MPTCP_SEND_BURST_SIZE, mptcp_wnd_end(msk) - msk->snd_nxt); wmem = READ_ONCE(ssk->sk_wmem_queued); if (!burst) return ssk; subflow = mptcp_subflow_ctx(ssk); subflow->avg_pacing_rate = div_u64((u64)subflow->avg_pacing_rate * wmem + READ_ONCE(ssk->sk_pacing_rate) * burst, burst + wmem); msk->snd_burst = burst; return ssk; } static void mptcp_push_release(struct sock *ssk, struct mptcp_sendmsg_info *info) { tcp_push(ssk, 0, info->mss_now, tcp_sk(ssk)->nonagle, info->size_goal); release_sock(ssk); } static void mptcp_update_post_push(struct mptcp_sock *msk, struct mptcp_data_frag *dfrag, u32 sent) { u64 snd_nxt_new = dfrag->data_seq; dfrag->already_sent += sent; msk->snd_burst -= sent; snd_nxt_new += dfrag->already_sent; /* snd_nxt_new can be smaller than snd_nxt in case mptcp * is recovering after a failover. In that event, this re-sends * old segments. * * Thus compute snd_nxt_new candidate based on * the dfrag->data_seq that was sent and the data * that has been handed to the subflow for transmission * and skip update in case it was old dfrag. */ if (likely(after64(snd_nxt_new, msk->snd_nxt))) { msk->bytes_sent += snd_nxt_new - msk->snd_nxt; WRITE_ONCE(msk->snd_nxt, snd_nxt_new); } } void mptcp_check_and_set_pending(struct sock *sk) { if (mptcp_send_head(sk)) { mptcp_data_lock(sk); mptcp_sk(sk)->cb_flags |= BIT(MPTCP_PUSH_PENDING); mptcp_data_unlock(sk); } } static int __subflow_push_pending(struct sock *sk, struct sock *ssk, struct mptcp_sendmsg_info *info) { struct mptcp_sock *msk = mptcp_sk(sk); struct mptcp_data_frag *dfrag; int len, copied = 0, err = 0; while ((dfrag = mptcp_send_head(sk))) { info->sent = dfrag->already_sent; info->limit = dfrag->data_len; len = dfrag->data_len - dfrag->already_sent; while (len > 0) { int ret = 0; ret = mptcp_sendmsg_frag(sk, ssk, dfrag, info); if (ret <= 0) { err = copied ? 
: ret; goto out; } info->sent += ret; copied += ret; len -= ret; mptcp_update_post_push(msk, dfrag, ret); } msk->first_pending = mptcp_send_next(sk); if (msk->snd_burst <= 0 || !sk_stream_memory_free(ssk) || !mptcp_subflow_active(mptcp_subflow_ctx(ssk))) { err = copied; goto out; } mptcp_set_timeout(sk); } err = copied; out: if (err > 0) msk->last_data_sent = tcp_jiffies32; return err; } void __mptcp_push_pending(struct sock *sk, unsigned int flags) { struct sock *prev_ssk = NULL, *ssk = NULL; struct mptcp_sock *msk = mptcp_sk(sk); struct mptcp_sendmsg_info info = { .flags = flags, }; bool copied = false; int push_count = 1; while (mptcp_send_head(sk) && (push_count > 0)) { struct mptcp_subflow_context *subflow; int ret = 0; if (mptcp_sched_get_send(msk)) break; push_count = 0; mptcp_for_each_subflow(msk, subflow) { if (READ_ONCE(subflow->scheduled)) { mptcp_subflow_set_scheduled(subflow, false); prev_ssk = ssk; ssk = mptcp_subflow_tcp_sock(subflow); if (ssk != prev_ssk) { /* First check: if the ssk has changed since * the last round, release prev_ssk */ if (prev_ssk) mptcp_push_release(prev_ssk, &info); /* Need to lock the new subflow only if different * from the previous one, otherwise we are still * holding the relevant lock */ lock_sock(ssk); } push_count++; ret = __subflow_push_pending(sk, ssk, &info); if (ret <= 0) { if (ret != -EAGAIN || (1 << ssk->sk_state) & (TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | TCPF_CLOSE)) push_count--; continue; } copied = true; } } } /* at this point we still hold the socket lock for the last subflow we used */ if (ssk) mptcp_push_release(ssk, &info); /* Avoid scheduling the rtx timer if no data has been pushed; the timer * will be updated on positive acks by __mptcp_cleanup_una(). */ if (copied) { if (!mptcp_rtx_timer_pending(sk)) mptcp_reset_rtx_timer(sk); mptcp_check_send_data_fin(sk); } } static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk, bool first) { struct mptcp_sock *msk = mptcp_sk(sk); struct mptcp_sendmsg_info info = { .data_lock_held = true, }; bool keep_pushing = true; struct sock *xmit_ssk; int copied = 0; info.flags = 0; while (mptcp_send_head(sk) && keep_pushing) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); int ret = 0; /* check for a different subflow usage only after * spooling the first chunk of data */ if (first) { mptcp_subflow_set_scheduled(subflow, false); ret = __subflow_push_pending(sk, ssk, &info); first = false; if (ret <= 0) break; copied += ret; continue; } if (mptcp_sched_get_send(msk)) goto out; if (READ_ONCE(subflow->scheduled)) { mptcp_subflow_set_scheduled(subflow, false); ret = __subflow_push_pending(sk, ssk, &info); if (ret <= 0) keep_pushing = false; copied += ret; } mptcp_for_each_subflow(msk, subflow) { if (READ_ONCE(subflow->scheduled)) { xmit_ssk = mptcp_subflow_tcp_sock(subflow); if (xmit_ssk != ssk) { mptcp_subflow_delegate(subflow, MPTCP_DELEGATE_SEND); keep_pushing = false; } } } } out: /* __mptcp_alloc_tx_skb could have released some wmem and we are * not going to flush it via release_sock() */ if (copied) { tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle, info.size_goal); if (!mptcp_rtx_timer_pending(sk)) mptcp_reset_rtx_timer(sk); if (msk->snd_data_fin_enable && msk->snd_nxt + 1 == msk->write_seq) mptcp_schedule_work(sk); } } static int mptcp_disconnect(struct sock *sk, int flags); static int mptcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, size_t len, int *copied_syn) { unsigned int saved_flags = msg->msg_flags; struct mptcp_sock *msk = mptcp_sk(sk); struct sock
*ssk; int ret; /* on flags-based fastopen the mptcp is supposed to create the * first subflow right now. Otherwise we are in the defer_connect * path, and the first subflow must already be present. * Since the defer_connect flag is cleared after the first successful * fastopen attempt, no need to check for additional subflow status. */ if (msg->msg_flags & MSG_FASTOPEN) { ssk = __mptcp_nmpc_sk(msk); if (IS_ERR(ssk)) return PTR_ERR(ssk); } if (!msk->first) return -EINVAL; ssk = msk->first; lock_sock(ssk); msg->msg_flags |= MSG_DONTWAIT; msk->fastopening = 1; ret = tcp_sendmsg_fastopen(ssk, msg, copied_syn, len, NULL); msk->fastopening = 0; msg->msg_flags = saved_flags; release_sock(ssk); /* do the blocking bits of inet_stream_connect outside the ssk socket lock */ if (ret == -EINPROGRESS && !(msg->msg_flags & MSG_DONTWAIT)) { ret = __inet_stream_connect(sk->sk_socket, msg->msg_name, msg->msg_namelen, msg->msg_flags, 1); /* Keep the same behaviour of plain TCP: zero the copied bytes in * case of any error, except timeout or signal */ if (ret && ret != -EINPROGRESS && ret != -ERESTARTSYS && ret != -EINTR) *copied_syn = 0; } else if (ret && ret != -EINPROGRESS) { /* The disconnect() op called by tcp_sendmsg_fastopen()/ * __inet_stream_connect() can fail due to the locking check, * see mptcp_disconnect(). * Attempt it again outside the problematic scope. */ if (!mptcp_disconnect(sk, 0)) { sk->sk_disconnects++; sk->sk_socket->state = SS_UNCONNECTED; } } inet_clear_bit(DEFER_CONNECT, sk); return ret; } static int do_copy_data_nocache(struct sock *sk, int copy, struct iov_iter *from, char *to) { if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) { if (!copy_from_iter_full_nocache(to, copy, from)) return -EFAULT; } else if (!copy_from_iter_full(to, copy, from)) { return -EFAULT; } return 0; } /* open-code sk_stream_memory_free() plus sent limit computation to * avoid indirect calls in fast-path. * Called under the msk socket lock, so we can avoid a bunch of ONCE * annotations.
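 * Worked example (illustrative numbers only, not from the code): with
 * TCP_NOTSENT_LOWAT set to 16 KiB, wmem_queued still below sk_sndbuf and
 * 6 KiB already queued at MPTCP level but not yet pushed to any subflow
 * (write_seq - snd_nxt), the helper below returns 16 KiB - 6 KiB = 10 KiB,
 * so the sendmsg() loop copies at most that much more before waiting;
 * with the lowat left at its default (UINT_MAX) no extra cap is applied.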
*/ static u32 mptcp_send_limit(const struct sock *sk) { const struct mptcp_sock *msk = mptcp_sk(sk); u32 limit, not_sent; if (sk->sk_wmem_queued >= READ_ONCE(sk->sk_sndbuf)) return 0; limit = mptcp_notsent_lowat(sk); if (limit == UINT_MAX) return UINT_MAX; not_sent = msk->write_seq - msk->snd_nxt; if (not_sent >= limit) return 0; return limit - not_sent; } static void mptcp_rps_record_subflows(const struct mptcp_sock *msk) { struct mptcp_subflow_context *subflow; if (!rfs_is_needed()) return; mptcp_for_each_subflow(msk, subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); sock_rps_record_flow(ssk); } } static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) { struct mptcp_sock *msk = mptcp_sk(sk); struct page_frag *pfrag; size_t copied = 0; int ret = 0; long timeo; /* silently ignore everything else */ msg->msg_flags &= MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_FASTOPEN; lock_sock(sk); mptcp_rps_record_subflows(msk); if (unlikely(inet_test_bit(DEFER_CONNECT, sk) || msg->msg_flags & MSG_FASTOPEN)) { int copied_syn = 0; ret = mptcp_sendmsg_fastopen(sk, msg, len, &copied_syn); copied += copied_syn; if (ret == -EINPROGRESS && copied_syn > 0) goto out; else if (ret) goto do_error; } timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) { ret = sk_stream_wait_connect(sk, &timeo); if (ret) goto do_error; } ret = -EPIPE; if (unlikely(sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))) goto do_error; pfrag = sk_page_frag(sk); while (msg_data_left(msg)) { int total_ts, frag_truesize = 0; struct mptcp_data_frag *dfrag; bool dfrag_collapsed; size_t psize, offset; u32 copy_limit; /* ensure fitting the notsent_lowat() constraint */ copy_limit = mptcp_send_limit(sk); if (!copy_limit) goto wait_for_memory; /* reuse tail pfrag, if possible, or carve a new one from the * page allocator */ dfrag = mptcp_pending_tail(sk); dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag); if (!dfrag_collapsed) { if (!mptcp_page_frag_refill(sk, pfrag)) goto wait_for_memory; dfrag = mptcp_carve_data_frag(msk, pfrag, pfrag->offset); frag_truesize = dfrag->overhead; } /* we do not bound vs wspace, to allow a single packet. 
* memory accounting will prevent excessive memory usage * anyway */ offset = dfrag->offset + dfrag->data_len; psize = pfrag->size - offset; psize = min_t(size_t, psize, msg_data_left(msg)); psize = min_t(size_t, psize, copy_limit); total_ts = psize + frag_truesize; if (!sk_wmem_schedule(sk, total_ts)) goto wait_for_memory; ret = do_copy_data_nocache(sk, psize, &msg->msg_iter, page_address(dfrag->page) + offset); if (ret) goto do_error; /* data successfully copied into the write queue */ sk_forward_alloc_add(sk, -total_ts); copied += psize; dfrag->data_len += psize; frag_truesize += psize; pfrag->offset += frag_truesize; WRITE_ONCE(msk->write_seq, msk->write_seq + psize); /* charge data on mptcp pending queue to the msk socket * Note: we charge such data both to sk and ssk */ sk_wmem_queued_add(sk, frag_truesize); if (!dfrag_collapsed) { get_page(dfrag->page); list_add_tail(&dfrag->list, &msk->rtx_queue); if (!msk->first_pending) msk->first_pending = dfrag; } pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk, dfrag->data_seq, dfrag->data_len, dfrag->already_sent, !dfrag_collapsed); continue; wait_for_memory: set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); __mptcp_push_pending(sk, msg->msg_flags); ret = sk_stream_wait_memory(sk, &timeo); if (ret) goto do_error; } if (copied) __mptcp_push_pending(sk, msg->msg_flags); out: release_sock(sk); return copied; do_error: if (copied) goto out; copied = sk_stream_error(sk, msg->msg_flags, ret); goto out; } static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied); static int __mptcp_recvmsg_mskq(struct sock *sk, struct msghdr *msg, size_t len, int flags, int copied_total, struct scm_timestamping_internal *tss, int *cmsg_flags) { struct mptcp_sock *msk = mptcp_sk(sk); struct sk_buff *skb, *tmp; int total_data_len = 0; int copied = 0; skb_queue_walk_safe(&sk->sk_receive_queue, skb, tmp) { u32 delta, offset = MPTCP_SKB_CB(skb)->offset; u32 data_len = skb->len - offset; u32 count; int err; if (flags & MSG_PEEK) { /* skip already peeked skbs */ if (total_data_len + data_len <= copied_total) { total_data_len += data_len; continue; } /* skip the already peeked data in the current skb */ delta = copied_total - total_data_len; offset += delta; data_len -= delta; } count = min_t(size_t, len - copied, data_len); if (!(flags & MSG_TRUNC)) { err = skb_copy_datagram_msg(skb, offset, msg, count); if (unlikely(err < 0)) { if (!copied) return err; break; } } if (MPTCP_SKB_CB(skb)->has_rxtstamp) { tcp_update_recv_tstamps(skb, tss); *cmsg_flags |= MPTCP_CMSG_TS; } copied += count; if (!(flags & MSG_PEEK)) { msk->bytes_consumed += count; if (count < data_len) { MPTCP_SKB_CB(skb)->offset += count; MPTCP_SKB_CB(skb)->map_seq += count; break; } /* avoid the indirect call, we know the destructor is sock_rfree */ skb->destructor = NULL; skb->sk = NULL; atomic_sub(skb->truesize, &sk->sk_rmem_alloc); sk_mem_uncharge(sk, skb->truesize); __skb_unlink(skb, &sk->sk_receive_queue); skb_attempt_defer_free(skb); } if (copied >= len) break; } mptcp_rcv_space_adjust(msk, copied); return copied; } /* receive buffer autotuning. See tcp_rcv_space_adjust for more information. * * Only difference: Use highest rtt estimate of the subflows in use.
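 * Roughly: bytes consumed by recvmsg() are accumulated in rcvq_space.copied;
 * once at least one rtt (the maximum across the subflows) has elapsed and
 * more data was consumed than in the previous window, mptcp_rcvbuf_grow()
 * bumps the msk receive buffer and every subflow follows along via
 * tcp_rcvbuf_grow(), then a new measurement window starts.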
*/ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied) { struct mptcp_subflow_context *subflow; struct sock *sk = (struct sock *)msk; u8 scaling_ratio = U8_MAX; u32 time, advmss = 1; u64 rtt_us, mstamp; msk_owned_by_me(msk); if (copied <= 0) return; if (!msk->rcvspace_init) mptcp_rcv_space_init(msk, msk->first); msk->rcvq_space.copied += copied; mstamp = div_u64(tcp_clock_ns(), NSEC_PER_USEC); time = tcp_stamp_us_delta(mstamp, msk->rcvq_space.time); rtt_us = msk->rcvq_space.rtt_us; if (rtt_us && time < (rtt_us >> 3)) return; rtt_us = 0; mptcp_for_each_subflow(msk, subflow) { const struct tcp_sock *tp; u64 sf_rtt_us; u32 sf_advmss; tp = tcp_sk(mptcp_subflow_tcp_sock(subflow)); sf_rtt_us = READ_ONCE(tp->rcv_rtt_est.rtt_us); sf_advmss = READ_ONCE(tp->advmss); rtt_us = max(sf_rtt_us, rtt_us); advmss = max(sf_advmss, advmss); scaling_ratio = min(tp->scaling_ratio, scaling_ratio); } msk->rcvq_space.rtt_us = rtt_us; msk->scaling_ratio = scaling_ratio; if (time < (rtt_us >> 3) || rtt_us == 0) return; if (msk->rcvq_space.copied <= msk->rcvq_space.space) goto new_measure; if (mptcp_rcvbuf_grow(sk, msk->rcvq_space.copied)) { /* Make subflows follow along. If we do not do this, we * get drops at subflow level if skbs can't be moved to * the mptcp rx queue fast enough (announced rcv_win can * exceed ssk->sk_rcvbuf). */ mptcp_for_each_subflow(msk, subflow) { struct sock *ssk; bool slow; ssk = mptcp_subflow_tcp_sock(subflow); slow = lock_sock_fast(ssk); /* subflows can be added before tcp_init_transfer() */ if (tcp_sk(ssk)->rcvq_space.space) tcp_rcvbuf_grow(ssk, msk->rcvq_space.copied); unlock_sock_fast(ssk, slow); } } new_measure: msk->rcvq_space.copied = 0; msk->rcvq_space.time = mstamp; } static bool __mptcp_move_skbs(struct sock *sk, struct list_head *skbs, u32 *delta) { struct sk_buff *skb = list_first_entry(skbs, struct sk_buff, list); struct mptcp_sock *msk = mptcp_sk(sk); bool moved = false; *delta = 0; while (1) { /* If the msk recvbuf is full stop, don't drop */ if (sk_rmem_alloc_get(sk) > sk->sk_rcvbuf) break; prefetch(skb->next); list_del(&skb->list); *delta += skb->truesize; moved |= __mptcp_move_skb(sk, skb); if (list_empty(skbs)) break; skb = list_first_entry(skbs, struct sk_buff, list); } __mptcp_ofo_queue(msk); if (moved) mptcp_check_data_fin((struct sock *)msk); return moved; } static bool mptcp_can_spool_backlog(struct sock *sk, struct list_head *skbs) { struct mptcp_sock *msk = mptcp_sk(sk); /* After CG initialization, subflows should never add skb before * gaining the CG themself. */ DEBUG_NET_WARN_ON_ONCE(msk->backlog_unaccounted && sk->sk_socket && mem_cgroup_from_sk(sk)); /* Don't spool the backlog if the rcvbuf is full. 
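 * Callers splice the whole backlog out while holding the msk data lock,
 * move the skbs to the receive queue without it, and then account the
 * moved memory / splice back any leftover via mptcp_backlog_spooled().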
*/ if (list_empty(&msk->backlog_list) || sk_rmem_alloc_get(sk) > sk->sk_rcvbuf) return false; INIT_LIST_HEAD(skbs); list_splice_init(&msk->backlog_list, skbs); return true; } static void mptcp_backlog_spooled(struct sock *sk, u32 moved, struct list_head *skbs) { struct mptcp_sock *msk = mptcp_sk(sk); WRITE_ONCE(msk->backlog_len, msk->backlog_len - moved); list_splice(skbs, &msk->backlog_list); } static bool mptcp_move_skbs(struct sock *sk) { struct list_head skbs; bool enqueued = false; u32 moved; mptcp_data_lock(sk); while (mptcp_can_spool_backlog(sk, &skbs)) { mptcp_data_unlock(sk); enqueued |= __mptcp_move_skbs(sk, &skbs, &moved); mptcp_data_lock(sk); mptcp_backlog_spooled(sk, moved, &skbs); } mptcp_data_unlock(sk); return enqueued; } static unsigned int mptcp_inq_hint(const struct sock *sk) { const struct mptcp_sock *msk = mptcp_sk(sk); const struct sk_buff *skb; skb = skb_peek(&sk->sk_receive_queue); if (skb) { u64 hint_val = READ_ONCE(msk->ack_seq) - MPTCP_SKB_CB(skb)->map_seq; if (hint_val >= INT_MAX) return INT_MAX; return (unsigned int)hint_val; } if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN)) return 1; return 0; } static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, int *addr_len) { struct mptcp_sock *msk = mptcp_sk(sk); struct scm_timestamping_internal tss; int copied = 0, cmsg_flags = 0; int target; long timeo; /* MSG_ERRQUEUE is really a no-op till we support IP_RECVERR */ if (unlikely(flags & MSG_ERRQUEUE)) return inet_recv_error(sk, msg, len, addr_len); lock_sock(sk); if (unlikely(sk->sk_state == TCP_LISTEN)) { copied = -ENOTCONN; goto out_err; } mptcp_rps_record_subflows(msk); timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); len = min_t(size_t, len, INT_MAX); target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); if (unlikely(msk->recvmsg_inq)) cmsg_flags = MPTCP_CMSG_INQ; while (copied < len) { int err, bytes_read; bytes_read = __mptcp_recvmsg_mskq(sk, msg, len - copied, flags, copied, &tss, &cmsg_flags); if (unlikely(bytes_read < 0)) { if (!copied) copied = bytes_read; goto out_err; } copied += bytes_read; if (!list_empty(&msk->backlog_list) && mptcp_move_skbs(sk)) continue; /* only the MPTCP socket status is relevant here. The exit * conditions mirror closely tcp_recvmsg() */ if (copied >= target) break; if (copied) { if (sk->sk_err || sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN) || !timeo || signal_pending(current)) break; } else { if (sk->sk_err) { copied = sock_error(sk); break; } if (sk->sk_shutdown & RCV_SHUTDOWN) break; if (sk->sk_state == TCP_CLOSE) { copied = -ENOTCONN; break; } if (!timeo) { copied = -EAGAIN; break; } if (signal_pending(current)) { copied = sock_intr_errno(timeo); break; } } pr_debug("block timeout %ld\n", timeo); mptcp_cleanup_rbuf(msk, copied); err = sk_wait_data(sk, &timeo, NULL); if (err < 0) { err = copied ? 
: err; goto out_err; } } mptcp_cleanup_rbuf(msk, copied); out_err: if (cmsg_flags && copied >= 0) { if (cmsg_flags & MPTCP_CMSG_TS) tcp_recv_timestamp(msg, sk, &tss); if (cmsg_flags & MPTCP_CMSG_INQ) { unsigned int inq = mptcp_inq_hint(sk); put_cmsg(msg, SOL_TCP, TCP_CM_INQ, sizeof(inq), &inq); } } pr_debug("msk=%p rx queue empty=%d copied=%d\n", msk, skb_queue_empty(&sk->sk_receive_queue), copied); release_sock(sk); return copied; } static void mptcp_retransmit_timer(struct timer_list *t) { struct sock *sk = timer_container_of(sk, t, mptcp_retransmit_timer); struct mptcp_sock *msk = mptcp_sk(sk); bh_lock_sock(sk); if (!sock_owned_by_user(sk)) { /* we need a process context to retransmit */ if (!test_and_set_bit(MPTCP_WORK_RTX, &msk->flags)) mptcp_schedule_work(sk); } else { /* delegate our work to tcp_release_cb() */ __set_bit(MPTCP_RETRANSMIT, &msk->cb_flags); } bh_unlock_sock(sk); sock_put(sk); } static void mptcp_tout_timer(struct timer_list *t) { struct inet_connection_sock *icsk = timer_container_of(icsk, t, mptcp_tout_timer); struct sock *sk = &icsk->icsk_inet.sk; mptcp_schedule_work(sk); sock_put(sk); } /* Find an idle subflow. Return NULL if there is unacked data at tcp * level. * * A backup subflow is returned only if that is the only kind available. */ struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk) { struct sock *backup = NULL, *pick = NULL; struct mptcp_subflow_context *subflow; int min_stale_count = INT_MAX; mptcp_for_each_subflow(msk, subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); if (!__mptcp_subflow_active(subflow)) continue; /* still data outstanding at TCP level? skip this */ if (!tcp_rtx_and_write_queues_empty(ssk)) { mptcp_pm_subflow_chk_stale(msk, ssk); min_stale_count = min_t(int, min_stale_count, subflow->stale_count); continue; } if (subflow->backup || subflow->request_bkup) { if (!backup) backup = ssk; continue; } if (!pick) pick = ssk; } if (pick) return pick; /* use backup only if there are no progresses anywhere */ return min_stale_count > 1 ? backup : NULL; } bool __mptcp_retransmit_pending_data(struct sock *sk) { struct mptcp_data_frag *cur, *rtx_head; struct mptcp_sock *msk = mptcp_sk(sk); if (__mptcp_check_fallback(msk)) return false; /* the closing socket has some data untransmitted and/or unacked: * some data in the mptcp rtx queue has not really xmitted yet. 
* keep it simple and re-inject the whole mptcp level rtx queue */ mptcp_data_lock(sk); __mptcp_clean_una_wakeup(sk); rtx_head = mptcp_rtx_head(sk); if (!rtx_head) { mptcp_data_unlock(sk); return false; } msk->recovery_snd_nxt = msk->snd_nxt; msk->recovery = true; mptcp_data_unlock(sk); msk->first_pending = rtx_head; msk->snd_burst = 0; /* be sure to clear the "sent status" on all re-injected fragments */ list_for_each_entry(cur, &msk->rtx_queue, list) { if (!cur->already_sent) break; cur->already_sent = 0; } return true; } /* flags for __mptcp_close_ssk() */ #define MPTCP_CF_PUSH BIT(1) /* be sure to send a reset only if the caller asked for it, also * completely clean the subflow status when the subflow reaches * TCP_CLOSE state */ static void __mptcp_subflow_disconnect(struct sock *ssk, struct mptcp_subflow_context *subflow, bool fastclosing) { if (((1 << ssk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) || fastclosing) { /* The MPTCP code never waits on the subflow sockets, TCP-level * disconnect should never fail */ WARN_ON_ONCE(tcp_disconnect(ssk, 0)); mptcp_subflow_ctx_reset(subflow); } else { tcp_shutdown(ssk, SEND_SHUTDOWN); } } /* subflow sockets can be either outgoing (connect) or incoming * (accept). * * Outgoing subflows use in-kernel sockets. * Incoming subflows do not have their own 'struct socket' allocated, * so we need to use tcp_close() after detaching them from the mptcp * parent socket. */ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, struct mptcp_subflow_context *subflow, unsigned int flags) { struct mptcp_sock *msk = mptcp_sk(sk); bool dispose_it, need_push = false; int fwd_remaining; /* Do not pass RX data to the msk, even if the subflow socket is not * going to be freed (i.e. even for the first subflow on graceful * subflow close). */ lock_sock_nested(ssk, SINGLE_DEPTH_NESTING); subflow->closing = 1; /* Borrow the fwd allocated page left-over; fwd memory for the subflow * could be negative at this point, but will reach zero soon - when * the data allocated using such a fragment is freed. */ if (subflow->lent_mem_frag) { fwd_remaining = PAGE_SIZE - subflow->lent_mem_frag; sk_forward_alloc_add(sk, fwd_remaining); sk_forward_alloc_add(ssk, -fwd_remaining); subflow->lent_mem_frag = 0; } /* If the first subflow moved to a close state before accept, e.g. due * to an incoming reset or listener shutdown, the subflow socket is * already deleted by inet_child_forget() and the mptcp socket can't * survive either.
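 * In that case the msk is flagged SOCK_DEAD and its close timeout is set
 * in the past, so a later mptcp_worker() run will dispose of it.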
*/ if (msk->in_accept_queue && msk->first == ssk && (sock_flag(sk, SOCK_DEAD) || sock_flag(ssk, SOCK_DEAD))) { /* ensure later check in mptcp_worker() will dispose the msk */ sock_set_flag(sk, SOCK_DEAD); mptcp_set_close_tout(sk, tcp_jiffies32 - (mptcp_close_timeout(sk) + 1)); mptcp_subflow_drop_ctx(ssk); goto out_release; } dispose_it = msk->free_first || ssk != msk->first; if (dispose_it) list_del(&subflow->node); if (subflow->send_fastclose && ssk->sk_state != TCP_CLOSE) tcp_set_state(ssk, TCP_CLOSE); need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk); if (!dispose_it) { __mptcp_subflow_disconnect(ssk, subflow, msk->fastclosing); release_sock(ssk); goto out; } subflow->disposable = 1; /* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops * the ssk has been already destroyed, we just need to release the * reference owned by msk; */ if (!inet_csk(ssk)->icsk_ulp_ops) { WARN_ON_ONCE(!sock_flag(ssk, SOCK_DEAD)); kfree_rcu(subflow, rcu); } else { /* otherwise tcp will dispose of the ssk and subflow ctx */ __tcp_close(ssk, 0); /* close acquired an extra ref */ __sock_put(ssk); } out_release: __mptcp_subflow_error_report(sk, ssk); release_sock(ssk); sock_put(ssk); if (ssk == msk->first) WRITE_ONCE(msk->first, NULL); out: __mptcp_sync_sndbuf(sk); if (need_push) __mptcp_push_pending(sk, 0); /* Catch every 'all subflows closed' scenario, including peers silently * closing them, e.g. due to timeout. * For established sockets, allow an additional timeout before closing, * as the protocol can still create more subflows. */ if (list_is_singular(&msk->conn_list) && msk->first && inet_sk_state_load(msk->first) == TCP_CLOSE) { if (sk->sk_state != TCP_ESTABLISHED || msk->in_accept_queue || sock_flag(sk, SOCK_DEAD)) { mptcp_set_state(sk, TCP_CLOSE); mptcp_close_wake_up(sk); } else { mptcp_start_tout_timer(sk); } } } void mptcp_close_ssk(struct sock *sk, struct sock *ssk, struct mptcp_subflow_context *subflow) { struct mptcp_sock *msk = mptcp_sk(sk); struct sk_buff *skb; /* The first subflow can already be closed and still in the list */ if (subflow->close_event_done) return; subflow->close_event_done = true; if (sk->sk_state == TCP_ESTABLISHED) mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL); /* Remove any reference from the backlog to this ssk; backlog skbs consume * space in the msk receive queue, no need to touch sk->sk_rmem_alloc */ list_for_each_entry(skb, &msk->backlog_list, list) { if (skb->sk != ssk) continue; atomic_sub(skb->truesize, &skb->sk->sk_rmem_alloc); skb->sk = NULL; } /* subflow aborted before reaching the fully_established status * attempt the creation of the next subflow */ mptcp_pm_subflow_check_next(mptcp_sk(sk), subflow); __mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_PUSH); } static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu) { return 0; } static void __mptcp_close_subflow(struct sock *sk) { struct mptcp_subflow_context *subflow, *tmp; struct mptcp_sock *msk = mptcp_sk(sk); might_sleep(); mptcp_for_each_subflow_safe(msk, subflow, tmp) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); int ssk_state = inet_sk_state_load(ssk); if (ssk_state != TCP_CLOSE && (ssk_state != TCP_CLOSE_WAIT || inet_sk_state_load(sk) != TCP_ESTABLISHED || __mptcp_check_fallback(msk))) continue; /* 'subflow_data_ready' will re-sched once rx queue is empty */ if (!skb_queue_empty_lockless(&ssk->sk_receive_queue)) continue; mptcp_close_ssk(sk, ssk, subflow); } } static bool mptcp_close_tout_expired(const struct sock *sk) { if 
(!inet_csk(sk)->icsk_mtup.probe_timestamp || sk->sk_state == TCP_CLOSE) return false; return time_after32(tcp_jiffies32, inet_csk(sk)->icsk_mtup.probe_timestamp + mptcp_close_timeout(sk)); } static void mptcp_check_fastclose(struct mptcp_sock *msk) { struct mptcp_subflow_context *subflow, *tmp; struct sock *sk = (struct sock *)msk; if (likely(!READ_ONCE(msk->rcv_fastclose))) return; mptcp_token_destroy(msk); mptcp_for_each_subflow_safe(msk, subflow, tmp) { struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); bool slow; slow = lock_sock_fast(tcp_sk); if (tcp_sk->sk_state != TCP_CLOSE) { mptcp_send_active_reset_reason(tcp_sk); tcp_set_state(tcp_sk, TCP_CLOSE); } unlock_sock_fast(tcp_sk, slow); } /* Mirror the tcp_reset() error propagation */ switch (sk->sk_state) { case TCP_SYN_SENT: WRITE_ONCE(sk->sk_err, ECONNREFUSED); break; case TCP_CLOSE_WAIT: WRITE_ONCE(sk->sk_err, EPIPE); break; case TCP_CLOSE: return; default: WRITE_ONCE(sk->sk_err, ECONNRESET); } mptcp_set_state(sk, TCP_CLOSE); WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); smp_mb__before_atomic(); /* SHUTDOWN must be visible first */ set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags); /* the calling mptcp_worker will properly destroy the socket */ if (sock_flag(sk, SOCK_DEAD)) return; sk->sk_state_change(sk); sk_error_report(sk); } static void __mptcp_retrans(struct sock *sk) { struct mptcp_sendmsg_info info = { .data_lock_held = true, }; struct mptcp_sock *msk = mptcp_sk(sk); struct mptcp_subflow_context *subflow; struct mptcp_data_frag *dfrag; struct sock *ssk; int ret, err; u16 len = 0; mptcp_clean_una_wakeup(sk); /* first check ssk: need to kick "stale" logic */ err = mptcp_sched_get_retrans(msk); dfrag = mptcp_rtx_head(sk); if (!dfrag) { if (mptcp_data_fin_enabled(msk)) { struct inet_connection_sock *icsk = inet_csk(sk); WRITE_ONCE(icsk->icsk_retransmits, icsk->icsk_retransmits + 1); mptcp_set_datafin_timeout(sk); mptcp_send_ack(msk); goto reset_timer; } if (!mptcp_send_head(sk)) goto clear_scheduled; goto reset_timer; } if (err) goto reset_timer; mptcp_for_each_subflow(msk, subflow) { if (READ_ONCE(subflow->scheduled)) { u16 copied = 0; mptcp_subflow_set_scheduled(subflow, false); ssk = mptcp_subflow_tcp_sock(subflow); lock_sock(ssk); /* limit retransmission to the bytes already sent on some subflows */ info.sent = 0; info.limit = READ_ONCE(msk->csum_enabled) ? dfrag->data_len : dfrag->already_sent; /* * make the whole retrans decision, xmit, disallow * fallback atomic, note that we can't retrans even * when an infinite fallback is in progress, i.e. new * subflows are disallowed. 
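 * Concretely, fallback_lock is held across the fallback check and the
 * mptcp_sendmsg_frag() loop below, and allow_infinite_fallback is cleared
 * under the same lock once something has actually been retransmitted.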
*/ spin_lock_bh(&msk->fallback_lock); if (__mptcp_check_fallback(msk) || !msk->allow_subflows) { spin_unlock_bh(&msk->fallback_lock); release_sock(ssk); goto clear_scheduled; } while (info.sent < info.limit) { ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info); if (ret <= 0) break; MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS); copied += ret; info.sent += ret; } if (copied) { len = max(copied, len); tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle, info.size_goal); msk->allow_infinite_fallback = false; } spin_unlock_bh(&msk->fallback_lock); release_sock(ssk); } } msk->bytes_retrans += len; dfrag->already_sent = max(dfrag->already_sent, len); reset_timer: mptcp_check_and_set_pending(sk); if (!mptcp_rtx_timer_pending(sk)) mptcp_reset_rtx_timer(sk); clear_scheduled: /* If no rtx data was available or in case of fallback, there * could be left-over scheduled subflows; clear them all * or later xmit could use bad ones */ mptcp_for_each_subflow(msk, subflow) if (READ_ONCE(subflow->scheduled)) mptcp_subflow_set_scheduled(subflow, false); } /* schedule the timeout timer for the relevant event: either close timeout * or mp_fail timeout. The close timeout takes precedence on the mp_fail one */ void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout) { struct sock *sk = (struct sock *)msk; unsigned long timeout, close_timeout; if (!fail_tout && !inet_csk(sk)->icsk_mtup.probe_timestamp) return; close_timeout = (unsigned long)inet_csk(sk)->icsk_mtup.probe_timestamp - tcp_jiffies32 + jiffies + mptcp_close_timeout(sk); /* the close timeout takes precedence on the fail one, and here at least one of * them is active */ timeout = inet_csk(sk)->icsk_mtup.probe_timestamp ? close_timeout : fail_tout; sk_reset_timer(sk, &inet_csk(sk)->mptcp_tout_timer, timeout); } static void mptcp_mp_fail_no_response(struct mptcp_sock *msk) { struct sock *ssk = msk->first; bool slow; if (!ssk) return; pr_debug("MP_FAIL doesn't respond, reset the subflow\n"); slow = lock_sock_fast(ssk); mptcp_subflow_reset(ssk); WRITE_ONCE(mptcp_subflow_ctx(ssk)->fail_tout, 0); unlock_sock_fast(ssk, slow); } static void mptcp_backlog_purge(struct sock *sk) { struct mptcp_sock *msk = mptcp_sk(sk); struct sk_buff *tmp, *skb; LIST_HEAD(backlog); mptcp_data_lock(sk); list_splice_init(&msk->backlog_list, &backlog); msk->backlog_len = 0; mptcp_data_unlock(sk); list_for_each_entry_safe(skb, tmp, &backlog, list) { mptcp_borrow_fwdmem(sk, skb); kfree_skb_reason(skb, SKB_DROP_REASON_SOCKET_CLOSE); } sk_mem_reclaim(sk); } static void mptcp_do_fastclose(struct sock *sk) { struct mptcp_subflow_context *subflow, *tmp; struct mptcp_sock *msk = mptcp_sk(sk); mptcp_set_state(sk, TCP_CLOSE); mptcp_backlog_purge(sk); msk->fastclosing = 1; /* Explicitly send the fastclose reset as need */ if (__mptcp_check_fallback(msk)) return; mptcp_for_each_subflow_safe(msk, subflow, tmp) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); lock_sock(ssk); /* Some subflow socket states don't allow/need a reset.*/ if ((1 << ssk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) goto unlock; subflow->send_fastclose = 1; /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0 * issue in __tcp_select_window(), see tcp_disconnect(). 
*/ inet_csk(ssk)->icsk_ack.rcv_mss = TCP_MIN_MSS; tcp_send_active_reset(ssk, ssk->sk_allocation, SK_RST_REASON_TCP_ABORT_ON_CLOSE); unlock: release_sock(ssk); } } static void mptcp_worker(struct work_struct *work) { struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work); struct sock *sk = (struct sock *)msk; unsigned long fail_tout; int state; lock_sock(sk); state = sk->sk_state; if (unlikely((1 << state) & (TCPF_CLOSE | TCPF_LISTEN))) goto unlock; mptcp_check_fastclose(msk); mptcp_pm_worker(msk); mptcp_check_send_data_fin(sk); mptcp_check_data_fin_ack(sk); mptcp_check_data_fin(sk); if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) __mptcp_close_subflow(sk); if (mptcp_close_tout_expired(sk)) { struct mptcp_subflow_context *subflow, *tmp; mptcp_do_fastclose(sk); mptcp_for_each_subflow_safe(msk, subflow, tmp) __mptcp_close_ssk(sk, subflow->tcp_sock, subflow, 0); mptcp_close_wake_up(sk); } if (sock_flag(sk, SOCK_DEAD) && sk->sk_state == TCP_CLOSE) { __mptcp_destroy_sock(sk); goto unlock; } if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags)) __mptcp_retrans(sk); fail_tout = msk->first ? READ_ONCE(mptcp_subflow_ctx(msk->first)->fail_tout) : 0; if (fail_tout && time_after(jiffies, fail_tout)) mptcp_mp_fail_no_response(msk); unlock: release_sock(sk); sock_put(sk); } static void __mptcp_init_sock(struct sock *sk) { struct mptcp_sock *msk = mptcp_sk(sk); INIT_LIST_HEAD(&msk->conn_list); INIT_LIST_HEAD(&msk->join_list); INIT_LIST_HEAD(&msk->rtx_queue); INIT_LIST_HEAD(&msk->backlog_list); INIT_WORK(&msk->work, mptcp_worker); msk->out_of_order_queue = RB_ROOT; msk->first_pending = NULL; msk->timer_ival = TCP_RTO_MIN; msk->scaling_ratio = TCP_DEFAULT_SCALING_RATIO; msk->backlog_len = 0; WRITE_ONCE(msk->first, NULL); inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss; WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk))); msk->allow_infinite_fallback = true; msk->allow_subflows = true; msk->recovery = false; msk->subflow_id = 1; msk->last_data_sent = tcp_jiffies32; msk->last_data_recv = tcp_jiffies32; msk->last_ack_recv = tcp_jiffies32; mptcp_pm_data_init(msk); spin_lock_init(&msk->fallback_lock); /* re-use the csk retrans timer for MPTCP-level retrans */ timer_setup(&sk->mptcp_retransmit_timer, mptcp_retransmit_timer, 0); timer_setup(&msk->sk.mptcp_tout_timer, mptcp_tout_timer, 0); } static void mptcp_ca_reset(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); tcp_assign_congestion_control(sk); strscpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name, sizeof(mptcp_sk(sk)->ca_name)); /* no need to keep a reference to the ops, the name will suffice */ tcp_cleanup_congestion_control(sk); icsk->icsk_ca_ops = NULL; } static int mptcp_init_sock(struct sock *sk) { struct net *net = sock_net(sk); int ret; __mptcp_init_sock(sk); if (!mptcp_is_enabled(net)) return -ENOPROTOOPT; if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net)) return -ENOMEM; rcu_read_lock(); ret = mptcp_init_sched(mptcp_sk(sk), mptcp_sched_find(mptcp_get_scheduler(net))); rcu_read_unlock(); if (ret) return ret; set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags); /* fetch the ca name; do it outside __mptcp_init_sock(), so that clone will * propagate the correct value */ mptcp_ca_reset(sk); sk_sockets_allocated_inc(sk); sk->sk_rcvbuf = READ_ONCE(net->ipv4.sysctl_tcp_rmem[1]); sk->sk_sndbuf = READ_ONCE(net->ipv4.sysctl_tcp_wmem[1]); return 0; } static void __mptcp_clear_xmit(struct sock *sk) { struct mptcp_sock *msk = mptcp_sk(sk); struct mptcp_data_frag *dtmp, *dfrag; 
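/* Drop the MPTCP-level transmit state: forget the send head and free every
 * dfrag still sitting on the rtx queue.
 */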
msk->first_pending = NULL; list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) dfrag_clear(sk, dfrag); } void mptcp_cancel_work(struct sock *sk) { struct mptcp_sock *msk = mptcp_sk(sk); if (cancel_work_sync(&msk->work)) __sock_put(sk); } void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how) { lock_sock(ssk); switch (ssk->sk_state) { case TCP_LISTEN: if (!(how & RCV_SHUTDOWN)) break; fallthrough; case TCP_SYN_SENT: WARN_ON_ONCE(tcp_disconnect(ssk, O_NONBLOCK)); break; default: if (__mptcp_check_fallback(mptcp_sk(sk))) { pr_debug("Fallback\n"); ssk->sk_shutdown |= how; tcp_shutdown(ssk, how); /* simulate the data_fin ack reception to let the state * machine move forward */ WRITE_ONCE(mptcp_sk(sk)->snd_una, mptcp_sk(sk)->snd_nxt); mptcp_schedule_work(sk); } else { pr_debug("Sending DATA_FIN on subflow %p\n", ssk); tcp_send_ack(ssk); if (!mptcp_rtx_timer_pending(sk)) mptcp_reset_rtx_timer(sk); } break; } release_sock(ssk); } void mptcp_set_state(struct sock *sk, int state) { int oldstate = sk->sk_state; switch (state) { case TCP_ESTABLISHED: if (oldstate != TCP_ESTABLISHED) MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB); break; case TCP_CLOSE_WAIT: /* Unlike TCP, MPTCP sk would not have the TCP_SYN_RECV state: * MPTCP "accepted" sockets will be created later on. So no * transition from TCP_SYN_RECV to TCP_CLOSE_WAIT. */ break; default: if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT) MPTCP_DEC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB); } inet_sk_state_store(sk, state); } static const unsigned char new_state[16] = { /* current state: new state: action: */ [0 /* (Invalid) */] = TCP_CLOSE, [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, [TCP_SYN_SENT] = TCP_CLOSE, [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, [TCP_FIN_WAIT1] = TCP_FIN_WAIT1, [TCP_FIN_WAIT2] = TCP_FIN_WAIT2, [TCP_TIME_WAIT] = TCP_CLOSE, /* should not happen ! */ [TCP_CLOSE] = TCP_CLOSE, [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN, [TCP_LAST_ACK] = TCP_LAST_ACK, [TCP_LISTEN] = TCP_CLOSE, [TCP_CLOSING] = TCP_CLOSING, [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! 
*/ }; static int mptcp_close_state(struct sock *sk) { int next = (int)new_state[sk->sk_state]; int ns = next & TCP_STATE_MASK; mptcp_set_state(sk, ns); return next & TCP_ACTION_FIN; } static void mptcp_check_send_data_fin(struct sock *sk) { struct mptcp_subflow_context *subflow; struct mptcp_sock *msk = mptcp_sk(sk); pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu\n", msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk), msk->snd_nxt, msk->write_seq); /* we still need to enqueue subflows or not really shutting down, * skip this */ if (!msk->snd_data_fin_enable || msk->snd_nxt + 1 != msk->write_seq || mptcp_send_head(sk)) return; WRITE_ONCE(msk->snd_nxt, msk->write_seq); mptcp_for_each_subflow(msk, subflow) { struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); mptcp_subflow_shutdown(sk, tcp_sk, SEND_SHUTDOWN); } } static void __mptcp_wr_shutdown(struct sock *sk) { struct mptcp_sock *msk = mptcp_sk(sk); pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d\n", msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state, !!mptcp_send_head(sk)); /* will be ignored by fallback sockets */ WRITE_ONCE(msk->write_seq, msk->write_seq + 1); WRITE_ONCE(msk->snd_data_fin_enable, 1); mptcp_check_send_data_fin(sk); } static void __mptcp_destroy_sock(struct sock *sk) { struct mptcp_sock *msk = mptcp_sk(sk); pr_debug("msk=%p\n", msk); might_sleep(); mptcp_stop_rtx_timer(sk); sk_stop_timer(sk, &inet_csk(sk)->mptcp_tout_timer); msk->pm.status = 0; mptcp_release_sched(msk); sk->sk_prot->destroy(sk); sk_stream_kill_queues(sk); xfrm_sk_free_policy(sk); sock_put(sk); } void __mptcp_unaccepted_force_close(struct sock *sk) { sock_set_flag(sk, SOCK_DEAD); mptcp_do_fastclose(sk); __mptcp_destroy_sock(sk); } static __poll_t mptcp_check_readable(struct sock *sk) { return mptcp_epollin_ready(sk) ? 
EPOLLIN | EPOLLRDNORM : 0; } static void mptcp_check_listen_stop(struct sock *sk) { struct sock *ssk; if (inet_sk_state_load(sk) != TCP_LISTEN) return; sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); ssk = mptcp_sk(sk)->first; if (WARN_ON_ONCE(!ssk || inet_sk_state_load(ssk) != TCP_LISTEN)) return; lock_sock_nested(ssk, SINGLE_DEPTH_NESTING); tcp_set_state(ssk, TCP_CLOSE); mptcp_subflow_queue_clean(sk, ssk); inet_csk_listen_stop(ssk); mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CLOSED); release_sock(ssk); } bool __mptcp_close(struct sock *sk, long timeout) { struct mptcp_subflow_context *subflow; struct mptcp_sock *msk = mptcp_sk(sk); bool do_cancel_work = false; int subflows_alive = 0; WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) { mptcp_check_listen_stop(sk); mptcp_set_state(sk, TCP_CLOSE); goto cleanup; } if (mptcp_data_avail(msk) || timeout < 0) { /* If the msk has read data, or the caller explicitly ask it, * do the MPTCP equivalent of TCP reset, aka MPTCP fastclose */ mptcp_do_fastclose(sk); timeout = 0; } else if (mptcp_close_state(sk)) { __mptcp_wr_shutdown(sk); } sk_stream_wait_close(sk, timeout); cleanup: /* orphan all the subflows */ mptcp_for_each_subflow(msk, subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); bool slow = lock_sock_fast_nested(ssk); subflows_alive += ssk->sk_state != TCP_CLOSE; /* since the close timeout takes precedence on the fail one, * cancel the latter */ if (ssk == msk->first) subflow->fail_tout = 0; /* detach from the parent socket, but allow data_ready to * push incoming data into the mptcp stack, to properly ack it */ ssk->sk_socket = NULL; ssk->sk_wq = NULL; unlock_sock_fast(ssk, slow); } sock_orphan(sk); /* all the subflows are closed, only timeout can change the msk * state, let's not keep resources busy for no reasons */ if (subflows_alive == 0) mptcp_set_state(sk, TCP_CLOSE); sock_hold(sk); pr_debug("msk=%p state=%d\n", sk, sk->sk_state); mptcp_pm_connection_closed(msk); if (sk->sk_state == TCP_CLOSE) { __mptcp_destroy_sock(sk); do_cancel_work = true; } else { mptcp_start_tout_timer(sk); } return do_cancel_work; } static void mptcp_close(struct sock *sk, long timeout) { bool do_cancel_work; lock_sock(sk); do_cancel_work = __mptcp_close(sk, timeout); release_sock(sk); if (do_cancel_work) mptcp_cancel_work(sk); sock_put(sk); } static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk) { #if IS_ENABLED(CONFIG_MPTCP_IPV6) const struct ipv6_pinfo *ssk6 = inet6_sk(ssk); struct ipv6_pinfo *msk6 = inet6_sk(msk); msk->sk_v6_daddr = ssk->sk_v6_daddr; msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr; if (msk6 && ssk6) { msk6->saddr = ssk6->saddr; msk6->flow_label = ssk6->flow_label; } #endif inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num; inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport; inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport; inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr; inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr; inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr; } static void mptcp_destroy_common(struct mptcp_sock *msk) { struct mptcp_subflow_context *subflow, *tmp; struct sock *sk = (struct sock *)msk; __mptcp_clear_xmit(sk); mptcp_backlog_purge(sk); /* join list will be eventually flushed (with rst) at sock lock release time */ mptcp_for_each_subflow_safe(msk, subflow, tmp) __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, 0); __skb_queue_purge(&sk->sk_receive_queue); skb_rbtree_purge(&msk->out_of_order_queue); /* 
any remaining rx fwd alloc is left to sk_mem_reclaim_final() in * inet_sock_destruct(), which will dispose of it */ mptcp_token_destroy(msk); mptcp_pm_destroy(msk); } static int mptcp_disconnect(struct sock *sk, int flags) { struct mptcp_sock *msk = mptcp_sk(sk); /* We are on the fastopen error path. We can't call straight into the * subflows cleanup code due to lock nesting (we are already under * msk->first's socket lock). */ if (msk->fastopening) return -EBUSY; mptcp_check_listen_stop(sk); mptcp_set_state(sk, TCP_CLOSE); mptcp_stop_rtx_timer(sk); mptcp_stop_tout_timer(sk); mptcp_pm_connection_closed(msk); /* msk->subflow is still intact, the following will not free the first * subflow */ mptcp_do_fastclose(sk); mptcp_destroy_common(msk); /* The first subflow is already in TCP_CLOSE status, the following * can't overlap with a fallback anymore */ spin_lock_bh(&msk->fallback_lock); msk->allow_subflows = true; msk->allow_infinite_fallback = true; WRITE_ONCE(msk->flags, 0); spin_unlock_bh(&msk->fallback_lock); msk->cb_flags = 0; msk->recovery = false; WRITE_ONCE(msk->can_ack, false); WRITE_ONCE(msk->fully_established, false); WRITE_ONCE(msk->rcv_data_fin, false); WRITE_ONCE(msk->snd_data_fin_enable, false); WRITE_ONCE(msk->rcv_fastclose, false); WRITE_ONCE(msk->use_64bit_ack, false); WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk))); mptcp_pm_data_reset(msk); mptcp_ca_reset(sk); msk->bytes_consumed = 0; msk->bytes_acked = 0; msk->bytes_received = 0; msk->bytes_sent = 0; msk->bytes_retrans = 0; msk->rcvspace_init = 0; msk->fastclosing = 0; /* for fallback's sake */ WRITE_ONCE(msk->ack_seq, 0); WRITE_ONCE(sk->sk_shutdown, 0); sk_error_report(sk); return 0; } #if IS_ENABLED(CONFIG_MPTCP_IPV6) static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk) { struct mptcp6_sock *msk6 = container_of(mptcp_sk(sk), struct mptcp6_sock, msk); return &msk6->np; } static void mptcp_copy_ip6_options(struct sock *newsk, const struct sock *sk) { const struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6_txoptions *opt; struct ipv6_pinfo *newnp; newnp = inet6_sk(newsk); rcu_read_lock(); opt = rcu_dereference(np->opt); if (opt) { opt = ipv6_dup_options(newsk, opt); if (!opt) net_warn_ratelimited("%s: Failed to copy ip6 options\n", __func__); } RCU_INIT_POINTER(newnp->opt, opt); rcu_read_unlock(); } #endif static void mptcp_copy_ip_options(struct sock *newsk, const struct sock *sk) { struct ip_options_rcu *inet_opt, *newopt = NULL; const struct inet_sock *inet = inet_sk(sk); struct inet_sock *newinet; newinet = inet_sk(newsk); rcu_read_lock(); inet_opt = rcu_dereference(inet->inet_opt); if (inet_opt) { newopt = sock_kmemdup(newsk, inet_opt, sizeof(*inet_opt) + inet_opt->opt.optlen, GFP_ATOMIC); if (!newopt) net_warn_ratelimited("%s: Failed to copy ip options\n", __func__); } RCU_INIT_POINTER(newinet->inet_opt, newopt); rcu_read_unlock(); } struct sock *mptcp_sk_clone_init(const struct sock *sk, const struct mptcp_options_received *mp_opt, struct sock *ssk, struct request_sock *req) { struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC); struct mptcp_subflow_context *subflow; struct mptcp_sock *msk; if (!nsk) return NULL; #if IS_ENABLED(CONFIG_MPTCP_IPV6) if (nsk->sk_family == AF_INET6) inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk); #endif __mptcp_init_sock(nsk); #if IS_ENABLED(CONFIG_MPTCP_IPV6) if (nsk->sk_family == AF_INET6) mptcp_copy_ip6_options(nsk, sk); else #endif mptcp_copy_ip_options(nsk, sk); msk = mptcp_sk(nsk);
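/* Inherit the MPTCP-level state negotiated during the handshake from the
 * request socket: keys, token and the initial sequence numbers derived
 * from the IDSN.
 */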
WRITE_ONCE(msk->local_key, subflow_req->local_key); WRITE_ONCE(msk->token, subflow_req->token); msk->in_accept_queue = 1; WRITE_ONCE(msk->fully_established, false); if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD) WRITE_ONCE(msk->csum_enabled, true); WRITE_ONCE(msk->write_seq, subflow_req->idsn + 1); WRITE_ONCE(msk->snd_nxt, msk->write_seq); WRITE_ONCE(msk->snd_una, msk->write_seq); WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd); msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq; mptcp_init_sched(msk, mptcp_sk(sk)->sched); /* passive msk is created after the first/MPC subflow */ msk->subflow_id = 2; sock_reset_flag(nsk, SOCK_RCU_FREE); security_inet_csk_clone(nsk, req); /* this can't race with mptcp_close(), as the msk is * not yet exposed to user-space */ mptcp_set_state(nsk, TCP_ESTABLISHED); /* The msk maintains a ref to each subflow in the connections list */ WRITE_ONCE(msk->first, ssk); subflow = mptcp_subflow_ctx(ssk); list_add(&subflow->node, &msk->conn_list); sock_hold(ssk); /* new mpc subflow takes ownership of the newly * created mptcp socket */ mptcp_token_accept(subflow_req, msk); /* set msk addresses early to ensure mptcp_pm_get_local_id() * uses the correct data */ mptcp_copy_inaddrs(nsk, ssk); __mptcp_propagate_sndbuf(nsk, ssk); mptcp_rcv_space_init(msk, ssk); if (mp_opt->suboptions & OPTION_MPTCP_MPC_ACK) __mptcp_subflow_fully_established(msk, subflow, mp_opt); bh_unlock_sock(nsk); /* note: the newly allocated socket refcount is 2 now */ return nsk; } void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk) { const struct tcp_sock *tp = tcp_sk(ssk); msk->rcvspace_init = 1; msk->rcvq_space.copied = 0; msk->rcvq_space.rtt_us = 0; msk->rcvq_space.time = tp->tcp_mstamp; /* initial rcv_space offering made to peer */ msk->rcvq_space.space = min_t(u32, tp->rcv_wnd, TCP_INIT_CWND * tp->advmss); if (msk->rcvq_space.space == 0) msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT; } static void mptcp_destroy(struct sock *sk) { struct mptcp_sock *msk = mptcp_sk(sk); /* allow the following to close even the initial subflow */ msk->free_first = 1; mptcp_destroy_common(msk); sk_sockets_allocated_dec(sk); } void __mptcp_data_acked(struct sock *sk) { if (!sock_owned_by_user(sk)) __mptcp_clean_una(sk); else __set_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->cb_flags); } void __mptcp_check_push(struct sock *sk, struct sock *ssk) { if (!sock_owned_by_user(sk)) __mptcp_subflow_push_pending(sk, ssk, false); else __set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags); } #define MPTCP_FLAGS_PROCESS_CTX_NEED (BIT(MPTCP_PUSH_PENDING) | \ BIT(MPTCP_RETRANSMIT) | \ BIT(MPTCP_FLUSH_JOIN_LIST)) /* processes deferred events and flushes wmem */ static void mptcp_release_cb(struct sock *sk) __must_hold(&sk->sk_lock.slock) { struct mptcp_sock *msk = mptcp_sk(sk); for (;;) { unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED); struct list_head join_list, skbs; bool spool_bl; u32 moved; spool_bl = mptcp_can_spool_backlog(sk, &skbs); if (!flags && !spool_bl) break; INIT_LIST_HEAD(&join_list); list_splice_init(&msk->join_list, &join_list); /* the following actions acquire the subflow socket lock * * 1) can't be invoked in atomic scope * 2) must avoid ABBA deadlock with msk socket spinlock: the RX * datapath acquires the msk socket spinlock while holding * the subflow socket lock */ msk->cb_flags &= ~flags; spin_unlock_bh(&sk->sk_lock.slock); if (flags & BIT(MPTCP_FLUSH_JOIN_LIST)) __mptcp_flush_join_list(sk, &join_list); if (flags & BIT(MPTCP_PUSH_PENDING))
__mptcp_push_pending(sk, 0); if (flags & BIT(MPTCP_RETRANSMIT)) __mptcp_retrans(sk); if (spool_bl && __mptcp_move_skbs(sk, &skbs, &moved)) { /* notify ack seq update */ mptcp_cleanup_rbuf(msk, 0); sk->sk_data_ready(sk); } cond_resched(); spin_lock_bh(&sk->sk_lock.slock); if (spool_bl) mptcp_backlog_spooled(sk, moved, &skbs); } if (__test_and_clear_bit(MPTCP_CLEAN_UNA, &msk->cb_flags)) __mptcp_clean_una_wakeup(sk); if (unlikely(msk->cb_flags)) { /* be sure to sync the msk state before taking actions * depending on sk_state (MPTCP_ERROR_REPORT) * On sk release avoid actions depending on the first subflow */ if (__test_and_clear_bit(MPTCP_SYNC_STATE, &msk->cb_flags) && msk->first) __mptcp_sync_state(sk, msk->pending_state); if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags)) __mptcp_error_report(sk); if (__test_and_clear_bit(MPTCP_SYNC_SNDBUF, &msk->cb_flags)) __mptcp_sync_sndbuf(sk); } } /* MP_JOIN client subflow must wait for 4th ack before sending any data: * TCP can't schedule delack timer before the subflow is fully established. * MPTCP uses the delack timer to do 3rd ack retransmissions */ static void schedule_3rdack_retransmission(struct sock *ssk) { struct inet_connection_sock *icsk = inet_csk(ssk); struct tcp_sock *tp = tcp_sk(ssk); unsigned long timeout; if (READ_ONCE(mptcp_subflow_ctx(ssk)->fully_established)) return; /* reschedule with a timeout above RTT, as we must look only for drop */ if (tp->srtt_us) timeout = usecs_to_jiffies(tp->srtt_us >> (3 - 1)); else timeout = TCP_TIMEOUT_INIT; timeout += jiffies; WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER); smp_store_release(&icsk->icsk_ack.pending, icsk->icsk_ack.pending | ICSK_ACK_SCHED | ICSK_ACK_TIMER); sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout); } void mptcp_subflow_process_delegated(struct sock *ssk, long status) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); struct sock *sk = subflow->conn; if (status & BIT(MPTCP_DELEGATE_SEND)) { mptcp_data_lock(sk); if (!sock_owned_by_user(sk)) __mptcp_subflow_push_pending(sk, ssk, true); else __set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags); mptcp_data_unlock(sk); } if (status & BIT(MPTCP_DELEGATE_SNDBUF)) { mptcp_data_lock(sk); if (!sock_owned_by_user(sk)) __mptcp_sync_sndbuf(sk); else __set_bit(MPTCP_SYNC_SNDBUF, &mptcp_sk(sk)->cb_flags); mptcp_data_unlock(sk); } if (status & BIT(MPTCP_DELEGATE_ACK)) schedule_3rdack_retransmission(ssk); } static int mptcp_hash(struct sock *sk) { /* should never be called, * we hash the TCP subflows not the MPTCP socket */ WARN_ON_ONCE(1); return 0; } static void mptcp_unhash(struct sock *sk) { /* called from sk_common_release(), but nothing to do here */ } static int mptcp_get_port(struct sock *sk, unsigned short snum) { struct mptcp_sock *msk = mptcp_sk(sk); pr_debug("msk=%p, ssk=%p\n", msk, msk->first); if (WARN_ON_ONCE(!msk->first)) return -EINVAL; return inet_csk_get_port(msk->first, snum); } void mptcp_finish_connect(struct sock *ssk) { struct mptcp_subflow_context *subflow; struct mptcp_sock *msk; struct sock *sk; subflow = mptcp_subflow_ctx(ssk); sk = subflow->conn; msk = mptcp_sk(sk); pr_debug("msk=%p, token=%u\n", sk, subflow->token); subflow->map_seq = subflow->iasn; subflow->map_subflow_seq = 1; /* the socket is not connected yet, no msk/subflow ops can access/race * accessing the field below */ WRITE_ONCE(msk->local_key, subflow->local_key); mptcp_pm_new_connection(msk, ssk, 0); } void mptcp_sock_graft(struct sock *sk, struct socket *parent) { write_lock_bh(&sk->sk_callback_lock); 
rcu_assign_pointer(sk->sk_wq, &parent->wq); sk_set_socket(sk, parent); write_unlock_bh(&sk->sk_callback_lock); } /* Can be called without holding the msk socket lock; use the callback lock * to avoid {READ_,WRITE_}ONCE annotations on sk_socket. */ static void mptcp_sock_check_graft(struct sock *sk, struct sock *ssk) { struct socket *sock; write_lock_bh(&sk->sk_callback_lock); sock = sk->sk_socket; write_unlock_bh(&sk->sk_callback_lock); if (sock) { mptcp_sock_graft(ssk, sock); __mptcp_inherit_cgrp_data(sk, ssk); __mptcp_inherit_memcg(sk, ssk, GFP_ATOMIC); } } bool mptcp_finish_join(struct sock *ssk) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); struct mptcp_sock *msk = mptcp_sk(subflow->conn); struct sock *parent = (void *)msk; bool ret = true; pr_debug("msk=%p, subflow=%p\n", msk, subflow); /* mptcp socket already closing? */ if (!mptcp_is_fully_established(parent)) { subflow->reset_reason = MPTCP_RST_EMPTCP; return false; } /* Active subflow, already present inside the conn_list; is grafted * either by __mptcp_subflow_connect() or accept. */ if (!list_empty(&subflow->node)) { spin_lock_bh(&msk->fallback_lock); if (!msk->allow_subflows) { spin_unlock_bh(&msk->fallback_lock); return false; } mptcp_subflow_joined(msk, ssk); spin_unlock_bh(&msk->fallback_lock); mptcp_propagate_sndbuf(parent, ssk); return true; } if (!mptcp_pm_allow_new_subflow(msk)) { MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_JOINREJECTED); goto err_prohibited; } /* If we can't acquire msk socket lock here, let the release callback * handle it */ mptcp_data_lock(parent); if (!sock_owned_by_user(parent)) { ret = __mptcp_finish_join(msk, ssk); if (ret) { sock_hold(ssk); list_add_tail(&subflow->node, &msk->conn_list); mptcp_sock_check_graft(parent, ssk); } } else { sock_hold(ssk); list_add_tail(&subflow->node, &msk->join_list); __set_bit(MPTCP_FLUSH_JOIN_LIST, &msk->cb_flags); /* In case of later failures, __mptcp_flush_join_list() will * properly orphan the ssk via mptcp_close_ssk(). */ mptcp_sock_check_graft(parent, ssk); } mptcp_data_unlock(parent); if (!ret) { err_prohibited: subflow->reset_reason = MPTCP_RST_EPROHIBIT; return false; } return true; } static void mptcp_shutdown(struct sock *sk, int how) { pr_debug("sk=%p, how=%d\n", sk, how); if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk)) __mptcp_wr_shutdown(sk); } static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v) { const struct sock *sk = (void *)msk; u64 delta; if (sk->sk_state == TCP_LISTEN) return -EINVAL; if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) return 0; delta = msk->write_seq - v; if (__mptcp_check_fallback(msk) && msk->first) { struct tcp_sock *tp = tcp_sk(msk->first); /* the first subflow is disconnected after close - see * __mptcp_close_ssk(). tcp_disconnect() moves the write_seq * so ignore that status, too. 
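 * For reference: SIOCOUTQ passes v = snd_una, so delta counts every byte
 * written but not yet acked at the MPTCP level, while SIOCOUTQNSD passes
 * v = snd_nxt and counts only the bytes not yet handed to any subflow; on
 * fallback the first subflow's own unacked bytes are added on top.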
*/ if (!((1 << msk->first->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))) delta += READ_ONCE(tp->write_seq) - tp->snd_una; } if (delta > INT_MAX) delta = INT_MAX; return (int)delta; } static int mptcp_ioctl(struct sock *sk, int cmd, int *karg) { struct mptcp_sock *msk = mptcp_sk(sk); bool slow; switch (cmd) { case SIOCINQ: if (sk->sk_state == TCP_LISTEN) return -EINVAL; lock_sock(sk); if (mptcp_move_skbs(sk)) mptcp_cleanup_rbuf(msk, 0); *karg = mptcp_inq_hint(sk); release_sock(sk); break; case SIOCOUTQ: slow = lock_sock_fast(sk); *karg = mptcp_ioctl_outq(msk, READ_ONCE(msk->snd_una)); unlock_sock_fast(sk, slow); break; case SIOCOUTQNSD: slow = lock_sock_fast(sk); *karg = mptcp_ioctl_outq(msk, msk->snd_nxt); unlock_sock_fast(sk, slow); break; default: return -ENOIOCTLCMD; } return 0; } static int mptcp_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len) { struct mptcp_subflow_context *subflow; struct mptcp_sock *msk = mptcp_sk(sk); int err = -EINVAL; struct sock *ssk; ssk = __mptcp_nmpc_sk(msk); if (IS_ERR(ssk)) return PTR_ERR(ssk); mptcp_set_state(sk, TCP_SYN_SENT); subflow = mptcp_subflow_ctx(ssk); #ifdef CONFIG_TCP_MD5SIG /* no MPTCP if MD5SIG is enabled on this socket or we may run out of * TCP option space. */ if (rcu_access_pointer(tcp_sk(ssk)->md5sig_info)) mptcp_early_fallback(msk, subflow, MPTCP_MIB_MD5SIGFALLBACK); #endif if (subflow->request_mptcp) { if (mptcp_active_should_disable(sk)) mptcp_early_fallback(msk, subflow, MPTCP_MIB_MPCAPABLEACTIVEDISABLED); else if (mptcp_token_new_connect(ssk) < 0) mptcp_early_fallback(msk, subflow, MPTCP_MIB_TOKENFALLBACKINIT); } WRITE_ONCE(msk->write_seq, subflow->idsn); WRITE_ONCE(msk->snd_nxt, subflow->idsn); WRITE_ONCE(msk->snd_una, subflow->idsn); if (likely(!__mptcp_check_fallback(msk))) MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVE); /* if reaching here via the fastopen/sendmsg path, the caller already * acquired the subflow socket lock, too. 
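 * That is why the lock_sock(ssk) / release_sock(ssk) pair below is skipped
 * while msk->fastopening is set.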
*/ if (!msk->fastopening) lock_sock(ssk); /* the following mirrors closely a very small chunk of code from * __inet_stream_connect() */ if (ssk->sk_state != TCP_CLOSE) goto out; if (BPF_CGROUP_PRE_CONNECT_ENABLED(ssk)) { err = ssk->sk_prot->pre_connect(ssk, uaddr, addr_len); if (err) goto out; } err = ssk->sk_prot->connect(ssk, uaddr, addr_len); if (err < 0) goto out; inet_assign_bit(DEFER_CONNECT, sk, inet_test_bit(DEFER_CONNECT, ssk)); out: if (!msk->fastopening) release_sock(ssk); /* on successful connect, the msk state will be moved to established by * subflow_finish_connect() */ if (unlikely(err)) { /* avoid leaving a dangling token in an unconnected socket */ mptcp_token_destroy(msk); mptcp_set_state(sk, TCP_CLOSE); return err; } mptcp_copy_inaddrs(sk, ssk); return 0; } static struct proto mptcp_prot = { .name = "MPTCP", .owner = THIS_MODULE, .init = mptcp_init_sock, .connect = mptcp_connect, .disconnect = mptcp_disconnect, .close = mptcp_close, .setsockopt = mptcp_setsockopt, .getsockopt = mptcp_getsockopt, .shutdown = mptcp_shutdown, .destroy = mptcp_destroy, .sendmsg = mptcp_sendmsg, .ioctl = mptcp_ioctl, .recvmsg = mptcp_recvmsg, .release_cb = mptcp_release_cb, .hash = mptcp_hash, .unhash = mptcp_unhash, .get_port = mptcp_get_port, .stream_memory_free = mptcp_stream_memory_free, .sockets_allocated = &mptcp_sockets_allocated, .memory_allocated = &net_aligned_data.tcp_memory_allocated, .per_cpu_fw_alloc = &tcp_memory_per_cpu_fw_alloc, .memory_pressure = &tcp_memory_pressure, .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem), .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem), .sysctl_mem = sysctl_tcp_mem, .obj_size = sizeof(struct mptcp_sock), .slab_flags = SLAB_TYPESAFE_BY_RCU, .no_autobind = true, }; static int mptcp_bind(struct socket *sock, struct sockaddr_unsized *uaddr, int addr_len) { struct mptcp_sock *msk = mptcp_sk(sock->sk); struct sock *ssk, *sk = sock->sk; int err = -EINVAL; lock_sock(sk); ssk = __mptcp_nmpc_sk(msk); if (IS_ERR(ssk)) { err = PTR_ERR(ssk); goto unlock; } if (sk->sk_family == AF_INET) err = inet_bind_sk(ssk, uaddr, addr_len); #if IS_ENABLED(CONFIG_MPTCP_IPV6) else if (sk->sk_family == AF_INET6) err = inet6_bind_sk(ssk, uaddr, addr_len); #endif if (!err) mptcp_copy_inaddrs(sk, ssk); unlock: release_sock(sk); return err; } static int mptcp_listen(struct socket *sock, int backlog) { struct mptcp_sock *msk = mptcp_sk(sock->sk); struct sock *sk = sock->sk; struct sock *ssk; int err; pr_debug("msk=%p\n", msk); lock_sock(sk); err = -EINVAL; if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM) goto unlock; ssk = __mptcp_nmpc_sk(msk); if (IS_ERR(ssk)) { err = PTR_ERR(ssk); goto unlock; } mptcp_set_state(sk, TCP_LISTEN); sock_set_flag(sk, SOCK_RCU_FREE); lock_sock(ssk); err = __inet_listen_sk(ssk, backlog); release_sock(ssk); mptcp_set_state(sk, inet_sk_state_load(ssk)); if (!err) { sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); mptcp_copy_inaddrs(sk, ssk); mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CREATED); } unlock: release_sock(sk); return err; } static void mptcp_graft_subflows(struct sock *sk) { struct mptcp_subflow_context *subflow; struct mptcp_sock *msk = mptcp_sk(sk); if (mem_cgroup_sockets_enabled) { LIST_HEAD(join_list); /* Subflows joining after __inet_accept() will get the * mem CG properly initialized at mptcp_finish_join() time, * but subflows pending in join_list need explicit * initialization before flushing `backlog_unaccounted` * or MPTCP can later unexpectedly observe unaccounted memory. 
*/ mptcp_data_lock(sk); list_splice_init(&msk->join_list, &join_list); mptcp_data_unlock(sk); __mptcp_flush_join_list(sk, &join_list); } mptcp_for_each_subflow(msk, subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); lock_sock(ssk); /* Set ssk->sk_socket of accept()ed flows to mptcp socket. * This is needed so NOSPACE flag can be set from tcp stack. */ if (!ssk->sk_socket) mptcp_sock_graft(ssk, sk->sk_socket); if (!mem_cgroup_sk_enabled(sk)) goto unlock; __mptcp_inherit_cgrp_data(sk, ssk); __mptcp_inherit_memcg(sk, ssk, GFP_KERNEL); unlock: release_sock(ssk); } if (mem_cgroup_sk_enabled(sk)) { gfp_t gfp = GFP_KERNEL | __GFP_NOFAIL; int amt; /* Account the backlog memory; prior accept() is aware of * fwd and rmem only. */ mptcp_data_lock(sk); amt = sk_mem_pages(sk->sk_forward_alloc + msk->backlog_unaccounted + atomic_read(&sk->sk_rmem_alloc)) - sk_mem_pages(sk->sk_forward_alloc + atomic_read(&sk->sk_rmem_alloc)); msk->backlog_unaccounted = 0; mptcp_data_unlock(sk); if (amt) mem_cgroup_sk_charge(sk, amt, gfp); } } static int mptcp_stream_accept(struct socket *sock, struct socket *newsock, struct proto_accept_arg *arg) { struct mptcp_sock *msk = mptcp_sk(sock->sk); struct sock *ssk, *newsk; pr_debug("msk=%p\n", msk); /* Buggy applications can call accept on socket states other then LISTEN * but no need to allocate the first subflow just to error out. */ ssk = READ_ONCE(msk->first); if (!ssk) return -EINVAL; pr_debug("ssk=%p, listener=%p\n", ssk, mptcp_subflow_ctx(ssk)); newsk = inet_csk_accept(ssk, arg); if (!newsk) return arg->err; pr_debug("newsk=%p, subflow is mptcp=%d\n", newsk, sk_is_mptcp(newsk)); if (sk_is_mptcp(newsk)) { struct mptcp_subflow_context *subflow; struct sock *new_mptcp_sock; subflow = mptcp_subflow_ctx(newsk); new_mptcp_sock = subflow->conn; /* is_mptcp should be false if subflow->conn is missing, see * subflow_syn_recv_sock() */ if (WARN_ON_ONCE(!new_mptcp_sock)) { tcp_sk(newsk)->is_mptcp = 0; goto tcpfallback; } newsk = new_mptcp_sock; MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPCAPABLEPASSIVEACK); newsk->sk_kern_sock = arg->kern; lock_sock(newsk); __inet_accept(sock, newsock, newsk); set_bit(SOCK_CUSTOM_SOCKOPT, &newsock->flags); msk = mptcp_sk(newsk); msk->in_accept_queue = 0; mptcp_graft_subflows(newsk); mptcp_rps_record_subflows(msk); /* Do late cleanup for the first subflow as necessary. Also * deal with bad peers not doing a complete shutdown. */ if (unlikely(inet_sk_state_load(msk->first) == TCP_CLOSE)) { if (unlikely(list_is_singular(&msk->conn_list))) mptcp_set_state(newsk, TCP_CLOSE); mptcp_close_ssk(newsk, msk->first, mptcp_subflow_ctx(msk->first)); } } else { tcpfallback: newsk->sk_kern_sock = arg->kern; lock_sock(newsk); __inet_accept(sock, newsock, newsk); /* we are being invoked after accepting a non-mp-capable * flow: sk is a tcp_sk, not an mptcp one. * * Hand the socket over to tcp so all further socket ops * bypass mptcp. 
*/ WRITE_ONCE(newsock->sk->sk_socket->ops, mptcp_fallback_tcp_ops(newsock->sk)); } release_sock(newsk); return 0; } static __poll_t mptcp_check_writeable(struct mptcp_sock *msk) { struct sock *sk = (struct sock *)msk; if (__mptcp_stream_is_writeable(sk, 1)) return EPOLLOUT | EPOLLWRNORM; set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); smp_mb__after_atomic(); /* NOSPACE is changed by mptcp_write_space() */ if (__mptcp_stream_is_writeable(sk, 1)) return EPOLLOUT | EPOLLWRNORM; return 0; } static __poll_t mptcp_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait) { struct sock *sk = sock->sk; struct mptcp_sock *msk; __poll_t mask = 0; u8 shutdown; int state; msk = mptcp_sk(sk); sock_poll_wait(file, sock, wait); state = inet_sk_state_load(sk); pr_debug("msk=%p state=%d flags=%lx\n", msk, state, msk->flags); if (state == TCP_LISTEN) { struct sock *ssk = READ_ONCE(msk->first); if (WARN_ON_ONCE(!ssk)) return 0; return inet_csk_listen_poll(ssk); } shutdown = READ_ONCE(sk->sk_shutdown); if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE) mask |= EPOLLHUP; if (shutdown & RCV_SHUTDOWN) mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) { mask |= mptcp_check_readable(sk); if (shutdown & SEND_SHUTDOWN) mask |= EPOLLOUT | EPOLLWRNORM; else mask |= mptcp_check_writeable(msk); } else if (state == TCP_SYN_SENT && inet_test_bit(DEFER_CONNECT, sk)) { /* cf tcp_poll() note about TFO */ mask |= EPOLLOUT | EPOLLWRNORM; } /* This barrier is coupled with smp_wmb() in __mptcp_error_report() */ smp_rmb(); if (READ_ONCE(sk->sk_err)) mask |= EPOLLERR; return mask; } static const struct proto_ops mptcp_stream_ops = { .family = PF_INET, .owner = THIS_MODULE, .release = inet_release, .bind = mptcp_bind, .connect = inet_stream_connect, .socketpair = sock_no_socketpair, .accept = mptcp_stream_accept, .getname = inet_getname, .poll = mptcp_poll, .ioctl = inet_ioctl, .gettstamp = sock_gettstamp, .listen = mptcp_listen, .shutdown = inet_shutdown, .setsockopt = sock_common_setsockopt, .getsockopt = sock_common_getsockopt, .sendmsg = inet_sendmsg, .recvmsg = inet_recvmsg, .mmap = sock_no_mmap, .set_rcvlowat = mptcp_set_rcvlowat, }; static struct inet_protosw mptcp_protosw = { .type = SOCK_STREAM, .protocol = IPPROTO_MPTCP, .prot = &mptcp_prot, .ops = &mptcp_stream_ops, .flags = INET_PROTOSW_ICSK, }; static int mptcp_napi_poll(struct napi_struct *napi, int budget) { struct mptcp_delegated_action *delegated; struct mptcp_subflow_context *subflow; int work_done = 0; delegated = container_of(napi, struct mptcp_delegated_action, napi); while ((subflow = mptcp_subflow_delegated_next(delegated)) != NULL) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); bh_lock_sock_nested(ssk); if (!sock_owned_by_user(ssk)) { mptcp_subflow_process_delegated(ssk, xchg(&subflow->delegated_status, 0)); } else { /* tcp_release_cb_override already processed * the action or will do at next release_sock(). * In both case must dequeue the subflow here - on the same * CPU that scheduled it. 
*/ smp_wmb(); clear_bit(MPTCP_DELEGATE_SCHEDULED, &subflow->delegated_status); } bh_unlock_sock(ssk); sock_put(ssk); if (++work_done == budget) return budget; } /* always provide a 0 'work_done' argument, so that napi_complete_done * will not try accessing the NULL napi->dev ptr */ napi_complete_done(napi, 0); return work_done; } void __init mptcp_proto_init(void) { struct mptcp_delegated_action *delegated; int cpu; mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo; if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL)) panic("Failed to allocate MPTCP pcpu counter\n"); mptcp_napi_dev = alloc_netdev_dummy(0); if (!mptcp_napi_dev) panic("Failed to allocate MPTCP dummy netdev\n"); for_each_possible_cpu(cpu) { delegated = per_cpu_ptr(&mptcp_delegated_actions, cpu); INIT_LIST_HEAD(&delegated->head); netif_napi_add_tx(mptcp_napi_dev, &delegated->napi, mptcp_napi_poll); napi_enable(&delegated->napi); } mptcp_subflow_init(); mptcp_pm_init(); mptcp_sched_init(); mptcp_token_init(); if (proto_register(&mptcp_prot, 1) != 0) panic("Failed to register MPTCP proto.\n"); inet_register_protosw(&mptcp_protosw); BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb)); } #if IS_ENABLED(CONFIG_MPTCP_IPV6) static const struct proto_ops mptcp_v6_stream_ops = { .family = PF_INET6, .owner = THIS_MODULE, .release = inet6_release, .bind = mptcp_bind, .connect = inet_stream_connect, .socketpair = sock_no_socketpair, .accept = mptcp_stream_accept, .getname = inet6_getname, .poll = mptcp_poll, .ioctl = inet6_ioctl, .gettstamp = sock_gettstamp, .listen = mptcp_listen, .shutdown = inet_shutdown, .setsockopt = sock_common_setsockopt, .getsockopt = sock_common_getsockopt, .sendmsg = inet6_sendmsg, .recvmsg = inet6_recvmsg, .mmap = sock_no_mmap, #ifdef CONFIG_COMPAT .compat_ioctl = inet6_compat_ioctl, #endif .set_rcvlowat = mptcp_set_rcvlowat, }; static struct proto mptcp_v6_prot; static struct inet_protosw mptcp_v6_protosw = { .type = SOCK_STREAM, .protocol = IPPROTO_MPTCP, .prot = &mptcp_v6_prot, .ops = &mptcp_v6_stream_ops, .flags = INET_PROTOSW_ICSK, }; int __init mptcp_proto_v6_init(void) { int err; mptcp_v6_prot = mptcp_prot; strscpy(mptcp_v6_prot.name, "MPTCPv6", sizeof(mptcp_v6_prot.name)); mptcp_v6_prot.slab = NULL; mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock); mptcp_v6_prot.ipv6_pinfo_offset = offsetof(struct mptcp6_sock, np); err = proto_register(&mptcp_v6_prot, 1); if (err) return err; err = inet6_register_protosw(&mptcp_v6_protosw); if (err) proto_unregister(&mptcp_v6_prot); return err; } #endif
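The mptcp_ioctl()/mptcp_ioctl_outq() handlers above back the classic TCP queue-introspection ioctls on MPTCP sockets, and mptcp_connect()/mptcp_prot wire the socket up in the first place. Below is a minimal userspace sketch of how those paths get exercised; it is illustrative only and not part of the sources listed here, the peer address and port are made-up values, and the IPPROTO_MPTCP fallback define is only needed when the libc uapi headers predate MPTCP.

/* Illustrative userspace sketch only -- not kernel code. Opens an MPTCP
 * socket (serviced by mptcp_prot/mptcp_stream_ops above), connects,
 * writes a little data, then reads the unacked byte count through the
 * SIOCOUTQ ioctl handled by mptcp_ioctl()/mptcp_ioctl_outq().
 */
#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/sockios.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262	/* fallback for older uapi headers */
#endif

int main(void)
{
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port   = htons(8080),	/* hypothetical peer port */
	};
	int outq = 0;
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);

	if (fd < 0 || inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr) != 1)
		return 1;
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		return 1;	/* mptcp_connect() path in the kernel */

	write(fd, "ping", 4);
	if (ioctl(fd, SIOCOUTQ, &outq) == 0)	/* mptcp_ioctl_outq() */
		printf("unacked bytes in send queue: %d\n", outq);

	close(fd);
	return 0;
}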
// SPDX-License-Identifier: GPL-2.0 /* * OF NUMA Parsing support. * * Copyright (C) 2015 - 2016 Cavium Inc. */ #define pr_fmt(fmt) "OF: NUMA: " fmt #include <linux/of.h> #include <linux/of_address.h> #include <linux/nodemask.h> #include <linux/numa_memblks.h> #include <asm/numa.h> /* * Even though we connect cpus to numa domains later in SMP * init, we need to know the node ids now for all cpus. */ static void __init of_numa_parse_cpu_nodes(void) { u32 nid; int r; struct device_node *np; for_each_of_cpu_node(np) { r = of_property_read_u32(np, "numa-node-id", &nid); if (r) continue; pr_debug("CPU on %u\n", nid); if (nid >= MAX_NUMNODES) pr_warn("Node id %u exceeds maximum value\n", nid); else node_set(nid, numa_nodes_parsed); } } static int __init of_numa_parse_memory_nodes(void) { struct device_node *np = NULL; struct resource rsrc; u32 nid; int i, r = -EINVAL; for_each_node_by_type(np, "memory") { r = of_property_read_u32(np, "numa-node-id", &nid); if (r == -EINVAL) /* * property doesn't exist if -EINVAL, continue * looking for more memory nodes with * "numa-node-id" property */ continue; if (nid >= MAX_NUMNODES) { pr_warn("Node id %u exceeds maximum value\n", nid); r = -EINVAL; } for (i = 0; !r && !of_address_to_resource(np, i, &rsrc); i++) { r = numa_add_memblk(nid, rsrc.start, rsrc.end + 1); if (!r) node_set(nid, numa_nodes_parsed); } if (!i || r) { of_node_put(np); pr_err("bad property in memory node\n"); return r ?
: -EINVAL; } } return r; } static int __init of_numa_parse_distance_map_v1(struct device_node *map) { const __be32 *matrix; int entry_count; int i; pr_info("parsing numa-distance-map-v1\n"); matrix = of_get_property(map, "distance-matrix", NULL); if (!matrix) { pr_err("No distance-matrix property in distance-map\n"); return -EINVAL; } entry_count = of_property_count_u32_elems(map, "distance-matrix"); if (entry_count <= 0) { pr_err("Invalid distance-matrix\n"); return -EINVAL; } for (i = 0; i + 2 < entry_count; i += 3) { u32 nodea, nodeb, distance; nodea = of_read_number(matrix, 1); matrix++; nodeb = of_read_number(matrix, 1); matrix++; distance = of_read_number(matrix, 1); matrix++; if ((nodea == nodeb && distance != LOCAL_DISTANCE) || (nodea != nodeb && distance <= LOCAL_DISTANCE)) { pr_err("Invalid distance[node%d -> node%d] = %d\n", nodea, nodeb, distance); return -EINVAL; } node_set(nodea, numa_nodes_parsed); numa_set_distance(nodea, nodeb, distance); /* Set default distance of node B->A same as A->B */ if (nodeb > nodea) numa_set_distance(nodeb, nodea, distance); } return 0; } static int __init of_numa_parse_distance_map(void) { int ret = 0; struct device_node *np; np = of_find_compatible_node(NULL, NULL, "numa-distance-map-v1"); if (np) ret = of_numa_parse_distance_map_v1(np); of_node_put(np); return ret; } int of_node_to_nid(struct device_node *device) { struct device_node *np; u32 nid; int r = -ENODATA; np = of_node_get(device); while (np) { r = of_property_read_u32(np, "numa-node-id", &nid); /* * -EINVAL indicates the property was not found, and * we walk up the tree trying to find a parent with a * "numa-node-id". Any other type of error indicates * a bad device tree and we give up. */ if (r != -EINVAL) break; np = of_get_next_parent(np); } if (np && r) pr_warn("Invalid \"numa-node-id\" property in node %pOFn\n", np); of_node_put(np); /* * If numa=off passed on command line, or with a defective * device tree, the nid may not be in the set of possible * nodes. Check for this case and return NUMA_NO_NODE. */ if (!r && nid < MAX_NUMNODES && node_possible(nid)) return nid; return NUMA_NO_NODE; } int __init of_numa_init(void) { int r; of_numa_parse_cpu_nodes(); r = of_numa_parse_memory_nodes(); if (r) return r; return of_numa_parse_distance_map(); }
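of_numa_parse_distance_map_v1() above consumes the "distance-matrix" property three cells at a time (node A, node B, distance) and rejects self-distances other than LOCAL_DISTANCE as well as remote distances not greater than it. The following standalone sketch replays that triplet walk; the device-tree fragment in the comment and the sample matrix values are illustrative assumptions, not taken from the sources above.

/* Standalone illustration only -- not kernel code. It mirrors the triplet
 * layout that of_numa_parse_distance_map_v1() expects from a node such as
 * (example fragment, values made up):
 *
 *	distance-map {
 *		compatible = "numa-distance-map-v1";
 *		distance-matrix = <0 0 10>, <0 1 20>, <1 0 20>, <1 1 10>;
 *	};
 *
 * Each triplet is <node-A node-B distance>; A == B must use LOCAL_DISTANCE
 * and A != B must be strictly larger than LOCAL_DISTANCE.
 */
#include <stdio.h>

#define LOCAL_DISTANCE 10

static const unsigned int matrix[] = { 0, 0, 10, 0, 1, 20, 1, 0, 20, 1, 1, 10 };

int main(void)
{
	unsigned int i, count = sizeof(matrix) / sizeof(matrix[0]);

	for (i = 0; i + 2 < count; i += 3) {
		unsigned int a = matrix[i], b = matrix[i + 1], d = matrix[i + 2];

		/* Same validation rule as the kernel parser. */
		if ((a == b && d != LOCAL_DISTANCE) ||
		    (a != b && d <= LOCAL_DISTANCE)) {
			printf("invalid distance[node%u -> node%u] = %u\n", a, b, d);
			return 1;
		}
		printf("node%u -> node%u = %u\n", a, b, d);
	}
	return 0;
}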
// SPDX-License-Identifier: GPL-2.0-or-later /* * HID driver for Sony / PS2 / PS3 / PS4 BD
devices. * * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc * Copyright (c) 2008 Jiri Slaby * Copyright (c) 2012 David Dillow <dave@thedillows.org> * Copyright (c) 2006-2013 Jiri Kosina * Copyright (c) 2013 Colin Leitner <colin.leitner@gmail.com> * Copyright (c) 2014-2016 Frank Praznik <frank.praznik@gmail.com> * Copyright (c) 2018 Todd Kelner * Copyright (c) 2020-2021 Pascal Giard <pascal.giard@etsmtl.ca> * Copyright (c) 2020 Sanjay Govind <sanjay.govind9@gmail.com> * Copyright (c) 2021 Daniel Nguyen <daniel.nguyen.1@ens.etsmtl.ca> */ /* */ /* * NOTE: in order for the Sony PS3 BD Remote Control to be found by * a Bluetooth host, the key combination Start+Enter has to be kept pressed * for about 7 seconds with the Bluetooth Host Controller in discovering mode. * * There will be no PIN request from the device. */ #include <linux/device.h> #include <linux/hid.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/leds.h> #include <linux/power_supply.h> #include <linux/spinlock.h> #include <linux/list.h> #include <linux/idr.h> #include <linux/input/mt.h> #include <linux/crc32.h> #include <linux/usb.h> #include <linux/timer.h> #include <linux/unaligned.h> #include "hid-ids.h" #define VAIO_RDESC_CONSTANT BIT(0) #define SIXAXIS_CONTROLLER_USB BIT(1) #define SIXAXIS_CONTROLLER_BT BIT(2) #define BUZZ_CONTROLLER BIT(3) #define PS3REMOTE BIT(4) #define MOTION_CONTROLLER_USB BIT(5) #define MOTION_CONTROLLER_BT BIT(6) #define NAVIGATION_CONTROLLER_USB BIT(7) #define NAVIGATION_CONTROLLER_BT BIT(8) #define SINO_LITE_CONTROLLER BIT(9) #define FUTUREMAX_DANCE_MAT BIT(10) #define NSG_MR5U_REMOTE_BT BIT(11) #define NSG_MR7U_REMOTE_BT BIT(12) #define SHANWAN_GAMEPAD BIT(13) #define GH_GUITAR_CONTROLLER BIT(14) #define GHL_GUITAR_PS3WIIU BIT(15) #define GHL_GUITAR_PS4 BIT(16) #define SIXAXIS_CONTROLLER (SIXAXIS_CONTROLLER_USB | SIXAXIS_CONTROLLER_BT) #define MOTION_CONTROLLER (MOTION_CONTROLLER_USB | MOTION_CONTROLLER_BT) #define NAVIGATION_CONTROLLER (NAVIGATION_CONTROLLER_USB |\ NAVIGATION_CONTROLLER_BT) #define SONY_LED_SUPPORT (SIXAXIS_CONTROLLER | BUZZ_CONTROLLER |\ MOTION_CONTROLLER | NAVIGATION_CONTROLLER) #define SONY_BATTERY_SUPPORT (SIXAXIS_CONTROLLER | MOTION_CONTROLLER_BT | NAVIGATION_CONTROLLER) #define SONY_FF_SUPPORT (SIXAXIS_CONTROLLER | MOTION_CONTROLLER) #define SONY_BT_DEVICE (SIXAXIS_CONTROLLER_BT | MOTION_CONTROLLER_BT | NAVIGATION_CONTROLLER_BT) #define NSG_MRXU_REMOTE (NSG_MR5U_REMOTE_BT | NSG_MR7U_REMOTE_BT) #define MAX_LEDS 4 #define NSG_MRXU_MAX_X 1667 #define NSG_MRXU_MAX_Y 1868 /* The PS3/Wii U dongles require a poke every 10 seconds, but the PS4 * requires one every 8 seconds. Using 8 seconds for all for simplicity. */ #define GHL_GUITAR_POKE_INTERVAL 8 /* In seconds */ #define GUITAR_TILT_USAGE 44 /* Magic data taken from GHLtarUtility: * https://github.com/ghlre/GHLtarUtility/blob/master/PS3Guitar.cs * Note: The Wii U and PS3 dongles happen to share the same! */ static const char ghl_ps3wiiu_magic_data[] = { 0x02, 0x08, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00 }; /* Magic data for the PS4 dongles sniffed with a USB protocol * analyzer. 
*/ static const char ghl_ps4_magic_data[] = { 0x30, 0x02, 0x08, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00 }; /* PS/3 Motion controller */ static const u8 motion_rdesc[] = { 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x04, /* Usage (Joystick), */ 0xA1, 0x01, /* Collection (Application), */ 0xA1, 0x02, /* Collection (Logical), */ 0x85, 0x01, /* Report ID (1), */ 0x75, 0x01, /* Report Size (1), */ 0x95, 0x15, /* Report Count (21), */ 0x15, 0x00, /* Logical Minimum (0), */ 0x25, 0x01, /* Logical Maximum (1), */ 0x35, 0x00, /* Physical Minimum (0), */ 0x45, 0x01, /* Physical Maximum (1), */ 0x05, 0x09, /* Usage Page (Button), */ 0x19, 0x01, /* Usage Minimum (01h), */ 0x29, 0x15, /* Usage Maximum (15h), */ 0x81, 0x02, /* Input (Variable), * Buttons */ 0x95, 0x0B, /* Report Count (11), */ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x81, 0x03, /* Input (Constant, Variable), * Padding */ 0x15, 0x00, /* Logical Minimum (0), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0xA1, 0x00, /* Collection (Physical), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x01, /* Report Count (1), */ 0x35, 0x00, /* Physical Minimum (0), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x09, 0x30, /* Usage (X), */ 0x81, 0x02, /* Input (Variable), * Trigger */ 0xC0, /* End Collection, */ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x07, /* Report Count (7), * skip 7 bytes */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x75, 0x10, /* Report Size (16), */ 0x46, 0xFF, 0xFF, /* Physical Maximum (65535), */ 0x27, 0xFF, 0xFF, 0x00, 0x00, /* Logical Maximum (65535), */ 0x95, 0x03, /* Report Count (3), * 3x Accels */ 0x09, 0x33, /* Usage (rX), */ 0x09, 0x34, /* Usage (rY), */ 0x09, 0x35, /* Usage (rZ), */ 0x81, 0x02, /* Input (Variable), */ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x95, 0x03, /* Report Count (3), * Skip Accels 2nd frame */ 0x81, 0x02, /* Input (Variable), */ 0x05, 0x01, /* Usage Page (Desktop), */ 0x09, 0x01, /* Usage (Pointer), */ 0x95, 0x03, /* Report Count (3), * 3x Gyros */ 0x81, 0x02, /* Input (Variable), */ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */ 0x95, 0x03, /* Report Count (3), * Skip Gyros 2nd frame */ 0x81, 0x02, /* Input (Variable), */ 0x75, 0x0C, /* Report Size (12), */ 0x46, 0xFF, 0x0F, /* Physical Maximum (4095), */ 0x26, 0xFF, 0x0F, /* Logical Maximum (4095), */ 0x95, 0x04, /* Report Count (4), * Skip Temp and Magnetometers */ 0x81, 0x02, /* Input (Variable), */ 0x75, 0x08, /* Report Size (8), */ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ 0x95, 0x06, /* Report Count (6), * Skip Timestamp and Extension Bytes */ 0x81, 0x02, /* Input (Variable), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x30, /* Report Count (48), */ 0x09, 0x01, /* Usage (Pointer), */ 0x91, 0x02, /* Output (Variable), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x30, /* Report Count (48), */ 0x09, 0x01, /* Usage (Pointer), */ 0xB1, 0x02, /* Feature (Variable), */ 0xC0, /* End Collection, */ 0xA1, 0x02, /* Collection (Logical), */ 0x85, 0x02, /* Report ID (2), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x30, /* Report Count (48), */ 0x09, 0x01, /* Usage (Pointer), */ 0xB1, 0x02, /* Feature (Variable), */ 0xC0, /* End Collection, */ 0xA1, 0x02, /* Collection (Logical), */ 0x85, 0xEE, /* Report ID (238), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x30, /* Report Count (48), */ 0x09, 0x01, /* Usage (Pointer), */ 0xB1, 0x02, /* Feature (Variable), */ 0xC0, /* End 
Collection, */ 0xA1, 0x02, /* Collection (Logical), */ 0x85, 0xEF, /* Report ID (239), */ 0x75, 0x08, /* Report Size (8), */ 0x95, 0x30, /* Report Count (48), */ 0x09, 0x01, /* Usage (Pointer), */ 0xB1, 0x02, /* Feature (Variable), */ 0xC0, /* End Collection, */ 0xC0 /* End Collection */ }; static const u8 ps3remote_rdesc[] = { 0x05, 0x01, /* GUsagePage Generic Desktop */ 0x09, 0x05, /* LUsage 0x05 [Game Pad] */ 0xA1, 0x01, /* MCollection Application (mouse, keyboard) */ /* Use collection 1 for joypad buttons */ 0xA1, 0x02, /* MCollection Logical (interrelated data) */ /* * Ignore the 1st byte, maybe it is used for a controller * number but it's not needed for correct operation */ 0x75, 0x08, /* GReportSize 0x08 [8] */ 0x95, 0x01, /* GReportCount 0x01 [1] */ 0x81, 0x01, /* MInput 0x01 (Const[0] Arr[1] Abs[2]) */ /* * Bytes from 2nd to 4th are a bitmap for joypad buttons, for these * buttons multiple keypresses are allowed */ 0x05, 0x09, /* GUsagePage Button */ 0x19, 0x01, /* LUsageMinimum 0x01 [Button 1 (primary/trigger)] */ 0x29, 0x18, /* LUsageMaximum 0x18 [Button 24] */ 0x14, /* GLogicalMinimum [0] */ 0x25, 0x01, /* GLogicalMaximum 0x01 [1] */ 0x75, 0x01, /* GReportSize 0x01 [1] */ 0x95, 0x18, /* GReportCount 0x18 [24] */ 0x81, 0x02, /* MInput 0x02 (Data[0] Var[1] Abs[2]) */ 0xC0, /* MEndCollection */ /* Use collection 2 for remote control buttons */ 0xA1, 0x02, /* MCollection Logical (interrelated data) */ /* 5th byte is used for remote control buttons */ 0x05, 0x09, /* GUsagePage Button */ 0x18, /* LUsageMinimum [No button pressed] */ 0x29, 0xFE, /* LUsageMaximum 0xFE [Button 254] */ 0x14, /* GLogicalMinimum [0] */ 0x26, 0xFE, 0x00, /* GLogicalMaximum 0x00FE [254] */ 0x75, 0x08, /* GReportSize 0x08 [8] */ 0x95, 0x01, /* GReportCount 0x01 [1] */ 0x80, /* MInput */ /* * Ignore bytes from 6th to 11th, 6th to 10th are always constant at * 0xff and 11th is for press indication */ 0x75, 0x08, /* GReportSize 0x08 [8] */ 0x95, 0x06, /* GReportCount 0x06 [6] */ 0x81, 0x01, /* MInput 0x01 (Const[0] Arr[1] Abs[2]) */ /* 12th byte is for battery strength */ 0x05, 0x06, /* GUsagePage Generic Device Controls */ 0x09, 0x20, /* LUsage 0x20 [Battery Strength] */ 0x14, /* GLogicalMinimum [0] */ 0x25, 0x05, /* GLogicalMaximum 0x05 [5] */ 0x75, 0x08, /* GReportSize 0x08 [8] */ 0x95, 0x01, /* GReportCount 0x01 [1] */ 0x81, 0x02, /* MInput 0x02 (Data[0] Var[1] Abs[2]) */ 0xC0, /* MEndCollection */ 0xC0 /* MEndCollection [Game Pad] */ }; static const unsigned int ps3remote_keymap_joypad_buttons[] = { [0x01] = KEY_SELECT, [0x02] = BTN_THUMBL, /* L3 */ [0x03] = BTN_THUMBR, /* R3 */ [0x04] = BTN_START, [0x05] = KEY_UP, [0x06] = KEY_RIGHT, [0x07] = KEY_DOWN, [0x08] = KEY_LEFT, [0x09] = BTN_TL2, /* L2 */ [0x0a] = BTN_TR2, /* R2 */ [0x0b] = BTN_TL, /* L1 */ [0x0c] = BTN_TR, /* R1 */ [0x0d] = KEY_OPTION, /* options/triangle */ [0x0e] = KEY_BACK, /* back/circle */ [0x0f] = BTN_0, /* cross */ [0x10] = KEY_SCREEN, /* view/square */ [0x11] = KEY_HOMEPAGE, /* PS button */ [0x14] = KEY_ENTER, }; static const unsigned int ps3remote_keymap_remote_buttons[] = { [0x00] = KEY_1, [0x01] = KEY_2, [0x02] = KEY_3, [0x03] = KEY_4, [0x04] = KEY_5, [0x05] = KEY_6, [0x06] = KEY_7, [0x07] = KEY_8, [0x08] = KEY_9, [0x09] = KEY_0, [0x0e] = KEY_ESC, /* return */ [0x0f] = KEY_CLEAR, [0x16] = KEY_EJECTCD, [0x1a] = KEY_MENU, /* top menu */ [0x28] = KEY_TIME, [0x30] = KEY_PREVIOUS, [0x31] = KEY_NEXT, [0x32] = KEY_PLAY, [0x33] = KEY_REWIND, /* scan back */ [0x34] = KEY_FORWARD, /* scan forward */ [0x38] = KEY_STOP, [0x39] = KEY_PAUSE, [0x40] 
= KEY_CONTEXT_MENU, /* pop up/menu */ [0x60] = KEY_FRAMEBACK, /* slow/step back */ [0x61] = KEY_FRAMEFORWARD, /* slow/step forward */ [0x63] = KEY_SUBTITLE, [0x64] = KEY_AUDIO, [0x65] = KEY_ANGLE, [0x70] = KEY_INFO, /* display */ [0x80] = KEY_BLUE, [0x81] = KEY_RED, [0x82] = KEY_GREEN, [0x83] = KEY_YELLOW, }; static const unsigned int buzz_keymap[] = { /* * The controller has 4 remote buzzers, each with one LED and 5 * buttons. * * We use the mapping chosen by the controller, which is: * * Key Offset * ------------------- * Buzz 1 * Blue 5 * Orange 4 * Green 3 * Yellow 2 * * So, for example, the orange button on the third buzzer is mapped to * BTN_TRIGGER_HAPPY14 */ [1] = BTN_TRIGGER_HAPPY1, [2] = BTN_TRIGGER_HAPPY2, [3] = BTN_TRIGGER_HAPPY3, [4] = BTN_TRIGGER_HAPPY4, [5] = BTN_TRIGGER_HAPPY5, [6] = BTN_TRIGGER_HAPPY6, [7] = BTN_TRIGGER_HAPPY7, [8] = BTN_TRIGGER_HAPPY8, [9] = BTN_TRIGGER_HAPPY9, [10] = BTN_TRIGGER_HAPPY10, [11] = BTN_TRIGGER_HAPPY11, [12] = BTN_TRIGGER_HAPPY12, [13] = BTN_TRIGGER_HAPPY13, [14] = BTN_TRIGGER_HAPPY14, [15] = BTN_TRIGGER_HAPPY15, [16] = BTN_TRIGGER_HAPPY16, [17] = BTN_TRIGGER_HAPPY17, [18] = BTN_TRIGGER_HAPPY18, [19] = BTN_TRIGGER_HAPPY19, [20] = BTN_TRIGGER_HAPPY20, }; /* The Navigation controller is a partial DS3 and uses the same HID report * and hence the same keymap indices, however not all axes/buttons * are physically present. We use the same axis and button mapping as * the DS3, which uses the Linux gamepad spec. */ static const unsigned int navigation_absmap[] = { [0x30] = ABS_X, [0x31] = ABS_Y, [0x33] = ABS_Z, /* L2 */ }; /* Buttons not physically available on the device, but still available * in the reports are explicitly set to 0 for documentation purposes. */ static const unsigned int navigation_keymap[] = { [0x01] = 0, /* Select */ [0x02] = BTN_THUMBL, /* L3 */ [0x03] = 0, /* R3 */ [0x04] = 0, /* Start */ [0x05] = BTN_DPAD_UP, /* Up */ [0x06] = BTN_DPAD_RIGHT, /* Right */ [0x07] = BTN_DPAD_DOWN, /* Down */ [0x08] = BTN_DPAD_LEFT, /* Left */ [0x09] = BTN_TL2, /* L2 */ [0x0a] = 0, /* R2 */ [0x0b] = BTN_TL, /* L1 */ [0x0c] = 0, /* R1 */ [0x0d] = BTN_NORTH, /* Triangle */ [0x0e] = BTN_EAST, /* Circle */ [0x0f] = BTN_SOUTH, /* Cross */ [0x10] = BTN_WEST, /* Square */ [0x11] = BTN_MODE, /* PS */ }; static const unsigned int sixaxis_absmap[] = { [0x30] = ABS_X, [0x31] = ABS_Y, [0x32] = ABS_RX, /* right stick X */ [0x35] = ABS_RY, /* right stick Y */ }; static const unsigned int sixaxis_keymap[] = { [0x01] = BTN_SELECT, /* Select */ [0x02] = BTN_THUMBL, /* L3 */ [0x03] = BTN_THUMBR, /* R3 */ [0x04] = BTN_START, /* Start */ [0x05] = BTN_DPAD_UP, /* Up */ [0x06] = BTN_DPAD_RIGHT, /* Right */ [0x07] = BTN_DPAD_DOWN, /* Down */ [0x08] = BTN_DPAD_LEFT, /* Left */ [0x09] = BTN_TL2, /* L2 */ [0x0a] = BTN_TR2, /* R2 */ [0x0b] = BTN_TL, /* L1 */ [0x0c] = BTN_TR, /* R1 */ [0x0d] = BTN_NORTH, /* Triangle */ [0x0e] = BTN_EAST, /* Circle */ [0x0f] = BTN_SOUTH, /* Cross */ [0x10] = BTN_WEST, /* Square */ [0x11] = BTN_MODE, /* PS */ }; static enum power_supply_property sony_battery_props[] = { POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_SCOPE, POWER_SUPPLY_PROP_STATUS, }; struct sixaxis_led { u8 time_enabled; /* the total time the led is active (0xff means forever) */ u8 duty_length; /* how long a cycle is in deciseconds (0 means "really fast") */ u8 enabled; u8 duty_off; /* % of duty_length the led is off (0xff means 100%) */ u8 duty_on; /* % of duty_length the led is on (0xff mean 100%) */ } __packed; struct sixaxis_rumble { u8 padding; 
u8 right_duration; /* Right motor duration (0xff means forever) */ u8 right_motor_on; /* Right (small) motor on/off, only supports values of 0 or 1 (off/on) */ u8 left_duration; /* Left motor duration (0xff means forever) */ u8 left_motor_force; /* left (large) motor, supports force values from 0 to 255 */ } __packed; struct sixaxis_output_report { u8 report_id; struct sixaxis_rumble rumble; u8 padding[4]; u8 leds_bitmap; /* bitmap of enabled LEDs: LED_1 = 0x02, LED_2 = 0x04, ... */ struct sixaxis_led led[4]; /* LEDx at (4 - x) */ struct sixaxis_led _reserved; /* LED5, not actually soldered */ } __packed; union sixaxis_output_report_01 { struct sixaxis_output_report data; u8 buf[36]; }; struct motion_output_report_02 { u8 type, zero; u8 r, g, b; u8 zero2; u8 rumble; }; #define SIXAXIS_REPORT_0xF2_SIZE 17 #define SIXAXIS_REPORT_0xF5_SIZE 8 #define MOTION_REPORT_0x02_SIZE 49 #define SENSOR_SUFFIX " Motion Sensors" #define TOUCHPAD_SUFFIX " Touchpad" #define SIXAXIS_INPUT_REPORT_ACC_X_OFFSET 41 #define SIXAXIS_ACC_RES_PER_G 113 static DEFINE_SPINLOCK(sony_dev_list_lock); static LIST_HEAD(sony_device_list); static DEFINE_IDA(sony_device_id_allocator); enum sony_worker { SONY_WORKER_STATE }; struct sony_sc { spinlock_t lock; struct list_head list_node; struct hid_device *hdev; struct input_dev *touchpad; struct input_dev *sensor_dev; struct led_classdev *leds[MAX_LEDS]; unsigned long quirks; struct work_struct state_worker; void (*send_output_report)(struct sony_sc *); struct power_supply *battery; struct power_supply_desc battery_desc; int device_id; u8 *output_report_dmabuf; #ifdef CONFIG_SONY_FF u8 left; u8 right; #endif u8 mac_address[6]; u8 state_worker_initialized; u8 defer_initialization; u8 battery_capacity; int battery_status; u8 led_state[MAX_LEDS]; u8 led_delay_on[MAX_LEDS]; u8 led_delay_off[MAX_LEDS]; u8 led_count; /* GH Live */ struct urb *ghl_urb; struct timer_list ghl_poke_timer; }; static void sony_set_leds(struct sony_sc *sc); static inline void sony_schedule_work(struct sony_sc *sc, enum sony_worker which) { unsigned long flags; switch (which) { case SONY_WORKER_STATE: spin_lock_irqsave(&sc->lock, flags); if (!sc->defer_initialization && sc->state_worker_initialized) schedule_work(&sc->state_worker); spin_unlock_irqrestore(&sc->lock, flags); break; } } static void ghl_magic_poke_cb(struct urb *urb) { struct sony_sc *sc = urb->context; if (urb->status < 0) hid_err(sc->hdev, "URB transfer failed : %d", urb->status); mod_timer(&sc->ghl_poke_timer, jiffies + GHL_GUITAR_POKE_INTERVAL*HZ); } static void ghl_magic_poke(struct timer_list *t) { int ret; struct sony_sc *sc = timer_container_of(sc, t, ghl_poke_timer); ret = usb_submit_urb(sc->ghl_urb, GFP_ATOMIC); if (ret < 0) hid_err(sc->hdev, "usb_submit_urb failed: %d", ret); } static int ghl_init_urb(struct sony_sc *sc, struct usb_device *usbdev, const char ghl_magic_data[], u16 poke_size) { struct usb_ctrlrequest *cr; u8 *databuf; unsigned int pipe; u16 ghl_magic_value = (((HID_OUTPUT_REPORT + 1) << 8) | ghl_magic_data[0]); pipe = usb_sndctrlpipe(usbdev, 0); cr = devm_kzalloc(&sc->hdev->dev, sizeof(*cr), GFP_ATOMIC); if (cr == NULL) return -ENOMEM; databuf = devm_kzalloc(&sc->hdev->dev, poke_size, GFP_ATOMIC); if (databuf == NULL) return -ENOMEM; cr->bRequestType = USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT; cr->bRequest = USB_REQ_SET_CONFIGURATION; cr->wValue = cpu_to_le16(ghl_magic_value); cr->wIndex = 0; cr->wLength = cpu_to_le16(poke_size); memcpy(databuf, ghl_magic_data, poke_size); usb_fill_control_urb( sc->ghl_urb, 
usbdev, pipe, (unsigned char *) cr, databuf, poke_size, ghl_magic_poke_cb, sc); return 0; } static int guitar_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { if ((usage->hid & HID_USAGE_PAGE) == HID_UP_MSVENDOR) { unsigned int abs = usage->hid & HID_USAGE; if (abs == GUITAR_TILT_USAGE) { hid_map_usage_clear(hi, usage, bit, max, EV_ABS, ABS_RY); return 1; } } return 0; } static const u8 *motion_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *rsize) { *rsize = sizeof(motion_rdesc); return motion_rdesc; } static const u8 *ps3remote_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *rsize) { *rsize = sizeof(ps3remote_rdesc); return ps3remote_rdesc; } static int ps3remote_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { unsigned int key = usage->hid & HID_USAGE; if ((usage->hid & HID_USAGE_PAGE) != HID_UP_BUTTON) return -1; switch (usage->collection_index) { case 1: if (key >= ARRAY_SIZE(ps3remote_keymap_joypad_buttons)) return -1; key = ps3remote_keymap_joypad_buttons[key]; if (!key) return -1; break; case 2: if (key >= ARRAY_SIZE(ps3remote_keymap_remote_buttons)) return -1; key = ps3remote_keymap_remote_buttons[key]; if (!key) return -1; break; default: return -1; } hid_map_usage_clear(hi, usage, bit, max, EV_KEY, key); return 1; } static int navigation_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { if ((usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON) { unsigned int key = usage->hid & HID_USAGE; if (key >= ARRAY_SIZE(sixaxis_keymap)) return -1; key = navigation_keymap[key]; if (!key) return -1; hid_map_usage_clear(hi, usage, bit, max, EV_KEY, key); return 1; } else if (usage->hid == HID_GD_POINTER) { /* See comment in sixaxis_mapping, basically the L2 (and R2) * triggers are reported through GD Pointer. * In addition we ignore any analog button 'axes' and only * support digital buttons. */ switch (usage->usage_index) { case 8: /* L2 */ usage->hid = HID_GD_Z; break; default: return -1; } hid_map_usage_clear(hi, usage, bit, max, EV_ABS, usage->hid & 0xf); return 1; } else if ((usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK) { unsigned int abs = usage->hid & HID_USAGE; if (abs >= ARRAY_SIZE(navigation_absmap)) return -1; abs = navigation_absmap[abs]; hid_map_usage_clear(hi, usage, bit, max, EV_ABS, abs); return 1; } return -1; } static int sixaxis_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { if ((usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON) { unsigned int key = usage->hid & HID_USAGE; if (key >= ARRAY_SIZE(sixaxis_keymap)) return -1; key = sixaxis_keymap[key]; hid_map_usage_clear(hi, usage, bit, max, EV_KEY, key); return 1; } else if (usage->hid == HID_GD_POINTER) { /* The DS3 provides analog values for most buttons and even * for HAT axes through GD Pointer. L2 and R2 are reported * among these as well instead of as GD Z / RZ. Remap L2 * and R2 and ignore other analog 'button axes' as there is * no good way for reporting them. 
*/ switch (usage->usage_index) { case 8: /* L2 */ usage->hid = HID_GD_Z; break; case 9: /* R2 */ usage->hid = HID_GD_RZ; break; default: return -1; } hid_map_usage_clear(hi, usage, bit, max, EV_ABS, usage->hid & 0xf); return 1; } else if ((usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK) { unsigned int abs = usage->hid & HID_USAGE; if (abs >= ARRAY_SIZE(sixaxis_absmap)) return -1; abs = sixaxis_absmap[abs]; hid_map_usage_clear(hi, usage, bit, max, EV_ABS, abs); return 1; } return -1; } static const u8 *sony_report_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *rsize) { struct sony_sc *sc = hid_get_drvdata(hdev); if (sc->quirks & (SINO_LITE_CONTROLLER | FUTUREMAX_DANCE_MAT)) return rdesc; /* * Some Sony RF receivers wrongly declare the mouse pointer as a * a constant non-data variable. */ if ((sc->quirks & VAIO_RDESC_CONSTANT) && *rsize >= 56 && /* usage page: generic desktop controls */ /* rdesc[0] == 0x05 && rdesc[1] == 0x01 && */ /* usage: mouse */ rdesc[2] == 0x09 && rdesc[3] == 0x02 && /* input (usage page for x,y axes): constant, variable, relative */ rdesc[54] == 0x81 && rdesc[55] == 0x07) { hid_info(hdev, "Fixing up Sony RF Receiver report descriptor\n"); /* input: data, variable, relative */ rdesc[55] = 0x06; } if (sc->quirks & MOTION_CONTROLLER) return motion_fixup(hdev, rdesc, rsize); if (sc->quirks & PS3REMOTE) return ps3remote_fixup(hdev, rdesc, rsize); /* * Some knock-off USB dongles incorrectly report their button count * as 13 instead of 16 causing three non-functional buttons. */ if ((sc->quirks & SIXAXIS_CONTROLLER_USB) && *rsize >= 45 && /* Report Count (13) */ rdesc[23] == 0x95 && rdesc[24] == 0x0D && /* Usage Maximum (13) */ rdesc[37] == 0x29 && rdesc[38] == 0x0D && /* Report Count (3) */ rdesc[43] == 0x95 && rdesc[44] == 0x03) { hid_info(hdev, "Fixing up USB dongle report descriptor\n"); rdesc[24] = 0x10; rdesc[38] = 0x10; rdesc[44] = 0x00; } return rdesc; } static void sixaxis_parse_report(struct sony_sc *sc, u8 *rd, int size) { static const u8 sixaxis_battery_capacity[] = { 0, 1, 25, 50, 75, 100 }; unsigned long flags; int offset; u8 battery_capacity; int battery_status; /* * The sixaxis is charging if the battery value is 0xee * and it is fully charged if the value is 0xef. * It does not report the actual level while charging so it * is set to 100% while charging is in progress. */ offset = (sc->quirks & MOTION_CONTROLLER) ? 12 : 30; if (rd[offset] >= 0xee) { battery_capacity = 100; battery_status = (rd[offset] & 0x01) ? POWER_SUPPLY_STATUS_FULL : POWER_SUPPLY_STATUS_CHARGING; } else { u8 index = rd[offset] <= 5 ? rd[offset] : 5; battery_capacity = sixaxis_battery_capacity[index]; battery_status = POWER_SUPPLY_STATUS_DISCHARGING; } spin_lock_irqsave(&sc->lock, flags); sc->battery_capacity = battery_capacity; sc->battery_status = battery_status; spin_unlock_irqrestore(&sc->lock, flags); if (sc->quirks & SIXAXIS_CONTROLLER) { int val; offset = SIXAXIS_INPUT_REPORT_ACC_X_OFFSET; val = ((rd[offset+1] << 8) | rd[offset]) - 511; input_report_abs(sc->sensor_dev, ABS_X, val); /* Y and Z are swapped and inversed */ val = 511 - ((rd[offset+5] << 8) | rd[offset+4]); input_report_abs(sc->sensor_dev, ABS_Y, val); val = 511 - ((rd[offset+3] << 8) | rd[offset+2]); input_report_abs(sc->sensor_dev, ABS_Z, val); input_sync(sc->sensor_dev); } } static void nsg_mrxu_parse_report(struct sony_sc *sc, u8 *rd, int size) { int n, offset, relx, rely; u8 active; /* * The NSG-MRxU multi-touch trackpad data starts at offset 1 and * the touch-related data starts at offset 2. 
* For the first byte, bit 0 is set when touchpad button is pressed. * Bit 2 is set when a touch is active and the drag (Fn) key is pressed. * This drag key is mapped to BTN_LEFT. It is operational only when a * touch point is active. * Bit 4 is set when only the first touch point is active. * Bit 6 is set when only the second touch point is active. * Bits 5 and 7 are set when both touch points are active. * The next 3 bytes are two 12 bit X/Y coordinates for the first touch. * The following byte, offset 5, has the touch width and length. * Bits 0-4=X (width), bits 5-7=Y (length). * A signed relative X coordinate is at offset 6. * The bytes at offset 7-9 are the second touch X/Y coordinates. * Offset 10 has the second touch width and length. * Offset 11 has the relative Y coordinate. */ offset = 1; input_report_key(sc->touchpad, BTN_LEFT, rd[offset] & 0x0F); active = (rd[offset] >> 4); relx = (s8) rd[offset+5]; rely = ((s8) rd[offset+10]) * -1; offset++; for (n = 0; n < 2; n++) { u16 x, y; u8 contactx, contacty; x = rd[offset] | ((rd[offset+1] & 0x0F) << 8); y = ((rd[offset+1] & 0xF0) >> 4) | (rd[offset+2] << 4); input_mt_slot(sc->touchpad, n); input_mt_report_slot_state(sc->touchpad, MT_TOOL_FINGER, active & 0x03); if (active & 0x03) { contactx = rd[offset+3] & 0x0F; contacty = rd[offset+3] >> 4; input_report_abs(sc->touchpad, ABS_MT_TOUCH_MAJOR, max(contactx, contacty)); input_report_abs(sc->touchpad, ABS_MT_TOUCH_MINOR, min(contactx, contacty)); input_report_abs(sc->touchpad, ABS_MT_ORIENTATION, (bool) (contactx > contacty)); input_report_abs(sc->touchpad, ABS_MT_POSITION_X, x); input_report_abs(sc->touchpad, ABS_MT_POSITION_Y, NSG_MRXU_MAX_Y - y); /* * The relative coordinates belong to the first touch * point, when present, or to the second touch point * when the first is not active. */ if ((n == 0) || ((n == 1) && (active & 0x01))) { input_report_rel(sc->touchpad, REL_X, relx); input_report_rel(sc->touchpad, REL_Y, rely); } } offset += 5; active >>= 2; } input_mt_sync_frame(sc->touchpad); input_sync(sc->touchpad); } static int sony_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *rd, int size) { struct sony_sc *sc = hid_get_drvdata(hdev); /* * Sixaxis HID report has acclerometers/gyro with MSByte first, this * has to be BYTE_SWAPPED before passing up to joystick interface */ if ((sc->quirks & SIXAXIS_CONTROLLER) && rd[0] == 0x01 && size == 49) { /* * When connected via Bluetooth the Sixaxis occasionally sends * a report with the second byte 0xff and the rest zeroed. * * This report does not reflect the actual state of the * controller must be ignored to avoid generating false input * events. 
*/ if (rd[1] == 0xff) return -EINVAL; swap(rd[41], rd[42]); swap(rd[43], rd[44]); swap(rd[45], rd[46]); swap(rd[47], rd[48]); sixaxis_parse_report(sc, rd, size); } else if ((sc->quirks & MOTION_CONTROLLER_BT) && rd[0] == 0x01 && size == 49) { sixaxis_parse_report(sc, rd, size); } else if ((sc->quirks & NAVIGATION_CONTROLLER) && rd[0] == 0x01 && size == 49) { sixaxis_parse_report(sc, rd, size); } else if ((sc->quirks & NSG_MRXU_REMOTE) && rd[0] == 0x02) { nsg_mrxu_parse_report(sc, rd, size); return 1; } if (sc->defer_initialization) { sc->defer_initialization = 0; sony_schedule_work(sc, SONY_WORKER_STATE); } return 0; } static int sony_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { struct sony_sc *sc = hid_get_drvdata(hdev); if (sc->quirks & BUZZ_CONTROLLER) { unsigned int key = usage->hid & HID_USAGE; if ((usage->hid & HID_USAGE_PAGE) != HID_UP_BUTTON) return -1; switch (usage->collection_index) { case 1: if (key >= ARRAY_SIZE(buzz_keymap)) return -1; key = buzz_keymap[key]; if (!key) return -1; break; default: return -1; } hid_map_usage_clear(hi, usage, bit, max, EV_KEY, key); return 1; } if (sc->quirks & PS3REMOTE) return ps3remote_mapping(hdev, hi, field, usage, bit, max); if (sc->quirks & NAVIGATION_CONTROLLER) return navigation_mapping(hdev, hi, field, usage, bit, max); if (sc->quirks & SIXAXIS_CONTROLLER) return sixaxis_mapping(hdev, hi, field, usage, bit, max); if (sc->quirks & GH_GUITAR_CONTROLLER) return guitar_mapping(hdev, hi, field, usage, bit, max); /* Let hid-core decide for the others */ return 0; } static int sony_register_touchpad(struct sony_sc *sc, int touch_count, int w, int h, int touch_major, int touch_minor, int orientation) { size_t name_sz; char *name; int ret; sc->touchpad = devm_input_allocate_device(&sc->hdev->dev); if (!sc->touchpad) return -ENOMEM; input_set_drvdata(sc->touchpad, sc); sc->touchpad->dev.parent = &sc->hdev->dev; sc->touchpad->phys = sc->hdev->phys; sc->touchpad->uniq = sc->hdev->uniq; sc->touchpad->id.bustype = sc->hdev->bus; sc->touchpad->id.vendor = sc->hdev->vendor; sc->touchpad->id.product = sc->hdev->product; sc->touchpad->id.version = sc->hdev->version; /* This suffix was originally appended when hid-sony also * supported DS4 devices. The DS4 was implemented using multiple * evdev nodes and hence had the need to separate them out using * a suffix. Other devices which were added later like Sony TV remotes * inherited this suffix. */ name_sz = strlen(sc->hdev->name) + sizeof(TOUCHPAD_SUFFIX); name = devm_kzalloc(&sc->hdev->dev, name_sz, GFP_KERNEL); if (!name) return -ENOMEM; snprintf(name, name_sz, "%s" TOUCHPAD_SUFFIX, sc->hdev->name); sc->touchpad->name = name; /* We map the button underneath the touchpad to BTN_LEFT.
*/ __set_bit(EV_KEY, sc->touchpad->evbit); __set_bit(BTN_LEFT, sc->touchpad->keybit); __set_bit(INPUT_PROP_BUTTONPAD, sc->touchpad->propbit); input_set_abs_params(sc->touchpad, ABS_MT_POSITION_X, 0, w, 0, 0); input_set_abs_params(sc->touchpad, ABS_MT_POSITION_Y, 0, h, 0, 0); if (touch_major > 0) { input_set_abs_params(sc->touchpad, ABS_MT_TOUCH_MAJOR, 0, touch_major, 0, 0); if (touch_minor > 0) input_set_abs_params(sc->touchpad, ABS_MT_TOUCH_MINOR, 0, touch_minor, 0, 0); if (orientation > 0) input_set_abs_params(sc->touchpad, ABS_MT_ORIENTATION, 0, orientation, 0, 0); } if (sc->quirks & NSG_MRXU_REMOTE) { __set_bit(EV_REL, sc->touchpad->evbit); } ret = input_mt_init_slots(sc->touchpad, touch_count, INPUT_MT_POINTER); if (ret < 0) return ret; ret = input_register_device(sc->touchpad); if (ret < 0) return ret; return 0; } static int sony_register_sensors(struct sony_sc *sc) { size_t name_sz; char *name; int ret; sc->sensor_dev = devm_input_allocate_device(&sc->hdev->dev); if (!sc->sensor_dev) return -ENOMEM; input_set_drvdata(sc->sensor_dev, sc); sc->sensor_dev->dev.parent = &sc->hdev->dev; sc->sensor_dev->phys = sc->hdev->phys; sc->sensor_dev->uniq = sc->hdev->uniq; sc->sensor_dev->id.bustype = sc->hdev->bus; sc->sensor_dev->id.vendor = sc->hdev->vendor; sc->sensor_dev->id.product = sc->hdev->product; sc->sensor_dev->id.version = sc->hdev->version; /* Append a suffix to the controller name as there are various * DS4 compatible non-Sony devices with different names. */ name_sz = strlen(sc->hdev->name) + sizeof(SENSOR_SUFFIX); name = devm_kzalloc(&sc->hdev->dev, name_sz, GFP_KERNEL); if (!name) return -ENOMEM; snprintf(name, name_sz, "%s" SENSOR_SUFFIX, sc->hdev->name); sc->sensor_dev->name = name; if (sc->quirks & SIXAXIS_CONTROLLER) { /* For the DS3 we only support the accelerometer, which works * quite well even without calibration. The device also has * a 1-axis gyro, but it is very difficult to manage from within * the driver even to get data, the sensor is inaccurate and * the behavior is very different between hardware revisions. */ input_set_abs_params(sc->sensor_dev, ABS_X, -512, 511, 4, 0); input_set_abs_params(sc->sensor_dev, ABS_Y, -512, 511, 4, 0); input_set_abs_params(sc->sensor_dev, ABS_Z, -512, 511, 4, 0); input_abs_set_res(sc->sensor_dev, ABS_X, SIXAXIS_ACC_RES_PER_G); input_abs_set_res(sc->sensor_dev, ABS_Y, SIXAXIS_ACC_RES_PER_G); input_abs_set_res(sc->sensor_dev, ABS_Z, SIXAXIS_ACC_RES_PER_G); } __set_bit(INPUT_PROP_ACCELEROMETER, sc->sensor_dev->propbit); ret = input_register_device(sc->sensor_dev); if (ret < 0) return ret; return 0; } /* * Sending HID_REQ_GET_REPORT changes the operation mode of the ps3 controller * to "operational". Without this, the ps3 controller will not report any * events. */ static int sixaxis_set_operational_usb(struct hid_device *hdev) { struct sony_sc *sc = hid_get_drvdata(hdev); const int buf_size = max(SIXAXIS_REPORT_0xF2_SIZE, SIXAXIS_REPORT_0xF5_SIZE); u8 *buf; int ret; buf = kmalloc(buf_size, GFP_KERNEL); if (!buf) return -ENOMEM; ret = hid_hw_raw_request(hdev, 0xf2, buf, SIXAXIS_REPORT_0xF2_SIZE, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret < 0) { hid_err(hdev, "can't set operational mode: step 1\n"); goto out; } /* * Some compatible controllers like the Speedlink Strike FX and * Gasia need another query plus an USB interrupt to get operational. 
*/ ret = hid_hw_raw_request(hdev, 0xf5, buf, SIXAXIS_REPORT_0xF5_SIZE, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret < 0) { hid_err(hdev, "can't set operational mode: step 2\n"); goto out; } /* * But the USB interrupt would cause SHANWAN controllers to * start rumbling non-stop, so skip step 3 for these controllers. */ if (sc->quirks & SHANWAN_GAMEPAD) goto out; ret = hid_hw_output_report(hdev, buf, 1); if (ret < 0) { hid_info(hdev, "can't set operational mode: step 3, ignoring\n"); ret = 0; } out: kfree(buf); return ret; } static int sixaxis_set_operational_bt(struct hid_device *hdev) { static const u8 report[] = { 0xf4, 0x42, 0x03, 0x00, 0x00 }; u8 *buf; int ret; buf = kmemdup(report, sizeof(report), GFP_KERNEL); if (!buf) return -ENOMEM; ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(report), HID_FEATURE_REPORT, HID_REQ_SET_REPORT); kfree(buf); return ret; } static void sixaxis_set_leds_from_id(struct sony_sc *sc) { static const u8 sixaxis_leds[10][4] = { { 0x01, 0x00, 0x00, 0x00 }, { 0x00, 0x01, 0x00, 0x00 }, { 0x00, 0x00, 0x01, 0x00 }, { 0x00, 0x00, 0x00, 0x01 }, { 0x01, 0x00, 0x00, 0x01 }, { 0x00, 0x01, 0x00, 0x01 }, { 0x00, 0x00, 0x01, 0x01 }, { 0x01, 0x00, 0x01, 0x01 }, { 0x00, 0x01, 0x01, 0x01 }, { 0x01, 0x01, 0x01, 0x01 } }; int id = sc->device_id; BUILD_BUG_ON(MAX_LEDS < ARRAY_SIZE(sixaxis_leds[0])); if (id < 0) return; id %= 10; memcpy(sc->led_state, sixaxis_leds[id], sizeof(sixaxis_leds[id])); } static void buzz_set_leds(struct sony_sc *sc) { struct hid_device *hdev = sc->hdev; struct list_head *report_list = &hdev->report_enum[HID_OUTPUT_REPORT].report_list; struct hid_report *report = list_entry(report_list->next, struct hid_report, list); s32 *value = report->field[0]->value; BUILD_BUG_ON(MAX_LEDS < 4); value[0] = 0x00; value[1] = sc->led_state[0] ? 0xff : 0x00; value[2] = sc->led_state[1] ? 0xff : 0x00; value[3] = sc->led_state[2] ? 0xff : 0x00; value[4] = sc->led_state[3] ? 0xff : 0x00; value[5] = 0x00; value[6] = 0x00; hid_hw_request(hdev, report, HID_REQ_SET_REPORT); } static void sony_set_leds(struct sony_sc *sc) { if (!(sc->quirks & BUZZ_CONTROLLER)) sony_schedule_work(sc, SONY_WORKER_STATE); else buzz_set_leds(sc); } static void sony_led_set_brightness(struct led_classdev *led, enum led_brightness value) { struct device *dev = led->dev->parent; struct hid_device *hdev = to_hid_device(dev); struct sony_sc *drv_data; int n; int force_update; drv_data = hid_get_drvdata(hdev); if (!drv_data) { hid_err(hdev, "No device data\n"); return; } /* * The Sixaxis on USB will override any LED settings sent to it * and keep flashing all of the LEDs until the PS button is pressed. * Updates, even if redundant, must be always be sent to the * controller to avoid having to toggle the state of an LED just to * stop the flashing later on. 
*/ force_update = !!(drv_data->quirks & SIXAXIS_CONTROLLER_USB); for (n = 0; n < drv_data->led_count; n++) { if (led == drv_data->leds[n] && (force_update || (value != drv_data->led_state[n] || drv_data->led_delay_on[n] || drv_data->led_delay_off[n]))) { drv_data->led_state[n] = value; /* Setting the brightness stops the blinking */ drv_data->led_delay_on[n] = 0; drv_data->led_delay_off[n] = 0; sony_set_leds(drv_data); break; } } } static enum led_brightness sony_led_get_brightness(struct led_classdev *led) { struct device *dev = led->dev->parent; struct hid_device *hdev = to_hid_device(dev); struct sony_sc *drv_data; int n; drv_data = hid_get_drvdata(hdev); if (!drv_data) { hid_err(hdev, "No device data\n"); return LED_OFF; } for (n = 0; n < drv_data->led_count; n++) { if (led == drv_data->leds[n]) return drv_data->led_state[n]; } return LED_OFF; } static int sony_led_blink_set(struct led_classdev *led, unsigned long *delay_on, unsigned long *delay_off) { struct device *dev = led->dev->parent; struct hid_device *hdev = to_hid_device(dev); struct sony_sc *drv_data = hid_get_drvdata(hdev); int n; u8 new_on, new_off; if (!drv_data) { hid_err(hdev, "No device data\n"); return -EINVAL; } /* Max delay is 255 deciseconds or 2550 milliseconds */ if (*delay_on > 2550) *delay_on = 2550; if (*delay_off > 2550) *delay_off = 2550; /* Blink at 1 Hz if both values are zero */ if (!*delay_on && !*delay_off) *delay_on = *delay_off = 500; new_on = *delay_on / 10; new_off = *delay_off / 10; for (n = 0; n < drv_data->led_count; n++) { if (led == drv_data->leds[n]) break; } /* This LED is not registered on this device */ if (n >= drv_data->led_count) return -EINVAL; /* Don't schedule work if the values didn't change */ if (new_on != drv_data->led_delay_on[n] || new_off != drv_data->led_delay_off[n]) { drv_data->led_delay_on[n] = new_on; drv_data->led_delay_off[n] = new_off; sony_schedule_work(drv_data, SONY_WORKER_STATE); } return 0; } static int sony_leds_init(struct sony_sc *sc) { struct hid_device *hdev = sc->hdev; int n, ret = 0; int use_color_names; struct led_classdev *led; size_t name_sz; char *name; size_t name_len; const char *name_fmt; static const char * const color_name_str[] = { "red", "green", "blue", "global" }; u8 max_brightness[MAX_LEDS] = { [0 ... (MAX_LEDS - 1)] = 1 }; u8 use_hw_blink[MAX_LEDS] = { 0 }; if (WARN_ON(!(sc->quirks & SONY_LED_SUPPORT))) return -EINVAL; if (sc->quirks & BUZZ_CONTROLLER) { sc->led_count = 4; use_color_names = 0; name_len = strlen("::buzz#"); name_fmt = "%s::buzz%d"; /* Validate expected report characteristics. */ if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 0, 0, 7)) return -ENODEV; } else if (sc->quirks & MOTION_CONTROLLER) { sc->led_count = 3; memset(max_brightness, 255, 3); use_color_names = 1; name_len = 0; name_fmt = "%s:%s"; } else if (sc->quirks & NAVIGATION_CONTROLLER) { static const u8 navigation_leds[4] = {0x01, 0x00, 0x00, 0x00}; memcpy(sc->led_state, navigation_leds, sizeof(navigation_leds)); sc->led_count = 1; memset(use_hw_blink, 1, 4); use_color_names = 0; name_len = strlen("::sony#"); name_fmt = "%s::sony%d"; } else { sixaxis_set_leds_from_id(sc); sc->led_count = 4; memset(use_hw_blink, 1, 4); use_color_names = 0; name_len = strlen("::sony#"); name_fmt = "%s::sony%d"; } /* * Clear LEDs as we have no way of reading their initial state. 
This is * only relevant if the driver is loaded after somebody actively set the * LEDs to on */ sony_set_leds(sc); name_sz = strlen(dev_name(&hdev->dev)) + name_len + 1; for (n = 0; n < sc->led_count; n++) { if (use_color_names) name_sz = strlen(dev_name(&hdev->dev)) + strlen(color_name_str[n]) + 2; led = devm_kzalloc(&hdev->dev, sizeof(struct led_classdev) + name_sz, GFP_KERNEL); if (!led) { hid_err(hdev, "Couldn't allocate memory for LED %d\n", n); return -ENOMEM; } name = (void *)(&led[1]); if (use_color_names) snprintf(name, name_sz, name_fmt, dev_name(&hdev->dev), color_name_str[n]); else snprintf(name, name_sz, name_fmt, dev_name(&hdev->dev), n + 1); led->name = name; led->brightness = sc->led_state[n]; led->max_brightness = max_brightness[n]; led->flags = LED_CORE_SUSPENDRESUME; led->brightness_get = sony_led_get_brightness; led->brightness_set = sony_led_set_brightness; if (use_hw_blink[n]) led->blink_set = sony_led_blink_set; sc->leds[n] = led; ret = devm_led_classdev_register(&hdev->dev, led); if (ret) { hid_err(hdev, "Failed to register LED %d\n", n); return ret; } } return 0; } static void sixaxis_send_output_report(struct sony_sc *sc) { static const union sixaxis_output_report_01 default_report = { .buf = { 0x01, 0x01, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x27, 0x10, 0x00, 0x32, 0xff, 0x27, 0x10, 0x00, 0x32, 0xff, 0x27, 0x10, 0x00, 0x32, 0xff, 0x27, 0x10, 0x00, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00 } }; struct sixaxis_output_report *report = (struct sixaxis_output_report *)sc->output_report_dmabuf; int n; /* Initialize the report with default values */ memcpy(report, &default_report, sizeof(struct sixaxis_output_report)); #ifdef CONFIG_SONY_FF report->rumble.right_motor_on = sc->right ? 1 : 0; report->rumble.left_motor_force = sc->left; #endif report->leds_bitmap |= sc->led_state[0] << 1; report->leds_bitmap |= sc->led_state[1] << 2; report->leds_bitmap |= sc->led_state[2] << 3; report->leds_bitmap |= sc->led_state[3] << 4; /* Set flag for all leds off, required for 3rd party INTEC controller */ if ((report->leds_bitmap & 0x1E) == 0) report->leds_bitmap |= 0x20; /* * The LEDs in the report are indexed in reverse order to their * corresponding light on the controller. * Index 0 = LED 4, index 1 = LED 3, etc... * * In the case of both delay values being zero (blinking disabled) the * default report values should be used or the controller LED will be * always off. 
*/ for (n = 0; n < 4; n++) { if (sc->led_delay_on[n] || sc->led_delay_off[n]) { report->led[3 - n].duty_off = sc->led_delay_off[n]; report->led[3 - n].duty_on = sc->led_delay_on[n]; } } /* SHANWAN controllers require output reports via intr channel */ if (sc->quirks & SHANWAN_GAMEPAD) hid_hw_output_report(sc->hdev, (u8 *)report, sizeof(struct sixaxis_output_report)); else hid_hw_raw_request(sc->hdev, report->report_id, (u8 *)report, sizeof(struct sixaxis_output_report), HID_OUTPUT_REPORT, HID_REQ_SET_REPORT); } static void motion_send_output_report(struct sony_sc *sc) { struct hid_device *hdev = sc->hdev; struct motion_output_report_02 *report = (struct motion_output_report_02 *)sc->output_report_dmabuf; memset(report, 0, MOTION_REPORT_0x02_SIZE); report->type = 0x02; /* set leds */ report->r = sc->led_state[0]; report->g = sc->led_state[1]; report->b = sc->led_state[2]; #ifdef CONFIG_SONY_FF report->rumble = max(sc->right, sc->left); #endif hid_hw_output_report(hdev, (u8 *)report, MOTION_REPORT_0x02_SIZE); } #ifdef CONFIG_SONY_FF static inline void sony_send_output_report(struct sony_sc *sc) { if (sc->send_output_report) sc->send_output_report(sc); } #endif static void sony_state_worker(struct work_struct *work) { struct sony_sc *sc = container_of(work, struct sony_sc, state_worker); sc->send_output_report(sc); } static int sony_allocate_output_report(struct sony_sc *sc) { if ((sc->quirks & SIXAXIS_CONTROLLER) || (sc->quirks & NAVIGATION_CONTROLLER)) sc->output_report_dmabuf = devm_kmalloc(&sc->hdev->dev, sizeof(union sixaxis_output_report_01), GFP_KERNEL); else if (sc->quirks & MOTION_CONTROLLER) sc->output_report_dmabuf = devm_kmalloc(&sc->hdev->dev, MOTION_REPORT_0x02_SIZE, GFP_KERNEL); else return 0; if (!sc->output_report_dmabuf) return -ENOMEM; return 0; } #ifdef CONFIG_SONY_FF static int sony_play_effect(struct input_dev *dev, void *data, struct ff_effect *effect) { struct hid_device *hid = input_get_drvdata(dev); struct sony_sc *sc = hid_get_drvdata(hid); if (effect->type != FF_RUMBLE) return 0; sc->left = effect->u.rumble.strong_magnitude / 256; sc->right = effect->u.rumble.weak_magnitude / 256; sony_schedule_work(sc, SONY_WORKER_STATE); return 0; } static int sony_init_ff(struct sony_sc *sc) { struct hid_input *hidinput; struct input_dev *input_dev; if (list_empty(&sc->hdev->inputs)) { hid_err(sc->hdev, "no inputs found\n"); return -ENODEV; } hidinput = list_entry(sc->hdev->inputs.next, struct hid_input, list); input_dev = hidinput->input; input_set_capability(input_dev, EV_FF, FF_RUMBLE); return input_ff_create_memless(input_dev, NULL, sony_play_effect); } #else static int sony_init_ff(struct sony_sc *sc) { return 0; } #endif static int sony_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct sony_sc *sc = power_supply_get_drvdata(psy); unsigned long flags; int ret = 0; u8 battery_capacity; int battery_status; spin_lock_irqsave(&sc->lock, flags); battery_capacity = sc->battery_capacity; battery_status = sc->battery_status; spin_unlock_irqrestore(&sc->lock, flags); switch (psp) { case POWER_SUPPLY_PROP_PRESENT: val->intval = 1; break; case POWER_SUPPLY_PROP_SCOPE: val->intval = POWER_SUPPLY_SCOPE_DEVICE; break; case POWER_SUPPLY_PROP_CAPACITY: val->intval = battery_capacity; break; case POWER_SUPPLY_PROP_STATUS: val->intval = battery_status; break; default: ret = -EINVAL; break; } return ret; } static int sony_battery_probe(struct sony_sc *sc, int append_dev_id) { const char *battery_str_fmt = append_dev_id ? 
"sony_controller_battery_%pMR_%i" : "sony_controller_battery_%pMR"; struct power_supply_config psy_cfg = { .drv_data = sc, }; struct hid_device *hdev = sc->hdev; int ret; /* * Set the default battery level to 100% to avoid low battery warnings * if the battery is polled before the first device report is received. */ sc->battery_capacity = 100; sc->battery_desc.properties = sony_battery_props; sc->battery_desc.num_properties = ARRAY_SIZE(sony_battery_props); sc->battery_desc.get_property = sony_battery_get_property; sc->battery_desc.type = POWER_SUPPLY_TYPE_BATTERY; sc->battery_desc.use_for_apm = 0; sc->battery_desc.name = devm_kasprintf(&hdev->dev, GFP_KERNEL, battery_str_fmt, sc->mac_address, sc->device_id); if (!sc->battery_desc.name) return -ENOMEM; sc->battery = devm_power_supply_register(&hdev->dev, &sc->battery_desc, &psy_cfg); if (IS_ERR(sc->battery)) { ret = PTR_ERR(sc->battery); hid_err(hdev, "Unable to register battery device\n"); return ret; } power_supply_powers(sc->battery, &hdev->dev); return 0; } /* * If a controller is plugged in via USB while already connected via Bluetooth * it will show up as two devices. A global list of connected controllers and * their MAC addresses is maintained to ensure that a device is only connected * once. * * Some USB-only devices masquerade as Sixaxis controllers and all have the * same dummy Bluetooth address, so a comparison of the connection type is * required. Devices are only rejected in the case where two devices have * matching Bluetooth addresses on different bus types. */ static inline int sony_compare_connection_type(struct sony_sc *sc0, struct sony_sc *sc1) { const int sc0_not_bt = !(sc0->quirks & SONY_BT_DEVICE); const int sc1_not_bt = !(sc1->quirks & SONY_BT_DEVICE); return sc0_not_bt == sc1_not_bt; } static int sony_check_add_dev_list(struct sony_sc *sc) { struct sony_sc *entry; unsigned long flags; int ret; spin_lock_irqsave(&sony_dev_list_lock, flags); list_for_each_entry(entry, &sony_device_list, list_node) { ret = memcmp(sc->mac_address, entry->mac_address, sizeof(sc->mac_address)); if (!ret) { if (sony_compare_connection_type(sc, entry)) { ret = 1; } else { ret = -EEXIST; hid_info(sc->hdev, "controller with MAC address %pMR already connected\n", sc->mac_address); } goto unlock; } } ret = 0; list_add(&(sc->list_node), &sony_device_list); unlock: spin_unlock_irqrestore(&sony_dev_list_lock, flags); return ret; } static void sony_remove_dev_list(struct sony_sc *sc) { unsigned long flags; if (sc->list_node.next) { spin_lock_irqsave(&sony_dev_list_lock, flags); list_del(&(sc->list_node)); spin_unlock_irqrestore(&sony_dev_list_lock, flags); } } static int sony_get_bt_devaddr(struct sony_sc *sc) { int ret; /* HIDP stores the device MAC address as a string in the uniq field. */ ret = strlen(sc->hdev->uniq); if (ret != 17) return -EINVAL; ret = sscanf(sc->hdev->uniq, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx", &sc->mac_address[5], &sc->mac_address[4], &sc->mac_address[3], &sc->mac_address[2], &sc->mac_address[1], &sc->mac_address[0]); if (ret != 6) return -EINVAL; return 0; } static int sony_check_add(struct sony_sc *sc) { u8 *buf = NULL; int n, ret; if ((sc->quirks & MOTION_CONTROLLER_BT) || (sc->quirks & NAVIGATION_CONTROLLER_BT) || (sc->quirks & SIXAXIS_CONTROLLER_BT)) { /* * sony_get_bt_devaddr() attempts to parse the Bluetooth MAC * address from the uniq string where HIDP stores it. * As uniq cannot be guaranteed to be a MAC address in all cases * a failure of this function should not prevent the connection. 
*/ if (sony_get_bt_devaddr(sc) < 0) { hid_warn(sc->hdev, "UNIQ does not contain a MAC address; duplicate check skipped\n"); return 0; } } else if ((sc->quirks & SIXAXIS_CONTROLLER_USB) || (sc->quirks & NAVIGATION_CONTROLLER_USB)) { buf = kmalloc(SIXAXIS_REPORT_0xF2_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; /* * The MAC address of a Sixaxis controller connected via USB can * be retrieved with feature report 0xf2. The address begins at * offset 4. */ ret = hid_hw_raw_request(sc->hdev, 0xf2, buf, SIXAXIS_REPORT_0xF2_SIZE, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret != SIXAXIS_REPORT_0xF2_SIZE) { hid_err(sc->hdev, "failed to retrieve feature report 0xf2 with the Sixaxis MAC address\n"); ret = ret < 0 ? ret : -EINVAL; goto out_free; } /* * The Sixaxis device MAC in the report is big-endian and must * be byte-swapped. */ for (n = 0; n < 6; n++) sc->mac_address[5-n] = buf[4+n]; snprintf(sc->hdev->uniq, sizeof(sc->hdev->uniq), "%pMR", sc->mac_address); } else { return 0; } ret = sony_check_add_dev_list(sc); out_free: kfree(buf); return ret; } static int sony_set_device_id(struct sony_sc *sc) { int ret; /* * Only Sixaxis controllers get an id. * All others are set to -1. */ if (sc->quirks & SIXAXIS_CONTROLLER) { ret = ida_alloc(&sony_device_id_allocator, GFP_KERNEL); if (ret < 0) { sc->device_id = -1; return ret; } sc->device_id = ret; } else { sc->device_id = -1; } return 0; } static void sony_release_device_id(struct sony_sc *sc) { if (sc->device_id >= 0) { ida_free(&sony_device_id_allocator, sc->device_id); sc->device_id = -1; } } static inline void sony_init_output_report(struct sony_sc *sc, void (*send_output_report)(struct sony_sc *)) { sc->send_output_report = send_output_report; if (!sc->state_worker_initialized) INIT_WORK(&sc->state_worker, sony_state_worker); sc->state_worker_initialized = 1; } static inline void sony_cancel_work_sync(struct sony_sc *sc) { unsigned long flags; if (sc->state_worker_initialized) { spin_lock_irqsave(&sc->lock, flags); sc->state_worker_initialized = 0; spin_unlock_irqrestore(&sc->lock, flags); cancel_work_sync(&sc->state_worker); } } static int sony_input_configured(struct hid_device *hdev, struct hid_input *hidinput) { struct sony_sc *sc = hid_get_drvdata(hdev); int append_dev_id; int ret; ret = sony_set_device_id(sc); if (ret < 0) { hid_err(hdev, "failed to allocate the device id\n"); goto err_stop; } ret = append_dev_id = sony_check_add(sc); if (ret < 0) goto err_stop; ret = sony_allocate_output_report(sc); if (ret < 0) { hid_err(hdev, "failed to allocate the output report buffer\n"); goto err_stop; } if (sc->quirks & NAVIGATION_CONTROLLER_USB) { /* * The Sony Sixaxis does not handle HID Output Reports on the * Interrupt EP like it could, so we need to force HID Output * Reports to use HID_REQ_SET_REPORT on the Control EP. * * There is also another issue about HID Output Reports via USB, * the Sixaxis does not want the report_id as part of the data * packet, so we have to discard buf[0] when sending the actual * control message, even for numbered reports, humpf! * * Additionally, the Sixaxis on USB isn't properly initialized * until the PS logo button is pressed and as such won't retain * any state set by an output report, so the initial * configuration report is deferred until the first input * report arrives. 
*/ hdev->quirks |= HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP; hdev->quirks |= HID_QUIRK_SKIP_OUTPUT_REPORT_ID; sc->defer_initialization = 1; ret = sixaxis_set_operational_usb(hdev); if (ret < 0) { hid_err(hdev, "Failed to set controller into operational mode\n"); goto err_stop; } sony_init_output_report(sc, sixaxis_send_output_report); } else if (sc->quirks & NAVIGATION_CONTROLLER_BT) { /* * The Navigation controller wants output reports sent on the ctrl * endpoint when connected via Bluetooth. */ hdev->quirks |= HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP; ret = sixaxis_set_operational_bt(hdev); if (ret < 0) { hid_err(hdev, "Failed to set controller into operational mode\n"); goto err_stop; } sony_init_output_report(sc, sixaxis_send_output_report); } else if (sc->quirks & SIXAXIS_CONTROLLER_USB) { /* * The Sony Sixaxis does not handle HID Output Reports on the * Interrupt EP and the device only becomes active when the * PS button is pressed. See comment for Navigation controller * above for more details. */ hdev->quirks |= HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP; hdev->quirks |= HID_QUIRK_SKIP_OUTPUT_REPORT_ID; sc->defer_initialization = 1; ret = sixaxis_set_operational_usb(hdev); if (ret < 0) { hid_err(hdev, "Failed to set controller into operational mode\n"); goto err_stop; } ret = sony_register_sensors(sc); if (ret) { hid_err(sc->hdev, "Unable to initialize motion sensors: %d\n", ret); goto err_stop; } sony_init_output_report(sc, sixaxis_send_output_report); } else if (sc->quirks & SIXAXIS_CONTROLLER_BT) { /* * The Sixaxis wants output reports sent on the ctrl endpoint * when connected via Bluetooth. */ hdev->quirks |= HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP; ret = sixaxis_set_operational_bt(hdev); if (ret < 0) { hid_err(hdev, "Failed to set controller into operational mode\n"); goto err_stop; } ret = sony_register_sensors(sc); if (ret) { hid_err(sc->hdev, "Unable to initialize motion sensors: %d\n", ret); goto err_stop; } sony_init_output_report(sc, sixaxis_send_output_report); } else if (sc->quirks & NSG_MRXU_REMOTE) { /* * The NSG-MRxU touchpad supports 2 touches and has a * resolution of 1667x1868 */ ret = sony_register_touchpad(sc, 2, NSG_MRXU_MAX_X, NSG_MRXU_MAX_Y, 15, 15, 1); if (ret) { hid_err(sc->hdev, "Unable to initialize multi-touch slots: %d\n", ret); goto err_stop; } } else if (sc->quirks & MOTION_CONTROLLER) { sony_init_output_report(sc, motion_send_output_report); } if (sc->quirks & SONY_LED_SUPPORT) { ret = sony_leds_init(sc); if (ret < 0) goto err_stop; } if (sc->quirks & SONY_BATTERY_SUPPORT) { ret = sony_battery_probe(sc, append_dev_id); if (ret < 0) goto err_stop; /* Open the device to receive reports with battery info */ ret = hid_hw_open(hdev); if (ret < 0) { hid_err(hdev, "hw open failed\n"); goto err_stop; } } if (sc->quirks & SONY_FF_SUPPORT) { ret = sony_init_ff(sc); if (ret < 0) goto err_close; } return 0; err_close: hid_hw_close(hdev); err_stop: sony_cancel_work_sync(sc); sony_remove_dev_list(sc); sony_release_device_id(sc); return ret; } static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; unsigned long quirks = id->driver_data; struct sony_sc *sc; struct usb_device *usbdev; unsigned int connect_mask = HID_CONNECT_DEFAULT; if (!strcmp(hdev->name, "FutureMax Dance Mat")) quirks |= FUTUREMAX_DANCE_MAT; if (!strcmp(hdev->name, "SHANWAN PS3 GamePad") || !strcmp(hdev->name, "ShanWan PS(R) Ga`epad")) quirks |= SHANWAN_GAMEPAD; sc = devm_kzalloc(&hdev->dev, sizeof(*sc), GFP_KERNEL); if (sc == NULL) { hid_err(hdev, "can't alloc sony 
descriptor\n"); return -ENOMEM; } spin_lock_init(&sc->lock); sc->quirks = quirks; hid_set_drvdata(hdev, sc); sc->hdev = hdev; ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); return ret; } if (sc->quirks & VAIO_RDESC_CONSTANT) connect_mask |= HID_CONNECT_HIDDEV_FORCE; else if (sc->quirks & SIXAXIS_CONTROLLER) connect_mask |= HID_CONNECT_HIDDEV_FORCE; /* Patch the hw version on DS3 compatible devices, so applications can * distinguish between the default HID mappings and the mappings defined * by the Linux game controller spec. This is important for the SDL2 * library, which has a game controller database, which uses device ids * in combination with version as a key. */ if (sc->quirks & SIXAXIS_CONTROLLER) hdev->version |= 0x8000; ret = hid_hw_start(hdev, connect_mask); if (ret) { hid_err(hdev, "hw start failed\n"); return ret; } /* sony_input_configured can fail, but this doesn't result * in hid_hw_start failures (intended). Check whether * the HID layer claimed the device else fail. * We don't know the actual reason for the failure, most * likely it is due to EEXIST in case of double connection * of USB and Bluetooth, but could have been due to ENOMEM * or other reasons as well. */ if (!(hdev->claimed & HID_CLAIMED_INPUT)) { hid_err(hdev, "failed to claim input\n"); ret = -ENODEV; goto err; } if (sc->quirks & (GHL_GUITAR_PS3WIIU | GHL_GUITAR_PS4)) { if (!hid_is_usb(hdev)) { ret = -EINVAL; goto err; } usbdev = to_usb_device(sc->hdev->dev.parent->parent); sc->ghl_urb = usb_alloc_urb(0, GFP_ATOMIC); if (!sc->ghl_urb) { ret = -ENOMEM; goto err; } if (sc->quirks & GHL_GUITAR_PS3WIIU) ret = ghl_init_urb(sc, usbdev, ghl_ps3wiiu_magic_data, ARRAY_SIZE(ghl_ps3wiiu_magic_data)); else if (sc->quirks & GHL_GUITAR_PS4) ret = ghl_init_urb(sc, usbdev, ghl_ps4_magic_data, ARRAY_SIZE(ghl_ps4_magic_data)); if (ret) { hid_err(hdev, "error preparing URB\n"); goto err; } timer_setup(&sc->ghl_poke_timer, ghl_magic_poke, 0); mod_timer(&sc->ghl_poke_timer, jiffies + GHL_GUITAR_POKE_INTERVAL*HZ); } return ret; err: usb_free_urb(sc->ghl_urb); hid_hw_stop(hdev); return ret; } static void sony_remove(struct hid_device *hdev) { struct sony_sc *sc = hid_get_drvdata(hdev); if (sc->quirks & (GHL_GUITAR_PS3WIIU | GHL_GUITAR_PS4)) { timer_delete_sync(&sc->ghl_poke_timer); usb_free_urb(sc->ghl_urb); } hid_hw_close(hdev); sony_cancel_work_sync(sc); sony_remove_dev_list(sc); sony_release_device_id(sc); hid_hw_stop(hdev); } #ifdef CONFIG_PM static int sony_suspend(struct hid_device *hdev, pm_message_t message) { #ifdef CONFIG_SONY_FF /* On suspend stop any running force-feedback events */ if (SONY_FF_SUPPORT) { struct sony_sc *sc = hid_get_drvdata(hdev); sc->left = sc->right = 0; sony_send_output_report(sc); } #endif return 0; } static int sony_resume(struct hid_device *hdev) { struct sony_sc *sc = hid_get_drvdata(hdev); /* * The Sixaxis and navigation controllers on USB need to be * reinitialized on resume or they won't behave properly. 
*/ if ((sc->quirks & SIXAXIS_CONTROLLER_USB) || (sc->quirks & NAVIGATION_CONTROLLER_USB)) { sixaxis_set_operational_usb(sc->hdev); sc->defer_initialization = 1; } return 0; } #endif static const struct hid_device_id sony_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER), .driver_data = SIXAXIS_CONTROLLER_USB }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER), .driver_data = NAVIGATION_CONTROLLER_USB }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER), .driver_data = NAVIGATION_CONTROLLER_BT }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_MOTION_CONTROLLER), .driver_data = MOTION_CONTROLLER_USB }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_MOTION_CONTROLLER), .driver_data = MOTION_CONTROLLER_BT }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER), .driver_data = SIXAXIS_CONTROLLER_BT }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE), .driver_data = VAIO_RDESC_CONSTANT }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE), .driver_data = VAIO_RDESC_CONSTANT }, /* * Wired Buzz Controller. Reported as Sony Hub from its USB ID and as * Logitech joystick from the device descriptor. */ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER), .driver_data = BUZZ_CONTROLLER }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER), .driver_data = BUZZ_CONTROLLER }, /* PS3 BD Remote Control */ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_BDREMOTE), .driver_data = PS3REMOTE }, /* Logitech Harmony Adapter for PS3 */ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3), .driver_data = PS3REMOTE }, /* SMK-Link PS3 BD Remote Control */ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE), .driver_data = PS3REMOTE }, /* Nyko Core Controller for PS3 */ { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER), .driver_data = SIXAXIS_CONTROLLER_USB | SINO_LITE_CONTROLLER }, /* SMK-Link NSG-MR5U Remote Control */ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_NSG_MR5U_REMOTE), .driver_data = NSG_MR5U_REMOTE_BT }, /* SMK-Link NSG-MR7U Remote Control */ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_NSG_MR7U_REMOTE), .driver_data = NSG_MR7U_REMOTE_BT }, /* Guitar Hero Live PS3 and Wii U guitar dongles */ { HID_USB_DEVICE(USB_VENDOR_ID_SONY_RHYTHM, USB_DEVICE_ID_SONY_PS3WIIU_GHLIVE_DONGLE), .driver_data = GHL_GUITAR_PS3WIIU | GH_GUITAR_CONTROLLER }, /* Guitar Hero PC Guitar Dongle */ { HID_USB_DEVICE(USB_VENDOR_ID_REDOCTANE, USB_DEVICE_ID_REDOCTANE_GUITAR_DONGLE), .driver_data = GH_GUITAR_CONTROLLER }, /* Guitar Hero PS3 World Tour Guitar Dongle */ { HID_USB_DEVICE(USB_VENDOR_ID_SONY_RHYTHM, USB_DEVICE_ID_SONY_PS3_GUITAR_DONGLE), .driver_data = GH_GUITAR_CONTROLLER }, /* Guitar Hero Live PS4 guitar dongles */ { HID_USB_DEVICE(USB_VENDOR_ID_REDOCTANE, USB_DEVICE_ID_REDOCTANE_PS4_GHLIVE_DONGLE), .driver_data = GHL_GUITAR_PS4 | GH_GUITAR_CONTROLLER }, { } }; MODULE_DEVICE_TABLE(hid, sony_devices); static struct hid_driver sony_driver = { .name = "sony", .id_table = sony_devices, .input_mapping = sony_mapping, .input_configured = sony_input_configured, .probe = sony_probe, .remove = sony_remove, .report_fixup = sony_report_fixup, .raw_event = sony_raw_event, #ifdef CONFIG_PM .suspend = sony_suspend, .resume = sony_resume, .reset_resume = sony_resume, 
#endif }; static int __init sony_init(void) { dbg_hid("Sony:%s\n", __func__); return hid_register_driver(&sony_driver); } static void __exit sony_exit(void) { dbg_hid("Sony:%s\n", __func__); hid_unregister_driver(&sony_driver); ida_destroy(&sony_device_id_allocator); } module_init(sony_init); module_exit(sony_exit); MODULE_DESCRIPTION("HID driver for Sony / PS2 / PS3 / PS4 BD devices"); MODULE_LICENSE("GPL");
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Asynchronous Compression operations * * Copyright (c) 2016, Intel Corporation * Authors: Weigang Li <weigang.li@intel.com> * Giovanni Cabiddu <giovanni.cabiddu@intel.com> */ #ifndef _CRYPTO_ACOMP_H #define _CRYPTO_ACOMP_H #include <linux/atomic.h> #include <linux/args.h> #include <linux/compiler_types.h> #include <linux/container_of.h> #include <linux/crypto.h> #include <linux/err.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <linux/spinlock_types.h> #include <linux/types.h> /* Set this bit if source is virtual address instead of SG list. */ #define CRYPTO_ACOMP_REQ_SRC_VIRT 0x00000002 /* Set this bit if virtual address source cannot be used for DMA. */ #define CRYPTO_ACOMP_REQ_SRC_NONDMA 0x00000004 /* Set this bit if destination is virtual address instead of SG list. */ #define CRYPTO_ACOMP_REQ_DST_VIRT 0x00000008 /* Set this bit if virtual address destination cannot be used for DMA. */ #define CRYPTO_ACOMP_REQ_DST_NONDMA 0x00000010 /* Private flags that should not be touched by the user.
*/ #define CRYPTO_ACOMP_REQ_PRIVATE \ (CRYPTO_ACOMP_REQ_SRC_VIRT | CRYPTO_ACOMP_REQ_SRC_NONDMA | \ CRYPTO_ACOMP_REQ_DST_VIRT | CRYPTO_ACOMP_REQ_DST_NONDMA) #define CRYPTO_ACOMP_DST_MAX 131072 #define MAX_SYNC_COMP_REQSIZE 0 #define ACOMP_REQUEST_ON_STACK(name, tfm) \ char __##name##_req[sizeof(struct acomp_req) + \ MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \ struct acomp_req *name = acomp_request_on_stack_init( \ __##name##_req, (tfm)) #define ACOMP_REQUEST_CLONE(name, gfp) \ acomp_request_clone(name, sizeof(__##name##_req), gfp) struct acomp_req; struct folio; struct acomp_req_chain { crypto_completion_t compl; void *data; struct scatterlist ssg; struct scatterlist dsg; union { const u8 *src; struct folio *sfolio; }; union { u8 *dst; struct folio *dfolio; }; u32 flags; }; /** * struct acomp_req - asynchronous (de)compression request * * @base: Common attributes for asynchronous crypto requests * @src: Source scatterlist * @dst: Destination scatterlist * @svirt: Source virtual address * @dvirt: Destination virtual address * @slen: Size of the input buffer * @dlen: Size of the output buffer and number of bytes produced * @chain: Private API code data, do not use * @__ctx: Start of private context data */ struct acomp_req { struct crypto_async_request base; union { struct scatterlist *src; const u8 *svirt; }; union { struct scatterlist *dst; u8 *dvirt; }; unsigned int slen; unsigned int dlen; struct acomp_req_chain chain; void *__ctx[] CRYPTO_MINALIGN_ATTR; }; /** * struct crypto_acomp - user-instantiated objects which encapsulate * algorithms and core processing logic * * @compress: Function performs a compress operation * @decompress: Function performs a de-compress operation * @reqsize: Context size for (de)compression requests * @fb: Synchronous fallback tfm * @base: Common crypto API algorithm data structure */ struct crypto_acomp { int (*compress)(struct acomp_req *req); int (*decompress)(struct acomp_req *req); unsigned int reqsize; struct crypto_tfm base; }; #define COMP_ALG_COMMON { \ struct crypto_alg base; \ } struct comp_alg_common COMP_ALG_COMMON; /** * DOC: Asynchronous Compression API * * The Asynchronous Compression API is used with the algorithms of type * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto) */ /** * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle * @alg_name: is the cra_name / name or cra_driver_name / driver name of the * compression algorithm e.g. "deflate" * @type: specifies the type of the algorithm * @mask: specifies the mask for the algorithm * * Allocate a handle for a compression algorithm. The returned struct * crypto_acomp is the handle that is required for any subsequent * API invocation for the compression operations. * * Return: allocated handle in case of success; IS_ERR() is true in case * of an error, PTR_ERR() returns the error code. */ struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type, u32 mask); /** * crypto_alloc_acomp_node() -- allocate ACOMPRESS tfm handle with desired NUMA node * @alg_name: is the cra_name / name or cra_driver_name / driver name of the * compression algorithm e.g. "deflate" * @type: specifies the type of the algorithm * @mask: specifies the mask for the algorithm * @node: specifies the NUMA node the ZIP hardware belongs to * * Allocate a handle for a compression algorithm. Drivers should try to use * (de)compressors on the specified NUMA node. * The returned struct crypto_acomp is the handle that is required for any * subsequent API invocation for the compression operations. 
* * Return: allocated handle in case of success; IS_ERR() is true in case * of an error, PTR_ERR() returns the error code. */ struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type, u32 mask, int node); static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm) { return &tfm->base; } static inline struct comp_alg_common *__crypto_comp_alg_common( struct crypto_alg *alg) { return container_of(alg, struct comp_alg_common, base); } static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm) { return container_of(tfm, struct crypto_acomp, base); } static inline struct comp_alg_common *crypto_comp_alg_common( struct crypto_acomp *tfm) { return __crypto_comp_alg_common(crypto_acomp_tfm(tfm)->__crt_alg); } static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm) { return tfm->reqsize; } static inline void acomp_request_set_tfm(struct acomp_req *req, struct crypto_acomp *tfm) { crypto_request_set_tfm(&req->base, crypto_acomp_tfm(tfm)); } static inline bool acomp_is_async(struct crypto_acomp *tfm) { return crypto_comp_alg_common(tfm)->base.cra_flags & CRYPTO_ALG_ASYNC; } static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req) { return __crypto_acomp_tfm(req->base.tfm); } /** * crypto_free_acomp() -- free ACOMPRESS tfm handle * * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp() * * If @tfm is a NULL or error pointer, this function does nothing. */ static inline void crypto_free_acomp(struct crypto_acomp *tfm) { crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm)); } static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask) { type &= ~CRYPTO_ALG_TYPE_MASK; type |= CRYPTO_ALG_TYPE_ACOMPRESS; mask |= CRYPTO_ALG_TYPE_ACOMPRESS_MASK; return crypto_has_alg(alg_name, type, mask); } static inline const char *crypto_acomp_alg_name(struct crypto_acomp *tfm) { return crypto_tfm_alg_name(crypto_acomp_tfm(tfm)); } static inline const char *crypto_acomp_driver_name(struct crypto_acomp *tfm) { return crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)); } /** * acomp_request_alloc() -- allocates asynchronous (de)compression request * * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp() * @gfp: gfp to pass to kzalloc (defaults to GFP_KERNEL) * * Return: allocated handle in case of success or NULL in case of an error */ static inline struct acomp_req *acomp_request_alloc_extra_noprof( struct crypto_acomp *tfm, size_t extra, gfp_t gfp) { struct acomp_req *req; size_t len; len = ALIGN(sizeof(*req) + crypto_acomp_reqsize(tfm), CRYPTO_MINALIGN); if (check_add_overflow(len, extra, &len)) return NULL; req = kzalloc_noprof(len, gfp); if (likely(req)) acomp_request_set_tfm(req, tfm); return req; } #define acomp_request_alloc_noprof(tfm, ...) \ CONCATENATE(acomp_request_alloc_noprof_, COUNT_ARGS(__VA_ARGS__))( \ tfm, ##__VA_ARGS__) #define acomp_request_alloc_noprof_0(tfm) \ acomp_request_alloc_noprof_1(tfm, GFP_KERNEL) #define acomp_request_alloc_noprof_1(tfm, gfp) \ acomp_request_alloc_extra_noprof(tfm, 0, gfp) #define acomp_request_alloc(...) alloc_hooks(acomp_request_alloc_noprof(__VA_ARGS__)) /** * acomp_request_alloc_extra() -- allocate acomp request with extra memory * * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp() * @extra: amount of extra memory * @gfp: gfp to pass to kzalloc * * Return: allocated handle in case of success or NULL in case of an error */ #define acomp_request_alloc_extra(...) 
alloc_hooks(acomp_request_alloc_extra_noprof(__VA_ARGS__)) static inline void *acomp_request_extra(struct acomp_req *req) { struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); size_t len; len = ALIGN(sizeof(*req) + crypto_acomp_reqsize(tfm), CRYPTO_MINALIGN); return (void *)((char *)req + len); } static inline bool acomp_req_on_stack(struct acomp_req *req) { return crypto_req_on_stack(&req->base); } /** * acomp_request_free() -- zeroize and free asynchronous (de)compression * request as well as the output buffer if allocated * inside the algorithm * * @req: request to free */ static inline void acomp_request_free(struct acomp_req *req) { if (!req || acomp_req_on_stack(req)) return; kfree_sensitive(req); } /** * acomp_request_set_callback() -- Sets an asynchronous callback * * Callback will be called when an asynchronous operation on a given * request is finished. * * @req: request that the callback will be set for * @flgs: specify for instance if the operation may backlog * @cmlp: callback which will be called * @data: private data used by the caller */ static inline void acomp_request_set_callback(struct acomp_req *req, u32 flgs, crypto_completion_t cmpl, void *data) { flgs &= ~CRYPTO_ACOMP_REQ_PRIVATE; flgs |= req->base.flags & CRYPTO_ACOMP_REQ_PRIVATE; crypto_request_set_callback(&req->base, flgs, cmpl, data); } /** * acomp_request_set_params() -- Sets request parameters * * Sets parameters required by an acomp operation * * @req: asynchronous compress request * @src: pointer to input buffer scatterlist * @dst: pointer to output buffer scatterlist. If this is NULL, the * acomp layer will allocate the output memory * @slen: size of the input buffer * @dlen: size of the output buffer. If dst is NULL, this can be used by * the user to specify the maximum amount of memory to allocate */ static inline void acomp_request_set_params(struct acomp_req *req, struct scatterlist *src, struct scatterlist *dst, unsigned int slen, unsigned int dlen) { req->src = src; req->dst = dst; req->slen = slen; req->dlen = dlen; req->base.flags &= ~(CRYPTO_ACOMP_REQ_SRC_VIRT | CRYPTO_ACOMP_REQ_SRC_NONDMA | CRYPTO_ACOMP_REQ_DST_VIRT | CRYPTO_ACOMP_REQ_DST_NONDMA); } /** * acomp_request_set_src_sg() -- Sets source scatterlist * * Sets source scatterlist required by an acomp operation. * * @req: asynchronous compress request * @src: pointer to input buffer scatterlist * @slen: size of the input buffer */ static inline void acomp_request_set_src_sg(struct acomp_req *req, struct scatterlist *src, unsigned int slen) { req->src = src; req->slen = slen; req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA; req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_VIRT; } /** * acomp_request_set_src_dma() -- Sets DMA source virtual address * * Sets source virtual address required by an acomp operation. * The address must be usable for DMA. * * @req: asynchronous compress request * @src: virtual address pointer to input buffer * @slen: size of the input buffer */ static inline void acomp_request_set_src_dma(struct acomp_req *req, const u8 *src, unsigned int slen) { req->svirt = src; req->slen = slen; req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA; req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT; } /** * acomp_request_set_src_nondma() -- Sets non-DMA source virtual address * * Sets source virtual address required by an acomp operation. * The address can not be used for DMA. 
* * @req: asynchronous compress request * @src: virtual address pointer to input buffer * @slen: size of the input buffer */ static inline void acomp_request_set_src_nondma(struct acomp_req *req, const u8 *src, unsigned int slen) { req->svirt = src; req->slen = slen; req->base.flags |= CRYPTO_ACOMP_REQ_SRC_NONDMA; req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT; } /** * acomp_request_set_src_folio() -- Sets source folio * * Sets source folio required by an acomp operation. * * @req: asynchronous compress request * @folio: pointer to input folio * @off: input folio offset * @len: size of the input buffer */ static inline void acomp_request_set_src_folio(struct acomp_req *req, struct folio *folio, size_t off, unsigned int len) { sg_init_table(&req->chain.ssg, 1); sg_set_folio(&req->chain.ssg, folio, len, off); acomp_request_set_src_sg(req, &req->chain.ssg, len); } /** * acomp_request_set_dst_sg() -- Sets destination scatterlist * * Sets destination scatterlist required by an acomp operation. * * @req: asynchronous compress request * @dst: pointer to output buffer scatterlist * @dlen: size of the output buffer */ static inline void acomp_request_set_dst_sg(struct acomp_req *req, struct scatterlist *dst, unsigned int dlen) { req->dst = dst; req->dlen = dlen; req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA; req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_VIRT; } /** * acomp_request_set_dst_dma() -- Sets DMA destination virtual address * * Sets destination virtual address required by an acomp operation. * The address must be usable for DMA. * * @req: asynchronous compress request * @dst: virtual address pointer to output buffer * @dlen: size of the output buffer */ static inline void acomp_request_set_dst_dma(struct acomp_req *req, u8 *dst, unsigned int dlen) { req->dvirt = dst; req->dlen = dlen; req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA; req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT; } /** * acomp_request_set_dst_nondma() -- Sets non-DMA destination virtual address * * Sets destination virtual address required by an acomp operation. * The address can not be used for DMA. * * @req: asynchronous compress request * @dst: virtual address pointer to output buffer * @dlen: size of the output buffer */ static inline void acomp_request_set_dst_nondma(struct acomp_req *req, u8 *dst, unsigned int dlen) { req->dvirt = dst; req->dlen = dlen; req->base.flags |= CRYPTO_ACOMP_REQ_DST_NONDMA; req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT; } /** * acomp_request_set_dst_folio() -- Sets destination folio * * Sets destination folio required by an acomp operation. 
* * @req: asynchronous compress request * @folio: pointer to input folio * @off: input folio offset * @len: size of the input buffer */ static inline void acomp_request_set_dst_folio(struct acomp_req *req, struct folio *folio, size_t off, unsigned int len) { sg_init_table(&req->chain.dsg, 1); sg_set_folio(&req->chain.dsg, folio, len, off); acomp_request_set_dst_sg(req, &req->chain.dsg, len); } /** * crypto_acomp_compress() -- Invoke asynchronous compress operation * * Function invokes the asynchronous compress operation * * @req: asynchronous compress request * * Return: zero on success; error code in case of error */ int crypto_acomp_compress(struct acomp_req *req); /** * crypto_acomp_decompress() -- Invoke asynchronous decompress operation * * Function invokes the asynchronous decompress operation * * @req: asynchronous compress request * * Return: zero on success; error code in case of error */ int crypto_acomp_decompress(struct acomp_req *req); static inline struct acomp_req *acomp_request_on_stack_init( char *buf, struct crypto_acomp *tfm) { struct acomp_req *req = (void *)buf; crypto_stack_request_init(&req->base, crypto_acomp_tfm(tfm)); return req; } struct acomp_req *acomp_request_clone(struct acomp_req *req, size_t total, gfp_t gfp); #endif
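The header above documents the individual acomp calls but gives no end-to-end usage. The following is a minimal, hedged sketch of how a caller might drive the API synchronously, assuming a "deflate" implementation is registered; the function name example_deflate_compress and the caller-supplied scatterlists are illustrative, not part of the header.

#include <crypto/acomp.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int example_deflate_compress(struct scatterlist *src_sg, unsigned int slen,
				    struct scatterlist *dst_sg, unsigned int dlen)
{
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	/* Allocate a transform for the "deflate" algorithm. */
	tfm = crypto_alloc_acomp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Allocate a request and wire up a synchronous wait for completion. */
	req = acomp_request_alloc(tfm);
	if (!req) {
		crypto_free_acomp(tfm);
		return -ENOMEM;
	}
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	acomp_request_set_params(req, src_sg, dst_sg, slen, dlen);

	/* Submit the compression and block until it finishes;
	 * on success req->dlen holds the number of bytes produced. */
	err = crypto_wait_req(crypto_acomp_compress(req), &wait);

	acomp_request_free(req);
	crypto_free_acomp(tfm);
	return err;
}

Because the request completes asynchronously, the crypto_wait helpers are used here to turn it into a blocking call; a real user could instead act on the completion callback directly.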
#ifndef _CRYPTO_GCM_H #define _CRYPTO_GCM_H #include <linux/errno.h> #include <crypto/aes.h> #include <crypto/gf128mul.h> #define GCM_AES_IV_SIZE 12 #define GCM_RFC4106_IV_SIZE 8 #define GCM_RFC4543_IV_SIZE 8 /* * validate authentication tag for GCM */ static inline int crypto_gcm_check_authsize(unsigned int authsize) { switch (authsize) { case 4: case 8: case 12: case 13: case 14: case 15: case 16: break; default: return -EINVAL; } return 0; } /* * validate authentication tag for RFC4106 */ static inline int crypto_rfc4106_check_authsize(unsigned int authsize) { switch (authsize) { case 8: case 12: case 16: break; default: return -EINVAL; } return 0; } /* * validate assoclen for RFC4106/RFC4543 */ static inline int crypto_ipsec_check_assoclen(unsigned int assoclen) { switch (assoclen) { case 16: case 20: break; default: return -EINVAL; } return 0; } struct aesgcm_ctx { be128 ghash_key; struct crypto_aes_ctx aes_ctx; unsigned int authsize; }; int aesgcm_expandkey(struct aesgcm_ctx *ctx, const u8 *key, unsigned int keysize, unsigned int authsize); void aesgcm_encrypt(const struct aesgcm_ctx *ctx, u8 *dst, const u8 *src, int crypt_len, const u8 *assoc, int assoc_len, const u8 iv[GCM_AES_IV_SIZE], u8 *authtag); bool __must_check aesgcm_decrypt(const struct aesgcm_ctx *ctx, u8 *dst, const u8 *src, int crypt_len, const u8 *assoc, int assoc_len, const u8 iv[GCM_AES_IV_SIZE], const u8 *authtag); #endif
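The aesgcm_* prototypes above form a small synchronous AES-GCM library interface. Below is a hedged sketch of one possible round trip; the all-zero 128-bit key, zero IV, short placeholder buffers and the name example_aesgcm_roundtrip are illustrative assumptions, not part of the header.

#include <crypto/gcm.h>
#include <linux/errno.h>

static int example_aesgcm_roundtrip(void)
{
	struct aesgcm_ctx ctx;
	static const u8 key[16];		/* placeholder 128-bit key */
	u8 iv[GCM_AES_IV_SIZE] = { 0 };		/* placeholder 96-bit IV */
	u8 assoc[8] = { 0 };			/* placeholder associated data */
	u8 plain[32] = "example payload";
	u8 cipher[32];
	u8 tag[16];
	int err;

	/* Expand the key and select a 16-byte authentication tag. */
	err = aesgcm_expandkey(&ctx, key, sizeof(key), sizeof(tag));
	if (err)
		return err;

	/* Encrypt in place-independent buffers and produce the tag. */
	aesgcm_encrypt(&ctx, cipher, plain, sizeof(plain),
		       assoc, sizeof(assoc), iv, tag);

	/* aesgcm_decrypt() returns false if the tag does not verify. */
	if (!aesgcm_decrypt(&ctx, plain, cipher, sizeof(cipher),
			    assoc, sizeof(assoc), iv, tag))
		return -EBADMSG;

	return 0;
}

The bool, __must_check return of aesgcm_decrypt() forces callers to check tag verification before using the recovered plaintext.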
// SPDX-License-Identifier: GPL-2.0+ /* * adutux - driver for ADU devices from Ontrak Control Systems * This is an experimental driver. Use at your own risk. * This driver is not supported by Ontrak Control Systems.
* * Copyright (c) 2003 John Homppi (SCO, leave this notice here) * * derived from the Lego USB Tower driver 0.56: * Copyright (c) 2003 David Glance <davidgsf@sourceforge.net> * 2001 Juergen Stuber <stuber@loria.fr> * that was derived from USB Skeleton driver - 0.5 * Copyright (c) 2001 Greg Kroah-Hartman (greg@kroah.com) * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/sched/signal.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/mutex.h> #include <linux/uaccess.h> #define DRIVER_AUTHOR "John Homppi" #define DRIVER_DESC "adutux (see www.ontrak.net)" /* Define these values to match your device */ #define ADU_VENDOR_ID 0x0a07 #define ADU_PRODUCT_ID 0x0064 /* table of devices that work with this driver */ static const struct usb_device_id device_table[] = { { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID) }, /* ADU100 */ { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+20) }, /* ADU120 */ { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+30) }, /* ADU130 */ { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+100) }, /* ADU200 */ { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+108) }, /* ADU208 */ { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+118) }, /* ADU218 */ { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, device_table); #ifdef CONFIG_USB_DYNAMIC_MINORS #define ADU_MINOR_BASE 0 #else #define ADU_MINOR_BASE 67 #endif /* we can have up to this number of device plugged in at once */ #define MAX_DEVICES 16 #define COMMAND_TIMEOUT (2*HZ) /* * The locking scheme is a vanilla 3-lock: * adu_device.buflock: A spinlock, covers what IRQs touch. * adutux_mutex: A Static lock to cover open_count. It would also cover * any globals, but we don't have them in 2.6. * adu_device.mtx: A mutex to hold across sleepers like copy_from_user. * It covers all of adu_device, except the open_count * and what .buflock covers. 
*/ /* Structure to hold all of our device specific stuff */ struct adu_device { struct mutex mtx; struct usb_device *udev; /* save off the usb device pointer */ struct usb_interface *interface; unsigned int minor; /* the starting minor number for this device */ char serial_number[8]; int open_count; /* number of times this port has been opened */ unsigned long disconnected:1; char *read_buffer_primary; int read_buffer_length; char *read_buffer_secondary; int secondary_head; int secondary_tail; spinlock_t buflock; wait_queue_head_t read_wait; wait_queue_head_t write_wait; char *interrupt_in_buffer; struct usb_endpoint_descriptor *interrupt_in_endpoint; struct urb *interrupt_in_urb; int read_urb_finished; char *interrupt_out_buffer; struct usb_endpoint_descriptor *interrupt_out_endpoint; struct urb *interrupt_out_urb; int out_urb_finished; }; static DEFINE_MUTEX(adutux_mutex); static struct usb_driver adu_driver; static inline void adu_debug_data(struct device *dev, const char *function, int size, const unsigned char *data) { dev_dbg(dev, "%s - length = %d, data = %*ph\n", function, size, size, data); } /* * adu_abort_transfers * aborts transfers and frees associated data structures */ static void adu_abort_transfers(struct adu_device *dev) { unsigned long flags; if (dev->disconnected) return; /* shutdown transfer */ /* XXX Anchor these instead */ spin_lock_irqsave(&dev->buflock, flags); if (!dev->read_urb_finished) { spin_unlock_irqrestore(&dev->buflock, flags); usb_kill_urb(dev->interrupt_in_urb); } else spin_unlock_irqrestore(&dev->buflock, flags); spin_lock_irqsave(&dev->buflock, flags); if (!dev->out_urb_finished) { spin_unlock_irqrestore(&dev->buflock, flags); wait_event_timeout(dev->write_wait, dev->out_urb_finished, COMMAND_TIMEOUT); usb_kill_urb(dev->interrupt_out_urb); } else spin_unlock_irqrestore(&dev->buflock, flags); } static void adu_delete(struct adu_device *dev) { /* free data structures */ usb_free_urb(dev->interrupt_in_urb); usb_free_urb(dev->interrupt_out_urb); kfree(dev->read_buffer_primary); kfree(dev->read_buffer_secondary); kfree(dev->interrupt_in_buffer); kfree(dev->interrupt_out_buffer); usb_put_dev(dev->udev); kfree(dev); } static void adu_interrupt_in_callback(struct urb *urb) { struct adu_device *dev = urb->context; int status = urb->status; unsigned long flags; adu_debug_data(&dev->udev->dev, __func__, urb->actual_length, urb->transfer_buffer); spin_lock_irqsave(&dev->buflock, flags); if (status != 0) { if ((status != -ENOENT) && (status != -ECONNRESET) && (status != -ESHUTDOWN)) { dev_dbg(&dev->udev->dev, "%s : nonzero status received: %d\n", __func__, status); } goto exit; } if (urb->actual_length > 0 && dev->interrupt_in_buffer[0] != 0x00) { if (dev->read_buffer_length < (4 * usb_endpoint_maxp(dev->interrupt_in_endpoint)) - (urb->actual_length)) { memcpy (dev->read_buffer_primary + dev->read_buffer_length, dev->interrupt_in_buffer, urb->actual_length); dev->read_buffer_length += urb->actual_length; dev_dbg(&dev->udev->dev, "%s reading %d\n", __func__, urb->actual_length); } else { dev_dbg(&dev->udev->dev, "%s : read_buffer overflow\n", __func__); } } exit: dev->read_urb_finished = 1; spin_unlock_irqrestore(&dev->buflock, flags); /* always wake up so we recover from errors */ wake_up_interruptible(&dev->read_wait); } static void adu_interrupt_out_callback(struct urb *urb) { struct adu_device *dev = urb->context; int status = urb->status; unsigned long flags; adu_debug_data(&dev->udev->dev, __func__, urb->actual_length, urb->transfer_buffer); if (status != 0) { 
if ((status != -ENOENT) && (status != -ESHUTDOWN) && (status != -ECONNRESET)) { dev_dbg(&dev->udev->dev, "%s :nonzero status received: %d\n", __func__, status); } return; } spin_lock_irqsave(&dev->buflock, flags); dev->out_urb_finished = 1; wake_up(&dev->write_wait); spin_unlock_irqrestore(&dev->buflock, flags); } static int adu_open(struct inode *inode, struct file *file) { struct adu_device *dev = NULL; struct usb_interface *interface; int subminor; int retval; subminor = iminor(inode); retval = mutex_lock_interruptible(&adutux_mutex); if (retval) goto exit_no_lock; interface = usb_find_interface(&adu_driver, subminor); if (!interface) { pr_err("%s - error, can't find device for minor %d\n", __func__, subminor); retval = -ENODEV; goto exit_no_device; } dev = usb_get_intfdata(interface); if (!dev) { retval = -ENODEV; goto exit_no_device; } /* check that nobody else is using the device */ if (dev->open_count) { retval = -EBUSY; goto exit_no_device; } ++dev->open_count; dev_dbg(&dev->udev->dev, "%s: open count %d\n", __func__, dev->open_count); /* save device in the file's private structure */ file->private_data = dev; /* initialize in direction */ dev->read_buffer_length = 0; /* fixup first read by having urb waiting for it */ usb_fill_int_urb(dev->interrupt_in_urb, dev->udev, usb_rcvintpipe(dev->udev, dev->interrupt_in_endpoint->bEndpointAddress), dev->interrupt_in_buffer, usb_endpoint_maxp(dev->interrupt_in_endpoint), adu_interrupt_in_callback, dev, dev->interrupt_in_endpoint->bInterval); dev->read_urb_finished = 0; if (usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL)) dev->read_urb_finished = 1; /* we ignore failure */ /* end of fixup for first read */ /* initialize out direction */ dev->out_urb_finished = 1; retval = 0; exit_no_device: mutex_unlock(&adutux_mutex); exit_no_lock: return retval; } static void adu_release_internal(struct adu_device *dev) { /* decrement our usage count for the device */ --dev->open_count; dev_dbg(&dev->udev->dev, "%s : open count %d\n", __func__, dev->open_count); if (dev->open_count <= 0) { adu_abort_transfers(dev); dev->open_count = 0; } } static int adu_release(struct inode *inode, struct file *file) { struct adu_device *dev; int retval = 0; if (file == NULL) { retval = -ENODEV; goto exit; } dev = file->private_data; if (dev == NULL) { retval = -ENODEV; goto exit; } mutex_lock(&adutux_mutex); /* not interruptible */ if (dev->open_count <= 0) { dev_dbg(&dev->udev->dev, "%s : device not opened\n", __func__); retval = -ENODEV; goto unlock; } adu_release_internal(dev); if (dev->disconnected) { /* the device was unplugged before the file was released */ if (!dev->open_count) /* ... 
and we're the last user */ adu_delete(dev); } unlock: mutex_unlock(&adutux_mutex); exit: return retval; } static ssize_t adu_read(struct file *file, __user char *buffer, size_t count, loff_t *ppos) { struct adu_device *dev; size_t bytes_read = 0; size_t bytes_to_read = count; int retval = 0; int timeout = 0; int should_submit = 0; unsigned long flags; DECLARE_WAITQUEUE(wait, current); dev = file->private_data; if (mutex_lock_interruptible(&dev->mtx)) return -ERESTARTSYS; /* verify that the device wasn't unplugged */ if (dev->disconnected) { retval = -ENODEV; pr_err("No device or device unplugged %d\n", retval); goto exit; } /* verify that some data was requested */ if (count == 0) { dev_dbg(&dev->udev->dev, "%s : read request of 0 bytes\n", __func__); goto exit; } timeout = COMMAND_TIMEOUT; dev_dbg(&dev->udev->dev, "%s : about to start looping\n", __func__); while (bytes_to_read) { size_t data_in_secondary = dev->secondary_tail - dev->secondary_head; dev_dbg(&dev->udev->dev, "%s : while, data_in_secondary=%zu, status=%d\n", __func__, data_in_secondary, dev->interrupt_in_urb->status); if (data_in_secondary) { /* drain secondary buffer */ size_t amount = min(bytes_to_read, data_in_secondary); if (copy_to_user(buffer, dev->read_buffer_secondary+dev->secondary_head, amount)) { retval = -EFAULT; goto exit; } dev->secondary_head += amount; bytes_read += amount; bytes_to_read -= amount; } else { /* we check the primary buffer */ spin_lock_irqsave (&dev->buflock, flags); if (dev->read_buffer_length) { /* we secure access to the primary */ dev_dbg(&dev->udev->dev, "%s : swap, read_buffer_length = %d\n", __func__, dev->read_buffer_length); swap(dev->read_buffer_primary, dev->read_buffer_secondary); dev->secondary_head = 0; dev->secondary_tail = dev->read_buffer_length; dev->read_buffer_length = 0; spin_unlock_irqrestore(&dev->buflock, flags); /* we have a free buffer so use it */ should_submit = 1; } else { /* even the primary was empty - we may need to do IO */ if (!dev->read_urb_finished) { /* somebody is doing IO */ spin_unlock_irqrestore(&dev->buflock, flags); dev_dbg(&dev->udev->dev, "%s : submitted already\n", __func__); } else { /* we must initiate input */ dev_dbg(&dev->udev->dev, "%s : initiate input\n", __func__); dev->read_urb_finished = 0; spin_unlock_irqrestore(&dev->buflock, flags); usb_fill_int_urb(dev->interrupt_in_urb, dev->udev, usb_rcvintpipe(dev->udev, dev->interrupt_in_endpoint->bEndpointAddress), dev->interrupt_in_buffer, usb_endpoint_maxp(dev->interrupt_in_endpoint), adu_interrupt_in_callback, dev, dev->interrupt_in_endpoint->bInterval); retval = usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL); if (retval) { dev->read_urb_finished = 1; if (retval == -ENOMEM) { retval = bytes_read ? bytes_read : -ENOMEM; } dev_dbg(&dev->udev->dev, "%s : submit failed\n", __func__); goto exit; } } /* we wait for I/O to complete */ set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&dev->read_wait, &wait); spin_lock_irqsave(&dev->buflock, flags); if (!dev->read_urb_finished) { spin_unlock_irqrestore(&dev->buflock, flags); timeout = schedule_timeout(COMMAND_TIMEOUT); } else { spin_unlock_irqrestore(&dev->buflock, flags); set_current_state(TASK_RUNNING); } remove_wait_queue(&dev->read_wait, &wait); if (timeout <= 0) { dev_dbg(&dev->udev->dev, "%s : timeout\n", __func__); retval = bytes_read ? bytes_read : -ETIMEDOUT; goto exit; } if (signal_pending(current)) { dev_dbg(&dev->udev->dev, "%s : signal pending\n", __func__); retval = bytes_read ? 
bytes_read : -EINTR; goto exit; } } } } retval = bytes_read; /* if the primary buffer is empty then use it */ spin_lock_irqsave(&dev->buflock, flags); if (should_submit && dev->read_urb_finished) { dev->read_urb_finished = 0; spin_unlock_irqrestore(&dev->buflock, flags); usb_fill_int_urb(dev->interrupt_in_urb, dev->udev, usb_rcvintpipe(dev->udev, dev->interrupt_in_endpoint->bEndpointAddress), dev->interrupt_in_buffer, usb_endpoint_maxp(dev->interrupt_in_endpoint), adu_interrupt_in_callback, dev, dev->interrupt_in_endpoint->bInterval); if (usb_submit_urb(dev->interrupt_in_urb, GFP_KERNEL) != 0) dev->read_urb_finished = 1; /* we ignore failure */ } else { spin_unlock_irqrestore(&dev->buflock, flags); } exit: /* unlock the device */ mutex_unlock(&dev->mtx); return retval; } static ssize_t adu_write(struct file *file, const __user char *buffer, size_t count, loff_t *ppos) { DECLARE_WAITQUEUE(waita, current); struct adu_device *dev; size_t bytes_written = 0; size_t bytes_to_write; size_t buffer_size; unsigned long flags; int retval; dev = file->private_data; retval = mutex_lock_interruptible(&dev->mtx); if (retval) goto exit_nolock; /* verify that the device wasn't unplugged */ if (dev->disconnected) { retval = -ENODEV; pr_err("No device or device unplugged %d\n", retval); goto exit; } /* verify that we actually have some data to write */ if (count == 0) { dev_dbg(&dev->udev->dev, "%s : write request of 0 bytes\n", __func__); goto exit; } while (count > 0) { add_wait_queue(&dev->write_wait, &waita); set_current_state(TASK_INTERRUPTIBLE); spin_lock_irqsave(&dev->buflock, flags); if (!dev->out_urb_finished) { spin_unlock_irqrestore(&dev->buflock, flags); mutex_unlock(&dev->mtx); if (signal_pending(current)) { dev_dbg(&dev->udev->dev, "%s : interrupted\n", __func__); set_current_state(TASK_RUNNING); retval = -EINTR; goto exit_onqueue; } if (schedule_timeout(COMMAND_TIMEOUT) == 0) { dev_dbg(&dev->udev->dev, "%s - command timed out.\n", __func__); retval = -ETIMEDOUT; goto exit_onqueue; } remove_wait_queue(&dev->write_wait, &waita); retval = mutex_lock_interruptible(&dev->mtx); if (retval) { retval = bytes_written ? bytes_written : retval; goto exit_nolock; } dev_dbg(&dev->udev->dev, "%s : in progress, count = %zd\n", __func__, count); } else { spin_unlock_irqrestore(&dev->buflock, flags); set_current_state(TASK_RUNNING); remove_wait_queue(&dev->write_wait, &waita); dev_dbg(&dev->udev->dev, "%s : sending, count = %zd\n", __func__, count); /* write the data into interrupt_out_buffer from userspace */ buffer_size = usb_endpoint_maxp(dev->interrupt_out_endpoint); bytes_to_write = count > buffer_size ? 
buffer_size : count; dev_dbg(&dev->udev->dev, "%s : buffer_size = %zd, count = %zd, bytes_to_write = %zd\n", __func__, buffer_size, count, bytes_to_write); if (copy_from_user(dev->interrupt_out_buffer, buffer, bytes_to_write) != 0) { retval = -EFAULT; goto exit; } /* send off the urb */ usb_fill_int_urb( dev->interrupt_out_urb, dev->udev, usb_sndintpipe(dev->udev, dev->interrupt_out_endpoint->bEndpointAddress), dev->interrupt_out_buffer, bytes_to_write, adu_interrupt_out_callback, dev, dev->interrupt_out_endpoint->bInterval); dev->interrupt_out_urb->actual_length = bytes_to_write; dev->out_urb_finished = 0; retval = usb_submit_urb(dev->interrupt_out_urb, GFP_KERNEL); if (retval < 0) { dev->out_urb_finished = 1; dev_err(&dev->udev->dev, "Couldn't submit " "interrupt_out_urb %d\n", retval); goto exit; } buffer += bytes_to_write; count -= bytes_to_write; bytes_written += bytes_to_write; } } mutex_unlock(&dev->mtx); return bytes_written; exit: mutex_unlock(&dev->mtx); exit_nolock: return retval; exit_onqueue: remove_wait_queue(&dev->write_wait, &waita); return retval; } /* file operations needed when we register this driver */ static const struct file_operations adu_fops = { .owner = THIS_MODULE, .read = adu_read, .write = adu_write, .open = adu_open, .release = adu_release, .llseek = noop_llseek, }; /* * usb class driver info in order to get a minor number from the usb core, * and to have the device registered with devfs and the driver core */ static struct usb_class_driver adu_class = { .name = "usb/adutux%d", .fops = &adu_fops, .minor_base = ADU_MINOR_BASE, }; /* * adu_probe * * Called by the usb core when a new device is connected that it thinks * this driver might be interested in. */ static int adu_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(interface); struct adu_device *dev = NULL; int retval = -ENOMEM; int in_end_size; int out_end_size; int res; /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(struct adu_device), GFP_KERNEL); if (!dev) return -ENOMEM; mutex_init(&dev->mtx); spin_lock_init(&dev->buflock); dev->udev = usb_get_dev(udev); init_waitqueue_head(&dev->read_wait); init_waitqueue_head(&dev->write_wait); res = usb_find_common_endpoints_reverse(interface->cur_altsetting, NULL, NULL, &dev->interrupt_in_endpoint, &dev->interrupt_out_endpoint); if (res) { dev_err(&interface->dev, "interrupt endpoints not found\n"); retval = res; goto error; } in_end_size = usb_endpoint_maxp(dev->interrupt_in_endpoint); out_end_size = usb_endpoint_maxp(dev->interrupt_out_endpoint); dev->read_buffer_primary = kmalloc((4 * in_end_size), GFP_KERNEL); if (!dev->read_buffer_primary) goto error; /* debug code prime the buffer */ memset(dev->read_buffer_primary, 'a', in_end_size); memset(dev->read_buffer_primary + in_end_size, 'b', in_end_size); memset(dev->read_buffer_primary + (2 * in_end_size), 'c', in_end_size); memset(dev->read_buffer_primary + (3 * in_end_size), 'd', in_end_size); dev->read_buffer_secondary = kmalloc((4 * in_end_size), GFP_KERNEL); if (!dev->read_buffer_secondary) goto error; /* debug code prime the buffer */ memset(dev->read_buffer_secondary, 'e', in_end_size); memset(dev->read_buffer_secondary + in_end_size, 'f', in_end_size); memset(dev->read_buffer_secondary + (2 * in_end_size), 'g', in_end_size); memset(dev->read_buffer_secondary + (3 * in_end_size), 'h', in_end_size); dev->interrupt_in_buffer = kmalloc(in_end_size, GFP_KERNEL); if (!dev->interrupt_in_buffer) goto error; /* 
debug code prime the buffer */ memset(dev->interrupt_in_buffer, 'i', in_end_size); dev->interrupt_in_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->interrupt_in_urb) goto error; dev->interrupt_out_buffer = kmalloc(out_end_size, GFP_KERNEL); if (!dev->interrupt_out_buffer) goto error; dev->interrupt_out_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->interrupt_out_urb) goto error; if (!usb_string(udev, udev->descriptor.iSerialNumber, dev->serial_number, sizeof(dev->serial_number))) { dev_err(&interface->dev, "Could not retrieve serial number\n"); retval = -EIO; goto error; } dev_dbg(&interface->dev, "serial_number=%s", dev->serial_number); /* we can register the device now, as it is ready */ usb_set_intfdata(interface, dev); retval = usb_register_dev(interface, &adu_class); if (retval) { /* something prevented us from registering this driver */ dev_err(&interface->dev, "Not able to get a minor for this device.\n"); usb_set_intfdata(interface, NULL); goto error; } dev->minor = interface->minor; /* let the user know what node this device is now attached to */ dev_info(&interface->dev, "ADU%d %s now attached to /dev/usb/adutux%d\n", le16_to_cpu(udev->descriptor.idProduct), dev->serial_number, (dev->minor - ADU_MINOR_BASE)); return 0; error: adu_delete(dev); return retval; } /* * adu_disconnect * * Called by the usb core when the device is removed from the system. */ static void adu_disconnect(struct usb_interface *interface) { struct adu_device *dev; dev = usb_get_intfdata(interface); usb_deregister_dev(interface, &adu_class); usb_poison_urb(dev->interrupt_in_urb); usb_poison_urb(dev->interrupt_out_urb); mutex_lock(&adutux_mutex); usb_set_intfdata(interface, NULL); mutex_lock(&dev->mtx); /* not interruptible */ dev->disconnected = 1; mutex_unlock(&dev->mtx); /* if the device is not opened, then we clean up right now */ if (!dev->open_count) adu_delete(dev); mutex_unlock(&adutux_mutex); } /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver adu_driver = { .name = "adutux", .probe = adu_probe, .disconnect = adu_disconnect, .id_table = device_table, }; module_usb_driver(adu_driver); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
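For illustration only, a minimal userspace sketch of how the character device registered by the adutux driver above is typically consumed. The /dev/usb/adutux0 node name and the "RPA" command string are assumptions (the node path depends on udev rules and the command set is defined by the ADU product documentation), not something the driver itself mandates.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int adutux_example(void)
{
	char reply[64];
	ssize_t n;
	int fd;

	fd = open("/dev/usb/adutux0", O_RDWR);	/* assumed device node */
	if (fd < 0)
		return -1;

	/* adu_write() copies at most one interrupt-out packet per URB */
	if (write(fd, "RPA", strlen("RPA")) < 0) {	/* hypothetical ADU command */
		close(fd);
		return -1;
	}

	/* adu_read() drains the driver's secondary, then primary, buffer */
	n = read(fd, reply, sizeof(reply) - 1);
	if (n > 0) {
		reply[n] = '\0';
		printf("ADU replied: %s\n", reply);
	}

	close(fd);
	return n > 0 ? 0 : -1;
}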
// SPDX-License-Identifier: GPL-2.0-or-later /** -*- linux-c -*- *********************************************************** * Linux PPP over X/Ethernet (PPPoX/PPPoE) Sockets * * PPPoX --- Generic PPP encapsulation socket family * PPPoE --- PPP over Ethernet (RFC 2516) * * Version: 0.5.2 * * Author: Michal Ostrowski <mostrows@speakeasy.net> * * 051000 : Initialization cleanup * * License: */ #include <linux/string.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/compat.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/net.h> #include <linux/init.h> #include <linux/if_pppox.h> #include <linux/ppp_defs.h> #include <linux/ppp-ioctl.h> #include <linux/ppp_channel.h> #include <linux/kmod.h> #include <net/sock.h> #include <linux/uaccess.h> static const struct pppox_proto *pppox_protos[PX_MAX_PROTO + 1]; int register_pppox_proto(int proto_num, const struct pppox_proto *pp) { if (proto_num < 0 || proto_num > PX_MAX_PROTO) return -EINVAL; if (pppox_protos[proto_num]) return -EALREADY; pppox_protos[proto_num] = pp; return 0; } void unregister_pppox_proto(int proto_num) { if (proto_num >= 0 && proto_num <= PX_MAX_PROTO) pppox_protos[proto_num] = NULL; } void pppox_unbind_sock(struct sock *sk) { /* Clear connection to ppp device, if attached. */ if (sk->sk_state & (PPPOX_BOUND | PPPOX_CONNECTED)) { ppp_unregister_channel(&pppox_sk(sk)->chan); sk->sk_state = PPPOX_DEAD; } } EXPORT_SYMBOL(register_pppox_proto); EXPORT_SYMBOL(unregister_pppox_proto); EXPORT_SYMBOL(pppox_unbind_sock); int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; struct pppox_sock *po = pppox_sk(sk); int rc; lock_sock(sk); switch (cmd) { case PPPIOCGCHAN: { int index; rc = -ENOTCONN; if (!(sk->sk_state & PPPOX_CONNECTED)) break; rc = -EINVAL; index = ppp_channel_index(&po->chan); if (put_user(index , (int __user *) arg)) break; rc = 0; sk->sk_state |= PPPOX_BOUND; break; } default: rc = pppox_protos[sk->sk_protocol]->ioctl ?
pppox_protos[sk->sk_protocol]->ioctl(sock, cmd, arg) : -ENOTTY; } release_sock(sk); return rc; } EXPORT_SYMBOL(pppox_ioctl); #ifdef CONFIG_COMPAT int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { if (cmd == PPPOEIOCSFWD32) cmd = PPPOEIOCSFWD; return pppox_ioctl(sock, cmd, (unsigned long)compat_ptr(arg)); } EXPORT_SYMBOL(pppox_compat_ioctl); #endif static int pppox_create(struct net *net, struct socket *sock, int protocol, int kern) { int rc = -EPROTOTYPE; if (protocol < 0 || protocol > PX_MAX_PROTO) goto out; rc = -EPROTONOSUPPORT; if (!pppox_protos[protocol]) request_module("net-pf-%d-proto-%d", PF_PPPOX, protocol); if (!pppox_protos[protocol] || !try_module_get(pppox_protos[protocol]->owner)) goto out; rc = pppox_protos[protocol]->create(net, sock, kern); module_put(pppox_protos[protocol]->owner); out: return rc; } static const struct net_proto_family pppox_proto_family = { .family = PF_PPPOX, .create = pppox_create, .owner = THIS_MODULE, }; static int __init pppox_init(void) { return sock_register(&pppox_proto_family); } static void __exit pppox_exit(void) { sock_unregister(PF_PPPOX); } module_init(pppox_init); module_exit(pppox_exit); MODULE_AUTHOR("Michal Ostrowski <mostrows@speakeasy.net>"); MODULE_DESCRIPTION("PPP over Ethernet driver (generic socket layer)"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_PPPOX);
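As a companion to the socket family code above, a hedged userspace sketch that creates a PPPoE socket and asks for its PPP channel index via PPPIOCGCHAN (the ioctl handled by pppox_ioctl()). The interface name, peer MAC and session id are placeholders that would normally come from PPPoE discovery, and connect() itself is implemented by pppoe.c, which is not shown here.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/if_pppox.h>
#include <linux/ppp-ioctl.h>

static int pppoe_channel_example(const char *ifname,
				 const unsigned char mac[6],
				 unsigned short session_id)
{
	struct sockaddr_pppox sp;
	int chan_index;
	int fd;

	fd = socket(AF_PPPOX, SOCK_STREAM, PX_PROTO_OE);
	if (fd < 0)
		return -1;

	memset(&sp, 0, sizeof(sp));
	sp.sa_family = AF_PPPOX;
	sp.sa_protocol = PX_PROTO_OE;
	sp.sa_addr.pppoe.sid = htons(session_id);	/* from PPPoE discovery */
	memcpy(sp.sa_addr.pppoe.remote, mac, sizeof(sp.sa_addr.pppoe.remote));
	strncpy(sp.sa_addr.pppoe.dev, ifname, sizeof(sp.sa_addr.pppoe.dev) - 1);

	/* connect() (handled by pppoe.c) moves the socket to PPPOX_CONNECTED */
	if (connect(fd, (struct sockaddr *)&sp, sizeof(sp)) < 0) {
		close(fd);
		return -1;
	}

	/* PPPIOCGCHAN is serviced by pppox_ioctl() and also sets PPPOX_BOUND */
	if (ioctl(fd, PPPIOCGCHAN, &chan_index) < 0) {
		close(fd);
		return -1;
	}

	printf("ppp channel index: %d\n", chan_index);
	return fd;
}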
// SPDX-License-Identifier: GPL-2.0-or-later /* * dlmfs.c * * Code which implements the kernel side of a minimal userspace * interface to our DLM. This file handles the virtual file system * used for communication with userspace. Credit should go to ramfs, * which was a template for the fs side of this module. * * Copyright (C) 2003, 2004 Oracle. All rights reserved. */ /* Simple VFS hooks based on: */ /* * Resizable simple ram filesystem for Linux. * * Copyright (C) 2000 Linus Torvalds. * 2000 Transmeta Corp.
*/ #include <linux/module.h> #include <linux/fs.h> #include <linux/fs_context.h> #include <linux/pagemap.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/highmem.h> #include <linux/init.h> #include <linux/string.h> #include <linux/backing-dev.h> #include <linux/poll.h> #include <linux/uaccess.h> #include "../stackglue.h" #include "userdlm.h" #define MLOG_MASK_PREFIX ML_DLMFS #include "../cluster/masklog.h" static const struct super_operations dlmfs_ops; static const struct file_operations dlmfs_file_operations; static const struct inode_operations dlmfs_dir_inode_operations; static const struct inode_operations dlmfs_root_inode_operations; static const struct inode_operations dlmfs_file_inode_operations; static struct kmem_cache *dlmfs_inode_cache; struct workqueue_struct *user_dlm_worker; /* * These are the ABI capabilities of dlmfs. * * Over time, dlmfs has added some features that were not part of the * initial ABI. Unfortunately, some of these features are not detectable * via standard usage. For example, Linux's default poll always returns * EPOLLIN, so there is no way for a caller of poll(2) to know when dlmfs * added poll support. Instead, we provide this list of new capabilities. * * Capabilities is a read-only attribute. We do it as a module parameter * so we can discover it whether dlmfs is built in, loaded, or even not * loaded. * * The ABI features are local to this machine's dlmfs mount. This is * distinct from the locking protocol, which is concerned with inter-node * interaction. * * Capabilities: * - bast : EPOLLIN against the file descriptor of a held lock * signifies a bast fired on the lock. */ #define DLMFS_CAPABILITIES "bast stackglue" static int param_set_dlmfs_capabilities(const char *val, const struct kernel_param *kp) { printk(KERN_ERR "%s: readonly parameter\n", kp->name); return -EINVAL; } static int param_get_dlmfs_capabilities(char *buffer, const struct kernel_param *kp) { return sysfs_emit(buffer, DLMFS_CAPABILITIES); } module_param_call(capabilities, param_set_dlmfs_capabilities, param_get_dlmfs_capabilities, NULL, 0444); MODULE_PARM_DESC(capabilities, DLMFS_CAPABILITIES); /* * decodes a set of open flags into a valid lock level and a set of flags. * returns < 0 if we have invalid flags * flags which mean something to us: * O_RDONLY -> PRMODE level * O_WRONLY -> EXMODE level * * O_NONBLOCK -> NOQUEUE */ static int dlmfs_decode_open_flags(int open_flags, int *level, int *flags) { if (open_flags & (O_WRONLY|O_RDWR)) *level = DLM_LOCK_EX; else *level = DLM_LOCK_PR; *flags = 0; if (open_flags & O_NONBLOCK) *flags |= DLM_LKF_NOQUEUE; return 0; } static int dlmfs_file_open(struct inode *inode, struct file *file) { int status, level, flags; struct dlmfs_filp_private *fp = NULL; struct dlmfs_inode_private *ip; if (S_ISDIR(inode->i_mode)) BUG(); mlog(0, "open called on inode %lu, flags 0x%x\n", inode->i_ino, file->f_flags); status = dlmfs_decode_open_flags(file->f_flags, &level, &flags); if (status < 0) goto bail; /* We don't want to honor O_APPEND at read/write time as it * doesn't make sense for LVB writes. */ file->f_flags &= ~O_APPEND; fp = kmalloc(sizeof(*fp), GFP_NOFS); if (!fp) { status = -ENOMEM; goto bail; } fp->fp_lock_level = level; ip = DLMFS_I(inode); status = user_dlm_cluster_lock(&ip->ip_lockres, level, flags); if (status < 0) { /* this is a strange error to return here but I want * to be able userspace to be able to distinguish a * valid lock request from one that simply couldn't be * granted. 
*/ if (flags & DLM_LKF_NOQUEUE && status == -EAGAIN) status = -ETXTBSY; kfree(fp); goto bail; } file->private_data = fp; bail: return status; } static int dlmfs_file_release(struct inode *inode, struct file *file) { int level; struct dlmfs_inode_private *ip = DLMFS_I(inode); struct dlmfs_filp_private *fp = file->private_data; if (S_ISDIR(inode->i_mode)) BUG(); mlog(0, "close called on inode %lu\n", inode->i_ino); if (fp) { level = fp->fp_lock_level; if (level != DLM_LOCK_IV) user_dlm_cluster_unlock(&ip->ip_lockres, level); kfree(fp); file->private_data = NULL; } return 0; } /* * We do ->setattr() just to override size changes. Our size is the size * of the LVB and nothing else. */ static int dlmfs_file_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) { int error; struct inode *inode = d_inode(dentry); attr->ia_valid &= ~ATTR_SIZE; error = setattr_prepare(&nop_mnt_idmap, dentry, attr); if (error) return error; setattr_copy(&nop_mnt_idmap, inode, attr); mark_inode_dirty(inode); return 0; } static __poll_t dlmfs_file_poll(struct file *file, poll_table *wait) { __poll_t event = 0; struct inode *inode = file_inode(file); struct dlmfs_inode_private *ip = DLMFS_I(inode); poll_wait(file, &ip->ip_lockres.l_event, wait); spin_lock(&ip->ip_lockres.l_lock); if (ip->ip_lockres.l_flags & USER_LOCK_BLOCKED) event = EPOLLIN | EPOLLRDNORM; spin_unlock(&ip->ip_lockres.l_lock); return event; } static ssize_t dlmfs_file_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { char lvb[DLM_LVB_LEN]; if (!user_dlm_read_lvb(file_inode(file), lvb)) return 0; return simple_read_from_buffer(buf, count, ppos, lvb, sizeof(lvb)); } static ssize_t dlmfs_file_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos) { char lvb_buf[DLM_LVB_LEN]; int bytes_left; struct inode *inode = file_inode(filp); mlog(0, "inode %lu, count = %zu, *ppos = %llu\n", inode->i_ino, count, *ppos); if (*ppos >= DLM_LVB_LEN) return -ENOSPC; /* don't write past the lvb */ if (count > DLM_LVB_LEN - *ppos) count = DLM_LVB_LEN - *ppos; if (!count) return 0; bytes_left = copy_from_user(lvb_buf, buf, count); count -= bytes_left; if (count) user_dlm_write_lvb(inode, lvb_buf, count); *ppos = *ppos + count; mlog(0, "wrote %zu bytes\n", count); return count; } static void dlmfs_init_once(void *foo) { struct dlmfs_inode_private *ip = (struct dlmfs_inode_private *) foo; ip->ip_conn = NULL; ip->ip_parent = NULL; inode_init_once(&ip->ip_vfs_inode); } static struct inode *dlmfs_alloc_inode(struct super_block *sb) { struct dlmfs_inode_private *ip; ip = alloc_inode_sb(sb, dlmfs_inode_cache, GFP_NOFS); if (!ip) return NULL; return &ip->ip_vfs_inode; } static void dlmfs_free_inode(struct inode *inode) { kmem_cache_free(dlmfs_inode_cache, DLMFS_I(inode)); } static void dlmfs_evict_inode(struct inode *inode) { int status; struct dlmfs_inode_private *ip; struct user_lock_res *lockres; int teardown; clear_inode(inode); mlog(0, "inode %lu\n", inode->i_ino); ip = DLMFS_I(inode); lockres = &ip->ip_lockres; if (S_ISREG(inode->i_mode)) { spin_lock(&lockres->l_lock); teardown = !!(lockres->l_flags & USER_LOCK_IN_TEARDOWN); spin_unlock(&lockres->l_lock); if (!teardown) { status = user_dlm_destroy_lock(lockres); if (status < 0) mlog_errno(status); } iput(ip->ip_parent); goto clear_fields; } mlog(0, "we're a directory, ip->ip_conn = 0x%p\n", ip->ip_conn); /* we must be a directory. If required, lets unregister the * dlm context now. 
*/ if (ip->ip_conn) user_dlm_unregister(ip->ip_conn); clear_fields: ip->ip_parent = NULL; ip->ip_conn = NULL; } static struct inode *dlmfs_get_root_inode(struct super_block *sb) { struct inode *inode = new_inode(sb); umode_t mode = S_IFDIR | 0755; if (inode) { inode->i_ino = get_next_ino(); inode_init_owner(&nop_mnt_idmap, inode, NULL, mode); simple_inode_init_ts(inode); inc_nlink(inode); inode->i_fop = &simple_dir_operations; inode->i_op = &dlmfs_root_inode_operations; } return inode; } static struct inode *dlmfs_get_inode(struct inode *parent, struct dentry *dentry, umode_t mode) { struct super_block *sb = parent->i_sb; struct inode * inode = new_inode(sb); struct dlmfs_inode_private *ip; if (!inode) return NULL; inode->i_ino = get_next_ino(); inode_init_owner(&nop_mnt_idmap, inode, parent, mode); simple_inode_init_ts(inode); ip = DLMFS_I(inode); ip->ip_conn = DLMFS_I(parent)->ip_conn; switch (mode & S_IFMT) { default: /* for now we don't support anything other than * directories and regular files. */ BUG(); break; case S_IFREG: inode->i_op = &dlmfs_file_inode_operations; inode->i_fop = &dlmfs_file_operations; i_size_write(inode, DLM_LVB_LEN); user_dlm_lock_res_init(&ip->ip_lockres, dentry); /* released at clear_inode time, this insures that we * get to drop the dlm reference on each lock *before* * we call the unregister code for releasing parent * directories. */ ip->ip_parent = igrab(parent); BUG_ON(!ip->ip_parent); break; case S_IFDIR: inode->i_op = &dlmfs_dir_inode_operations; inode->i_fop = &simple_dir_operations; /* directory inodes start off with i_nlink == * 2 (for "." entry) */ inc_nlink(inode); break; } return inode; } /* * File creation. Allocate an inode, and we're done.. */ /* SMP-safe */ static struct dentry *dlmfs_mkdir(struct mnt_idmap * idmap, struct inode * dir, struct dentry * dentry, umode_t mode) { int status; struct inode *inode = NULL; const struct qstr *domain = &dentry->d_name; struct dlmfs_inode_private *ip; struct ocfs2_cluster_connection *conn; mlog(0, "mkdir %.*s\n", domain->len, domain->name); /* verify that we have a proper domain */ if (domain->len >= GROUP_NAME_MAX) { status = -EINVAL; mlog(ML_ERROR, "invalid domain name for directory.\n"); goto bail; } inode = dlmfs_get_inode(dir, dentry, mode | S_IFDIR); if (!inode) { status = -ENOMEM; mlog_errno(status); goto bail; } ip = DLMFS_I(inode); conn = user_dlm_register(domain); if (IS_ERR(conn)) { status = PTR_ERR(conn); mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n", status, domain->len, domain->name); goto bail; } ip->ip_conn = conn; inc_nlink(dir); d_make_persistent(dentry, inode); status = 0; bail: if (status < 0) iput(inode); return ERR_PTR(status); } static int dlmfs_create(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { int status = 0; struct inode *inode; const struct qstr *name = &dentry->d_name; mlog(0, "create %.*s\n", name->len, name->name); /* verify name is valid and doesn't contain any dlm reserved * characters */ if (name->len >= USER_DLM_LOCK_ID_MAX_LEN || name->name[0] == '$') { status = -EINVAL; mlog(ML_ERROR, "invalid lock name, %.*s\n", name->len, name->name); goto bail; } inode = dlmfs_get_inode(dir, dentry, mode | S_IFREG); if (!inode) { status = -ENOMEM; mlog_errno(status); goto bail; } d_make_persistent(dentry, inode); bail: return status; } static int dlmfs_unlink(struct inode *dir, struct dentry *dentry) { int status; struct inode *inode = d_inode(dentry); mlog(0, "unlink inode %lu\n", inode->i_ino); /* if there are no 
current holders, or none that are waiting * to acquire a lock, this basically destroys our lockres. */ status = user_dlm_destroy_lock(&DLMFS_I(inode)->ip_lockres); if (status < 0) { mlog(ML_ERROR, "unlink %pd, error %d from destroy\n", dentry, status); goto bail; } status = simple_unlink(dir, dentry); bail: return status; } static int dlmfs_fill_super(struct super_block *sb, struct fs_context *fc) { sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_blocksize = PAGE_SIZE; sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = DLMFS_MAGIC; sb->s_op = &dlmfs_ops; sb->s_root = d_make_root(dlmfs_get_root_inode(sb)); if (!sb->s_root) return -ENOMEM; return 0; } static const struct file_operations dlmfs_file_operations = { .open = dlmfs_file_open, .release = dlmfs_file_release, .poll = dlmfs_file_poll, .read = dlmfs_file_read, .write = dlmfs_file_write, .llseek = default_llseek, }; static const struct inode_operations dlmfs_dir_inode_operations = { .create = dlmfs_create, .lookup = simple_lookup, .unlink = dlmfs_unlink, }; /* this way we can restrict mkdir to only the toplevel of the fs. */ static const struct inode_operations dlmfs_root_inode_operations = { .lookup = simple_lookup, .mkdir = dlmfs_mkdir, .rmdir = simple_rmdir, }; static const struct super_operations dlmfs_ops = { .statfs = simple_statfs, .alloc_inode = dlmfs_alloc_inode, .free_inode = dlmfs_free_inode, .evict_inode = dlmfs_evict_inode, .drop_inode = inode_just_drop, }; static const struct inode_operations dlmfs_file_inode_operations = { .getattr = simple_getattr, .setattr = dlmfs_file_setattr, }; static int dlmfs_get_tree(struct fs_context *fc) { return get_tree_nodev(fc, dlmfs_fill_super); } static const struct fs_context_operations dlmfs_context_ops = { .get_tree = dlmfs_get_tree, }; static int dlmfs_init_fs_context(struct fs_context *fc) { fc->ops = &dlmfs_context_ops; return 0; } static struct file_system_type dlmfs_fs_type = { .owner = THIS_MODULE, .name = "ocfs2_dlmfs", .kill_sb = kill_anon_super, .init_fs_context = dlmfs_init_fs_context, }; MODULE_ALIAS_FS("ocfs2_dlmfs"); static int __init init_dlmfs_fs(void) { int status; int cleanup_inode = 0, cleanup_worker = 0; dlmfs_inode_cache = kmem_cache_create("dlmfs_inode_cache", sizeof(struct dlmfs_inode_private), 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| SLAB_ACCOUNT), dlmfs_init_once); if (!dlmfs_inode_cache) { status = -ENOMEM; goto bail; } cleanup_inode = 1; user_dlm_worker = alloc_workqueue("user_dlm", WQ_MEM_RECLAIM | WQ_PERCPU, 0); if (!user_dlm_worker) { status = -ENOMEM; goto bail; } cleanup_worker = 1; user_dlm_set_locking_protocol(); status = register_filesystem(&dlmfs_fs_type); bail: if (status) { if (cleanup_inode) kmem_cache_destroy(dlmfs_inode_cache); if (cleanup_worker) destroy_workqueue(user_dlm_worker); } else printk("OCFS2 User DLM kernel interface loaded\n"); return status; } static void __exit exit_dlmfs_fs(void) { unregister_filesystem(&dlmfs_fs_type); destroy_workqueue(user_dlm_worker); /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. */ rcu_barrier(); kmem_cache_destroy(dlmfs_inode_cache); } MODULE_AUTHOR("Oracle"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("OCFS2 DLM-Filesystem"); module_init(init_dlmfs_fs) module_exit(exit_dlmfs_fs)
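For orientation, a minimal userspace sketch of the dlmfs interface implemented above: one directory per lock domain, one file per lock, with open flags mapped to DLM lock levels by dlmfs_decode_open_flags(). The /dlm path assumes the filesystem has been mounted there (for example with mount -t ocfs2_dlmfs ocfs2_dlmfs /dlm); the domain and lock names are placeholders.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

static int dlmfs_lock_example(void)
{
	char lvb[64];
	ssize_t n;
	int fd;

	/* one directory per lock domain; dlmfs_mkdir() registers the domain */
	if (mkdir("/dlm/exampledomain", 0755) < 0 && errno != EEXIST)
		return -1;

	/*
	 * O_RDWR maps to DLM_LOCK_EX and O_NONBLOCK to DLM_LKF_NOQUEUE, so
	 * this is a trylock; a contended lock fails with ETXTBSY, as seen
	 * in dlmfs_file_open().
	 */
	fd = open("/dlm/exampledomain/examplelock",
		  O_CREAT | O_RDWR | O_NONBLOCK, 0600);
	if (fd < 0)
		return -1;

	/* the file contents are the lock value block (LVB) */
	n = read(fd, lvb, sizeof(lvb));
	if (n >= 0)
		printf("read %zd LVB bytes while holding the lock\n", n);

	close(fd);	/* dlmfs_file_release() drops the cluster lock */
	return 0;
}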
// SPDX-License-Identifier: GPL-2.0-only /* * irq.c: API for in kernel interrupt controller * Copyright (c) 2007, Intel Corporation. * Copyright 2009 Red Hat, Inc. and/or its affiliates. * * Authors: * Yaozu (Eddie) Dong <Eddie.dong@intel.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/export.h> #include <linux/kvm_host.h> #include <linux/kvm_irqfd.h> #include "hyperv.h" #include "ioapic.h" #include "irq.h" #include "trace.h" #include "x86.h" #include "xen.h" /* * check if there are pending timer events * to be processed.
*/ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) { int r = 0; if (lapic_in_kernel(vcpu)) r = apic_has_pending_timer(vcpu); if (kvm_xen_timer_enabled(vcpu)) r += kvm_xen_has_pending_timer(vcpu); return r; } /* * check if there is a pending userspace external interrupt */ static int pending_userspace_extint(struct kvm_vcpu *v) { return v->arch.pending_external_vector != -1; } static int get_userspace_extint(struct kvm_vcpu *vcpu) { int vector = vcpu->arch.pending_external_vector; vcpu->arch.pending_external_vector = -1; return vector; } /* * check if there is pending interrupt from * non-APIC source without intack. */ int kvm_cpu_has_extint(struct kvm_vcpu *v) { /* * FIXME: interrupt.injected represents an interrupt whose * side-effects have already been applied (e.g. bit from IRR * already moved to ISR). Therefore, it is incorrect to rely * on interrupt.injected to know if there is a pending * interrupt in the user-mode LAPIC. * This leads to nVMX/nSVM not be able to distinguish * if it should exit from L2 to L1 on EXTERNAL_INTERRUPT on * pending interrupt or should re-inject an injected * interrupt. */ if (!lapic_in_kernel(v)) return v->arch.interrupt.injected; if (kvm_xen_has_interrupt(v)) return 1; if (!kvm_apic_accept_pic_intr(v)) return 0; #ifdef CONFIG_KVM_IOAPIC if (pic_in_kernel(v->kvm)) return v->kvm->arch.vpic->output; #endif WARN_ON_ONCE(!irqchip_split(v->kvm)); return pending_userspace_extint(v); } /* * check if there is injectable interrupt: * when virtual interrupt delivery enabled, * interrupt from apic will handled by hardware, * we don't need to check it here. */ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v) { if (kvm_cpu_has_extint(v)) return 1; if (!is_guest_mode(v) && kvm_vcpu_apicv_active(v)) return 0; return kvm_apic_has_interrupt(v) != -1; /* LAPIC */ } EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_cpu_has_injectable_intr); /* * check if there is pending interrupt without * intack. */ int kvm_cpu_has_interrupt(struct kvm_vcpu *v) { if (kvm_cpu_has_extint(v)) return 1; if (lapic_in_kernel(v) && v->arch.apic->guest_apic_protected) return kvm_x86_call(protected_apic_has_interrupt)(v); return kvm_apic_has_interrupt(v) != -1; /* LAPIC */ } EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_cpu_has_interrupt); /* * Read pending interrupt(from non-APIC source) * vector and intack. */ int kvm_cpu_get_extint(struct kvm_vcpu *v) { if (!kvm_cpu_has_extint(v)) { WARN_ON(!lapic_in_kernel(v)); return -1; } if (!lapic_in_kernel(v)) return v->arch.interrupt.nr; #ifdef CONFIG_KVM_XEN if (kvm_xen_has_interrupt(v)) return v->kvm->arch.xen.upcall_vector; #endif #ifdef CONFIG_KVM_IOAPIC if (pic_in_kernel(v->kvm)) return kvm_pic_read_irq(v->kvm); /* PIC */ #endif WARN_ON_ONCE(!irqchip_split(v->kvm)); return get_userspace_extint(v); } EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_cpu_get_extint); /* * Read pending interrupt vector and intack. 
*/ int kvm_cpu_get_interrupt(struct kvm_vcpu *v) { int vector = kvm_cpu_get_extint(v); if (vector != -1) return vector; /* PIC */ vector = kvm_apic_has_interrupt(v); /* APIC */ if (vector != -1) kvm_apic_ack_interrupt(v, vector); return vector; } void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu) { if (lapic_in_kernel(vcpu)) kvm_inject_apic_timer_irqs(vcpu); if (kvm_xen_timer_enabled(vcpu)) kvm_xen_inject_timer_irqs(vcpu); } void __kvm_migrate_timers(struct kvm_vcpu *vcpu) { __kvm_migrate_apic_timer(vcpu); #ifdef CONFIG_KVM_IOAPIC __kvm_migrate_pit_timer(vcpu); #endif kvm_x86_call(migrate_timers)(vcpu); } bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args) { bool resample = args->flags & KVM_IRQFD_FLAG_RESAMPLE; return resample ? irqchip_full(kvm) : irqchip_in_kernel(kvm); } bool kvm_arch_irqchip_in_kernel(struct kvm *kvm) { return irqchip_in_kernel(kvm); } static void kvm_msi_to_lapic_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e, struct kvm_lapic_irq *irq) { struct msi_msg msg = { .address_lo = e->msi.address_lo, .address_hi = e->msi.address_hi, .data = e->msi.data }; trace_kvm_msi_set_irq(msg.address_lo | (kvm->arch.x2apic_format ? (u64)msg.address_hi << 32 : 0), msg.data); irq->dest_id = x86_msi_msg_get_destid(&msg, kvm->arch.x2apic_format); irq->vector = msg.arch_data.vector; irq->dest_mode = kvm_lapic_irq_dest_mode(msg.arch_addr_lo.dest_mode_logical); irq->trig_mode = msg.arch_data.is_level; irq->delivery_mode = msg.arch_data.delivery_mode << 8; irq->msi_redir_hint = msg.arch_addr_lo.redirect_hint; irq->level = 1; irq->shorthand = APIC_DEST_NOSHORT; } static inline bool kvm_msi_route_invalid(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e) { return kvm->arch.x2apic_format && (e->msi.address_hi & 0xff); } int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, int irq_source_id, int level, bool line_status) { struct kvm_lapic_irq irq; if (kvm_msi_route_invalid(kvm, e)) return -EINVAL; if (!level) return -1; kvm_msi_to_lapic_irq(kvm, e, &irq); return kvm_irq_delivery_to_apic(kvm, NULL, &irq, NULL); } int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, int irq_source_id, int level, bool line_status) { struct kvm_lapic_irq irq; int r; switch (e->type) { #ifdef CONFIG_KVM_HYPERV case KVM_IRQ_ROUTING_HV_SINT: return kvm_hv_synic_set_irq(e, kvm, irq_source_id, level, line_status); #endif case KVM_IRQ_ROUTING_MSI: if (kvm_msi_route_invalid(kvm, e)) return -EINVAL; kvm_msi_to_lapic_irq(kvm, e, &irq); if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL)) return r; break; #ifdef CONFIG_KVM_XEN case KVM_IRQ_ROUTING_XEN_EVTCHN: if (!level) return -1; return kvm_xen_set_evtchn_fast(&e->xen_evtchn, kvm); #endif default: break; } return -EWOULDBLOCK; } int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, bool line_status) { if (!irqchip_in_kernel(kvm)) return -ENXIO; irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irq_event->irq, irq_event->level, line_status); return 0; } bool kvm_arch_can_set_irq_routing(struct kvm *kvm) { return irqchip_in_kernel(kvm); } int kvm_set_routing_entry(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e, const struct kvm_irq_routing_entry *ue) { /* We can't check irqchip_in_kernel() here as some callers are * currently initializing the irqchip. Other callers should therefore * check kvm_arch_can_set_irq_routing() before calling this function. 
*/ switch (ue->type) { #ifdef CONFIG_KVM_IOAPIC case KVM_IRQ_ROUTING_IRQCHIP: if (irqchip_split(kvm)) return -EINVAL; e->irqchip.pin = ue->u.irqchip.pin; switch (ue->u.irqchip.irqchip) { case KVM_IRQCHIP_PIC_SLAVE: e->irqchip.pin += PIC_NUM_PINS / 2; fallthrough; case KVM_IRQCHIP_PIC_MASTER: if (ue->u.irqchip.pin >= PIC_NUM_PINS / 2) return -EINVAL; e->set = kvm_pic_set_irq; break; case KVM_IRQCHIP_IOAPIC: if (ue->u.irqchip.pin >= KVM_IOAPIC_NUM_PINS) return -EINVAL; e->set = kvm_ioapic_set_irq; break; default: return -EINVAL; } e->irqchip.irqchip = ue->u.irqchip.irqchip; break; #endif case KVM_IRQ_ROUTING_MSI: e->set = kvm_set_msi; e->msi.address_lo = ue->u.msi.address_lo; e->msi.address_hi = ue->u.msi.address_hi; e->msi.data = ue->u.msi.data; if (kvm_msi_route_invalid(kvm, e)) return -EINVAL; break; #ifdef CONFIG_KVM_HYPERV case KVM_IRQ_ROUTING_HV_SINT: e->set = kvm_hv_synic_set_irq; e->hv_sint.vcpu = ue->u.hv_sint.vcpu; e->hv_sint.sint = ue->u.hv_sint.sint; break; #endif #ifdef CONFIG_KVM_XEN case KVM_IRQ_ROUTING_XEN_EVTCHN: return kvm_xen_setup_evtchn(kvm, e, ue); #endif default: return -EINVAL; } return 0; } void kvm_scan_ioapic_irq(struct kvm_vcpu *vcpu, u32 dest_id, u16 dest_mode, u8 vector, unsigned long *ioapic_handled_vectors) { /* * Intercept EOI if the vCPU is the target of the new IRQ routing, or * the vCPU has a pending IRQ from the old routing, i.e. if the vCPU * may receive a level-triggered IRQ in the future, or already received * level-triggered IRQ. The EOI needs to be intercepted and forwarded * to I/O APIC emulation so that the IRQ can be de-asserted. */ if (kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT, dest_id, dest_mode)) { __set_bit(vector, ioapic_handled_vectors); } else if (kvm_apic_pending_eoi(vcpu, vector)) { __set_bit(vector, ioapic_handled_vectors); /* * Track the highest pending EOI for which the vCPU is NOT the * target in the new routing. Only the EOI for the IRQ that is * in-flight (for the old routing) needs to be intercepted, any * future IRQs that arrive on this vCPU will be coincidental to * the level-triggered routing and don't need to be intercepted. 
*/ if ((int)vector > vcpu->arch.highest_stale_pending_ioapic_eoi) vcpu->arch.highest_stale_pending_ioapic_eoi = vector; } } void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors) { struct kvm *kvm = vcpu->kvm; struct kvm_kernel_irq_routing_entry *entry; struct kvm_irq_routing_table *table; u32 i, nr_ioapic_pins; int idx; idx = srcu_read_lock(&kvm->irq_srcu); table = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu); nr_ioapic_pins = min_t(u32, table->nr_rt_entries, kvm->arch.nr_reserved_ioapic_pins); for (i = 0; i < nr_ioapic_pins; ++i) { hlist_for_each_entry(entry, &table->map[i], link) { struct kvm_lapic_irq irq; if (entry->type != KVM_IRQ_ROUTING_MSI) continue; kvm_msi_to_lapic_irq(vcpu->kvm, entry, &irq); if (!irq.trig_mode) continue; kvm_scan_ioapic_irq(vcpu, irq.dest_id, irq.dest_mode, irq.vector, ioapic_handled_vectors); } } srcu_read_unlock(&kvm->irq_srcu, idx); } void kvm_arch_irq_routing_update(struct kvm *kvm) { #ifdef CONFIG_KVM_HYPERV kvm_hv_irq_routing_update(kvm); #endif if (irqchip_split(kvm)) kvm_make_scan_ioapic_request(kvm); } static int kvm_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm_kernel_irq_routing_entry *entry) { unsigned int host_irq = irqfd->producer->irq; struct kvm *kvm = irqfd->kvm; struct kvm_vcpu *vcpu = NULL; struct kvm_lapic_irq irq; int r; if (WARN_ON_ONCE(!irqchip_in_kernel(kvm) || !kvm_arch_has_irq_bypass())) return -EINVAL; if (entry && entry->type == KVM_IRQ_ROUTING_MSI) { kvm_msi_to_lapic_irq(kvm, entry, &irq); /* * Force remapped mode if hardware doesn't support posting the * virtual interrupt to a vCPU. Only IRQs are postable (NMIs, * SMIs, etc. are not), and neither AMD nor Intel IOMMUs support * posting multicast/broadcast IRQs. If the interrupt can't be * posted, the device MSI needs to be routed to the host so that * the guest's desired interrupt can be synthesized by KVM. * * This means that KVM can only post lowest-priority interrupts * if they have a single CPU as the destination, e.g. only if * the guest has affined the interrupt to a single vCPU. 
*/ if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) || !kvm_irq_is_postable(&irq)) vcpu = NULL; } if (!irqfd->irq_bypass_vcpu && !vcpu) return 0; r = kvm_x86_call(pi_update_irte)(irqfd, irqfd->kvm, host_irq, irqfd->gsi, vcpu, irq.vector); if (r) { WARN_ON_ONCE(irqfd->irq_bypass_vcpu && !vcpu); irqfd->irq_bypass_vcpu = NULL; return r; } irqfd->irq_bypass_vcpu = vcpu; trace_kvm_pi_irte_update(host_irq, vcpu, irqfd->gsi, irq.vector, !!vcpu); return 0; } int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons, struct irq_bypass_producer *prod) { struct kvm_kernel_irqfd *irqfd = container_of(cons, struct kvm_kernel_irqfd, consumer); struct kvm *kvm = irqfd->kvm; int ret = 0; spin_lock_irq(&kvm->irqfds.lock); irqfd->producer = prod; if (!kvm->arch.nr_possible_bypass_irqs++) kvm_x86_call(pi_start_bypass)(kvm); if (irqfd->irq_entry.type == KVM_IRQ_ROUTING_MSI) { ret = kvm_pi_update_irte(irqfd, &irqfd->irq_entry); if (ret) kvm->arch.nr_possible_bypass_irqs--; } spin_unlock_irq(&kvm->irqfds.lock); return ret; } void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, struct irq_bypass_producer *prod) { struct kvm_kernel_irqfd *irqfd = container_of(cons, struct kvm_kernel_irqfd, consumer); struct kvm *kvm = irqfd->kvm; int ret; WARN_ON(irqfd->producer != prod); /* * If the producer of an IRQ that is currently being posted to a vCPU * is unregistered, change the associated IRTE back to remapped mode as * the IRQ has been released (or repurposed) by the device driver, i.e. * KVM must relinquish control of the IRTE. */ spin_lock_irq(&kvm->irqfds.lock); if (irqfd->irq_entry.type == KVM_IRQ_ROUTING_MSI) { ret = kvm_pi_update_irte(irqfd, NULL); if (ret) pr_info("irq bypass consumer (eventfd %p) unregistration fails: %d\n", irqfd->consumer.eventfd, ret); } irqfd->producer = NULL; kvm->arch.nr_possible_bypass_irqs--; spin_unlock_irq(&kvm->irqfds.lock); } void kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd, struct kvm_kernel_irq_routing_entry *old, struct kvm_kernel_irq_routing_entry *new) { if (new->type != KVM_IRQ_ROUTING_MSI && old->type != KVM_IRQ_ROUTING_MSI) return; if (old->type == KVM_IRQ_ROUTING_MSI && new->type == KVM_IRQ_ROUTING_MSI && !memcmp(&old->msi, &new->msi, sizeof(new->msi))) return; kvm_pi_update_irte(irqfd, new); } #ifdef CONFIG_KVM_IOAPIC #define IOAPIC_ROUTING_ENTRY(irq) \ { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \ .u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC, .pin = (irq) } } #define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq) #define PIC_ROUTING_ENTRY(irq) \ { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \ .u.irqchip = { .irqchip = SELECT_PIC(irq), .pin = (irq) % 8 } } #define ROUTING_ENTRY2(irq) \ IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq) static const struct kvm_irq_routing_entry default_routing[] = { ROUTING_ENTRY2(0), ROUTING_ENTRY2(1), ROUTING_ENTRY2(2), ROUTING_ENTRY2(3), ROUTING_ENTRY2(4), ROUTING_ENTRY2(5), ROUTING_ENTRY2(6), ROUTING_ENTRY2(7), ROUTING_ENTRY2(8), ROUTING_ENTRY2(9), ROUTING_ENTRY2(10), ROUTING_ENTRY2(11), ROUTING_ENTRY2(12), ROUTING_ENTRY2(13), ROUTING_ENTRY2(14), ROUTING_ENTRY2(15), ROUTING_ENTRY1(16), ROUTING_ENTRY1(17), ROUTING_ENTRY1(18), ROUTING_ENTRY1(19), ROUTING_ENTRY1(20), ROUTING_ENTRY1(21), ROUTING_ENTRY1(22), ROUTING_ENTRY1(23), }; int kvm_setup_default_ioapic_and_pic_routing(struct kvm *kvm) { return kvm_set_irq_routing(kvm, default_routing, ARRAY_SIZE(default_routing), 0); } int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) { struct kvm_pic *pic = kvm->arch.vpic; int r; 
r = 0; switch (chip->chip_id) { case KVM_IRQCHIP_PIC_MASTER: memcpy(&chip->chip.pic, &pic->pics[0], sizeof(struct kvm_pic_state)); break; case KVM_IRQCHIP_PIC_SLAVE: memcpy(&chip->chip.pic, &pic->pics[1], sizeof(struct kvm_pic_state)); break; case KVM_IRQCHIP_IOAPIC: kvm_get_ioapic(kvm, &chip->chip.ioapic); break; default: r = -EINVAL; break; } return r; } int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) { struct kvm_pic *pic = kvm->arch.vpic; int r; r = 0; switch (chip->chip_id) { case KVM_IRQCHIP_PIC_MASTER: spin_lock(&pic->lock); memcpy(&pic->pics[0], &chip->chip.pic, sizeof(struct kvm_pic_state)); spin_unlock(&pic->lock); break; case KVM_IRQCHIP_PIC_SLAVE: spin_lock(&pic->lock); memcpy(&pic->pics[1], &chip->chip.pic, sizeof(struct kvm_pic_state)); spin_unlock(&pic->lock); break; case KVM_IRQCHIP_IOAPIC: kvm_set_ioapic(kvm, &chip->chip.ioapic); break; default: r = -EINVAL; break; } kvm_pic_update_irq(pic); return r; } #endif
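To illustrate the KVM_IRQ_LINE path serviced by kvm_vm_ioctl_irq_line() above, a minimal VMM-side sketch that pulses a GSI. It assumes vm_fd is a KVM VM file descriptor on which an in-kernel irqchip has already been created with KVM_CREATE_IRQCHIP (otherwise the ioctl fails with ENXIO); the GSI number is a placeholder supplied by the caller.

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* vm_fd: a VM fd from KVM_CREATE_VM with an in-kernel irqchip already set up */
static int pulse_gsi_example(int vm_fd, unsigned int gsi)
{
	struct kvm_irq_level irq = { .irq = gsi, .level = 1 };

	if (ioctl(vm_fd, KVM_IRQ_LINE, &irq) < 0)	/* assert the line */
		return -1;

	irq.level = 0;
	if (ioctl(vm_fd, KVM_IRQ_LINE, &irq) < 0)	/* de-assert the line */
		return -1;

	return 0;
}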
// SPDX-License-Identifier: MIT /* * Copyright 2020 Noralf Trønnes */ #include <linux/dma-buf.h> #include <linux/dma-mapping.h> #include <linux/lz4.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/string_helpers.h> #include <linux/usb.h> #include <linux/vmalloc.h> #include <linux/workqueue.h> #include <drm/clients/drm_client_setup.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_blend.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_damage_helper.h> #include <drm/drm_debugfs.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_shmem.h> #include <drm/drm_fourcc.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_gem_shmem_helper.h> #include <drm/drm_managed.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/gud.h> #include
"gud_internal.h" /* Only used internally */ static const struct drm_format_info gud_drm_format_r1 = { .format = GUD_DRM_FORMAT_R1, .num_planes = 1, .char_per_block = { 1, 0, 0 }, .block_w = { 8, 0, 0 }, .block_h = { 1, 0, 0 }, .hsub = 1, .vsub = 1, }; static const struct drm_format_info gud_drm_format_xrgb1111 = { .format = GUD_DRM_FORMAT_XRGB1111, .num_planes = 1, .char_per_block = { 1, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 1, 0, 0 }, .hsub = 1, .vsub = 1, }; static int gud_usb_control_msg(struct usb_interface *intf, bool in, u8 request, u16 value, void *buf, size_t len) { u8 requesttype = USB_TYPE_VENDOR | USB_RECIP_INTERFACE; u8 ifnum = intf->cur_altsetting->desc.bInterfaceNumber; struct usb_device *usb = interface_to_usbdev(intf); unsigned int pipe; if (len && !buf) return -EINVAL; if (in) { pipe = usb_rcvctrlpipe(usb, 0); requesttype |= USB_DIR_IN; } else { pipe = usb_sndctrlpipe(usb, 0); requesttype |= USB_DIR_OUT; } return usb_control_msg(usb, pipe, request, requesttype, value, ifnum, buf, len, USB_CTRL_GET_TIMEOUT); } static int gud_get_display_descriptor(struct usb_interface *intf, struct gud_display_descriptor_req *desc) { void *buf; int ret; buf = kmalloc(sizeof(*desc), GFP_KERNEL); if (!buf) return -ENOMEM; ret = gud_usb_control_msg(intf, true, GUD_REQ_GET_DESCRIPTOR, 0, buf, sizeof(*desc)); memcpy(desc, buf, sizeof(*desc)); kfree(buf); if (ret < 0) return ret; if (ret != sizeof(*desc)) return -EIO; if (desc->magic != le32_to_cpu(GUD_DISPLAY_MAGIC)) return -ENODATA; DRM_DEV_DEBUG_DRIVER(&intf->dev, "version=%u flags=0x%x compression=0x%x max_buffer_size=%u\n", desc->version, le32_to_cpu(desc->flags), desc->compression, le32_to_cpu(desc->max_buffer_size)); if (!desc->version || !desc->max_width || !desc->max_height || le32_to_cpu(desc->min_width) > le32_to_cpu(desc->max_width) || le32_to_cpu(desc->min_height) > le32_to_cpu(desc->max_height)) return -EINVAL; return 0; } static int gud_status_to_errno(u8 status) { switch (status) { case GUD_STATUS_OK: return 0; case GUD_STATUS_BUSY: return -EBUSY; case GUD_STATUS_REQUEST_NOT_SUPPORTED: return -EOPNOTSUPP; case GUD_STATUS_PROTOCOL_ERROR: return -EPROTO; case GUD_STATUS_INVALID_PARAMETER: return -EINVAL; case GUD_STATUS_ERROR: return -EREMOTEIO; default: return -EREMOTEIO; } } static int gud_usb_get_status(struct usb_interface *intf) { int ret, status = -EIO; u8 *buf; buf = kmalloc(sizeof(*buf), GFP_KERNEL); if (!buf) return -ENOMEM; ret = gud_usb_control_msg(intf, true, GUD_REQ_GET_STATUS, 0, buf, sizeof(*buf)); if (ret == sizeof(*buf)) status = gud_status_to_errno(*buf); kfree(buf); if (ret < 0) return ret; return status; } static int gud_usb_transfer(struct gud_device *gdrm, bool in, u8 request, u16 index, void *buf, size_t len) { struct usb_interface *intf = to_usb_interface(gdrm->drm.dev); int idx, ret; drm_dbg(&gdrm->drm, "%s: request=0x%x index=%u len=%zu\n", in ? "get" : "set", request, index, len); if (!drm_dev_enter(&gdrm->drm, &idx)) return -ENODEV; mutex_lock(&gdrm->ctrl_lock); ret = gud_usb_control_msg(intf, in, request, index, buf, len); if (ret == -EPIPE || ((gdrm->flags & GUD_DISPLAY_FLAG_STATUS_ON_SET) && !in && ret >= 0)) { int status; status = gud_usb_get_status(intf); if (status < 0) { ret = status; } else if (ret < 0) { dev_err_once(gdrm->drm.dev, "Unexpected status OK for failed transfer\n"); ret = -EPIPE; } } if (ret < 0) { drm_dbg(&gdrm->drm, "ret=%d\n", ret); gdrm->stats_num_errors++; } mutex_unlock(&gdrm->ctrl_lock); drm_dev_exit(idx); return ret; } /* * @buf cannot be allocated on the stack. 
* Returns number of bytes received or negative error code on failure. */ int gud_usb_get(struct gud_device *gdrm, u8 request, u16 index, void *buf, size_t max_len) { return gud_usb_transfer(gdrm, true, request, index, buf, max_len); } /* * @buf can be allocated on the stack or NULL. * Returns zero on success or negative error code on failure. */ int gud_usb_set(struct gud_device *gdrm, u8 request, u16 index, void *buf, size_t len) { void *trbuf = NULL; int ret; if (buf && len) { trbuf = kmemdup(buf, len, GFP_KERNEL); if (!trbuf) return -ENOMEM; } ret = gud_usb_transfer(gdrm, false, request, index, trbuf, len); kfree(trbuf); if (ret < 0) return ret; return ret != len ? -EIO : 0; } /* * @val can be allocated on the stack. * Returns zero on success or negative error code on failure. */ int gud_usb_get_u8(struct gud_device *gdrm, u8 request, u16 index, u8 *val) { u8 *buf; int ret; buf = kmalloc(sizeof(*val), GFP_KERNEL); if (!buf) return -ENOMEM; ret = gud_usb_get(gdrm, request, index, buf, sizeof(*val)); *val = *buf; kfree(buf); if (ret < 0) return ret; return ret != sizeof(*val) ? -EIO : 0; } /* Returns zero on success or negative error code on failure. */ int gud_usb_set_u8(struct gud_device *gdrm, u8 request, u8 val) { return gud_usb_set(gdrm, request, 0, &val, sizeof(val)); } static int gud_plane_add_properties(struct gud_device *gdrm) { struct gud_property_req *properties; unsigned int i, num_properties; int ret; properties = kcalloc(GUD_PROPERTIES_MAX_NUM, sizeof(*properties), GFP_KERNEL); if (!properties) return -ENOMEM; ret = gud_usb_get(gdrm, GUD_REQ_GET_PROPERTIES, 0, properties, GUD_PROPERTIES_MAX_NUM * sizeof(*properties)); if (ret <= 0) goto out; if (ret % sizeof(*properties)) { ret = -EIO; goto out; } num_properties = ret / sizeof(*properties); ret = 0; gdrm->properties = drmm_kcalloc(&gdrm->drm, num_properties, sizeof(*gdrm->properties), GFP_KERNEL); if (!gdrm->properties) { ret = -ENOMEM; goto out; } for (i = 0; i < num_properties; i++) { u16 prop = le16_to_cpu(properties[i].prop); u64 val = le64_to_cpu(properties[i].val); switch (prop) { case GUD_PROPERTY_ROTATION: /* * DRM UAPI matches the protocol so use the value directly, * but mask out any additions on future devices. */ val &= GUD_ROTATION_MASK; ret = drm_plane_create_rotation_property(&gdrm->plane, DRM_MODE_ROTATE_0, val); break; default: /* New ones might show up in future devices, skip those we don't know. 
*/ drm_dbg(&gdrm->drm, "Ignoring unknown property: %u\n", prop); continue; } if (ret) goto out; gdrm->properties[gdrm->num_properties++] = prop; } out: kfree(properties); return ret; } static int gud_stats_debugfs(struct seq_file *m, void *data) { struct drm_debugfs_entry *entry = m->private; struct gud_device *gdrm = to_gud_device(entry->dev); char buf[10]; string_get_size(gdrm->bulk_len, 1, STRING_UNITS_2, buf, sizeof(buf)); seq_printf(m, "Max buffer size: %s\n", buf); seq_printf(m, "Number of errors: %u\n", gdrm->stats_num_errors); seq_puts(m, "Compression: "); if (gdrm->compression & GUD_COMPRESSION_LZ4) seq_puts(m, " lz4"); if (!gdrm->compression) seq_puts(m, " none"); seq_puts(m, "\n"); if (gdrm->compression) { u64 remainder; u64 ratio = div64_u64_rem(gdrm->stats_length, gdrm->stats_actual_length, &remainder); u64 ratio_frac = div64_u64(remainder * 10, gdrm->stats_actual_length); seq_printf(m, "Compression ratio: %llu.%llu\n", ratio, ratio_frac); } return 0; } static const struct drm_crtc_helper_funcs gud_crtc_helper_funcs = { .atomic_check = drm_crtc_helper_atomic_check }; static const struct drm_crtc_funcs gud_crtc_funcs = { .reset = drm_atomic_helper_crtc_reset, .destroy = drm_crtc_cleanup, .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, }; static const struct drm_plane_helper_funcs gud_plane_helper_funcs = { DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, .atomic_check = gud_plane_atomic_check, .atomic_update = gud_plane_atomic_update, }; static const struct drm_plane_funcs gud_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = drm_plane_cleanup, DRM_GEM_SHADOW_PLANE_FUNCS, }; static const struct drm_mode_config_funcs gud_mode_config_funcs = { .fb_create = drm_gem_fb_create_with_dirty, .atomic_check = drm_atomic_helper_check, .atomic_commit = drm_atomic_helper_commit, }; static const u64 gud_plane_modifiers[] = { DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_INVALID }; DEFINE_DRM_GEM_FOPS(gud_fops); static const struct drm_driver gud_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .fops = &gud_fops, DRM_GEM_SHMEM_DRIVER_OPS, DRM_FBDEV_SHMEM_DRIVER_OPS, .name = "gud", .desc = "Generic USB Display", .major = 1, .minor = 0, }; static int gud_alloc_bulk_buffer(struct gud_device *gdrm) { unsigned int i, num_pages; struct page **pages; void *ptr; int ret; gdrm->bulk_buf = vmalloc_32(gdrm->bulk_len); if (!gdrm->bulk_buf) return -ENOMEM; num_pages = DIV_ROUND_UP(gdrm->bulk_len, PAGE_SIZE); pages = kmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL); if (!pages) return -ENOMEM; for (i = 0, ptr = gdrm->bulk_buf; i < num_pages; i++, ptr += PAGE_SIZE) pages[i] = vmalloc_to_page(ptr); ret = sg_alloc_table_from_pages(&gdrm->bulk_sgt, pages, num_pages, 0, gdrm->bulk_len, GFP_KERNEL); kfree(pages); return ret; } static void gud_free_buffers_and_mutex(void *data) { struct gud_device *gdrm = data; vfree(gdrm->compress_buf); gdrm->compress_buf = NULL; sg_free_table(&gdrm->bulk_sgt); vfree(gdrm->bulk_buf); gdrm->bulk_buf = NULL; mutex_destroy(&gdrm->ctrl_lock); } static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id) { const struct drm_format_info *xrgb8888_emulation_format = NULL; bool rgb565_supported = false, xrgb8888_supported = false; unsigned int num_formats_dev, num_formats = 0; struct usb_endpoint_descriptor 
*bulk_out; struct gud_display_descriptor_req desc; struct device *dev = &intf->dev; size_t max_buffer_size = 0; struct gud_device *gdrm; struct drm_device *drm; struct device *dma_dev; u8 *formats_dev; u32 *formats; int ret, i; ret = usb_find_bulk_out_endpoint(intf->cur_altsetting, &bulk_out); if (ret) return ret; ret = gud_get_display_descriptor(intf, &desc); if (ret) { DRM_DEV_DEBUG_DRIVER(dev, "Not a display interface: ret=%d\n", ret); return -ENODEV; } if (desc.version > 1) { dev_err(dev, "Protocol version %u is not supported\n", desc.version); return -ENODEV; } gdrm = devm_drm_dev_alloc(dev, &gud_drm_driver, struct gud_device, drm); if (IS_ERR(gdrm)) return PTR_ERR(gdrm); drm = &gdrm->drm; gdrm->flags = le32_to_cpu(desc.flags); gdrm->compression = desc.compression & GUD_COMPRESSION_LZ4; if (gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE && gdrm->compression) return -EINVAL; mutex_init(&gdrm->ctrl_lock); mutex_init(&gdrm->damage_lock); INIT_WORK(&gdrm->work, gud_flush_work); gud_clear_damage(gdrm); ret = devm_add_action(dev, gud_free_buffers_and_mutex, gdrm); if (ret) return ret; usb_set_intfdata(intf, gdrm); dma_dev = usb_intf_get_dma_device(intf); if (dma_dev) { drm_dev_set_dma_dev(drm, dma_dev); put_device(dma_dev); } else { dev_warn(dev, "buffer sharing not supported"); /* not an error */ } /* Mode config init */ ret = drmm_mode_config_init(drm); if (ret) return ret; drm->mode_config.min_width = le32_to_cpu(desc.min_width); drm->mode_config.max_width = le32_to_cpu(desc.max_width); drm->mode_config.min_height = le32_to_cpu(desc.min_height); drm->mode_config.max_height = le32_to_cpu(desc.max_height); drm->mode_config.funcs = &gud_mode_config_funcs; /* Format init */ formats_dev = devm_kmalloc(dev, GUD_FORMATS_MAX_NUM, GFP_KERNEL); /* Add room for emulated XRGB8888 */ formats = devm_kmalloc_array(dev, GUD_FORMATS_MAX_NUM + 1, sizeof(*formats), GFP_KERNEL); if (!formats_dev || !formats) return -ENOMEM; ret = gud_usb_get(gdrm, GUD_REQ_GET_FORMATS, 0, formats_dev, GUD_FORMATS_MAX_NUM); if (ret < 0) return ret; num_formats_dev = ret; for (i = 0; i < num_formats_dev; i++) { const struct drm_format_info *info; size_t fmt_buf_size; u32 format; format = gud_to_fourcc(formats_dev[i]); if (!format) { drm_dbg(drm, "Unsupported format: 0x%02x\n", formats_dev[i]); continue; } if (format == GUD_DRM_FORMAT_R1) info = &gud_drm_format_r1; else if (format == GUD_DRM_FORMAT_XRGB1111) info = &gud_drm_format_xrgb1111; else info = drm_format_info(format); switch (format) { case GUD_DRM_FORMAT_R1: fallthrough; case DRM_FORMAT_R8: fallthrough; case GUD_DRM_FORMAT_XRGB1111: fallthrough; case DRM_FORMAT_RGB332: fallthrough; case DRM_FORMAT_RGB888: if (!xrgb8888_emulation_format) xrgb8888_emulation_format = info; break; case DRM_FORMAT_RGB565: rgb565_supported = true; if (!xrgb8888_emulation_format) xrgb8888_emulation_format = info; break; case DRM_FORMAT_XRGB8888: xrgb8888_supported = true; break; } fmt_buf_size = drm_format_info_min_pitch(info, 0, drm->mode_config.max_width) * drm->mode_config.max_height; max_buffer_size = max(max_buffer_size, fmt_buf_size); if (format == GUD_DRM_FORMAT_R1 || format == GUD_DRM_FORMAT_XRGB1111) continue; /* Internal not for userspace */ formats[num_formats++] = format; } if (!num_formats && !xrgb8888_emulation_format) { dev_err(dev, "No supported pixel formats found\n"); return -EINVAL; } /* Prefer speed over color depth */ if (rgb565_supported) drm->mode_config.preferred_depth = 16; if (!xrgb8888_supported && xrgb8888_emulation_format) { gdrm->xrgb8888_emulation_format = 
xrgb8888_emulation_format; formats[num_formats++] = DRM_FORMAT_XRGB8888; } if (desc.max_buffer_size) max_buffer_size = le32_to_cpu(desc.max_buffer_size); /* Prevent a misbehaving device from allocating loads of RAM. 4096x4096@XRGB8888 = 64 MB */ if (max_buffer_size > SZ_64M) max_buffer_size = SZ_64M; gdrm->bulk_pipe = usb_sndbulkpipe(interface_to_usbdev(intf), usb_endpoint_num(bulk_out)); gdrm->bulk_len = max_buffer_size; ret = gud_alloc_bulk_buffer(gdrm); if (ret) return ret; if (gdrm->compression & GUD_COMPRESSION_LZ4) { gdrm->lz4_comp_mem = devm_kmalloc(dev, LZ4_MEM_COMPRESS, GFP_KERNEL); if (!gdrm->lz4_comp_mem) return -ENOMEM; gdrm->compress_buf = vmalloc(gdrm->bulk_len); if (!gdrm->compress_buf) return -ENOMEM; } /* Pipeline init */ ret = drm_universal_plane_init(drm, &gdrm->plane, 0, &gud_plane_funcs, formats, num_formats, gud_plane_modifiers, DRM_PLANE_TYPE_PRIMARY, NULL); if (ret) return ret; drm_plane_helper_add(&gdrm->plane, &gud_plane_helper_funcs); drm_plane_enable_fb_damage_clips(&gdrm->plane); ret = gud_plane_add_properties(gdrm); if (ret) { dev_err(dev, "Failed to add properties (error=%d)\n", ret); return ret; } ret = drm_crtc_init_with_planes(drm, &gdrm->crtc, &gdrm->plane, NULL, &gud_crtc_funcs, NULL); if (ret) return ret; drm_crtc_helper_add(&gdrm->crtc, &gud_crtc_helper_funcs); ret = gud_get_connectors(gdrm); if (ret) { dev_err(dev, "Failed to get connectors (error=%d)\n", ret); return ret; } drm_mode_config_reset(drm); drm_kms_helper_poll_init(drm); drm_debugfs_add_file(drm, "stats", gud_stats_debugfs, NULL); ret = drm_dev_register(drm, 0); if (ret) return ret; devm_kfree(dev, formats); devm_kfree(dev, formats_dev); drm_client_setup(drm, NULL); return 0; } static void gud_disconnect(struct usb_interface *interface) { struct gud_device *gdrm = usb_get_intfdata(interface); struct drm_device *drm = &gdrm->drm; drm_kms_helper_poll_fini(drm); drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); } static int gud_suspend(struct usb_interface *intf, pm_message_t message) { struct gud_device *gdrm = usb_get_intfdata(intf); return drm_mode_config_helper_suspend(&gdrm->drm); } static int gud_resume(struct usb_interface *intf) { struct gud_device *gdrm = usb_get_intfdata(intf); drm_mode_config_helper_resume(&gdrm->drm); return 0; } static const struct usb_device_id gud_id_table[] = { { USB_DEVICE_INTERFACE_CLASS(0x1d50, 0x614d, USB_CLASS_VENDOR_SPEC) }, { USB_DEVICE_INTERFACE_CLASS(0x16d0, 0x10a9, USB_CLASS_VENDOR_SPEC) }, { } }; MODULE_DEVICE_TABLE(usb, gud_id_table); static struct usb_driver gud_usb_driver = { .name = "gud", .probe = gud_probe, .disconnect = gud_disconnect, .id_table = gud_id_table, .suspend = gud_suspend, .resume = gud_resume, .reset_resume = gud_resume, }; module_usb_driver(gud_usb_driver); MODULE_AUTHOR("Noralf Trønnes"); MODULE_DESCRIPTION("GUD USB Display driver"); MODULE_LICENSE("Dual MIT/GPL");
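To make the SZ_64M clamp in gud_probe() concrete, the arithmetic behind the "4096x4096@XRGB8888 = 64 MB" comment can be spelled out; gud_fb_size() below is a hypothetical helper used only for illustration, not part of the driver:

/* Hypothetical helper illustrating the worst-case buffer arithmetic above. */
static size_t gud_fb_size(u32 cpp, u32 width, u32 height)
{
	return (size_t)cpp * width * height;
}

/*
 * 4096 x 4096 @ XRGB8888 (4 bytes per pixel):
 *   pitch  = 4096 * 4     = 16384 bytes
 *   buffer = 16384 * 4096 = 67108864 bytes = 64 MiB (SZ_64M)
 * which is exactly the cap gud_probe() applies to max_buffer_size.
 */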
#ifndef IO_URING_MEMMAP_H
#define IO_URING_MEMMAP_H

#define IORING_MAP_OFF_PARAM_REGION	0x20000000ULL
#define IORING_MAP_OFF_ZCRX_REGION	0x30000000ULL

#define IORING_OFF_ZCRX_SHIFT		16

struct page **io_pin_pages(unsigned long uaddr, unsigned long len, int *npages);

#ifndef CONFIG_MMU
unsigned int io_uring_nommu_mmap_capabilities(struct file *file);
#endif
unsigned long io_uring_get_unmapped_area(struct file *file, unsigned long addr,
					 unsigned long len, unsigned long pgoff,
					 unsigned long flags);
int io_uring_mmap(struct file *file, struct vm_area_struct *vma);

void io_free_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr);
int io_create_region(struct io_ring_ctx *ctx, struct io_mapped_region *mr,
		     struct io_uring_region_desc *reg,
		     unsigned long mmap_offset);

static inline void *io_region_get_ptr(struct io_mapped_region *mr)
{
	return mr->ptr;
}

static inline bool io_region_is_set(struct io_mapped_region *mr)
{
	return !!mr->nr_pages;
}

static inline void io_region_publish(struct io_ring_ctx *ctx,
				     struct io_mapped_region *src_region,
				     struct io_mapped_region *dst_region)
{
	/*
	 * Once published, mmap can find the region while holding only
	 * ->mmap_lock and not ->uring_lock.
	 */
	guard(mutex)(&ctx->mmap_lock);
	*dst_region = *src_region;
}

static inline size_t io_region_size(struct io_mapped_region *mr)
{
	return (size_t)mr->nr_pages << PAGE_SHIFT;
}

#endif
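As a sketch of how the helpers in this header are meant to be combined: io_region_publish() copies the region descriptor under ->mmap_lock via guard(mutex), so a reader on the mmap side only needs that lock. example_region_base() below is a hypothetical caller, not io_uring code:

/* Hypothetical illustration of reading back a published region. */
static void *example_region_base(struct io_ring_ctx *ctx,
				 struct io_mapped_region *mr,
				 size_t *size)
{
	*size = 0;

	/* Same scoped-lock pattern as io_region_publish() above. */
	guard(mutex)(&ctx->mmap_lock);

	if (!io_region_is_set(mr))	/* nr_pages == 0 means "not created" */
		return NULL;

	*size = io_region_size(mr);	/* nr_pages << PAGE_SHIFT */
	return io_region_get_ptr(mr);	/* kernel mapping set up at creation */
}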
/*
   RFCOMM implementation for Linux Bluetooth stack (BlueZ).
   Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com>
   Copyright (C) 2002 Marcel Holtmann <marcel@holtmann.org>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE
   IS DISCLAIMED.
*/

/*
 * RFCOMM sockets.
*/ #include <linux/compat.h> #include <linux/export.h> #include <linux/debugfs.h> #include <linux/sched/signal.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #include <net/bluetooth/rfcomm.h> static const struct proto_ops rfcomm_sock_ops; static struct bt_sock_list rfcomm_sk_list = { .lock = __RW_LOCK_UNLOCKED(rfcomm_sk_list.lock) }; static void rfcomm_sock_close(struct sock *sk); static void rfcomm_sock_kill(struct sock *sk); /* ---- DLC callbacks ---- * * called under rfcomm_dlc_lock() */ static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb) { struct sock *sk = d->owner; if (!sk) return; atomic_add(skb->len, &sk->sk_rmem_alloc); skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_data_ready(sk); if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) rfcomm_dlc_throttle(d); } static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err) { struct sock *sk = d->owner, *parent; if (!sk) return; BT_DBG("dlc %p state %ld err %d", d, d->state, err); lock_sock(sk); if (err) sk->sk_err = err; sk->sk_state = d->state; parent = bt_sk(sk)->parent; if (parent) { if (d->state == BT_CLOSED) { sock_set_flag(sk, SOCK_ZAPPED); bt_accept_unlink(sk); } parent->sk_data_ready(parent); } else { if (d->state == BT_CONNECTED) rfcomm_session_getaddr(d->session, &rfcomm_pi(sk)->src, NULL); sk->sk_state_change(sk); } release_sock(sk); if (parent && sock_flag(sk, SOCK_ZAPPED)) { /* We have to drop DLC lock here, otherwise * rfcomm_sock_destruct() will dead lock. */ rfcomm_dlc_unlock(d); rfcomm_sock_kill(sk); rfcomm_dlc_lock(d); } } /* ---- Socket functions ---- */ static struct sock *__rfcomm_get_listen_sock_by_addr(u8 channel, bdaddr_t *src) { struct sock *sk = NULL; sk_for_each(sk, &rfcomm_sk_list.head) { if (rfcomm_pi(sk)->channel != channel) continue; if (bacmp(&rfcomm_pi(sk)->src, src)) continue; if (sk->sk_state == BT_BOUND || sk->sk_state == BT_LISTEN) break; } return sk ? sk : NULL; } /* Find socket with channel and source bdaddr. * Returns closest match. */ static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src) { struct sock *sk = NULL, *sk1 = NULL; read_lock(&rfcomm_sk_list.lock); sk_for_each(sk, &rfcomm_sk_list.head) { if (state && sk->sk_state != state) continue; if (rfcomm_pi(sk)->channel == channel) { /* Exact match. */ if (!bacmp(&rfcomm_pi(sk)->src, src)) break; /* Closest match */ if (!bacmp(&rfcomm_pi(sk)->src, BDADDR_ANY)) sk1 = sk; } } read_unlock(&rfcomm_sk_list.lock); return sk ? sk : sk1; } static void rfcomm_sock_destruct(struct sock *sk) { struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; BT_DBG("sk %p dlc %p", sk, d); skb_queue_purge(&sk->sk_receive_queue); skb_queue_purge(&sk->sk_write_queue); rfcomm_dlc_lock(d); rfcomm_pi(sk)->dlc = NULL; /* Detach DLC if it's owned by this socket */ if (d->owner == sk) d->owner = NULL; rfcomm_dlc_unlock(d); rfcomm_dlc_put(d); } static void rfcomm_sock_cleanup_listen(struct sock *parent) { struct sock *sk; BT_DBG("parent %p", parent); /* Close not yet accepted dlcs */ while ((sk = bt_accept_dequeue(parent, NULL))) { rfcomm_sock_close(sk); rfcomm_sock_kill(sk); } parent->sk_state = BT_CLOSED; sock_set_flag(parent, SOCK_ZAPPED); } /* Kill socket (only if zapped and orphan) * Must be called on unlocked socket. 
*/ static void rfcomm_sock_kill(struct sock *sk) { if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) return; BT_DBG("sk %p state %d refcnt %d", sk, sk->sk_state, refcount_read(&sk->sk_refcnt)); /* Kill poor orphan */ bt_sock_unlink(&rfcomm_sk_list, sk); sock_set_flag(sk, SOCK_DEAD); sock_put(sk); } static void __rfcomm_sock_close(struct sock *sk) { struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket); switch (sk->sk_state) { case BT_LISTEN: rfcomm_sock_cleanup_listen(sk); break; case BT_CONNECT: case BT_CONNECT2: case BT_CONFIG: case BT_CONNECTED: rfcomm_dlc_close(d, 0); fallthrough; default: sock_set_flag(sk, SOCK_ZAPPED); break; } } /* Close socket. * Must be called on unlocked socket. */ static void rfcomm_sock_close(struct sock *sk) { lock_sock(sk); __rfcomm_sock_close(sk); release_sock(sk); } static void rfcomm_sock_init(struct sock *sk, struct sock *parent) { struct rfcomm_pinfo *pi = rfcomm_pi(sk); BT_DBG("sk %p", sk); if (parent) { sk->sk_type = parent->sk_type; pi->dlc->defer_setup = test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags); pi->sec_level = rfcomm_pi(parent)->sec_level; pi->role_switch = rfcomm_pi(parent)->role_switch; security_sk_clone(parent, sk); } else { pi->dlc->defer_setup = 0; pi->sec_level = BT_SECURITY_LOW; pi->role_switch = 0; } pi->dlc->sec_level = pi->sec_level; pi->dlc->role_switch = pi->role_switch; } static struct proto rfcomm_proto = { .name = "RFCOMM", .owner = THIS_MODULE, .obj_size = sizeof(struct rfcomm_pinfo) }; static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio, int kern) { struct rfcomm_dlc *d; struct sock *sk; d = rfcomm_dlc_alloc(prio); if (!d) return NULL; sk = bt_sock_alloc(net, sock, &rfcomm_proto, proto, prio, kern); if (!sk) { rfcomm_dlc_free(d); return NULL; } d->data_ready = rfcomm_sk_data_ready; d->state_change = rfcomm_sk_state_change; rfcomm_pi(sk)->dlc = d; d->owner = sk; sk->sk_destruct = rfcomm_sock_destruct; sk->sk_sndtimeo = RFCOMM_CONN_TIMEOUT; sk->sk_sndbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10; sk->sk_rcvbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10; bt_sock_link(&rfcomm_sk_list, sk); BT_DBG("sk %p", sk); return sk; } static int rfcomm_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; BT_DBG("sock %p", sock); sock->state = SS_UNCONNECTED; if (sock->type != SOCK_STREAM && sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; sock->ops = &rfcomm_sock_ops; sk = rfcomm_sock_alloc(net, sock, protocol, GFP_ATOMIC, kern); if (!sk) return -ENOMEM; rfcomm_sock_init(sk, NULL); return 0; } static int rfcomm_sock_bind(struct socket *sock, struct sockaddr_unsized *addr, int addr_len) { struct sockaddr_rc sa; struct sock *sk = sock->sk; int len, err = 0; if (!addr || addr_len < offsetofend(struct sockaddr, sa_family) || addr->sa_family != AF_BLUETOOTH) return -EINVAL; memset(&sa, 0, sizeof(sa)); len = min_t(unsigned int, sizeof(sa), addr_len); memcpy(&sa, addr, len); BT_DBG("sk %p %pMR", sk, &sa.rc_bdaddr); lock_sock(sk); if (sk->sk_state != BT_OPEN) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; goto done; } write_lock(&rfcomm_sk_list.lock); if (sa.rc_channel && __rfcomm_get_listen_sock_by_addr(sa.rc_channel, &sa.rc_bdaddr)) { err = -EADDRINUSE; } else { /* Save source address */ bacpy(&rfcomm_pi(sk)->src, &sa.rc_bdaddr); rfcomm_pi(sk)->channel = sa.rc_channel; sk->sk_state = BT_BOUND; } write_unlock(&rfcomm_sk_list.lock); done: release_sock(sk); return 
err; } static int rfcomm_sock_connect(struct socket *sock, struct sockaddr_unsized *addr, int alen, int flags) { struct sockaddr_rc *sa = (struct sockaddr_rc *) addr; struct sock *sk = sock->sk; struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; int err = 0; BT_DBG("sk %p", sk); if (alen < sizeof(struct sockaddr_rc) || addr->sa_family != AF_BLUETOOTH) return -EINVAL; sock_hold(sk); lock_sock(sk); if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; goto done; } sk->sk_state = BT_CONNECT; bacpy(&rfcomm_pi(sk)->dst, &sa->rc_bdaddr); rfcomm_pi(sk)->channel = sa->rc_channel; d->sec_level = rfcomm_pi(sk)->sec_level; d->role_switch = rfcomm_pi(sk)->role_switch; /* Drop sock lock to avoid potential deadlock with the RFCOMM lock */ release_sock(sk); err = rfcomm_dlc_open(d, &rfcomm_pi(sk)->src, &sa->rc_bdaddr, sa->rc_channel); lock_sock(sk); if (!err && !sock_flag(sk, SOCK_ZAPPED)) err = bt_sock_wait_state(sk, BT_CONNECTED, sock_sndtimeo(sk, flags & O_NONBLOCK)); done: release_sock(sk); sock_put(sk); return err; } static int rfcomm_sock_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; int err = 0; BT_DBG("sk %p backlog %d", sk, backlog); lock_sock(sk); if (sk->sk_state != BT_BOUND) { err = -EBADFD; goto done; } if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; goto done; } if (!rfcomm_pi(sk)->channel) { bdaddr_t *src = &rfcomm_pi(sk)->src; u8 channel; err = -EINVAL; write_lock(&rfcomm_sk_list.lock); for (channel = 1; channel < 31; channel++) if (!__rfcomm_get_listen_sock_by_addr(channel, src)) { rfcomm_pi(sk)->channel = channel; err = 0; break; } write_unlock(&rfcomm_sk_list.lock); if (err < 0) goto done; } sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; sk->sk_state = BT_LISTEN; done: release_sock(sk); return err; } static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, struct proto_accept_arg *arg) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct sock *sk = sock->sk, *nsk; long timeo; int err = 0; lock_sock_nested(sk, SINGLE_DEPTH_NESTING); if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; goto done; } timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK); BT_DBG("sk %p timeo %ld", sk, timeo); /* Wait for an incoming connection. (wake-one). 
*/ add_wait_queue_exclusive(sk_sleep(sk), &wait); while (1) { if (sk->sk_state != BT_LISTEN) { err = -EBADFD; break; } nsk = bt_accept_dequeue(sk, newsock); if (nsk) break; if (!timeo) { err = -EAGAIN; break; } if (signal_pending(current)) { err = sock_intr_errno(timeo); break; } release_sock(sk); timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); lock_sock_nested(sk, SINGLE_DEPTH_NESTING); } remove_wait_queue(sk_sleep(sk), &wait); if (err) goto done; newsock->state = SS_CONNECTED; BT_DBG("new socket %p", nsk); done: release_sock(sk); return err; } static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int peer) { struct sockaddr_rc *sa = (struct sockaddr_rc *) addr; struct sock *sk = sock->sk; BT_DBG("sock %p, sk %p", sock, sk); if (peer && sk->sk_state != BT_CONNECTED && sk->sk_state != BT_CONNECT && sk->sk_state != BT_CONNECT2) return -ENOTCONN; memset(sa, 0, sizeof(*sa)); sa->rc_family = AF_BLUETOOTH; sa->rc_channel = rfcomm_pi(sk)->channel; if (peer) bacpy(&sa->rc_bdaddr, &rfcomm_pi(sk)->dst); else bacpy(&sa->rc_bdaddr, &rfcomm_pi(sk)->src); return sizeof(struct sockaddr_rc); } static int rfcomm_sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; struct sk_buff *skb; int sent; if (test_bit(RFCOMM_DEFER_SETUP, &d->flags)) return -ENOTCONN; if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; if (sk->sk_shutdown & SEND_SHUTDOWN) return -EPIPE; BT_DBG("sock %p, sk %p", sock, sk); lock_sock(sk); sent = bt_sock_wait_ready(sk, msg->msg_flags); release_sock(sk); if (sent) return sent; skb = bt_skb_sendmmsg(sk, msg, len, d->mtu, RFCOMM_SKB_HEAD_RESERVE, RFCOMM_SKB_TAIL_RESERVE); if (IS_ERR(skb)) return PTR_ERR(skb); sent = rfcomm_dlc_send(d, skb); if (sent < 0) kfree_skb(skb); return sent; } static int rfcomm_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; int len; if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { rfcomm_dlc_accept(d); return 0; } len = bt_sock_stream_recvmsg(sock, msg, size, flags); lock_sock(sk); if (!(flags & MSG_PEEK) && len > 0) atomic_sub(len, &sk->sk_rmem_alloc); if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2)) rfcomm_dlc_unthrottle(rfcomm_pi(sk)->dlc); release_sock(sk); return len; } static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; int err = 0; u32 opt; BT_DBG("sk %p", sk); lock_sock(sk); switch (optname) { case RFCOMM_LM: err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen); if (err) break; if (opt & RFCOMM_LM_FIPS) { err = -EINVAL; break; } if (opt & RFCOMM_LM_AUTH) rfcomm_pi(sk)->sec_level = BT_SECURITY_LOW; if (opt & RFCOMM_LM_ENCRYPT) rfcomm_pi(sk)->sec_level = BT_SECURITY_MEDIUM; if (opt & RFCOMM_LM_SECURE) rfcomm_pi(sk)->sec_level = BT_SECURITY_HIGH; rfcomm_pi(sk)->role_switch = (opt & RFCOMM_LM_MASTER); break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct bt_security sec; int err = 0; u32 opt; BT_DBG("sk %p", sk); if (level == SOL_RFCOMM) return rfcomm_sock_setsockopt_old(sock, optname, optval, optlen); if (level != SOL_BLUETOOTH) return -ENOPROTOOPT; lock_sock(sk); switch (optname) { case BT_SECURITY: if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; break; } 
sec.level = BT_SECURITY_LOW; err = copy_safe_from_sockptr(&sec, sizeof(sec), optval, optlen); if (err) break; if (sec.level > BT_SECURITY_HIGH) { err = -EINVAL; break; } rfcomm_pi(sk)->sec_level = sec.level; break; case BT_DEFER_SETUP: if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { err = -EINVAL; break; } err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen); if (err) break; if (opt) set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); else clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct sock *l2cap_sk; struct l2cap_conn *conn; struct rfcomm_conninfo cinfo; int err = 0; size_t len; u32 opt; BT_DBG("sk %p", sk); if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); switch (optname) { case RFCOMM_LM: switch (rfcomm_pi(sk)->sec_level) { case BT_SECURITY_LOW: opt = RFCOMM_LM_AUTH; break; case BT_SECURITY_MEDIUM: opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT; break; case BT_SECURITY_HIGH: opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT | RFCOMM_LM_SECURE; break; case BT_SECURITY_FIPS: opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT | RFCOMM_LM_SECURE | RFCOMM_LM_FIPS; break; default: opt = 0; break; } if (rfcomm_pi(sk)->role_switch) opt |= RFCOMM_LM_MASTER; if (put_user(opt, (u32 __user *) optval)) err = -EFAULT; break; case RFCOMM_CONNINFO: if (sk->sk_state != BT_CONNECTED && !rfcomm_pi(sk)->dlc->defer_setup) { err = -ENOTCONN; break; } l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk; conn = l2cap_pi(l2cap_sk)->chan->conn; memset(&cinfo, 0, sizeof(cinfo)); cinfo.hci_handle = conn->hcon->handle; memcpy(cinfo.dev_class, conn->hcon->dev_class, 3); len = min(len, sizeof(cinfo)); if (copy_to_user(optval, (char *) &cinfo, len)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct bt_security sec; int err = 0; size_t len; BT_DBG("sk %p", sk); if (level == SOL_RFCOMM) return rfcomm_sock_getsockopt_old(sock, optname, optval, optlen); if (level != SOL_BLUETOOTH) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; lock_sock(sk); switch (optname) { case BT_SECURITY: if (sk->sk_type != SOCK_STREAM) { err = -EINVAL; break; } sec.level = rfcomm_pi(sk)->sec_level; sec.key_size = 0; len = min(len, sizeof(sec)); if (copy_to_user(optval, (char *) &sec, len)) err = -EFAULT; break; case BT_DEFER_SETUP: if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { err = -EINVAL; break; } if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags), (u32 __user *) optval)) err = -EFAULT; break; default: err = -ENOPROTOOPT; break; } release_sock(sk); return err; } static int rfcomm_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk __maybe_unused = sock->sk; int err; BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg); err = bt_sock_ioctl(sock, cmd, arg); if (err == -ENOIOCTLCMD) { #ifdef CONFIG_BT_RFCOMM_TTY err = rfcomm_dev_ioctl(sk, cmd, (void __user *) arg); #else err = -EOPNOTSUPP; #endif } return err; } #ifdef CONFIG_COMPAT static int rfcomm_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { return rfcomm_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg)); } #endif static int rfcomm_sock_shutdown(struct socket *sock, int 
how) { struct sock *sk = sock->sk; int err = 0; BT_DBG("sock %p, sk %p", sock, sk); if (!sk) return 0; lock_sock(sk); if (!sk->sk_shutdown) { sk->sk_shutdown = SHUTDOWN_MASK; release_sock(sk); __rfcomm_sock_close(sk); lock_sock(sk); if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && !(current->flags & PF_EXITING)) err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); } release_sock(sk); return err; } static int rfcomm_sock_release(struct socket *sock) { struct sock *sk = sock->sk; int err; BT_DBG("sock %p, sk %p", sock, sk); if (!sk) return 0; err = rfcomm_sock_shutdown(sock, 2); sock_orphan(sk); rfcomm_sock_kill(sk); return err; } /* ---- RFCOMM core layer callbacks ---- * * called under rfcomm_lock() */ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc **d) { struct sock *sk, *parent; bdaddr_t src, dst; int result = 0; BT_DBG("session %p channel %d", s, channel); rfcomm_session_getaddr(s, &src, &dst); /* Check if we have socket listening on channel */ parent = rfcomm_get_sock_by_channel(BT_LISTEN, channel, &src); if (!parent) return 0; lock_sock(parent); /* Check for backlog size */ if (sk_acceptq_is_full(parent)) { BT_DBG("backlog full %d", parent->sk_ack_backlog); goto done; } sk = rfcomm_sock_alloc(sock_net(parent), NULL, BTPROTO_RFCOMM, GFP_ATOMIC, 0); if (!sk) goto done; bt_sock_reclassify_lock(sk, BTPROTO_RFCOMM); rfcomm_sock_init(sk, parent); bacpy(&rfcomm_pi(sk)->src, &src); bacpy(&rfcomm_pi(sk)->dst, &dst); rfcomm_pi(sk)->channel = channel; sk->sk_state = BT_CONFIG; bt_accept_enqueue(parent, sk, true); /* Accept connection and return socket DLC */ *d = rfcomm_pi(sk)->dlc; result = 1; done: release_sock(parent); if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) parent->sk_state_change(parent); return result; } static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p) { struct sock *sk; read_lock(&rfcomm_sk_list.lock); sk_for_each(sk, &rfcomm_sk_list.head) { seq_printf(f, "%pMR %pMR %d %d\n", &rfcomm_pi(sk)->src, &rfcomm_pi(sk)->dst, sk->sk_state, rfcomm_pi(sk)->channel); } read_unlock(&rfcomm_sk_list.lock); return 0; } DEFINE_SHOW_ATTRIBUTE(rfcomm_sock_debugfs); static struct dentry *rfcomm_sock_debugfs; static const struct proto_ops rfcomm_sock_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .release = rfcomm_sock_release, .bind = rfcomm_sock_bind, .connect = rfcomm_sock_connect, .listen = rfcomm_sock_listen, .accept = rfcomm_sock_accept, .getname = rfcomm_sock_getname, .sendmsg = rfcomm_sock_sendmsg, .recvmsg = rfcomm_sock_recvmsg, .shutdown = rfcomm_sock_shutdown, .setsockopt = rfcomm_sock_setsockopt, .getsockopt = rfcomm_sock_getsockopt, .ioctl = rfcomm_sock_ioctl, .gettstamp = sock_gettstamp, .poll = bt_sock_poll, .socketpair = sock_no_socketpair, .mmap = sock_no_mmap, #ifdef CONFIG_COMPAT .compat_ioctl = rfcomm_sock_compat_ioctl, #endif }; static const struct net_proto_family rfcomm_sock_family_ops = { .family = PF_BLUETOOTH, .owner = THIS_MODULE, .create = rfcomm_sock_create }; int __init rfcomm_init_sockets(void) { int err; BUILD_BUG_ON(sizeof(struct sockaddr_rc) > sizeof(struct sockaddr)); err = proto_register(&rfcomm_proto, 0); if (err < 0) return err; err = bt_sock_register(BTPROTO_RFCOMM, &rfcomm_sock_family_ops); if (err < 0) { BT_ERR("RFCOMM socket layer registration failed"); goto error; } err = bt_procfs_init(&init_net, "rfcomm", &rfcomm_sk_list, NULL); if (err < 0) { BT_ERR("Failed to create RFCOMM proc file"); bt_sock_unregister(BTPROTO_RFCOMM); goto error; } BT_INFO("RFCOMM socket layer initialized"); if 
(IS_ERR_OR_NULL(bt_debugfs)) return 0; rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444, bt_debugfs, NULL, &rfcomm_sock_debugfs_fops); return 0; error: proto_unregister(&rfcomm_proto); return err; } void __exit rfcomm_cleanup_sockets(void) { bt_procfs_cleanup(&init_net, "rfcomm"); debugfs_remove(rfcomm_sock_debugfs); bt_sock_unregister(BTPROTO_RFCOMM); proto_unregister(&rfcomm_proto); }
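For reference, the protocol family registered above is what userspace reaches through AF_BLUETOOTH/BTPROTO_RFCOMM sockets. A minimal client sketch using BlueZ's libbluetooth headers follows; the device address and channel are placeholders:

/* Hypothetical userspace sketch: connect to RFCOMM channel 1 on a remote device. */
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/rfcomm.h>

int main(void)
{
	struct sockaddr_rc addr = { 0 };
	int sk;

	sk = socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM);
	if (sk < 0)
		return 1;

	addr.rc_family = AF_BLUETOOTH;
	addr.rc_channel = 1;				/* placeholder server channel */
	str2ba("00:11:22:33:44:55", &addr.rc_bdaddr);	/* placeholder address */

	/* Ends up in rfcomm_sock_connect() above. */
	if (connect(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(sk);
		return 1;
	}

	write(sk, "hello", 5);
	close(sk);
	return 0;
}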
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DVB USB framework
 *
 * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@posteo.de>
 * Copyright (C) 2012 Antti Palosaari <crope@iki.fi>
 */

#include "dvb_usb_common.h"

static int dvb_usb_v2_generic_io(struct dvb_usb_device *d,
		u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
{
	int ret, actual_length;

	if (!wbuf || !wlen || !d->props->generic_bulk_ctrl_endpoint ||
			!d->props->generic_bulk_ctrl_endpoint_response) {
		dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, -EINVAL);
		return -EINVAL;
	}

	dev_dbg(&d->udev->dev, "%s: >>> %*ph\n", __func__, wlen, wbuf);

	ret = usb_bulk_msg(d->udev, usb_sndbulkpipe(d->udev,
			d->props->generic_bulk_ctrl_endpoint), wbuf, wlen,
			&actual_length, 2000);
	if (ret) {
		dev_err(&d->udev->dev, "%s: usb_bulk_msg() failed=%d\n",
				KBUILD_MODNAME, ret);
		return ret;
	}
	if (actual_length != wlen) {
		dev_err(&d->udev->dev, "%s: usb_bulk_msg() write length=%d, actual=%d\n",
				KBUILD_MODNAME, wlen, actual_length);
		return -EIO;
	}

	/* an answer is expected */
	if (rbuf && rlen) {
		if (d->props->generic_bulk_ctrl_delay)
			usleep_range(d->props->generic_bulk_ctrl_delay,
					d->props->generic_bulk_ctrl_delay + 20000);

		ret = usb_bulk_msg(d->udev, usb_rcvbulkpipe(d->udev,
				d->props->generic_bulk_ctrl_endpoint_response),
				rbuf, rlen, &actual_length, 2000);
		if (ret)
			dev_err(&d->udev->dev, "%s: 2nd usb_bulk_msg() failed=%d\n",
					KBUILD_MODNAME, ret);

		dev_dbg(&d->udev->dev, "%s: <<< %*ph\n", __func__,
				actual_length, rbuf);
	}

	return ret;
}

int dvb_usbv2_generic_rw(struct dvb_usb_device *d,
		u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
{
	int ret;

	mutex_lock(&d->usb_mutex);
	ret = dvb_usb_v2_generic_io(d, wbuf, wlen, rbuf, rlen);
	mutex_unlock(&d->usb_mutex);

	return ret;
}
EXPORT_SYMBOL(dvb_usbv2_generic_rw);

int dvb_usbv2_generic_write(struct dvb_usb_device *d, u8 *buf, u16 len)
{
	int ret;

	mutex_lock(&d->usb_mutex);
	ret = dvb_usb_v2_generic_io(d, buf, len, NULL, 0);
	mutex_unlock(&d->usb_mutex);

	return ret;
}
EXPORT_SYMBOL(dvb_usbv2_generic_write);

int dvb_usbv2_generic_rw_locked(struct dvb_usb_device *d,
		u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
{
	return dvb_usb_v2_generic_io(d, wbuf, wlen, rbuf, rlen);
}
EXPORT_SYMBOL(dvb_usbv2_generic_rw_locked);

int dvb_usbv2_generic_write_locked(struct dvb_usb_device *d, u8 *buf, u16 len)
{
	return dvb_usb_v2_generic_io(d, buf, len, NULL, 0);
}
EXPORT_SYMBOL(dvb_usbv2_generic_write_locked);
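Sub-drivers typically wrap dvb_usbv2_generic_rw() in small command helpers. The sketch below shows the usual shape of such a helper; the opcode and message layout are made up for illustration and do not belong to any real device:

/* Hypothetical sub-driver helper built on dvb_usbv2_generic_rw(). */
#define EXAMPLE_CMD_READ_REG	0x01	/* made-up opcode */

static int example_read_reg(struct dvb_usb_device *d, u8 reg, u8 *val)
{
	u8 wbuf[2] = { EXAMPLE_CMD_READ_REG, reg };
	u8 rbuf[1];
	int ret;

	/* One write plus one read on the generic bulk control endpoints. */
	ret = dvb_usbv2_generic_rw(d, wbuf, sizeof(wbuf), rbuf, sizeof(rbuf));
	if (ret)
		return ret;

	*val = rbuf[0];
	return 0;
}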
84 40 12 28 198 25 38 13 13 16 3 38 3 3 1 1 6 1 34 12 4 1 1 2 2 17 1 5 1 4 16 65 3 65 12 20 1 1 2 19 1 15 2 1 65 65 4 37 1 1 1 65 4 50 2 35 25 26 4 2 2 42 19 20 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 
876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 
1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 2262 2263 2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 
2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 2344 2345 2346 2347 2348 2349 2350 2351 2352 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381 2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429 2430 2431 2432 2433 2434 2435 2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492 2493 2494 2495 2496 2497 2498 2499 2500 2501 2502 2503 2504 2505 2506 2507 2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524 2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 2535 2536 2537 2538 2539 2540 2541 2542 2543 2544 2545 2546 2547 2548 2549 2550 2551 2552 2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 2563 2564 2565 2566 2567 2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602 2603 2604 2605 2606 2607 2608 2609 2610 2611 2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635 2636 2637 2638 2639 2640 2641 2642 2643 2644 2645 2646 2647 2648 2649 2650 2651 2652 2653 2654 2655 2656 2657 2658 2659 2660 2661 2662 2663 2664 2665 2666 2667 2668 2669 2670 2671 2672 2673 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706 2707 2708 2709 2710 2711 2712 2713 2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765 2766 2767 2768 2769 2770 2771 2772 2773 2774 2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801 2802 2803 2804 2805 2806 2807 2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818 2819 2820 2821 2822 2823 2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847 2848 2849 2850 2851 2852 2853 2854 2855 2856 2857 2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943 2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980 2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Portions of this file
 * Copyright(c) 2016-2017 Intel Deutschland GmbH
 * Copyright (C) 2018, 2020-2025 Intel Corporation
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM cfg80211

#if !defined(__RDEV_OPS_TRACE) || defined(TRACE_HEADER_MULTI_READ)
#define __RDEV_OPS_TRACE

#include <linux/tracepoint.h>

#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <net/cfg80211.h>
#include "core.h"

/*
 * Helper macros shared by the trace events below: for each traced object
 * (MAC address, wiphy, wdev, netdev, channel, ...) an *_ENTRY macro declares
 * the ring-buffer fields, an *_ASSIGN macro fills them in TP_fast_assign(),
 * and, where defined, *_PR_FMT/*_PR_ARG supply the matching TP_printk()
 * format string and arguments.
 */
#define MAC_ENTRY(entry_mac) __array(u8, entry_mac, ETH_ALEN)
#define MAC_ASSIGN(entry_mac, given_mac) do {			     \
	if (given_mac)						     \
		memcpy(__entry->entry_mac, given_mac, ETH_ALEN);     \
	else							     \
		eth_zero_addr(__entry->entry_mac);		     \
	} while (0)

#define MAXNAME		32
#define WIPHY_ENTRY	__array(char, wiphy_name, 32)
#define WIPHY_ASSIGN	strscpy(__entry->wiphy_name, wiphy_name(wiphy), MAXNAME)
#define WIPHY_PR_FMT	"%s"
#define WIPHY_PR_ARG	__entry->wiphy_name

#define WDEV_ENTRY	__field(u32, id)
#define WDEV_ASSIGN	(__entry->id) = (!IS_ERR_OR_NULL(wdev)		\
					 ?
					 wdev->identifier : 0)
#define WDEV_PR_FMT	"wdev(%u)"
#define WDEV_PR_ARG	(__entry->id)

#define NETDEV_ENTRY	__array(char, name, IFNAMSIZ)	\
			__field(int, ifindex)
#define NETDEV_ASSIGN						       \
	do {							       \
		memcpy(__entry->name, netdev->name, IFNAMSIZ);	       \
		(__entry->ifindex) = (netdev->ifindex);		       \
	} while (0)
#define NETDEV_PR_FMT	"netdev:%s(%d)"
#define NETDEV_PR_ARG	__entry->name, __entry->ifindex

#define MESH_CFG_ENTRY __field(u16, dot11MeshRetryTimeout)		   \
		       __field(u16, dot11MeshConfirmTimeout)		   \
		       __field(u16, dot11MeshHoldingTimeout)		   \
		       __field(u16, dot11MeshMaxPeerLinks)		   \
		       __field(u8, dot11MeshMaxRetries)			   \
		       __field(u8, dot11MeshTTL)			   \
		       __field(u8, element_ttl)				   \
		       __field(bool, auto_open_plinks)			   \
		       __field(u32, dot11MeshNbrOffsetMaxNeighbor)	   \
		       __field(u8, dot11MeshHWMPmaxPREQretries)		   \
		       __field(u32, path_refresh_time)			   \
		       __field(u32, dot11MeshHWMPactivePathTimeout)	   \
		       __field(u16, min_discovery_timeout)		   \
		       __field(u16, dot11MeshHWMPpreqMinInterval)	   \
		       __field(u16, dot11MeshHWMPperrMinInterval)	   \
		       __field(u16, dot11MeshHWMPnetDiameterTraversalTime) \
		       __field(u8, dot11MeshHWMPRootMode)		   \
		       __field(u16, dot11MeshHWMPRannInterval)		   \
		       __field(bool, dot11MeshGateAnnouncementProtocol)	   \
		       __field(bool, dot11MeshForwarding)		   \
		       __field(s32, rssi_threshold)			   \
		       __field(u16, ht_opmode)				   \
		       __field(u32, dot11MeshHWMPactivePathToRootTimeout)  \
		       __field(u16, dot11MeshHWMProotInterval)		   \
		       __field(u16, dot11MeshHWMPconfirmationInterval)	   \
		       __field(bool, dot11MeshNolearn)
#define MESH_CFG_ASSIGN							      \
	do {								      \
		__entry->dot11MeshRetryTimeout = conf->dot11MeshRetryTimeout; \
		__entry->dot11MeshConfirmTimeout =			      \
				conf->dot11MeshConfirmTimeout;		      \
		__entry->dot11MeshHoldingTimeout =			      \
				conf->dot11MeshHoldingTimeout;		      \
		__entry->dot11MeshMaxPeerLinks = conf->dot11MeshMaxPeerLinks; \
		__entry->dot11MeshMaxRetries = conf->dot11MeshMaxRetries;     \
		__entry->dot11MeshTTL = conf->dot11MeshTTL;		      \
		__entry->element_ttl = conf->element_ttl;		      \
		__entry->auto_open_plinks = conf->auto_open_plinks;	      \
		__entry->dot11MeshNbrOffsetMaxNeighbor =		      \
				conf->dot11MeshNbrOffsetMaxNeighbor;	      \
		__entry->dot11MeshHWMPmaxPREQretries =			      \
				conf->dot11MeshHWMPmaxPREQretries;	      \
		__entry->path_refresh_time = conf->path_refresh_time;	      \
		__entry->dot11MeshHWMPactivePathTimeout =		      \
				conf->dot11MeshHWMPactivePathTimeout;	      \
		__entry->min_discovery_timeout = conf->min_discovery_timeout; \
		__entry->dot11MeshHWMPpreqMinInterval =			      \
				conf->dot11MeshHWMPpreqMinInterval;	      \
		__entry->dot11MeshHWMPperrMinInterval =			      \
				conf->dot11MeshHWMPperrMinInterval;	      \
		__entry->dot11MeshHWMPnetDiameterTraversalTime =	      \
				conf->dot11MeshHWMPnetDiameterTraversalTime;  \
		__entry->dot11MeshHWMPRootMode = conf->dot11MeshHWMPRootMode; \
		__entry->dot11MeshHWMPRannInterval =			      \
				conf->dot11MeshHWMPRannInterval;	      \
		__entry->dot11MeshGateAnnouncementProtocol =		      \
				conf->dot11MeshGateAnnouncementProtocol;      \
		__entry->dot11MeshForwarding = conf->dot11MeshForwarding;     \
		__entry->rssi_threshold = conf->rssi_threshold;		      \
		__entry->ht_opmode = conf->ht_opmode;			      \
		__entry->dot11MeshHWMPactivePathToRootTimeout =		      \
				conf->dot11MeshHWMPactivePathToRootTimeout;   \
		__entry->dot11MeshHWMProotInterval =			      \
				conf->dot11MeshHWMProotInterval;	      \
		__entry->dot11MeshHWMPconfirmationInterval =		      \
				conf->dot11MeshHWMPconfirmationInterval;      \
		__entry->dot11MeshNolearn = conf->dot11MeshNolearn;	      \
	} while (0)

#define CHAN_ENTRY __field(enum nl80211_band, band)	  \
		   __field(u32, center_freq)		  \
		   __field(u16, freq_offset)
#define CHAN_ASSIGN(chan)					  \
	do {							  \
		if (chan) {					  \
			__entry->band = chan->band;		  \
			__entry->center_freq = chan->center_freq; \
			__entry->freq_offset = chan->freq_offset; \
		}
else { \ __entry->band = 0; \ __entry->center_freq = 0; \ __entry->freq_offset = 0; \ } \ } while (0) #define CHAN_PR_FMT "band: %d, freq: %u.%03u" #define CHAN_PR_ARG __entry->band, __entry->center_freq, __entry->freq_offset #define CHAN_DEF_ENTRY __field(enum nl80211_band, band) \ __field(u32, control_freq) \ __field(u32, freq_offset) \ __field(u32, width) \ __field(u32, center_freq1) \ __field(u32, freq1_offset) \ __field(u32, center_freq2) \ __field(u16, punctured) #define CHAN_DEF_ASSIGN(chandef) \ do { \ if ((chandef) && (chandef)->chan) { \ __entry->band = (chandef)->chan->band; \ __entry->control_freq = \ (chandef)->chan->center_freq; \ __entry->freq_offset = \ (chandef)->chan->freq_offset; \ __entry->width = (chandef)->width; \ __entry->center_freq1 = (chandef)->center_freq1;\ __entry->freq1_offset = (chandef)->freq1_offset;\ __entry->center_freq2 = (chandef)->center_freq2;\ __entry->punctured = (chandef)->punctured; \ } else { \ __entry->band = 0; \ __entry->control_freq = 0; \ __entry->freq_offset = 0; \ __entry->width = 0; \ __entry->center_freq1 = 0; \ __entry->freq1_offset = 0; \ __entry->center_freq2 = 0; \ __entry->punctured = 0; \ } \ } while (0) #define CHAN_DEF_PR_FMT \ "band: %d, control freq: %u.%03u, width: %d, cf1: %u.%03u, cf2: %u, punct: 0x%x" #define CHAN_DEF_PR_ARG __entry->band, __entry->control_freq, \ __entry->freq_offset, __entry->width, \ __entry->center_freq1, __entry->freq1_offset, \ __entry->center_freq2, __entry->punctured #define FILS_AAD_ASSIGN(fa) \ do { \ if (fa) { \ ether_addr_copy(__entry->macaddr, fa->macaddr); \ __entry->kek_len = fa->kek_len; \ } else { \ eth_zero_addr(__entry->macaddr); \ __entry->kek_len = 0; \ } \ } while (0) #define FILS_AAD_PR_FMT \ "macaddr: %pM, kek_len: %d" #define SINFO_ENTRY __field(int, generation) \ __field(u32, connected_time) \ __field(u32, inactive_time) \ __field(u32, rx_bytes) \ __field(u32, tx_bytes) \ __field(u32, rx_packets) \ __field(u32, tx_packets) \ __field(u32, tx_retries) \ __field(u32, tx_failed) \ __field(u32, rx_dropped_misc) \ __field(u32, beacon_loss_count) \ __field(u16, llid) \ __field(u16, plid) \ __field(u8, plink_state) #define SINFO_ASSIGN \ do { \ __entry->generation = sinfo->generation; \ __entry->connected_time = sinfo->connected_time; \ __entry->inactive_time = sinfo->inactive_time; \ __entry->rx_bytes = sinfo->rx_bytes; \ __entry->tx_bytes = sinfo->tx_bytes; \ __entry->rx_packets = sinfo->rx_packets; \ __entry->tx_packets = sinfo->tx_packets; \ __entry->tx_retries = sinfo->tx_retries; \ __entry->tx_failed = sinfo->tx_failed; \ __entry->rx_dropped_misc = sinfo->rx_dropped_misc; \ __entry->beacon_loss_count = sinfo->beacon_loss_count; \ __entry->llid = sinfo->llid; \ __entry->plid = sinfo->plid; \ __entry->plink_state = sinfo->plink_state; \ } while (0) #define BOOL_TO_STR(bo) (bo) ? 
"true" : "false" #define QOS_MAP_ENTRY __field(u8, num_des) \ __array(u8, dscp_exception, \ 2 * IEEE80211_QOS_MAP_MAX_EX) \ __array(u8, up, IEEE80211_QOS_MAP_LEN_MIN) #define QOS_MAP_ASSIGN(qos_map) \ do { \ if ((qos_map)) { \ __entry->num_des = (qos_map)->num_des; \ memcpy(__entry->dscp_exception, \ &(qos_map)->dscp_exception, \ 2 * IEEE80211_QOS_MAP_MAX_EX); \ memcpy(__entry->up, &(qos_map)->up, \ IEEE80211_QOS_MAP_LEN_MIN); \ } else { \ __entry->num_des = 0; \ memset(__entry->dscp_exception, 0, \ 2 * IEEE80211_QOS_MAP_MAX_EX); \ memset(__entry->up, 0, \ IEEE80211_QOS_MAP_LEN_MIN); \ } \ } while (0) /************************************************************* * wiphy work traces * *************************************************************/ DECLARE_EVENT_CLASS(wiphy_work_event, TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work), TP_ARGS(wiphy, work), TP_STRUCT__entry( WIPHY_ENTRY __field(void *, instance) __field(void *, func) ), TP_fast_assign( WIPHY_ASSIGN; __entry->instance = work; __entry->func = work ? work->func : NULL; ), TP_printk(WIPHY_PR_FMT " instance=%p func=%pS", WIPHY_PR_ARG, __entry->instance, __entry->func) ); DEFINE_EVENT(wiphy_work_event, wiphy_work_queue, TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work), TP_ARGS(wiphy, work) ); DEFINE_EVENT(wiphy_work_event, wiphy_work_run, TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work), TP_ARGS(wiphy, work) ); DEFINE_EVENT(wiphy_work_event, wiphy_work_cancel, TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work), TP_ARGS(wiphy, work) ); DEFINE_EVENT(wiphy_work_event, wiphy_work_flush, TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work), TP_ARGS(wiphy, work) ); TRACE_EVENT(wiphy_delayed_work_queue, TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work, unsigned long delay), TP_ARGS(wiphy, work, delay), TP_STRUCT__entry( WIPHY_ENTRY __field(void *, instance) __field(void *, func) __field(unsigned long, delay) ), TP_fast_assign( WIPHY_ASSIGN; __entry->instance = work; __entry->func = work->func; __entry->delay = delay; ), TP_printk(WIPHY_PR_FMT " instance=%p func=%pS delay=%ld", WIPHY_PR_ARG, __entry->instance, __entry->func, __entry->delay) ); TRACE_EVENT(wiphy_hrtimer_work_queue, TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work, ktime_t delay), TP_ARGS(wiphy, work, delay), TP_STRUCT__entry( WIPHY_ENTRY __field(void *, instance) __field(void *, func) __field(ktime_t, delay) ), TP_fast_assign( WIPHY_ASSIGN; __entry->instance = work; __entry->func = work->func; __entry->delay = delay; ), TP_printk(WIPHY_PR_FMT " instance=%p func=%pS delay=%llu", WIPHY_PR_ARG, __entry->instance, __entry->func, __entry->delay) ); TRACE_EVENT(wiphy_work_worker_start, TP_PROTO(struct wiphy *wiphy), TP_ARGS(wiphy), TP_STRUCT__entry( WIPHY_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; ), TP_printk(WIPHY_PR_FMT, WIPHY_PR_ARG) ); /************************************************************* * rdev->ops traces * *************************************************************/ TRACE_EVENT(rdev_suspend, TP_PROTO(struct wiphy *wiphy, struct cfg80211_wowlan *wow), TP_ARGS(wiphy, wow), TP_STRUCT__entry( WIPHY_ENTRY __field(bool, any) __field(bool, disconnect) __field(bool, magic_pkt) __field(bool, gtk_rekey_failure) __field(bool, eap_identity_req) __field(bool, four_way_handshake) __field(bool, rfkill_release) __field(bool, valid_wow) ), TP_fast_assign( WIPHY_ASSIGN; if (wow) { __entry->any = wow->any; __entry->disconnect = wow->disconnect; __entry->magic_pkt = wow->magic_pkt; __entry->gtk_rekey_failure = wow->gtk_rekey_failure; 
__entry->eap_identity_req = wow->eap_identity_req; __entry->four_way_handshake = wow->four_way_handshake; __entry->rfkill_release = wow->rfkill_release; __entry->valid_wow = true; } else { __entry->valid_wow = false; } ), TP_printk(WIPHY_PR_FMT ", wow%s - any: %d, disconnect: %d, " "magic pkt: %d, gtk rekey failure: %d, eap identify req: %d, " "four way handshake: %d, rfkill release: %d.", WIPHY_PR_ARG, __entry->valid_wow ? "" : "(Not configured!)", __entry->any, __entry->disconnect, __entry->magic_pkt, __entry->gtk_rekey_failure, __entry->eap_identity_req, __entry->four_way_handshake, __entry->rfkill_release) ); TRACE_EVENT(rdev_return_int, TP_PROTO(struct wiphy *wiphy, int ret), TP_ARGS(wiphy, ret), TP_STRUCT__entry( WIPHY_ENTRY __field(int, ret) ), TP_fast_assign( WIPHY_ASSIGN; __entry->ret = ret; ), TP_printk(WIPHY_PR_FMT ", returned: %d", WIPHY_PR_ARG, __entry->ret) ); TRACE_EVENT(rdev_scan, TP_PROTO(struct wiphy *wiphy, struct cfg80211_scan_request_int *request), TP_ARGS(wiphy, request), TP_STRUCT__entry( WIPHY_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; ), TP_printk(WIPHY_PR_FMT, WIPHY_PR_ARG) ); DECLARE_EVENT_CLASS(wiphy_only_evt, TP_PROTO(struct wiphy *wiphy), TP_ARGS(wiphy), TP_STRUCT__entry( WIPHY_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; ), TP_printk(WIPHY_PR_FMT, WIPHY_PR_ARG) ); DEFINE_EVENT(wiphy_only_evt, rdev_resume, TP_PROTO(struct wiphy *wiphy), TP_ARGS(wiphy) ); DEFINE_EVENT(wiphy_only_evt, rdev_return_void, TP_PROTO(struct wiphy *wiphy), TP_ARGS(wiphy) ); TRACE_EVENT(rdev_get_antenna, TP_PROTO(struct wiphy *wiphy, int radio_idx), TP_ARGS(wiphy, radio_idx), TP_STRUCT__entry( WIPHY_ENTRY __field(int, radio_idx) ), TP_fast_assign( WIPHY_ASSIGN; __entry->radio_idx = radio_idx; ), TP_printk(WIPHY_PR_FMT ", radio_idx: %d", WIPHY_PR_ARG, __entry->radio_idx) ); DEFINE_EVENT(wiphy_only_evt, rdev_rfkill_poll, TP_PROTO(struct wiphy *wiphy), TP_ARGS(wiphy) ); DECLARE_EVENT_CLASS(wiphy_enabled_evt, TP_PROTO(struct wiphy *wiphy, bool enabled), TP_ARGS(wiphy, enabled), TP_STRUCT__entry( WIPHY_ENTRY __field(bool, enabled) ), TP_fast_assign( WIPHY_ASSIGN; __entry->enabled = enabled; ), TP_printk(WIPHY_PR_FMT ", %senabled ", WIPHY_PR_ARG, __entry->enabled ? "" : "not ") ); DEFINE_EVENT(wiphy_enabled_evt, rdev_set_wakeup, TP_PROTO(struct wiphy *wiphy, bool enabled), TP_ARGS(wiphy, enabled) ); TRACE_EVENT(rdev_add_virtual_intf, TP_PROTO(struct wiphy *wiphy, char *name, enum nl80211_iftype type), TP_ARGS(wiphy, name, type), TP_STRUCT__entry( WIPHY_ENTRY __string(vir_intf_name, name ? 
name : "<noname>") __field(enum nl80211_iftype, type) ), TP_fast_assign( WIPHY_ASSIGN; __assign_str(vir_intf_name); __entry->type = type; ), TP_printk(WIPHY_PR_FMT ", virtual intf name: %s, type: %d", WIPHY_PR_ARG, __get_str(vir_intf_name), __entry->type) ); DECLARE_EVENT_CLASS(wiphy_wdev_evt, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), TP_ARGS(wiphy, wdev), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG) ); DECLARE_EVENT_CLASS(wiphy_wdev_cookie_evt, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie), TP_ARGS(wiphy, wdev, cookie), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY __field(u64, cookie) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->cookie = cookie; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", cookie: %lld", WIPHY_PR_ARG, WDEV_PR_ARG, (unsigned long long)__entry->cookie) ); DEFINE_EVENT(wiphy_wdev_evt, rdev_return_wdev, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), TP_ARGS(wiphy, wdev) ); DEFINE_EVENT(wiphy_wdev_evt, rdev_del_virtual_intf, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), TP_ARGS(wiphy, wdev) ); TRACE_EVENT(rdev_change_virtual_intf, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, enum nl80211_iftype type), TP_ARGS(wiphy, netdev, type), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(enum nl80211_iftype, type) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->type = type; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", type: %d", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->type) ); DECLARE_EVENT_CLASS(key_handle, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int link_id, u8 key_index, bool pairwise, const u8 *mac_addr), TP_ARGS(wiphy, netdev, link_id, key_index, pairwise, mac_addr), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(mac_addr) __field(int, link_id) __field(u8, key_index) __field(bool, pairwise) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(mac_addr, mac_addr); __entry->link_id = link_id; __entry->key_index = key_index; __entry->pairwise = pairwise; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", link_id: %d, " "key_index: %u, pairwise: %s, mac addr: %pM", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->link_id, __entry->key_index, BOOL_TO_STR(__entry->pairwise), __entry->mac_addr) ); DEFINE_EVENT(key_handle, rdev_get_key, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int link_id, u8 key_index, bool pairwise, const u8 *mac_addr), TP_ARGS(wiphy, netdev, link_id, key_index, pairwise, mac_addr) ); DEFINE_EVENT(key_handle, rdev_del_key, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int link_id, u8 key_index, bool pairwise, const u8 *mac_addr), TP_ARGS(wiphy, netdev, link_id, key_index, pairwise, mac_addr) ); TRACE_EVENT(rdev_add_key, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int link_id, u8 key_index, bool pairwise, const u8 *mac_addr, u8 mode), TP_ARGS(wiphy, netdev, link_id, key_index, pairwise, mac_addr, mode), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(mac_addr) __field(int, link_id) __field(u8, key_index) __field(bool, pairwise) __field(u8, mode) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(mac_addr, mac_addr); __entry->link_id = link_id; __entry->key_index = key_index; __entry->pairwise = pairwise; __entry->mode = mode; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", link_id: %d, " "key_index: %u, mode: %u, pairwise: %s, " "mac addr: %pM", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->link_id, 
__entry->key_index, __entry->mode, BOOL_TO_STR(__entry->pairwise), __entry->mac_addr) ); TRACE_EVENT(rdev_set_default_key, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int link_id, u8 key_index, bool unicast, bool multicast), TP_ARGS(wiphy, netdev, link_id, key_index, unicast, multicast), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(int, link_id) __field(u8, key_index) __field(bool, unicast) __field(bool, multicast) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->link_id = link_id; __entry->key_index = key_index; __entry->unicast = unicast; __entry->multicast = multicast; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", link_id: %d, " "key index: %u, unicast: %s, multicast: %s", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->link_id, __entry->key_index, BOOL_TO_STR(__entry->unicast), BOOL_TO_STR(__entry->multicast)) ); TRACE_EVENT(rdev_set_default_mgmt_key, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int link_id, u8 key_index), TP_ARGS(wiphy, netdev, link_id, key_index), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(int, link_id) __field(u8, key_index) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->link_id = link_id; __entry->key_index = key_index; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", link_id: %d, " "key index: %u", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->link_id, __entry->key_index) ); TRACE_EVENT(rdev_set_default_beacon_key, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int link_id, u8 key_index), TP_ARGS(wiphy, netdev, link_id, key_index), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(int, link_id) __field(u8, key_index) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->link_id = link_id; __entry->key_index = key_index; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", link_id: %d, " "key index: %u", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->link_id, __entry->key_index) ); TRACE_EVENT(rdev_start_ap, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_ap_settings *settings), TP_ARGS(wiphy, netdev, settings), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY CHAN_DEF_ENTRY __field(int, beacon_interval) __field(int, dtim_period) __array(char, ssid, IEEE80211_MAX_SSID_LEN + 1) __field(enum nl80211_hidden_ssid, hidden_ssid) __field(u32, wpa_ver) __field(bool, privacy) __field(enum nl80211_auth_type, auth_type) __field(int, inactivity_timeout) __field(unsigned int, link_id) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; CHAN_DEF_ASSIGN(&settings->chandef); __entry->beacon_interval = settings->beacon_interval; __entry->dtim_period = settings->dtim_period; __entry->hidden_ssid = settings->hidden_ssid; __entry->wpa_ver = settings->crypto.wpa_versions; __entry->privacy = settings->privacy; __entry->auth_type = settings->auth_type; __entry->inactivity_timeout = settings->inactivity_timeout; memset(__entry->ssid, 0, IEEE80211_MAX_SSID_LEN + 1); memcpy(__entry->ssid, settings->ssid, settings->ssid_len); __entry->link_id = settings->beacon.link_id; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", AP settings - ssid: %s, " CHAN_DEF_PR_FMT ", beacon interval: %d, dtim period: %d, " "hidden ssid: %d, wpa versions: %u, privacy: %s, " "auth type: %d, inactivity timeout: %d, link_id: %d", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->ssid, CHAN_DEF_PR_ARG, __entry->beacon_interval, __entry->dtim_period, __entry->hidden_ssid, __entry->wpa_ver, BOOL_TO_STR(__entry->privacy), __entry->auth_type, __entry->inactivity_timeout, __entry->link_id) ); TRACE_EVENT(rdev_change_beacon, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, 
struct cfg80211_ap_update *info), TP_ARGS(wiphy, netdev, info), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(int, link_id) __dynamic_array(u8, head, info->beacon.head_len) __dynamic_array(u8, tail, info->beacon.tail_len) __dynamic_array(u8, beacon_ies, info->beacon.beacon_ies_len) __dynamic_array(u8, proberesp_ies, info->beacon.proberesp_ies_len) __dynamic_array(u8, assocresp_ies, info->beacon.assocresp_ies_len) __dynamic_array(u8, probe_resp, info->beacon.probe_resp_len) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->link_id = info->beacon.link_id; if (info->beacon.head) memcpy(__get_dynamic_array(head), info->beacon.head, info->beacon.head_len); if (info->beacon.tail) memcpy(__get_dynamic_array(tail), info->beacon.tail, info->beacon.tail_len); if (info->beacon.beacon_ies) memcpy(__get_dynamic_array(beacon_ies), info->beacon.beacon_ies, info->beacon.beacon_ies_len); if (info->beacon.proberesp_ies) memcpy(__get_dynamic_array(proberesp_ies), info->beacon.proberesp_ies, info->beacon.proberesp_ies_len); if (info->beacon.assocresp_ies) memcpy(__get_dynamic_array(assocresp_ies), info->beacon.assocresp_ies, info->beacon.assocresp_ies_len); if (info->beacon.probe_resp) memcpy(__get_dynamic_array(probe_resp), info->beacon.probe_resp, info->beacon.probe_resp_len); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", link_id:%d", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->link_id) ); TRACE_EVENT(rdev_stop_ap, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, unsigned int link_id), TP_ARGS(wiphy, netdev, link_id), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(unsigned int, link_id) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->link_id = link_id; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", link_id: %d", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->link_id) ); DECLARE_EVENT_CLASS(wiphy_netdev_evt, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), TP_ARGS(wiphy, netdev), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG) ); DEFINE_EVENT(wiphy_netdev_evt, rdev_set_rekey_data, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), TP_ARGS(wiphy, netdev) ); DEFINE_EVENT(wiphy_netdev_evt, rdev_get_mesh_config, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), TP_ARGS(wiphy, netdev) ); DEFINE_EVENT(wiphy_netdev_evt, rdev_leave_mesh, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), TP_ARGS(wiphy, netdev) ); DEFINE_EVENT(wiphy_netdev_evt, rdev_leave_ibss, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), TP_ARGS(wiphy, netdev) ); DEFINE_EVENT(wiphy_netdev_evt, rdev_leave_ocb, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), TP_ARGS(wiphy, netdev) ); DEFINE_EVENT(wiphy_netdev_evt, rdev_flush_pmksa, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev), TP_ARGS(wiphy, netdev) ); TRACE_EVENT(rdev_end_cac, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, unsigned int link_id), TP_ARGS(wiphy, netdev, link_id), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(unsigned int, link_id) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->link_id = link_id; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", link_id: %d", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->link_id) ); DECLARE_EVENT_CLASS(station_add_change, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *mac, struct station_parameters *params), TP_ARGS(wiphy, netdev, mac, params), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(sta_mac) __field(u32, 
sta_flags_mask) __field(u32, sta_flags_set) __field(u32, sta_modify_mask) __field(int, listen_interval) __field(u16, capability) __field(u16, aid) __field(u8, plink_action) __field(u8, plink_state) __field(u8, uapsd_queues) __field(u8, max_sp) __field(u8, opmode_notif) __field(bool, opmode_notif_used) __array(u8, ht_capa, (int)sizeof(struct ieee80211_ht_cap)) __array(u8, vht_capa, (int)sizeof(struct ieee80211_vht_cap)) __array(char, vlan, IFNAMSIZ) __dynamic_array(u8, supported_rates, params->link_sta_params.supported_rates_len) __dynamic_array(u8, ext_capab, params->ext_capab_len) __dynamic_array(u8, supported_channels, params->supported_channels_len) __dynamic_array(u8, supported_oper_classes, params->supported_oper_classes_len) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(sta_mac, mac); __entry->sta_flags_mask = params->sta_flags_mask; __entry->sta_flags_set = params->sta_flags_set; __entry->sta_modify_mask = params->sta_modify_mask; __entry->listen_interval = params->listen_interval; __entry->aid = params->aid; __entry->plink_action = params->plink_action; __entry->plink_state = params->plink_state; __entry->uapsd_queues = params->uapsd_queues; memset(__entry->ht_capa, 0, sizeof(struct ieee80211_ht_cap)); if (params->link_sta_params.ht_capa) memcpy(__entry->ht_capa, params->link_sta_params.ht_capa, sizeof(struct ieee80211_ht_cap)); memset(__entry->vht_capa, 0, sizeof(struct ieee80211_vht_cap)); if (params->link_sta_params.vht_capa) memcpy(__entry->vht_capa, params->link_sta_params.vht_capa, sizeof(struct ieee80211_vht_cap)); memset(__entry->vlan, 0, sizeof(__entry->vlan)); if (params->vlan) memcpy(__entry->vlan, params->vlan->name, IFNAMSIZ); if (params->link_sta_params.supported_rates && params->link_sta_params.supported_rates_len) memcpy(__get_dynamic_array(supported_rates), params->link_sta_params.supported_rates, params->link_sta_params.supported_rates_len); if (params->ext_capab && params->ext_capab_len) memcpy(__get_dynamic_array(ext_capab), params->ext_capab, params->ext_capab_len); if (params->supported_channels && params->supported_channels_len) memcpy(__get_dynamic_array(supported_channels), params->supported_channels, params->supported_channels_len); if (params->supported_oper_classes && params->supported_oper_classes_len) memcpy(__get_dynamic_array(supported_oper_classes), params->supported_oper_classes, params->supported_oper_classes_len); __entry->max_sp = params->max_sp; __entry->capability = params->capability; __entry->opmode_notif = params->link_sta_params.opmode_notif; __entry->opmode_notif_used = params->link_sta_params.opmode_notif_used; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: %pM" ", station flags mask: 0x%x, station flags set: 0x%x, " "station modify mask: 0x%x, listen interval: %d, aid: %u, " "plink action: %u, plink state: %u, uapsd queues: %u, vlan:%s", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->sta_mac, __entry->sta_flags_mask, __entry->sta_flags_set, __entry->sta_modify_mask, __entry->listen_interval, __entry->aid, __entry->plink_action, __entry->plink_state, __entry->uapsd_queues, __entry->vlan) ); DEFINE_EVENT(station_add_change, rdev_add_station, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *mac, struct station_parameters *params), TP_ARGS(wiphy, netdev, mac, params) ); DEFINE_EVENT(station_add_change, rdev_change_station, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *mac, struct station_parameters *params), TP_ARGS(wiphy, netdev, mac, params) ); DECLARE_EVENT_CLASS(wiphy_netdev_mac_evt, 
TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *mac), TP_ARGS(wiphy, netdev, mac), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(sta_mac) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(sta_mac, mac); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", mac: %pM", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->sta_mac) ); DECLARE_EVENT_CLASS(station_del, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct station_del_parameters *params), TP_ARGS(wiphy, netdev, params), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(sta_mac) __field(u8, subtype) __field(u16, reason_code) __field(int, link_id) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(sta_mac, params->mac); __entry->subtype = params->subtype; __entry->reason_code = params->reason_code; __entry->link_id = params->link_id; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: %pM" ", subtype: %u, reason_code: %u, link_id: %d", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->sta_mac, __entry->subtype, __entry->reason_code, __entry->link_id) ); DEFINE_EVENT(station_del, rdev_del_station, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct station_del_parameters *params), TP_ARGS(wiphy, netdev, params) ); DEFINE_EVENT(wiphy_netdev_mac_evt, rdev_get_station, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *mac), TP_ARGS(wiphy, netdev, mac) ); DEFINE_EVENT(wiphy_netdev_mac_evt, rdev_del_mpath, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *mac), TP_ARGS(wiphy, netdev, mac) ); TRACE_EVENT(rdev_dump_station, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int _idx, u8 *mac), TP_ARGS(wiphy, netdev, _idx, mac), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(sta_mac) __field(int, idx) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(sta_mac, mac); __entry->idx = _idx; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: %pM, idx: %d", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->sta_mac, __entry->idx) ); TRACE_EVENT(rdev_return_int_station_info, TP_PROTO(struct wiphy *wiphy, int ret, struct station_info *sinfo), TP_ARGS(wiphy, ret, sinfo), TP_STRUCT__entry( WIPHY_ENTRY __field(int, ret) SINFO_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; __entry->ret = ret; SINFO_ASSIGN; ), TP_printk(WIPHY_PR_FMT ", returned %d" , WIPHY_PR_ARG, __entry->ret) ); DECLARE_EVENT_CLASS(mpath_evt, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *dst, u8 *next_hop), TP_ARGS(wiphy, netdev, dst, next_hop), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(dst) MAC_ENTRY(next_hop) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(dst, dst); MAC_ASSIGN(next_hop, next_hop); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", destination: %pM, next hop: %pM", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->dst, __entry->next_hop) ); DEFINE_EVENT(mpath_evt, rdev_add_mpath, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *dst, u8 *next_hop), TP_ARGS(wiphy, netdev, dst, next_hop) ); DEFINE_EVENT(mpath_evt, rdev_change_mpath, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *dst, u8 *next_hop), TP_ARGS(wiphy, netdev, dst, next_hop) ); DEFINE_EVENT(mpath_evt, rdev_get_mpath, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *dst, u8 *next_hop), TP_ARGS(wiphy, netdev, dst, next_hop) ); TRACE_EVENT(rdev_dump_mpath, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int _idx, u8 *dst, u8 *next_hop), TP_ARGS(wiphy, netdev, _idx, dst, next_hop), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(dst) MAC_ENTRY(next_hop) 
__field(int, idx) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(dst, dst); MAC_ASSIGN(next_hop, next_hop); __entry->idx = _idx; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", index: %d, destination: %pM, next hop: %pM", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->idx, __entry->dst, __entry->next_hop) ); TRACE_EVENT(rdev_get_mpp, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *dst, u8 *mpp), TP_ARGS(wiphy, netdev, dst, mpp), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(dst) MAC_ENTRY(mpp) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(dst, dst); MAC_ASSIGN(mpp, mpp); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", destination: %pM" ", mpp: %pM", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->dst, __entry->mpp) ); TRACE_EVENT(rdev_dump_mpp, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int _idx, u8 *dst, u8 *mpp), TP_ARGS(wiphy, netdev, _idx, dst, mpp), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(dst) MAC_ENTRY(mpp) __field(int, idx) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(dst, dst); MAC_ASSIGN(mpp, mpp); __entry->idx = _idx; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", index: %d, destination: %pM, mpp: %pM", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->idx, __entry->dst, __entry->mpp) ); TRACE_EVENT(rdev_return_int_mpath_info, TP_PROTO(struct wiphy *wiphy, int ret, struct mpath_info *pinfo), TP_ARGS(wiphy, ret, pinfo), TP_STRUCT__entry( WIPHY_ENTRY __field(int, ret) __field(int, generation) __field(u32, filled) __field(u32, frame_qlen) __field(u32, sn) __field(u32, metric) __field(u32, exptime) __field(u32, discovery_timeout) __field(u8, discovery_retries) __field(u8, flags) ), TP_fast_assign( WIPHY_ASSIGN; __entry->ret = ret; __entry->generation = pinfo->generation; __entry->filled = pinfo->filled; __entry->frame_qlen = pinfo->frame_qlen; __entry->sn = pinfo->sn; __entry->metric = pinfo->metric; __entry->exptime = pinfo->exptime; __entry->discovery_timeout = pinfo->discovery_timeout; __entry->discovery_retries = pinfo->discovery_retries; __entry->flags = pinfo->flags; ), TP_printk(WIPHY_PR_FMT ", returned %d. 
mpath info - generation: %d, " "filled: %u, frame qlen: %u, sn: %u, metric: %u, exptime: %u," " discovery timeout: %u, discovery retries: %u, flags: 0x%x", WIPHY_PR_ARG, __entry->ret, __entry->generation, __entry->filled, __entry->frame_qlen, __entry->sn, __entry->metric, __entry->exptime, __entry->discovery_timeout, __entry->discovery_retries, __entry->flags) ); TRACE_EVENT(rdev_return_int_mesh_config, TP_PROTO(struct wiphy *wiphy, int ret, struct mesh_config *conf), TP_ARGS(wiphy, ret, conf), TP_STRUCT__entry( WIPHY_ENTRY MESH_CFG_ENTRY __field(int, ret) ), TP_fast_assign( WIPHY_ASSIGN; MESH_CFG_ASSIGN; __entry->ret = ret; ), TP_printk(WIPHY_PR_FMT ", returned: %d", WIPHY_PR_ARG, __entry->ret) ); TRACE_EVENT(rdev_update_mesh_config, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u32 mask, const struct mesh_config *conf), TP_ARGS(wiphy, netdev, mask, conf), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MESH_CFG_ENTRY __field(u32, mask) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MESH_CFG_ASSIGN; __entry->mask = mask; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", mask: %u", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->mask) ); TRACE_EVENT(rdev_join_mesh, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const struct mesh_config *conf, const struct mesh_setup *setup), TP_ARGS(wiphy, netdev, conf, setup), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MESH_CFG_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MESH_CFG_ASSIGN; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG) ); TRACE_EVENT(rdev_change_bss, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct bss_parameters *params), TP_ARGS(wiphy, netdev, params), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(int, use_cts_prot) __field(int, use_short_preamble) __field(int, use_short_slot_time) __field(int, ap_isolate) __field(int, ht_opmode) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->use_cts_prot = params->use_cts_prot; __entry->use_short_preamble = params->use_short_preamble; __entry->use_short_slot_time = params->use_short_slot_time; __entry->ap_isolate = params->ap_isolate; __entry->ht_opmode = params->ht_opmode; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", use cts prot: %d, " "use short preamble: %d, use short slot time: %d, " "ap isolate: %d, ht opmode: %d", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->use_cts_prot, __entry->use_short_preamble, __entry->use_short_slot_time, __entry->ap_isolate, __entry->ht_opmode) ); TRACE_EVENT(rdev_inform_bss, TP_PROTO(struct wiphy *wiphy, struct cfg80211_bss *bss), TP_ARGS(wiphy, bss), TP_STRUCT__entry( WIPHY_ENTRY MAC_ENTRY(bssid) CHAN_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; MAC_ASSIGN(bssid, bss->bssid); CHAN_ASSIGN(bss->channel); ), TP_printk(WIPHY_PR_FMT ", %pM, " CHAN_PR_FMT, WIPHY_PR_ARG, __entry->bssid, CHAN_PR_ARG) ); TRACE_EVENT(rdev_set_txq_params, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct ieee80211_txq_params *params), TP_ARGS(wiphy, netdev, params), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(enum nl80211_ac, ac) __field(u16, txop) __field(u16, cwmin) __field(u16, cwmax) __field(u8, aifs) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->ac = params->ac; __entry->txop = params->txop; __entry->cwmin = params->cwmin; __entry->cwmax = params->cwmax; __entry->aifs = params->aifs; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", ac: %d, txop: %u, cwmin: %u, cwmax: %u, aifs: %u", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->ac, __entry->txop, __entry->cwmin, __entry->cwmax, __entry->aifs) ); 
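/*
 * Usage sketch (illustrative, assuming the usual wrapper shape in
 * net/wireless/rdev-ops.h): each TRACE_EVENT(rdev_*) in this file expands
 * to a trace_rdev_*() helper, and the cfg80211 rdev-ops wrappers typically
 * emit the call event before invoking the driver callback and a matching
 * rdev_return_* event with the result, roughly:
 *
 *	trace_rdev_set_txq_params(&rdev->wiphy, dev, params);
 *	ret = rdev->ops->set_txq_params(&rdev->wiphy, dev, params);
 *	trace_rdev_return_int(&rdev->wiphy, ret);
 */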
TRACE_EVENT(rdev_libertas_set_mesh_channel, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct ieee80211_channel *chan), TP_ARGS(wiphy, netdev, chan), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY CHAN_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; CHAN_ASSIGN(chan); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_PR_ARG) ); TRACE_EVENT(rdev_set_monitor_channel, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_chan_def *chandef), TP_ARGS(wiphy, netdev, chandef), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY CHAN_DEF_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; CHAN_DEF_ASSIGN(chandef); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG) ); TRACE_EVENT(rdev_auth, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_auth_request *req), TP_ARGS(wiphy, netdev, req), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(bssid) __field(enum nl80211_auth_type, auth_type) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; if (req->bss) MAC_ASSIGN(bssid, req->bss->bssid); else eth_zero_addr(__entry->bssid); __entry->auth_type = req->auth_type; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", auth type: %d, bssid: %pM", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->auth_type, __entry->bssid) ); TRACE_EVENT(rdev_assoc, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_assoc_request *req), TP_ARGS(wiphy, netdev, req), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(bssid) MAC_ENTRY(prev_bssid) __field(bool, use_mfp) __field(u32, flags) __dynamic_array(u8, elements, req->ie_len) __array(u8, ht_capa, sizeof(struct ieee80211_ht_cap)) __array(u8, ht_capa_mask, sizeof(struct ieee80211_ht_cap)) __array(u8, vht_capa, sizeof(struct ieee80211_vht_cap)) __array(u8, vht_capa_mask, sizeof(struct ieee80211_vht_cap)) __dynamic_array(u8, fils_kek, req->fils_kek_len) __dynamic_array(u8, fils_nonces, req->fils_nonces ? 
2 * FILS_NONCE_LEN : 0) __field(u16, ext_mld_capa_ops) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; if (req->bss) MAC_ASSIGN(bssid, req->bss->bssid); else eth_zero_addr(__entry->bssid); MAC_ASSIGN(prev_bssid, req->prev_bssid); __entry->use_mfp = req->use_mfp; __entry->flags = req->flags; if (req->ie) memcpy(__get_dynamic_array(elements), req->ie, req->ie_len); memcpy(__entry->ht_capa, &req->ht_capa, sizeof(req->ht_capa)); memcpy(__entry->ht_capa_mask, &req->ht_capa_mask, sizeof(req->ht_capa_mask)); memcpy(__entry->vht_capa, &req->vht_capa, sizeof(req->vht_capa)); memcpy(__entry->vht_capa_mask, &req->vht_capa_mask, sizeof(req->vht_capa_mask)); if (req->fils_kek) memcpy(__get_dynamic_array(fils_kek), req->fils_kek, req->fils_kek_len); if (req->fils_nonces) memcpy(__get_dynamic_array(fils_nonces), req->fils_nonces, 2 * FILS_NONCE_LEN); __entry->ext_mld_capa_ops = req->ext_mld_capa_ops; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: %pM" ", previous bssid: %pM, use mfp: %s, flags: 0x%x", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->bssid, __entry->prev_bssid, BOOL_TO_STR(__entry->use_mfp), __entry->flags) ); TRACE_EVENT(rdev_deauth, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_deauth_request *req), TP_ARGS(wiphy, netdev, req), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(bssid) __field(u16, reason_code) __field(bool, local_state_change) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(bssid, req->bssid); __entry->reason_code = req->reason_code; __entry->local_state_change = req->local_state_change; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: %pM, reason: %u, local_state_change:%d", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->bssid, __entry->reason_code, __entry->local_state_change) ); TRACE_EVENT(rdev_disassoc, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_disassoc_request *req), TP_ARGS(wiphy, netdev, req), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(bssid) __field(u16, reason_code) __field(bool, local_state_change) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(bssid, req->ap_addr); __entry->reason_code = req->reason_code; __entry->local_state_change = req->local_state_change; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: %pM" ", reason: %u, local state change: %s", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->bssid, __entry->reason_code, BOOL_TO_STR(__entry->local_state_change)) ); TRACE_EVENT(rdev_mgmt_tx_cancel_wait, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie), TP_ARGS(wiphy, wdev, cookie), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY __field(u64, cookie) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->cookie = cookie; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", cookie: %llu ", WIPHY_PR_ARG, WDEV_PR_ARG, __entry->cookie) ); TRACE_EVENT(rdev_set_power_mgmt, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, bool enabled, int timeout), TP_ARGS(wiphy, netdev, enabled, timeout), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(bool, enabled) __field(int, timeout) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->enabled = enabled; __entry->timeout = timeout; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %senabled, timeout: %d ", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->enabled ? 
"" : "not ", __entry->timeout) ); TRACE_EVENT(rdev_connect, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_connect_params *sme), TP_ARGS(wiphy, netdev, sme), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(bssid) __array(char, ssid, IEEE80211_MAX_SSID_LEN + 1) __field(enum nl80211_auth_type, auth_type) __field(bool, privacy) __field(u32, wpa_versions) __field(u32, flags) MAC_ENTRY(prev_bssid) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(bssid, sme->bssid); memset(__entry->ssid, 0, IEEE80211_MAX_SSID_LEN + 1); memcpy(__entry->ssid, sme->ssid, sme->ssid_len); __entry->auth_type = sme->auth_type; __entry->privacy = sme->privacy; __entry->wpa_versions = sme->crypto.wpa_versions; __entry->flags = sme->flags; MAC_ASSIGN(prev_bssid, sme->prev_bssid); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: %pM" ", ssid: %s, auth type: %d, privacy: %s, wpa versions: %u, " "flags: 0x%x, previous bssid: %pM", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->bssid, __entry->ssid, __entry->auth_type, BOOL_TO_STR(__entry->privacy), __entry->wpa_versions, __entry->flags, __entry->prev_bssid) ); TRACE_EVENT(rdev_update_connect_params, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_connect_params *sme, u32 changed), TP_ARGS(wiphy, netdev, sme, changed), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(u32, changed) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->changed = changed; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", parameters changed: %u", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->changed) ); TRACE_EVENT(rdev_set_cqm_rssi_config, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, s32 rssi_thold, u32 rssi_hyst), TP_ARGS(wiphy, netdev, rssi_thold, rssi_hyst), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(s32, rssi_thold) __field(u32, rssi_hyst) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->rssi_thold = rssi_thold; __entry->rssi_hyst = rssi_hyst; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", rssi_thold: %d, rssi_hyst: %u ", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->rssi_thold, __entry->rssi_hyst) ); TRACE_EVENT(rdev_set_cqm_rssi_range_config, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, s32 low, s32 high), TP_ARGS(wiphy, netdev, low, high), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(s32, rssi_low) __field(s32, rssi_high) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->rssi_low = low; __entry->rssi_high = high; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", range: %d - %d ", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->rssi_low, __entry->rssi_high) ); TRACE_EVENT(rdev_set_cqm_txe_config, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u32 rate, u32 pkts, u32 intvl), TP_ARGS(wiphy, netdev, rate, pkts, intvl), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(u32, rate) __field(u32, pkts) __field(u32, intvl) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->rate = rate; __entry->pkts = pkts; __entry->intvl = intvl; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", rate: %u, packets: %u, interval: %u", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->rate, __entry->pkts, __entry->intvl) ); TRACE_EVENT(rdev_disconnect, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u16 reason_code), TP_ARGS(wiphy, netdev, reason_code), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(u16, reason_code) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->reason_code = reason_code; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", reason code: %u", WIPHY_PR_ARG, NETDEV_PR_ARG, 
__entry->reason_code) ); TRACE_EVENT(rdev_join_ibss, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_ibss_params *params), TP_ARGS(wiphy, netdev, params), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(bssid) __array(char, ssid, IEEE80211_MAX_SSID_LEN + 1) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(bssid, params->bssid); memset(__entry->ssid, 0, IEEE80211_MAX_SSID_LEN + 1); memcpy(__entry->ssid, params->ssid, params->ssid_len); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: %pM, ssid: %s", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->bssid, __entry->ssid) ); TRACE_EVENT(rdev_join_ocb, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const struct ocb_setup *setup), TP_ARGS(wiphy, netdev, setup), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG) ); TRACE_EVENT(rdev_set_wiphy_params, TP_PROTO(struct wiphy *wiphy, int radio_idx, u32 changed), TP_ARGS(wiphy, radio_idx, changed), TP_STRUCT__entry( WIPHY_ENTRY __field(int, radio_idx) __field(u32, changed) ), TP_fast_assign( WIPHY_ASSIGN; __entry->radio_idx = radio_idx; __entry->changed = changed; ), TP_printk(WIPHY_PR_FMT ", radio_idx: %d, changed: %u", WIPHY_PR_ARG, __entry->radio_idx, __entry->changed) ); DECLARE_EVENT_CLASS(wiphy_wdev_link_evt, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, unsigned int link_id), TP_ARGS(wiphy, wdev, link_id), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY __field(unsigned int, link_id) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->link_id = link_id; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", link_id: %u", WIPHY_PR_ARG, WDEV_PR_ARG, __entry->link_id) ); TRACE_EVENT(rdev_get_tx_power, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, int radio_idx, unsigned int link_id), TP_ARGS(wiphy, wdev, radio_idx, link_id), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY __field(int, radio_idx) __field(unsigned int, link_id) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->radio_idx = radio_idx; __entry->link_id = link_id; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", radio_idx: %d, link_id: %u", WIPHY_PR_ARG, WDEV_PR_ARG, __entry->radio_idx, __entry->link_id) ); TRACE_EVENT(rdev_set_tx_power, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, int radio_idx, enum nl80211_tx_power_setting type, int mbm), TP_ARGS(wiphy, wdev, radio_idx, type, mbm), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY __field(int, radio_idx) __field(enum nl80211_tx_power_setting, type) __field(int, mbm) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->radio_idx = radio_idx; __entry->type = type; __entry->mbm = mbm; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", radio_idx: %d, type: %u, mbm: %d", WIPHY_PR_ARG, WDEV_PR_ARG, __entry->radio_idx, __entry->type, __entry->mbm) ); TRACE_EVENT(rdev_return_int_int, TP_PROTO(struct wiphy *wiphy, int func_ret, int func_fill), TP_ARGS(wiphy, func_ret, func_fill), TP_STRUCT__entry( WIPHY_ENTRY __field(int, func_ret) __field(int, func_fill) ), TP_fast_assign( WIPHY_ASSIGN; __entry->func_ret = func_ret; __entry->func_fill = func_fill; ), TP_printk(WIPHY_PR_FMT ", function returns: %d, function filled: %d", WIPHY_PR_ARG, __entry->func_ret, __entry->func_fill) ); #ifdef CONFIG_NL80211_TESTMODE TRACE_EVENT(rdev_testmode_cmd, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), TP_ARGS(wiphy, wdev), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; ), TP_printk(WIPHY_PR_FMT 
WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG) ); TRACE_EVENT(rdev_testmode_dump, TP_PROTO(struct wiphy *wiphy), TP_ARGS(wiphy), TP_STRUCT__entry( WIPHY_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; ), TP_printk(WIPHY_PR_FMT, WIPHY_PR_ARG) ); #endif /* CONFIG_NL80211_TESTMODE */ TRACE_EVENT(rdev_set_bitrate_mask, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, unsigned int link_id, const u8 *peer, const struct cfg80211_bitrate_mask *mask), TP_ARGS(wiphy, netdev, link_id, peer, mask), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(unsigned int, link_id) MAC_ENTRY(peer) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->link_id = link_id; MAC_ASSIGN(peer, peer); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", link_id: %d, peer: %pM", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->link_id, __entry->peer) ); TRACE_EVENT(rdev_update_mgmt_frame_registrations, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, struct mgmt_frame_regs *upd), TP_ARGS(wiphy, wdev, upd), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY __field(u16, global_stypes) __field(u16, interface_stypes) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->global_stypes = upd->global_stypes; __entry->interface_stypes = upd->interface_stypes; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", global: 0x%.2x, intf: 0x%.2x", WIPHY_PR_ARG, WDEV_PR_ARG, __entry->global_stypes, __entry->interface_stypes) ); TRACE_EVENT(rdev_return_int_tx_rx, TP_PROTO(struct wiphy *wiphy, int ret, u32 tx, u32 rx), TP_ARGS(wiphy, ret, tx, rx), TP_STRUCT__entry( WIPHY_ENTRY __field(int, ret) __field(u32, tx) __field(u32, rx) ), TP_fast_assign( WIPHY_ASSIGN; __entry->ret = ret; __entry->tx = tx; __entry->rx = rx; ), TP_printk(WIPHY_PR_FMT ", returned %d, tx: %u, rx: %u", WIPHY_PR_ARG, __entry->ret, __entry->tx, __entry->rx) ); TRACE_EVENT(rdev_return_void_tx_rx, TP_PROTO(struct wiphy *wiphy, u32 tx, u32 tx_max, u32 rx, u32 rx_max), TP_ARGS(wiphy, tx, tx_max, rx, rx_max), TP_STRUCT__entry( WIPHY_ENTRY __field(u32, tx) __field(u32, tx_max) __field(u32, rx) __field(u32, rx_max) ), TP_fast_assign( WIPHY_ASSIGN; __entry->tx = tx; __entry->tx_max = tx_max; __entry->rx = rx; __entry->rx_max = rx_max; ), TP_printk(WIPHY_PR_FMT ", tx: %u, tx_max: %u, rx: %u, rx_max: %u ", WIPHY_PR_ARG, __entry->tx, __entry->tx_max, __entry->rx, __entry->rx_max) ); TRACE_EVENT(rdev_set_antenna, TP_PROTO(struct wiphy *wiphy, int radio_idx, u32 tx, u32 rx), TP_ARGS(wiphy, radio_idx, tx, rx), TP_STRUCT__entry( WIPHY_ENTRY __field(int, radio_idx) __field(u32, tx) __field(u32, rx) ), TP_fast_assign( WIPHY_ASSIGN; __entry->radio_idx = radio_idx; __entry->tx = tx; __entry->rx = rx; ), TP_printk(WIPHY_PR_FMT ", radio_idx: %d, tx: %u, rx: %u ", WIPHY_PR_ARG, __entry->radio_idx, __entry->tx, __entry->rx) ); DECLARE_EVENT_CLASS(wiphy_netdev_id_evt, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u64 id), TP_ARGS(wiphy, netdev, id), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(u64, id) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->id = id; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", id: %llu", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->id) ); DEFINE_EVENT(wiphy_netdev_id_evt, rdev_sched_scan_start, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u64 id), TP_ARGS(wiphy, netdev, id) ); DEFINE_EVENT(wiphy_netdev_id_evt, rdev_sched_scan_stop, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u64 id), TP_ARGS(wiphy, netdev, id) ); TRACE_EVENT(rdev_tdls_mgmt, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *peer, int link_id, u8 action_code, u8 
dialog_token, u16 status_code, u32 peer_capability, bool initiator, const u8 *buf, size_t len), TP_ARGS(wiphy, netdev, peer, link_id, action_code, dialog_token, status_code, peer_capability, initiator, buf, len), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(peer) __field(int, link_id) __field(u8, action_code) __field(u8, dialog_token) __field(u16, status_code) __field(u32, peer_capability) __field(bool, initiator) __dynamic_array(u8, buf, len) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(peer, peer); __entry->link_id = link_id; __entry->action_code = action_code; __entry->dialog_token = dialog_token; __entry->status_code = status_code; __entry->peer_capability = peer_capability; __entry->initiator = initiator; memcpy(__get_dynamic_array(buf), buf, len); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %pM" ", link_id: %d, action_code: %u " "dialog_token: %u, status_code: %u, peer_capability: %u " "initiator: %s buf: %#.2x ", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->peer, __entry->link_id, __entry->action_code, __entry->dialog_token, __entry->status_code, __entry->peer_capability, BOOL_TO_STR(__entry->initiator), ((u8 *)__get_dynamic_array(buf))[0]) ); TRACE_EVENT(rdev_dump_survey, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int _idx), TP_ARGS(wiphy, netdev, _idx), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(int, idx) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->idx = _idx; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", index: %d", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->idx) ); TRACE_EVENT(rdev_return_int_survey_info, TP_PROTO(struct wiphy *wiphy, int ret, struct survey_info *info), TP_ARGS(wiphy, ret, info), TP_STRUCT__entry( WIPHY_ENTRY CHAN_ENTRY __field(int, ret) __field(u64, time) __field(u64, time_busy) __field(u64, time_ext_busy) __field(u64, time_rx) __field(u64, time_tx) __field(u64, time_scan) __field(u32, filled) __field(s8, noise) ), TP_fast_assign( WIPHY_ASSIGN; CHAN_ASSIGN(info->channel); __entry->ret = ret; __entry->time = info->time; __entry->time_busy = info->time_busy; __entry->time_ext_busy = info->time_ext_busy; __entry->time_rx = info->time_rx; __entry->time_tx = info->time_tx; __entry->time_scan = info->time_scan; __entry->filled = info->filled; __entry->noise = info->noise; ), TP_printk(WIPHY_PR_FMT ", returned: %d, " CHAN_PR_FMT ", channel time: %llu, channel time busy: %llu, " "channel time extension busy: %llu, channel time rx: %llu, " "channel time tx: %llu, scan time: %llu, filled: %u, noise: %d", WIPHY_PR_ARG, __entry->ret, CHAN_PR_ARG, __entry->time, __entry->time_busy, __entry->time_ext_busy, __entry->time_rx, __entry->time_tx, __entry->time_scan, __entry->filled, __entry->noise) ); TRACE_EVENT(rdev_tdls_oper, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 *peer, enum nl80211_tdls_operation oper), TP_ARGS(wiphy, netdev, peer, oper), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(peer) __field(enum nl80211_tdls_operation, oper) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(peer, peer); __entry->oper = oper; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %pM, oper: %d", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->peer, __entry->oper) ); DECLARE_EVENT_CLASS(rdev_pmksa, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_pmksa *pmksa), TP_ARGS(wiphy, netdev, pmksa), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(bssid) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(bssid, pmksa->bssid); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: %pM", 
WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->bssid) ); TRACE_EVENT(rdev_probe_client, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *peer), TP_ARGS(wiphy, netdev, peer), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(peer) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(peer, peer); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %pM", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->peer) ); DEFINE_EVENT(rdev_pmksa, rdev_set_pmksa, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_pmksa *pmksa), TP_ARGS(wiphy, netdev, pmksa) ); DEFINE_EVENT(rdev_pmksa, rdev_del_pmksa, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_pmksa *pmksa), TP_ARGS(wiphy, netdev, pmksa) ); TRACE_EVENT(rdev_remain_on_channel, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, struct ieee80211_channel *chan, unsigned int duration), TP_ARGS(wiphy, wdev, chan, duration), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY CHAN_ENTRY __field(unsigned int, duration) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; CHAN_ASSIGN(chan); __entry->duration = duration; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", " CHAN_PR_FMT ", duration: %u", WIPHY_PR_ARG, WDEV_PR_ARG, CHAN_PR_ARG, __entry->duration) ); TRACE_EVENT(rdev_return_int_cookie, TP_PROTO(struct wiphy *wiphy, int ret, u64 cookie), TP_ARGS(wiphy, ret, cookie), TP_STRUCT__entry( WIPHY_ENTRY __field(int, ret) __field(u64, cookie) ), TP_fast_assign( WIPHY_ASSIGN; __entry->ret = ret; __entry->cookie = cookie; ), TP_printk(WIPHY_PR_FMT ", returned %d, cookie: %llu", WIPHY_PR_ARG, __entry->ret, __entry->cookie) ); TRACE_EVENT(rdev_cancel_remain_on_channel, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie), TP_ARGS(wiphy, wdev, cookie), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY __field(u64, cookie) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->cookie = cookie; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", cookie: %llu", WIPHY_PR_ARG, WDEV_PR_ARG, __entry->cookie) ); TRACE_EVENT(rdev_mgmt_tx, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_mgmt_tx_params *params), TP_ARGS(wiphy, wdev, params), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY CHAN_ENTRY __field(bool, offchan) __field(unsigned int, wait) __field(bool, no_cck) __field(bool, dont_wait_for_ack) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; CHAN_ASSIGN(params->chan); __entry->offchan = params->offchan; __entry->wait = params->wait; __entry->no_cck = params->no_cck; __entry->dont_wait_for_ack = params->dont_wait_for_ack; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", " CHAN_PR_FMT ", offchan: %s," " wait: %u, no cck: %s, dont wait for ack: %s", WIPHY_PR_ARG, WDEV_PR_ARG, CHAN_PR_ARG, BOOL_TO_STR(__entry->offchan), __entry->wait, BOOL_TO_STR(__entry->no_cck), BOOL_TO_STR(__entry->dont_wait_for_ack)) ); TRACE_EVENT(rdev_tx_control_port, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *buf, size_t len, const u8 *dest, __be16 proto, bool unencrypted, int link_id), TP_ARGS(wiphy, netdev, buf, len, dest, proto, unencrypted, link_id), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(dest) __field(__be16, proto) __field(bool, unencrypted) __field(int, link_id) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(dest, dest); __entry->proto = proto; __entry->unencrypted = unencrypted; __entry->link_id = link_id; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %pM," " proto: 0x%x, unencrypted: %s, link: %d", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->dest, be16_to_cpu(__entry->proto), 
BOOL_TO_STR(__entry->unencrypted), __entry->link_id) ); TRACE_EVENT(rdev_set_noack_map, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u16 noack_map), TP_ARGS(wiphy, netdev, noack_map), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(u16, noack_map) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->noack_map = noack_map; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", noack_map: %u", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->noack_map) ); DEFINE_EVENT(wiphy_wdev_link_evt, rdev_get_channel, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, unsigned int link_id), TP_ARGS(wiphy, wdev, link_id) ); TRACE_EVENT(rdev_return_chandef, TP_PROTO(struct wiphy *wiphy, int ret, struct cfg80211_chan_def *chandef), TP_ARGS(wiphy, ret, chandef), TP_STRUCT__entry( WIPHY_ENTRY __field(int, ret) CHAN_DEF_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; if (ret == 0) CHAN_DEF_ASSIGN(chandef); else CHAN_DEF_ASSIGN((struct cfg80211_chan_def *)NULL); __entry->ret = ret; ), TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", ret: %d", WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->ret) ); DEFINE_EVENT(wiphy_wdev_evt, rdev_start_p2p_device, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), TP_ARGS(wiphy, wdev) ); DEFINE_EVENT(wiphy_wdev_evt, rdev_stop_p2p_device, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), TP_ARGS(wiphy, wdev) ); TRACE_EVENT(rdev_start_nan, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_nan_conf *conf), TP_ARGS(wiphy, wdev, conf), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY __field(u8, master_pref) __field(u8, bands) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->master_pref = conf->master_pref; __entry->bands = conf->bands; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", master preference: %u, bands: 0x%0x", WIPHY_PR_ARG, WDEV_PR_ARG, __entry->master_pref, __entry->bands) ); TRACE_EVENT(rdev_nan_change_conf, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_nan_conf *conf, u32 changes), TP_ARGS(wiphy, wdev, conf, changes), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY __field(u8, master_pref) __field(u8, bands) __field(u32, changes) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->master_pref = conf->master_pref; __entry->bands = conf->bands; __entry->changes = changes; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", master preference: %u, bands: 0x%0x, changes: %x", WIPHY_PR_ARG, WDEV_PR_ARG, __entry->master_pref, __entry->bands, __entry->changes) ); DEFINE_EVENT(wiphy_wdev_evt, rdev_stop_nan, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), TP_ARGS(wiphy, wdev) ); TRACE_EVENT(rdev_add_nan_func, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, const struct cfg80211_nan_func *func), TP_ARGS(wiphy, wdev, func), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY __field(u8, func_type) __field(u64, cookie) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->func_type = func->type; __entry->cookie = func->cookie ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", type=%u, cookie=%llu", WIPHY_PR_ARG, WDEV_PR_ARG, __entry->func_type, __entry->cookie) ); TRACE_EVENT(rdev_del_nan_func, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie), TP_ARGS(wiphy, wdev, cookie), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY __field(u64, cookie) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->cookie = cookie; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", cookie=%llu", WIPHY_PR_ARG, WDEV_PR_ARG, __entry->cookie) ); TRACE_EVENT(rdev_set_mac_acl, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct 
cfg80211_acl_data *params), TP_ARGS(wiphy, netdev, params), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(u32, acl_policy) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->acl_policy = params->acl_policy; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", acl policy: %d", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->acl_policy) ); TRACE_EVENT(rdev_update_ft_ies, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_update_ft_ies_params *ftie), TP_ARGS(wiphy, netdev, ftie), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(u16, md) __dynamic_array(u8, ie, ftie->ie_len) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->md = ftie->md; memcpy(__get_dynamic_array(ie), ftie->ie, ftie->ie_len); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", md: 0x%x", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->md) ); TRACE_EVENT(rdev_crit_proto_start, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, enum nl80211_crit_proto_id protocol, u16 duration), TP_ARGS(wiphy, wdev, protocol, duration), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY __field(u16, proto) __field(u16, duration) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->proto = protocol; __entry->duration = duration; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", proto=%x, duration=%u", WIPHY_PR_ARG, WDEV_PR_ARG, __entry->proto, __entry->duration) ); TRACE_EVENT(rdev_crit_proto_stop, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), TP_ARGS(wiphy, wdev), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG) ); TRACE_EVENT(rdev_channel_switch, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_csa_settings *params), TP_ARGS(wiphy, netdev, params), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY CHAN_DEF_ENTRY __field(bool, radar_required) __field(bool, block_tx) __field(u8, count) __dynamic_array(u16, bcn_ofs, params->n_counter_offsets_beacon) __dynamic_array(u16, pres_ofs, params->n_counter_offsets_presp) __field(u8, link_id) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; CHAN_DEF_ASSIGN(&params->chandef); __entry->radar_required = params->radar_required; __entry->block_tx = params->block_tx; __entry->count = params->count; memcpy(__get_dynamic_array(bcn_ofs), params->counter_offsets_beacon, params->n_counter_offsets_beacon * sizeof(u16)); /* probe response offsets are optional */ if (params->n_counter_offsets_presp) memcpy(__get_dynamic_array(pres_ofs), params->counter_offsets_presp, params->n_counter_offsets_presp * sizeof(u16)); __entry->link_id = params->link_id; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", block_tx: %d, count: %u, radar_required: %d, link_id: %d", WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->block_tx, __entry->count, __entry->radar_required, __entry->link_id) ); TRACE_EVENT(rdev_set_qos_map, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_qos_map *qos_map), TP_ARGS(wiphy, netdev, qos_map), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY QOS_MAP_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; QOS_MAP_ASSIGN(qos_map); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", num_des: %u", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->num_des) ); TRACE_EVENT(rdev_set_ap_chanwidth, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, unsigned int link_id, struct cfg80211_chan_def *chandef), TP_ARGS(wiphy, netdev, link_id, chandef), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY CHAN_DEF_ENTRY __field(unsigned int, link_id) ), 
TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; CHAN_DEF_ASSIGN(chandef); __entry->link_id = link_id; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", link:%d", WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->link_id) ); TRACE_EVENT(rdev_add_tx_ts, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 tsid, const u8 *peer, u8 user_prio, u16 admitted_time), TP_ARGS(wiphy, netdev, tsid, peer, user_prio, admitted_time), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(peer) __field(u8, tsid) __field(u8, user_prio) __field(u16, admitted_time) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(peer, peer); __entry->tsid = tsid; __entry->user_prio = user_prio; __entry->admitted_time = admitted_time; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %pM, TSID %d, UP %d, time %d", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->peer, __entry->tsid, __entry->user_prio, __entry->admitted_time) ); TRACE_EVENT(rdev_del_tx_ts, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, u8 tsid, const u8 *peer), TP_ARGS(wiphy, netdev, tsid, peer), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(peer) __field(u8, tsid) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(peer, peer); __entry->tsid = tsid; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %pM, TSID %d", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->peer, __entry->tsid) ); TRACE_EVENT(rdev_tdls_channel_switch, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *addr, u8 oper_class, struct cfg80211_chan_def *chandef), TP_ARGS(wiphy, netdev, addr, oper_class, chandef), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(addr) __field(u8, oper_class) CHAN_DEF_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(addr, addr); CHAN_DEF_ASSIGN(chandef); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %pM" " oper class %d, " CHAN_DEF_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->addr, __entry->oper_class, CHAN_DEF_PR_ARG) ); TRACE_EVENT(rdev_tdls_cancel_channel_switch, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *addr), TP_ARGS(wiphy, netdev, addr), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(addr) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(addr, addr); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %pM", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->addr) ); TRACE_EVENT(rdev_set_pmk, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_pmk_conf *pmk_conf), TP_ARGS(wiphy, netdev, pmk_conf), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(aa) __field(u8, pmk_len) __field(u8, pmk_r0_name_len) __dynamic_array(u8, pmk, pmk_conf->pmk_len) __dynamic_array(u8, pmk_r0_name, WLAN_PMK_NAME_LEN) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(aa, pmk_conf->aa); __entry->pmk_len = pmk_conf->pmk_len; __entry->pmk_r0_name_len = pmk_conf->pmk_r0_name ? WLAN_PMK_NAME_LEN : 0; memcpy(__get_dynamic_array(pmk), pmk_conf->pmk, pmk_conf->pmk_len); memcpy(__get_dynamic_array(pmk_r0_name), pmk_conf->pmk_r0_name, pmk_conf->pmk_r0_name ? WLAN_PMK_NAME_LEN : 0); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %pM" "pmk_len=%u, pmk: %s pmk_r0_name: %s", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->aa, __entry->pmk_len, __print_array(__get_dynamic_array(pmk), __get_dynamic_array_len(pmk), 1), __entry->pmk_r0_name_len ? 
__print_array(__get_dynamic_array(pmk_r0_name), __get_dynamic_array_len(pmk_r0_name), 1) : "") ); TRACE_EVENT(rdev_del_pmk, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *aa), TP_ARGS(wiphy, netdev, aa), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(aa) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(aa, aa); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %pM", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->aa) ); TRACE_EVENT(rdev_external_auth, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_external_auth_params *params), TP_ARGS(wiphy, netdev, params), TP_STRUCT__entry(WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(bssid) __array(u8, ssid, IEEE80211_MAX_SSID_LEN + 1) __field(u16, status) MAC_ENTRY(mld_addr) ), TP_fast_assign(WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(bssid, params->bssid); memset(__entry->ssid, 0, IEEE80211_MAX_SSID_LEN + 1); memcpy(__entry->ssid, params->ssid.ssid, params->ssid.ssid_len); __entry->status = params->status; MAC_ASSIGN(mld_addr, params->mld_addr); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: %pM" ", ssid: %s, status: %u, mld_addr: %pM", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->bssid, __entry->ssid, __entry->status, __entry->mld_addr) ); TRACE_EVENT(rdev_start_radar_detection, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_chan_def *chandef, u32 cac_time_ms, int link_id), TP_ARGS(wiphy, netdev, chandef, cac_time_ms, link_id), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY CHAN_DEF_ENTRY __field(u32, cac_time_ms) __field(int, link_id) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; CHAN_DEF_ASSIGN(chandef); __entry->cac_time_ms = cac_time_ms; __entry->link_id = link_id; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", cac_time_ms=%u, link_id=%d", WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->cac_time_ms, __entry->link_id) ); TRACE_EVENT(rdev_set_mcast_rate, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int *mcast_rate), TP_ARGS(wiphy, netdev, mcast_rate), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __array(int, mcast_rate, NUM_NL80211_BANDS) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; memcpy(__entry->mcast_rate, mcast_rate, sizeof(int) * NUM_NL80211_BANDS); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " "mcast_rates [2.4GHz=0x%x, 5.2GHz=0x%x, 6GHz=0x%x, 60GHz=0x%x]", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->mcast_rate[NL80211_BAND_2GHZ], __entry->mcast_rate[NL80211_BAND_5GHZ], __entry->mcast_rate[NL80211_BAND_6GHZ], __entry->mcast_rate[NL80211_BAND_60GHZ]) ); TRACE_EVENT(rdev_set_coalesce, TP_PROTO(struct wiphy *wiphy, struct cfg80211_coalesce *coalesce), TP_ARGS(wiphy, coalesce), TP_STRUCT__entry( WIPHY_ENTRY __field(int, n_rules) ), TP_fast_assign( WIPHY_ASSIGN; __entry->n_rules = coalesce ? 
coalesce->n_rules : 0; ), TP_printk(WIPHY_PR_FMT ", n_rules=%d", WIPHY_PR_ARG, __entry->n_rules) ); DEFINE_EVENT(wiphy_wdev_evt, rdev_abort_scan, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), TP_ARGS(wiphy, wdev) ); TRACE_EVENT(rdev_set_multicast_to_unicast, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const bool enabled), TP_ARGS(wiphy, netdev, enabled), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(bool, enabled) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->enabled = enabled; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", unicast: %s", WIPHY_PR_ARG, NETDEV_PR_ARG, BOOL_TO_STR(__entry->enabled)) ); DEFINE_EVENT(wiphy_wdev_evt, rdev_get_txq_stats, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), TP_ARGS(wiphy, wdev) ); TRACE_EVENT(rdev_get_ftm_responder_stats, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_ftm_responder_stats *ftm_stats), TP_ARGS(wiphy, netdev, ftm_stats), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(u64, timestamp) __field(u32, success_num) __field(u32, partial_num) __field(u32, failed_num) __field(u32, asap_num) __field(u32, non_asap_num) __field(u64, duration) __field(u32, unknown_triggers) __field(u32, reschedule) __field(u32, out_of_window) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->success_num = ftm_stats->success_num; __entry->partial_num = ftm_stats->partial_num; __entry->failed_num = ftm_stats->failed_num; __entry->asap_num = ftm_stats->asap_num; __entry->non_asap_num = ftm_stats->non_asap_num; __entry->duration = ftm_stats->total_duration_ms; __entry->unknown_triggers = ftm_stats->unknown_triggers_num; __entry->reschedule = ftm_stats->reschedule_requests_num; __entry->out_of_window = ftm_stats->out_of_window_triggers_num; ), TP_printk(WIPHY_PR_FMT "Ftm responder stats: success %u, partial %u, " "failed %u, asap %u, non asap %u, total duration %llu, unknown " "triggers %u, rescheduled %u, out of window %u", WIPHY_PR_ARG, __entry->success_num, __entry->partial_num, __entry->failed_num, __entry->asap_num, __entry->non_asap_num, __entry->duration, __entry->unknown_triggers, __entry->reschedule, __entry->out_of_window) ); DEFINE_EVENT(wiphy_wdev_cookie_evt, rdev_start_pmsr, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie), TP_ARGS(wiphy, wdev, cookie) ); DEFINE_EVENT(wiphy_wdev_cookie_evt, rdev_abort_pmsr, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie), TP_ARGS(wiphy, wdev, cookie) ); TRACE_EVENT(rdev_set_fils_aad, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_fils_aad *fils_aad), TP_ARGS(wiphy, netdev, fils_aad), TP_STRUCT__entry(WIPHY_ENTRY NETDEV_ENTRY __array(u8, macaddr, ETH_ALEN) __field(u8, kek_len) ), TP_fast_assign(WIPHY_ASSIGN; NETDEV_ASSIGN; FILS_AAD_ASSIGN(fils_aad); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " FILS_AAD_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->macaddr, __entry->kek_len) ); TRACE_EVENT(rdev_update_owe_info, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_update_owe_info *owe_info), TP_ARGS(wiphy, netdev, owe_info), TP_STRUCT__entry(WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(peer) __field(u16, status) __dynamic_array(u8, ie, owe_info->ie_len)), TP_fast_assign(WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(peer, owe_info->peer); __entry->status = owe_info->status; memcpy(__get_dynamic_array(ie), owe_info->ie, owe_info->ie_len);), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: %pM" " status %d", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->peer, __entry->status) 
); TRACE_EVENT(rdev_probe_mesh_link, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *dest, const u8 *buf, size_t len), TP_ARGS(wiphy, netdev, dest, buf, len), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(dest) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(dest, dest); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", %pM", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->dest) ); TRACE_EVENT(rdev_set_tid_config, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_tid_config *tid_conf), TP_ARGS(wiphy, netdev, tid_conf), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(peer) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(peer, tid_conf->peer); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: %pM", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->peer) ); TRACE_EVENT(rdev_reset_tid_config, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *peer, u8 tids), TP_ARGS(wiphy, netdev, peer, tids), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(peer) __field(u8, tids) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(peer, peer); __entry->tids = tids; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: %pM, tids: 0x%x", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->peer, __entry->tids) ); TRACE_EVENT(rdev_set_sar_specs, TP_PROTO(struct wiphy *wiphy, struct cfg80211_sar_specs *sar), TP_ARGS(wiphy, sar), TP_STRUCT__entry( WIPHY_ENTRY __field(u16, type) __field(u16, num) ), TP_fast_assign( WIPHY_ASSIGN; __entry->type = sar->type; __entry->num = sar->num_sub_specs; ), TP_printk(WIPHY_PR_FMT ", Set type:%d, num_specs:%d", WIPHY_PR_ARG, __entry->type, __entry->num) ); TRACE_EVENT(rdev_color_change, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_color_change_settings *params), TP_ARGS(wiphy, netdev, params), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(u8, count) __field(u16, bcn_ofs) __field(u16, pres_ofs) __field(u8, link_id) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->count = params->count; __entry->bcn_ofs = params->counter_offset_beacon; __entry->pres_ofs = params->counter_offset_presp; __entry->link_id = params->link_id; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", count: %u, link_id: %d", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->count, __entry->link_id) ); TRACE_EVENT(rdev_set_radar_background, TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef), TP_ARGS(wiphy, chandef), TP_STRUCT__entry( WIPHY_ENTRY CHAN_DEF_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; CHAN_DEF_ASSIGN(chandef) ), TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT, WIPHY_PR_ARG, CHAN_DEF_PR_ARG) ); DEFINE_EVENT(wiphy_wdev_link_evt, rdev_add_intf_link, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, unsigned int link_id), TP_ARGS(wiphy, wdev, link_id) ); DEFINE_EVENT(wiphy_wdev_link_evt, rdev_del_intf_link, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, unsigned int link_id), TP_ARGS(wiphy, wdev, link_id) ); TRACE_EVENT(rdev_del_link_station, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct link_station_del_parameters *params), TP_ARGS(wiphy, netdev, params), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __array(u8, mld_mac, 6) __field(u32, link_id) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; memset(__entry->mld_mac, 0, 6); if (params->mld_mac) memcpy(__entry->mld_mac, params->mld_mac, 6); __entry->link_id = params->link_id; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: %pM" ", link id: %u", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->mld_mac, __entry->link_id) ); 
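/*
 * Illustrative sketch (not part of this header): the rdev_* events above are
 * normally fired from thin wrappers around the driver callbacks, in the style
 * of net/wireless/rdev-ops.h -- trace the call, invoke the driver op, then
 * trace the return value (rdev_return_int is declared earlier in this header).
 * The wrapper below is a simplified example, not the actual kernel code.
 */
static inline int
example_rdev_del_link_station(struct cfg80211_registered_device *rdev,
			      struct net_device *dev,
			      struct link_station_del_parameters *params)
{
	int ret;

	trace_rdev_del_link_station(&rdev->wiphy, dev, params);
	ret = rdev->ops->del_link_station(&rdev->wiphy, dev, params);
	trace_rdev_return_int(&rdev->wiphy, ret);
	return ret;
}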
TRACE_EVENT(rdev_set_hw_timestamp, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_set_hw_timestamp *hwts), TP_ARGS(wiphy, netdev, hwts), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(macaddr) __field(bool, enable) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(macaddr, hwts->macaddr); __entry->enable = hwts->enable; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", mac %pM, enable: %u", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->macaddr, __entry->enable) ); TRACE_EVENT(rdev_set_ttlm, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_ttlm_params *params), TP_ARGS(wiphy, netdev, params), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __array(u8, dlink, sizeof(u16) * 8) __array(u8, ulink, sizeof(u16) * 8) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; memcpy(__entry->dlink, params->dlink, sizeof(params->dlink)); memcpy(__entry->ulink, params->ulink, sizeof(params->ulink)); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT, WIPHY_PR_ARG, NETDEV_PR_ARG) ); TRACE_EVENT(rdev_set_epcs, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, bool val), TP_ARGS(wiphy, netdev, val), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(bool, val) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; __entry->val = val; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", config=%u", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->val) ); /************************************************************* * cfg80211 exported functions traces * *************************************************************/ TRACE_EVENT(cfg80211_return_bool, TP_PROTO(bool ret), TP_ARGS(ret), TP_STRUCT__entry( __field(bool, ret) ), TP_fast_assign( __entry->ret = ret; ), TP_printk("returned %s", BOOL_TO_STR(__entry->ret)) ); DECLARE_EVENT_CLASS(cfg80211_netdev_mac_evt, TP_PROTO(struct net_device *netdev, const u8 *macaddr), TP_ARGS(netdev, macaddr), TP_STRUCT__entry( NETDEV_ENTRY MAC_ENTRY(macaddr) ), TP_fast_assign( NETDEV_ASSIGN; MAC_ASSIGN(macaddr, macaddr); ), TP_printk(NETDEV_PR_FMT ", mac: %pM", NETDEV_PR_ARG, __entry->macaddr) ); DEFINE_EVENT(cfg80211_netdev_mac_evt, cfg80211_notify_new_peer_candidate, TP_PROTO(struct net_device *netdev, const u8 *macaddr), TP_ARGS(netdev, macaddr) ); TRACE_EVENT(cfg80211_send_rx_assoc, TP_PROTO(struct net_device *netdev, const struct cfg80211_rx_assoc_resp_data *data), TP_ARGS(netdev, data), TP_STRUCT__entry( NETDEV_ENTRY MAC_ENTRY(ap_addr) ), TP_fast_assign( NETDEV_ASSIGN; MAC_ASSIGN(ap_addr, data->ap_mld_addr ?: data->links[0].bss->bssid); ), TP_printk(NETDEV_PR_FMT ", %pM", NETDEV_PR_ARG, __entry->ap_addr) ); DECLARE_EVENT_CLASS(netdev_frame_event, TP_PROTO(struct net_device *netdev, const u8 *buf, int len), TP_ARGS(netdev, buf, len), TP_STRUCT__entry( NETDEV_ENTRY __dynamic_array(u8, frame, len) ), TP_fast_assign( NETDEV_ASSIGN; memcpy(__get_dynamic_array(frame), buf, len); ), TP_printk(NETDEV_PR_FMT ", ftype:0x%.2x", NETDEV_PR_ARG, le16_to_cpup((__le16 *)__get_dynamic_array(frame))) ); DEFINE_EVENT(netdev_frame_event, cfg80211_rx_unprot_mlme_mgmt, TP_PROTO(struct net_device *netdev, const u8 *buf, int len), TP_ARGS(netdev, buf, len) ); DEFINE_EVENT(netdev_frame_event, cfg80211_rx_mlme_mgmt, TP_PROTO(struct net_device *netdev, const u8 *buf, int len), TP_ARGS(netdev, buf, len) ); TRACE_EVENT(cfg80211_tx_mlme_mgmt, TP_PROTO(struct net_device *netdev, const u8 *buf, int len, bool reconnect), TP_ARGS(netdev, buf, len, reconnect), TP_STRUCT__entry( NETDEV_ENTRY __dynamic_array(u8, frame, len) __field(int, reconnect) ), TP_fast_assign( NETDEV_ASSIGN; 
memcpy(__get_dynamic_array(frame), buf, len); __entry->reconnect = reconnect; ), TP_printk(NETDEV_PR_FMT ", ftype:0x%.2x reconnect:%d", NETDEV_PR_ARG, le16_to_cpup((__le16 *)__get_dynamic_array(frame)), __entry->reconnect) ); DECLARE_EVENT_CLASS(netdev_mac_evt, TP_PROTO(struct net_device *netdev, const u8 *mac), TP_ARGS(netdev, mac), TP_STRUCT__entry( NETDEV_ENTRY MAC_ENTRY(mac) ), TP_fast_assign( NETDEV_ASSIGN; MAC_ASSIGN(mac, mac) ), TP_printk(NETDEV_PR_FMT ", mac: %pM", NETDEV_PR_ARG, __entry->mac) ); DEFINE_EVENT(netdev_mac_evt, cfg80211_send_auth_timeout, TP_PROTO(struct net_device *netdev, const u8 *mac), TP_ARGS(netdev, mac) ); TRACE_EVENT(cfg80211_send_assoc_failure, TP_PROTO(struct net_device *netdev, struct cfg80211_assoc_failure *data), TP_ARGS(netdev, data), TP_STRUCT__entry( NETDEV_ENTRY MAC_ENTRY(ap_addr) __field(bool, timeout) ), TP_fast_assign( NETDEV_ASSIGN; MAC_ASSIGN(ap_addr, data->ap_mld_addr ?: data->bss[0]->bssid); __entry->timeout = data->timeout; ), TP_printk(NETDEV_PR_FMT ", mac: %pM, timeout: %d", NETDEV_PR_ARG, __entry->ap_addr, __entry->timeout) ); TRACE_EVENT(cfg80211_michael_mic_failure, TP_PROTO(struct net_device *netdev, const u8 *addr, enum nl80211_key_type key_type, int key_id, const u8 *tsc), TP_ARGS(netdev, addr, key_type, key_id, tsc), TP_STRUCT__entry( NETDEV_ENTRY MAC_ENTRY(addr) __field(enum nl80211_key_type, key_type) __field(int, key_id) __array(u8, tsc, 6) ), TP_fast_assign( NETDEV_ASSIGN; MAC_ASSIGN(addr, addr); __entry->key_type = key_type; __entry->key_id = key_id; if (tsc) memcpy(__entry->tsc, tsc, 6); ), TP_printk(NETDEV_PR_FMT ", %pM, key type: %d, key id: %d, tsc: %pm", NETDEV_PR_ARG, __entry->addr, __entry->key_type, __entry->key_id, __entry->tsc) ); TRACE_EVENT(cfg80211_ready_on_channel, TP_PROTO(struct wireless_dev *wdev, u64 cookie, struct ieee80211_channel *chan, unsigned int duration), TP_ARGS(wdev, cookie, chan, duration), TP_STRUCT__entry( WDEV_ENTRY __field(u64, cookie) CHAN_ENTRY __field(unsigned int, duration) ), TP_fast_assign( WDEV_ASSIGN; __entry->cookie = cookie; CHAN_ASSIGN(chan); __entry->duration = duration; ), TP_printk(WDEV_PR_FMT ", cookie: %llu, " CHAN_PR_FMT ", duration: %u", WDEV_PR_ARG, __entry->cookie, CHAN_PR_ARG, __entry->duration) ); TRACE_EVENT(cfg80211_ready_on_channel_expired, TP_PROTO(struct wireless_dev *wdev, u64 cookie, struct ieee80211_channel *chan), TP_ARGS(wdev, cookie, chan), TP_STRUCT__entry( WDEV_ENTRY __field(u64, cookie) CHAN_ENTRY ), TP_fast_assign( WDEV_ASSIGN; __entry->cookie = cookie; CHAN_ASSIGN(chan); ), TP_printk(WDEV_PR_FMT ", cookie: %llu, " CHAN_PR_FMT, WDEV_PR_ARG, __entry->cookie, CHAN_PR_ARG) ); TRACE_EVENT(cfg80211_tx_mgmt_expired, TP_PROTO(struct wireless_dev *wdev, u64 cookie, struct ieee80211_channel *chan), TP_ARGS(wdev, cookie, chan), TP_STRUCT__entry( WDEV_ENTRY __field(u64, cookie) CHAN_ENTRY ), TP_fast_assign( WDEV_ASSIGN; __entry->cookie = cookie; CHAN_ASSIGN(chan); ), TP_printk(WDEV_PR_FMT ", cookie: %llu, " CHAN_PR_FMT, WDEV_PR_ARG, __entry->cookie, CHAN_PR_ARG) ); TRACE_EVENT(cfg80211_new_sta, TP_PROTO(struct net_device *netdev, const u8 *mac_addr, struct station_info *sinfo), TP_ARGS(netdev, mac_addr, sinfo), TP_STRUCT__entry( NETDEV_ENTRY MAC_ENTRY(mac_addr) SINFO_ENTRY ), TP_fast_assign( NETDEV_ASSIGN; MAC_ASSIGN(mac_addr, mac_addr); SINFO_ASSIGN; ), TP_printk(NETDEV_PR_FMT ", %pM", NETDEV_PR_ARG, __entry->mac_addr) ); DEFINE_EVENT(cfg80211_netdev_mac_evt, cfg80211_del_sta, TP_PROTO(struct net_device *netdev, const u8 *macaddr), TP_ARGS(netdev, macaddr) ); 
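/*
 * Illustrative only: DEFINE_EVENT() reuses an existing event class, so
 * cfg80211_del_sta above shares the cfg80211_netdev_mac_evt record layout and
 * only adds a new trace_cfg80211_del_sta() emission point.  A hedged sketch
 * of a caller (simplified; the real notification path lives in nl80211):
 */
static void example_notify_del_sta(struct net_device *dev, const u8 *mac_addr)
{
	/* Compiles down to a disabled static branch unless the event is on. */
	trace_cfg80211_del_sta(dev, mac_addr);
}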
TRACE_EVENT(cfg80211_rx_mgmt, TP_PROTO(struct wireless_dev *wdev, struct cfg80211_rx_info *info), TP_ARGS(wdev, info), TP_STRUCT__entry( WDEV_ENTRY __field(int, freq) __field(int, sig_dbm) ), TP_fast_assign( WDEV_ASSIGN; __entry->freq = info->freq; __entry->sig_dbm = info->sig_dbm; ), TP_printk(WDEV_PR_FMT ", freq: "KHZ_F", sig dbm: %d", WDEV_PR_ARG, PR_KHZ(__entry->freq), __entry->sig_dbm) ); TRACE_EVENT(cfg80211_mgmt_tx_status, TP_PROTO(struct wireless_dev *wdev, u64 cookie, bool ack), TP_ARGS(wdev, cookie, ack), TP_STRUCT__entry( WDEV_ENTRY __field(u64, cookie) __field(bool, ack) ), TP_fast_assign( WDEV_ASSIGN; __entry->cookie = cookie; __entry->ack = ack; ), TP_printk(WDEV_PR_FMT", cookie: %llu, ack: %s", WDEV_PR_ARG, __entry->cookie, BOOL_TO_STR(__entry->ack)) ); TRACE_EVENT(cfg80211_control_port_tx_status, TP_PROTO(struct wireless_dev *wdev, u64 cookie, bool ack), TP_ARGS(wdev, cookie, ack), TP_STRUCT__entry( WDEV_ENTRY __field(u64, cookie) __field(bool, ack) ), TP_fast_assign( WDEV_ASSIGN; __entry->cookie = cookie; __entry->ack = ack; ), TP_printk(WDEV_PR_FMT", cookie: %llu, ack: %s", WDEV_PR_ARG, __entry->cookie, BOOL_TO_STR(__entry->ack)) ); TRACE_EVENT(cfg80211_rx_control_port, TP_PROTO(struct net_device *netdev, struct sk_buff *skb, bool unencrypted, int link_id), TP_ARGS(netdev, skb, unencrypted, link_id), TP_STRUCT__entry( NETDEV_ENTRY __field(int, len) MAC_ENTRY(from) __field(u16, proto) __field(bool, unencrypted) __field(int, link_id) ), TP_fast_assign( NETDEV_ASSIGN; __entry->len = skb->len; MAC_ASSIGN(from, eth_hdr(skb)->h_source); __entry->proto = be16_to_cpu(skb->protocol); __entry->unencrypted = unencrypted; __entry->link_id = link_id; ), TP_printk(NETDEV_PR_FMT ", len=%d, %pM, proto: 0x%x, unencrypted: %s, link: %d", NETDEV_PR_ARG, __entry->len, __entry->from, __entry->proto, BOOL_TO_STR(__entry->unencrypted), __entry->link_id) ); TRACE_EVENT(cfg80211_cqm_rssi_notify, TP_PROTO(struct net_device *netdev, enum nl80211_cqm_rssi_threshold_event rssi_event, s32 rssi_level), TP_ARGS(netdev, rssi_event, rssi_level), TP_STRUCT__entry( NETDEV_ENTRY __field(enum nl80211_cqm_rssi_threshold_event, rssi_event) __field(s32, rssi_level) ), TP_fast_assign( NETDEV_ASSIGN; __entry->rssi_event = rssi_event; __entry->rssi_level = rssi_level; ), TP_printk(NETDEV_PR_FMT ", rssi event: %d, level: %d", NETDEV_PR_ARG, __entry->rssi_event, __entry->rssi_level) ); TRACE_EVENT(cfg80211_reg_can_beacon, TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, enum nl80211_iftype iftype, u32 prohibited_flags, u32 permitting_flags), TP_ARGS(wiphy, chandef, iftype, prohibited_flags, permitting_flags), TP_STRUCT__entry( WIPHY_ENTRY CHAN_DEF_ENTRY __field(enum nl80211_iftype, iftype) __field(u32, prohibited_flags) __field(u32, permitting_flags) ), TP_fast_assign( WIPHY_ASSIGN; CHAN_DEF_ASSIGN(chandef); __entry->iftype = iftype; __entry->prohibited_flags = prohibited_flags; __entry->permitting_flags = permitting_flags; ), TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d prohibited_flags=0x%x permitting_flags=0x%x", WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype, __entry->prohibited_flags, __entry->permitting_flags) ); TRACE_EVENT(cfg80211_ch_switch_notify, TP_PROTO(struct net_device *netdev, struct cfg80211_chan_def *chandef, unsigned int link_id), TP_ARGS(netdev, chandef, link_id), TP_STRUCT__entry( NETDEV_ENTRY CHAN_DEF_ENTRY __field(unsigned int, link_id) ), TP_fast_assign( NETDEV_ASSIGN; CHAN_DEF_ASSIGN(chandef); __entry->link_id = link_id; ), TP_printk(NETDEV_PR_FMT ", " 
CHAN_DEF_PR_FMT ", link:%d", NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->link_id) ); TRACE_EVENT(cfg80211_ch_switch_started_notify, TP_PROTO(struct net_device *netdev, struct cfg80211_chan_def *chandef, unsigned int link_id), TP_ARGS(netdev, chandef, link_id), TP_STRUCT__entry( NETDEV_ENTRY CHAN_DEF_ENTRY __field(unsigned int, link_id) ), TP_fast_assign( NETDEV_ASSIGN; CHAN_DEF_ASSIGN(chandef); __entry->link_id = link_id; ), TP_printk(NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT ", link:%d", NETDEV_PR_ARG, CHAN_DEF_PR_ARG, __entry->link_id) ); TRACE_EVENT(cfg80211_radar_event, TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, bool offchan), TP_ARGS(wiphy, chandef, offchan), TP_STRUCT__entry( WIPHY_ENTRY CHAN_DEF_ENTRY __field(bool, offchan) ), TP_fast_assign( WIPHY_ASSIGN; CHAN_DEF_ASSIGN(chandef); __entry->offchan = offchan; ), TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", offchan %d", WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->offchan) ); TRACE_EVENT(cfg80211_cac_event, TP_PROTO(struct net_device *netdev, enum nl80211_radar_event evt, unsigned int link_id), TP_ARGS(netdev, evt, link_id), TP_STRUCT__entry( NETDEV_ENTRY __field(enum nl80211_radar_event, evt) __field(unsigned int, link_id) ), TP_fast_assign( NETDEV_ASSIGN; __entry->evt = evt; __entry->link_id = link_id; ), TP_printk(NETDEV_PR_FMT ", event: %d, link_id=%u", NETDEV_PR_ARG, __entry->evt, __entry->link_id) ); DECLARE_EVENT_CLASS(cfg80211_rx_evt, TP_PROTO(struct net_device *netdev, const u8 *addr, int link_id), TP_ARGS(netdev, addr, link_id), TP_STRUCT__entry( NETDEV_ENTRY MAC_ENTRY(addr) __field(int, link_id) ), TP_fast_assign( NETDEV_ASSIGN; MAC_ASSIGN(addr, addr); __entry->link_id = link_id; ), TP_printk(NETDEV_PR_FMT ", %pM, link_id:%d", NETDEV_PR_ARG, __entry->addr, __entry->link_id) ); DEFINE_EVENT(cfg80211_rx_evt, cfg80211_rx_spurious_frame, TP_PROTO(struct net_device *netdev, const u8 *addr, int link_id), TP_ARGS(netdev, addr, link_id) ); DEFINE_EVENT(cfg80211_rx_evt, cfg80211_rx_unexpected_4addr_frame, TP_PROTO(struct net_device *netdev, const u8 *addr, int link_id), TP_ARGS(netdev, addr, link_id) ); TRACE_EVENT(cfg80211_ibss_joined, TP_PROTO(struct net_device *netdev, const u8 *bssid, struct ieee80211_channel *channel), TP_ARGS(netdev, bssid, channel), TP_STRUCT__entry( NETDEV_ENTRY MAC_ENTRY(bssid) CHAN_ENTRY ), TP_fast_assign( NETDEV_ASSIGN; MAC_ASSIGN(bssid, bssid); CHAN_ASSIGN(channel); ), TP_printk(NETDEV_PR_FMT ", bssid: %pM, " CHAN_PR_FMT, NETDEV_PR_ARG, __entry->bssid, CHAN_PR_ARG) ); TRACE_EVENT(cfg80211_probe_status, TP_PROTO(struct net_device *netdev, const u8 *addr, u64 cookie, bool acked), TP_ARGS(netdev, addr, cookie, acked), TP_STRUCT__entry( NETDEV_ENTRY MAC_ENTRY(addr) __field(u64, cookie) __field(bool, acked) ), TP_fast_assign( NETDEV_ASSIGN; MAC_ASSIGN(addr, addr); __entry->cookie = cookie; __entry->acked = acked; ), TP_printk(NETDEV_PR_FMT " addr:%pM, cookie: %llu, acked: %s", NETDEV_PR_ARG, __entry->addr, __entry->cookie, BOOL_TO_STR(__entry->acked)) ); TRACE_EVENT(cfg80211_cqm_pktloss_notify, TP_PROTO(struct net_device *netdev, const u8 *peer, u32 num_packets), TP_ARGS(netdev, peer, num_packets), TP_STRUCT__entry( NETDEV_ENTRY MAC_ENTRY(peer) __field(u32, num_packets) ), TP_fast_assign( NETDEV_ASSIGN; MAC_ASSIGN(peer, peer); __entry->num_packets = num_packets; ), TP_printk(NETDEV_PR_FMT ", peer: %pM, num of lost packets: %u", NETDEV_PR_ARG, __entry->peer, __entry->num_packets) ); DEFINE_EVENT(cfg80211_netdev_mac_evt, cfg80211_gtk_rekey_notify, TP_PROTO(struct net_device *netdev, const u8 
*macaddr), TP_ARGS(netdev, macaddr) ); TRACE_EVENT(cfg80211_pmksa_candidate_notify, TP_PROTO(struct net_device *netdev, int index, const u8 *bssid, bool preauth), TP_ARGS(netdev, index, bssid, preauth), TP_STRUCT__entry( NETDEV_ENTRY __field(int, index) MAC_ENTRY(bssid) __field(bool, preauth) ), TP_fast_assign( NETDEV_ASSIGN; __entry->index = index; MAC_ASSIGN(bssid, bssid); __entry->preauth = preauth; ), TP_printk(NETDEV_PR_FMT ", index:%d, bssid: %pM, pre auth: %s", NETDEV_PR_ARG, __entry->index, __entry->bssid, BOOL_TO_STR(__entry->preauth)) ); TRACE_EVENT(cfg80211_report_obss_beacon, TP_PROTO(struct wiphy *wiphy, const u8 *frame, size_t len, int freq, int sig_dbm), TP_ARGS(wiphy, frame, len, freq, sig_dbm), TP_STRUCT__entry( WIPHY_ENTRY __field(int, freq) __field(int, sig_dbm) ), TP_fast_assign( WIPHY_ASSIGN; __entry->freq = freq; __entry->sig_dbm = sig_dbm; ), TP_printk(WIPHY_PR_FMT ", freq: "KHZ_F", sig_dbm: %d", WIPHY_PR_ARG, PR_KHZ(__entry->freq), __entry->sig_dbm) ); TRACE_EVENT(cfg80211_tdls_oper_request, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *peer, enum nl80211_tdls_operation oper, u16 reason_code), TP_ARGS(wiphy, netdev, peer, oper, reason_code), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(peer) __field(enum nl80211_tdls_operation, oper) __field(u16, reason_code) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(peer, peer); __entry->oper = oper; __entry->reason_code = reason_code; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: %pM, oper: %d, reason_code %u", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->peer, __entry->oper, __entry->reason_code) ); TRACE_EVENT(cfg80211_scan_done, TP_PROTO(struct cfg80211_scan_request_int *request, struct cfg80211_scan_info *info), TP_ARGS(request, info), TP_STRUCT__entry( __field(u32, n_channels) __dynamic_array(u8, ie, request ? request->req.ie_len : 0) __array(u32, rates, NUM_NL80211_BANDS) __field(u32, wdev_id) MAC_ENTRY(wiphy_mac) __field(bool, no_cck) __field(bool, aborted) __field(u64, scan_start_tsf) MAC_ENTRY(tsf_bssid) ), TP_fast_assign( if (request) { memcpy(__get_dynamic_array(ie), request->req.ie, request->req.ie_len); memcpy(__entry->rates, request->req.rates, NUM_NL80211_BANDS); __entry->wdev_id = request->req.wdev ? 
request->req.wdev->identifier : 0; if (request->req.wiphy) MAC_ASSIGN(wiphy_mac, request->req.wiphy->perm_addr); __entry->no_cck = request->req.no_cck; } if (info) { __entry->aborted = info->aborted; __entry->scan_start_tsf = info->scan_start_tsf; MAC_ASSIGN(tsf_bssid, info->tsf_bssid); } ), TP_printk("aborted: %s, scan start (TSF): %llu, tsf_bssid: %pM", BOOL_TO_STR(__entry->aborted), (unsigned long long)__entry->scan_start_tsf, __entry->tsf_bssid) ); DECLARE_EVENT_CLASS(wiphy_id_evt, TP_PROTO(struct wiphy *wiphy, u64 id), TP_ARGS(wiphy, id), TP_STRUCT__entry( WIPHY_ENTRY __field(u64, id) ), TP_fast_assign( WIPHY_ASSIGN; __entry->id = id; ), TP_printk(WIPHY_PR_FMT ", id: %llu", WIPHY_PR_ARG, __entry->id) ); DEFINE_EVENT(wiphy_id_evt, cfg80211_sched_scan_stopped, TP_PROTO(struct wiphy *wiphy, u64 id), TP_ARGS(wiphy, id) ); DEFINE_EVENT(wiphy_id_evt, cfg80211_sched_scan_results, TP_PROTO(struct wiphy *wiphy, u64 id), TP_ARGS(wiphy, id) ); TRACE_EVENT(cfg80211_get_bss, TP_PROTO(struct wiphy *wiphy, struct ieee80211_channel *channel, const u8 *bssid, const u8 *ssid, size_t ssid_len, enum ieee80211_bss_type bss_type, enum ieee80211_privacy privacy), TP_ARGS(wiphy, channel, bssid, ssid, ssid_len, bss_type, privacy), TP_STRUCT__entry( WIPHY_ENTRY CHAN_ENTRY MAC_ENTRY(bssid) __dynamic_array(u8, ssid, ssid_len) __field(enum ieee80211_bss_type, bss_type) __field(enum ieee80211_privacy, privacy) ), TP_fast_assign( WIPHY_ASSIGN; CHAN_ASSIGN(channel); MAC_ASSIGN(bssid, bssid); memcpy(__get_dynamic_array(ssid), ssid, ssid_len); __entry->bss_type = bss_type; __entry->privacy = privacy; ), TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT ", %pM" ", buf: %#.2x, bss_type: %d, privacy: %d", WIPHY_PR_ARG, CHAN_PR_ARG, __entry->bssid, ((u8 *)__get_dynamic_array(ssid))[0], __entry->bss_type, __entry->privacy) ); TRACE_EVENT(cfg80211_inform_bss_frame, TP_PROTO(struct wiphy *wiphy, struct cfg80211_inform_bss *data, struct ieee80211_mgmt *mgmt, size_t len), TP_ARGS(wiphy, data, mgmt, len), TP_STRUCT__entry( WIPHY_ENTRY CHAN_ENTRY __dynamic_array(u8, mgmt, len) __field(s32, signal) __field(u64, ts_boottime) __field(u64, parent_tsf) MAC_ENTRY(parent_bssid) ), TP_fast_assign( WIPHY_ASSIGN; CHAN_ASSIGN(data->chan); if (mgmt) memcpy(__get_dynamic_array(mgmt), mgmt, len); __entry->signal = data->signal; __entry->ts_boottime = data->boottime_ns; __entry->parent_tsf = data->parent_tsf; MAC_ASSIGN(parent_bssid, data->parent_bssid); ), TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT "signal: %d, tsb:%llu, detect_tsf:%llu, tsf_bssid: %pM", WIPHY_PR_ARG, CHAN_PR_ARG, __entry->signal, (unsigned long long)__entry->ts_boottime, (unsigned long long)__entry->parent_tsf, __entry->parent_bssid) ); DECLARE_EVENT_CLASS(cfg80211_bss_evt, TP_PROTO(struct cfg80211_bss *pub), TP_ARGS(pub), TP_STRUCT__entry( MAC_ENTRY(bssid) CHAN_ENTRY ), TP_fast_assign( MAC_ASSIGN(bssid, pub->bssid); CHAN_ASSIGN(pub->channel); ), TP_printk("%pM, " CHAN_PR_FMT, __entry->bssid, CHAN_PR_ARG) ); DEFINE_EVENT(cfg80211_bss_evt, cfg80211_return_bss, TP_PROTO(struct cfg80211_bss *pub), TP_ARGS(pub) ); TRACE_EVENT(cfg80211_report_wowlan_wakeup, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_wowlan_wakeup *wakeup), TP_ARGS(wiphy, wdev, wakeup), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY __field(bool, non_wireless) __field(bool, disconnect) __field(bool, magic_pkt) __field(bool, gtk_rekey_failure) __field(bool, eap_identity_req) __field(bool, four_way_handshake) __field(bool, rfkill_release) __field(s32, pattern_idx) __field(u32, packet_len) 
__dynamic_array(u8, packet, wakeup ? wakeup->packet_present_len : 0) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->non_wireless = !wakeup; __entry->disconnect = wakeup ? wakeup->disconnect : false; __entry->magic_pkt = wakeup ? wakeup->magic_pkt : false; __entry->gtk_rekey_failure = wakeup ? wakeup->gtk_rekey_failure : false; __entry->eap_identity_req = wakeup ? wakeup->eap_identity_req : false; __entry->four_way_handshake = wakeup ? wakeup->four_way_handshake : false; __entry->rfkill_release = wakeup ? wakeup->rfkill_release : false; __entry->pattern_idx = wakeup ? wakeup->pattern_idx : false; __entry->packet_len = wakeup ? wakeup->packet_len : false; if (wakeup && wakeup->packet && wakeup->packet_present_len) memcpy(__get_dynamic_array(packet), wakeup->packet, wakeup->packet_present_len); ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG) ); TRACE_EVENT(cfg80211_ft_event, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_ft_event_params *ft_event), TP_ARGS(wiphy, netdev, ft_event), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __dynamic_array(u8, ies, ft_event->ies_len) MAC_ENTRY(target_ap) __dynamic_array(u8, ric_ies, ft_event->ric_ies_len) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; if (ft_event->ies) memcpy(__get_dynamic_array(ies), ft_event->ies, ft_event->ies_len); MAC_ASSIGN(target_ap, ft_event->target_ap); if (ft_event->ric_ies) memcpy(__get_dynamic_array(ric_ies), ft_event->ric_ies, ft_event->ric_ies_len); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", target_ap: %pM", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->target_ap) ); TRACE_EVENT(cfg80211_stop_iface, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), TP_ARGS(wiphy, wdev), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG) ); TRACE_EVENT(cfg80211_pmsr_report, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie, const u8 *addr), TP_ARGS(wiphy, wdev, cookie, addr), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY __field(u64, cookie) MAC_ENTRY(addr) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->cookie = cookie; MAC_ASSIGN(addr, addr); ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", cookie:%lld, %pM", WIPHY_PR_ARG, WDEV_PR_ARG, (unsigned long long)__entry->cookie, __entry->addr) ); TRACE_EVENT(cfg80211_pmsr_complete, TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie), TP_ARGS(wiphy, wdev, cookie), TP_STRUCT__entry( WIPHY_ENTRY WDEV_ENTRY __field(u64, cookie) ), TP_fast_assign( WIPHY_ASSIGN; WDEV_ASSIGN; __entry->cookie = cookie; ), TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", cookie:%lld", WIPHY_PR_ARG, WDEV_PR_ARG, (unsigned long long)__entry->cookie) ); TRACE_EVENT(cfg80211_update_owe_info_event, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_update_owe_info *owe_info), TP_ARGS(wiphy, netdev, owe_info), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY MAC_ENTRY(peer) __dynamic_array(u8, ie, owe_info->ie_len) __field(int, assoc_link_id) MAC_ENTRY(peer_mld_addr) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; MAC_ASSIGN(peer, owe_info->peer); memcpy(__get_dynamic_array(ie), owe_info->ie, owe_info->ie_len); __entry->assoc_link_id = owe_info->assoc_link_id; MAC_ASSIGN(peer_mld_addr, owe_info->peer_mld_addr); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", peer: %pM," " assoc_link_id: %d, peer_mld_addr: %pM", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->peer, __entry->assoc_link_id, __entry->peer_mld_addr) ); 
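/*
 * For orientation, a sketch of what the MAC_ENTRY()/MAC_ASSIGN() helpers used
 * throughout this header plausibly expand to: a fixed ETH_ALEN byte array in
 * the ring-buffer record, zero-filled when the source pointer is NULL.  The
 * real definitions live near the top of this file and may differ in detail;
 * the EXAMPLE_ names below are assumptions for illustration only.
 */
#define EXAMPLE_MAC_ENTRY(entry_mac)	__array(u8, entry_mac, ETH_ALEN)
#define EXAMPLE_MAC_ASSIGN(entry_mac, given_mac) do {			\
		if (given_mac)						\
			memcpy(__entry->entry_mac, given_mac, ETH_ALEN);\
		else							\
			eth_zero_addr(__entry->entry_mac);		\
	} while (0)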
TRACE_EVENT(cfg80211_bss_color_notify, TP_PROTO(struct net_device *netdev, enum nl80211_commands cmd, u8 count, u64 color_bitmap), TP_ARGS(netdev, cmd, count, color_bitmap), TP_STRUCT__entry( NETDEV_ENTRY __field(u32, cmd) __field(u8, count) __field(u64, color_bitmap) ), TP_fast_assign( NETDEV_ASSIGN; __entry->cmd = cmd; __entry->count = count; __entry->color_bitmap = color_bitmap; ), TP_printk(NETDEV_PR_FMT ", cmd: %x, count: %u, bitmap: %llx", NETDEV_PR_ARG, __entry->cmd, __entry->count, __entry->color_bitmap) ); TRACE_EVENT(cfg80211_assoc_comeback, TP_PROTO(struct wireless_dev *wdev, const u8 *ap_addr, u32 timeout), TP_ARGS(wdev, ap_addr, timeout), TP_STRUCT__entry( WDEV_ENTRY MAC_ENTRY(ap_addr) __field(u32, timeout) ), TP_fast_assign( WDEV_ASSIGN; MAC_ASSIGN(ap_addr, ap_addr); __entry->timeout = timeout; ), TP_printk(WDEV_PR_FMT ", %pM, timeout: %u TUs", WDEV_PR_ARG, __entry->ap_addr, __entry->timeout) ); DECLARE_EVENT_CLASS(link_station_add_mod, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct link_station_parameters *params), TP_ARGS(wiphy, netdev, params), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __array(u8, mld_mac, 6) __array(u8, link_mac, 6) __field(u32, link_id) __dynamic_array(u8, supported_rates, params->supported_rates_len) __array(u8, ht_capa, (int)sizeof(struct ieee80211_ht_cap)) __array(u8, vht_capa, (int)sizeof(struct ieee80211_vht_cap)) __field(u8, opmode_notif) __field(bool, opmode_notif_used) __dynamic_array(u8, he_capa, params->he_capa_len) __array(u8, he_6ghz_capa, (int)sizeof(struct ieee80211_he_6ghz_capa)) __dynamic_array(u8, eht_capa, params->eht_capa_len) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; memset(__entry->mld_mac, 0, 6); memset(__entry->link_mac, 0, 6); if (params->mld_mac) memcpy(__entry->mld_mac, params->mld_mac, 6); if (params->link_mac) memcpy(__entry->link_mac, params->link_mac, 6); __entry->link_id = params->link_id; if (params->supported_rates && params->supported_rates_len) memcpy(__get_dynamic_array(supported_rates), params->supported_rates, params->supported_rates_len); memset(__entry->ht_capa, 0, sizeof(struct ieee80211_ht_cap)); if (params->ht_capa) memcpy(__entry->ht_capa, params->ht_capa, sizeof(struct ieee80211_ht_cap)); memset(__entry->vht_capa, 0, sizeof(struct ieee80211_vht_cap)); if (params->vht_capa) memcpy(__entry->vht_capa, params->vht_capa, sizeof(struct ieee80211_vht_cap)); __entry->opmode_notif = params->opmode_notif; __entry->opmode_notif_used = params->opmode_notif_used; if (params->he_capa && params->he_capa_len) memcpy(__get_dynamic_array(he_capa), params->he_capa, params->he_capa_len); memset(__entry->he_6ghz_capa, 0, sizeof(struct ieee80211_he_6ghz_capa)); if (params->he_6ghz_capa) memcpy(__entry->he_6ghz_capa, params->he_6ghz_capa, sizeof(struct ieee80211_he_6ghz_capa)); if (params->eht_capa && params->eht_capa_len) memcpy(__get_dynamic_array(eht_capa), params->eht_capa, params->eht_capa_len); ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: %pM" ", link mac: %pM, link id: %u", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->mld_mac, __entry->link_mac, __entry->link_id) ); DEFINE_EVENT(link_station_add_mod, rdev_add_link_station, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct link_station_parameters *params), TP_ARGS(wiphy, netdev, params) ); DEFINE_EVENT(link_station_add_mod, rdev_mod_link_station, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct link_station_parameters *params), TP_ARGS(wiphy, netdev, params) ); TRACE_EVENT(cfg80211_links_removed, TP_PROTO(struct 
net_device *netdev, u16 link_mask), TP_ARGS(netdev, link_mask), TP_STRUCT__entry( NETDEV_ENTRY __field(u16, link_mask) ), TP_fast_assign( NETDEV_ASSIGN; __entry->link_mask = link_mask; ), TP_printk(NETDEV_PR_FMT ", link_mask:0x%x", NETDEV_PR_ARG, __entry->link_mask) ); TRACE_EVENT(cfg80211_mlo_reconf_add_done, TP_PROTO(struct net_device *netdev, u16 link_mask, const u8 *buf, size_t len, bool driver_initiated), TP_ARGS(netdev, link_mask, buf, len, driver_initiated), TP_STRUCT__entry( NETDEV_ENTRY __field(u16, link_mask) __dynamic_array(u8, buf, len) __field(bool, driver_initiated) ), TP_fast_assign( NETDEV_ASSIGN; __entry->link_mask = link_mask; memcpy(__get_dynamic_array(buf), buf, len); __entry->driver_initiated = driver_initiated; ), TP_printk(NETDEV_PR_FMT ", link_mask:0x%x, driver_initiated:%d", NETDEV_PR_ARG, __entry->link_mask, __entry->driver_initiated) ); TRACE_EVENT(rdev_assoc_ml_reconf, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_ml_reconf_req *req), TP_ARGS(wiphy, netdev, req), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(u16, add_links) __field(u16, rem_links) __field(u16, ext_mld_capa_ops) ), TP_fast_assign( WIPHY_ASSIGN; NETDEV_ASSIGN; u32 i; __entry->add_links = 0; __entry->rem_links = req->rem_links; for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) if (req->add_links[i].bss) __entry->add_links |= BIT(i); __entry->ext_mld_capa_ops = req->ext_mld_capa_ops; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", add_links=0x%x, rem_links=0x%x", WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->add_links, __entry->rem_links) ); TRACE_EVENT(cfg80211_epcs_changed, TP_PROTO(struct wireless_dev *wdev, bool enabled), TP_ARGS(wdev, enabled), TP_STRUCT__entry( WDEV_ENTRY __field(u32, enabled) ), TP_fast_assign( WDEV_ASSIGN; __entry->enabled = enabled; ), TP_printk(WDEV_PR_FMT ", enabled=%u", WDEV_PR_ARG, __entry->enabled) ); TRACE_EVENT(cfg80211_next_nan_dw_notif, TP_PROTO(struct wireless_dev *wdev, struct ieee80211_channel *chan), TP_ARGS(wdev, chan), TP_STRUCT__entry( WDEV_ENTRY CHAN_ENTRY ), TP_fast_assign( WDEV_ASSIGN; CHAN_ASSIGN(chan); ), TP_printk(WDEV_PR_FMT " " CHAN_PR_FMT, WDEV_PR_ARG, CHAN_PR_ARG) ); TRACE_EVENT(cfg80211_nan_cluster_joined, TP_PROTO(struct wireless_dev *wdev, const u8 *cluster_id, bool new_cluster), TP_ARGS(wdev, cluster_id, new_cluster), TP_STRUCT__entry( WDEV_ENTRY MAC_ENTRY(cluster_id) __field(bool, new_cluster) ), TP_fast_assign( WDEV_ASSIGN; MAC_ASSIGN(cluster_id, cluster_id); __entry->new_cluster = new_cluster; ), TP_printk(WDEV_PR_FMT " cluster_id %pMF%s", WDEV_PR_ARG, __entry->cluster_id, __entry->new_cluster ? " [new]" : "") ); #endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */ #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH . #undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE trace #include <trace/define_trace.h>
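/*
 * Note on the TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE boilerplate above: this
 * header is included twice.  Ordinary users include it only for the
 * trace_*() declarations; exactly one translation unit defines
 * CREATE_TRACE_POINTS first so that <trace/define_trace.h> also emits the
 * tracepoint definitions.  A hedged sketch of that single user (modelled on
 * net/wireless/trace.c, simplified):
 */
#define CREATE_TRACE_POINTS
#include "trace.h"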
/* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM rpm #if !defined(_TRACE_RUNTIME_POWER_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_RUNTIME_POWER_H #include <linux/ktime.h> #include <linux/tracepoint.h> struct device; /* * The rpm_internal events are used for tracing some important * runtime pm internal functions. */ DECLARE_EVENT_CLASS(rpm_internal, TP_PROTO(struct device *dev, int flags), TP_ARGS(dev, flags), TP_STRUCT__entry( __string( name, dev_name(dev) ) __field( int, flags ) __field( int , usage_count ) __field( int , disable_depth ) __field( int , runtime_auto ) __field( int , request_pending ) __field( int , irq_safe ) __field( int , child_count ) ), TP_fast_assign( __assign_str(name); __entry->flags = flags; __entry->usage_count = atomic_read( &dev->power.usage_count); __entry->disable_depth = dev->power.disable_depth; __entry->runtime_auto = dev->power.runtime_auto; __entry->request_pending = dev->power.request_pending; __entry->irq_safe = dev->power.irq_safe; __entry->child_count = atomic_read( &dev->power.child_count); ), TP_printk("%s flags-%x cnt-%-2d dep-%-2d auto-%-1d p-%-1d" " irq-%-1d child-%d", __get_str(name), __entry->flags, __entry->usage_count, __entry->disable_depth, __entry->runtime_auto, __entry->request_pending, __entry->irq_safe, __entry->child_count ) ); DEFINE_EVENT(rpm_internal, rpm_suspend, TP_PROTO(struct device *dev, int flags), TP_ARGS(dev, flags) ); DEFINE_EVENT(rpm_internal, rpm_resume, TP_PROTO(struct device *dev, int flags), TP_ARGS(dev, flags) ); DEFINE_EVENT(rpm_internal, rpm_idle, TP_PROTO(struct device *dev, int flags), TP_ARGS(dev, flags) ); DEFINE_EVENT(rpm_internal, rpm_usage, TP_PROTO(struct device *dev, int flags), TP_ARGS(dev, flags) ); TRACE_EVENT(rpm_return_int, TP_PROTO(struct device *dev, unsigned long ip, int ret), TP_ARGS(dev, ip, ret), TP_STRUCT__entry( __string( name, dev_name(dev)) __field( unsigned long, ip ) __field( int, ret ) ), TP_fast_assign( __assign_str(name); __entry->ip = ip; __entry->ret = ret; ), TP_printk("%pS:%s ret=%d", (void *)__entry->ip, __get_str(name), __entry->ret) ); #define RPM_STATUS_STRINGS \ EM(RPM_INVALID, "RPM_INVALID") \ EM(RPM_ACTIVE, "RPM_ACTIVE") \ EM(RPM_RESUMING, "RPM_RESUMING") \ EM(RPM_SUSPENDED, "RPM_SUSPENDED") \ EMe(RPM_SUSPENDING, "RPM_SUSPENDING") /* Enums require being exported to userspace, for user tool parsing. */ #undef EM #undef EMe #define EM(a, b) TRACE_DEFINE_ENUM(a); #define EMe(a, b) TRACE_DEFINE_ENUM(a); RPM_STATUS_STRINGS /* * Now redefine the EM() and EMe() macros to map the enums to the strings that * will be printed in the output.
*/ #undef EM #undef EMe #define EM(a, b) { a, b }, #define EMe(a, b) { a, b } TRACE_EVENT(rpm_status, TP_PROTO(struct device *dev, enum rpm_status status), TP_ARGS(dev, status), TP_STRUCT__entry( __string(name, dev_name(dev)) __field(int, status) ), TP_fast_assign( __assign_str(name); __entry->status = status; ), TP_printk("%s status=%s", __get_str(name), __print_symbolic(__entry->status, RPM_STATUS_STRINGS)) ); #endif /* _TRACE_RUNTIME_POWER_H */ /* This part must be outside protection */ #include <trace/define_trace.h>
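/*
 * Editorial sketch, not part of the rpm trace header above: the DEFINE_EVENT()
 * and TRACE_EVENT() definitions generate trace_rpm_suspend(),
 * trace_rpm_return_int(), trace_rpm_status() and friends, which the runtime PM
 * core emits around its internal state machine. The function below is a
 * hypothetical caller, shown only to illustrate the entry/exit pairing of
 * rpm_suspend and rpm_return_int; _THIS_IP_ is assumed to be available via
 * <linux/instruction_pointer.h>.
 */
#if 0 /* illustration only, not compiled */
static int example_rpm_suspend(struct device *dev, int rpmflags)
{
	int retval = 0;

	trace_rpm_suspend(dev, rpmflags);	/* entry: records flags and usage counters */
	/* ... the actual suspend work would happen here ... */
	trace_rpm_return_int(dev, _THIS_IP_, retval);	/* exit: records caller IP and result */
	return retval;
}
#endif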
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 1993 by Theodore Ts'o. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/file.h> #include <linux/stat.h> #include <linux/errno.h> #include <linux/major.h> #include <linux/wait.h> #include <linux/blkpg.h> #include <linux/init.h> #include <linux/swap.h> #include <linux/slab.h> #include <linux/compat.h> #include <linux/suspend.h> #include <linux/freezer.h> #include <linux/mutex.h> #include <linux/writeback.h> #include <linux/completion.h> #include <linux/highmem.h> #include <linux/splice.h> #include <linux/sysfs.h> #include <linux/miscdevice.h> #include <linux/falloc.h> #include <linux/uio.h> #include <linux/ioprio.h> #include <linux/blk-cgroup.h> #include <linux/sched/mm.h> #include <linux/statfs.h> #include <linux/uaccess.h> #include <linux/blk-mq.h> #include <linux/spinlock.h> #include <uapi/linux/loop.h> /* Possible states of device */ enum { Lo_unbound, Lo_bound, Lo_rundown, Lo_deleting, }; struct loop_device { int lo_number; loff_t lo_offset; loff_t lo_sizelimit; int lo_flags; char lo_file_name[LO_NAME_SIZE]; struct file *lo_backing_file; unsigned int lo_min_dio_size; struct block_device *lo_device; gfp_t old_gfp_mask; spinlock_t lo_lock; int lo_state; spinlock_t lo_work_lock; struct workqueue_struct *workqueue; struct work_struct rootcg_work; struct list_head rootcg_cmd_list; struct list_head idle_worker_list; struct rb_root worker_tree; struct timer_list timer; bool sysfs_inited; struct request_queue *lo_queue; struct blk_mq_tag_set tag_set; struct gendisk *lo_disk; struct mutex lo_mutex; bool idr_visible; }; struct loop_cmd { struct list_head list_entry; bool use_aio; /* use AIO interface to handle I/O */ atomic_t ref; /* only for aio */ long ret; struct kiocb iocb; struct bio_vec *bvec; struct cgroup_subsys_state *blkcg_css; struct cgroup_subsys_state *memcg_css; }; #define LOOP_IDLE_WORKER_TIMEOUT (60 * HZ) #define LOOP_DEFAULT_HW_Q_DEPTH 128 static DEFINE_IDR(loop_index_idr); static DEFINE_MUTEX(loop_ctl_mutex); static DEFINE_MUTEX(loop_validate_mutex); /** * loop_global_lock_killable() - take locks for safe loop_validate_file() test * * @lo: struct loop_device * @global: true if @lo is about to bind another "struct loop_device", false otherwise * * Returns 0 on success, -EINTR otherwise. * * Since loop_validate_file() traverses on other "struct loop_device" if * is_loop_device() is true, we need a global lock for serializing concurrent * loop_configure()/loop_change_fd()/__loop_clr_fd() calls.
*/ static int loop_global_lock_killable(struct loop_device *lo, bool global) { int err; if (global) { err = mutex_lock_killable(&loop_validate_mutex); if (err) return err; } err = mutex_lock_killable(&lo->lo_mutex); if (err && global) mutex_unlock(&loop_validate_mutex); return err; } /** * loop_global_unlock() - release locks taken by loop_global_lock_killable() * * @lo: struct loop_device * @global: true if @lo was about to bind another "struct loop_device", false otherwise */ static void loop_global_unlock(struct loop_device *lo, bool global) { mutex_unlock(&lo->lo_mutex); if (global) mutex_unlock(&loop_validate_mutex); } static int max_part; static int part_shift; static loff_t lo_calculate_size(struct loop_device *lo, struct file *file) { loff_t loopsize; int ret; if (S_ISBLK(file_inode(file)->i_mode)) { loopsize = i_size_read(file->f_mapping->host); } else { struct kstat stat; /* * Get the accurate file size. This provides better results than * cached inode data, particularly for network filesystems where * metadata may be stale. */ ret = vfs_getattr_nosec(&file->f_path, &stat, STATX_SIZE, 0); if (ret) return 0; loopsize = stat.size; } if (lo->lo_offset > 0) loopsize -= lo->lo_offset; /* offset is beyond i_size, weird but possible */ if (loopsize < 0) return 0; if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize) loopsize = lo->lo_sizelimit; /* * Unfortunately, if we want to do I/O on the device, * the number of 512-byte sectors has to fit into a sector_t. */ return loopsize >> 9; } /* * We support direct I/O only if lo_offset is aligned with the logical I/O size * of backing device, and the logical block size of loop is bigger than that of * the backing device. */ static bool lo_can_use_dio(struct loop_device *lo) { if (!(lo->lo_backing_file->f_mode & FMODE_CAN_ODIRECT)) return false; if (queue_logical_block_size(lo->lo_queue) < lo->lo_min_dio_size) return false; if (lo->lo_offset & (lo->lo_min_dio_size - 1)) return false; return true; } /* * Direct I/O can be enabled either by using an O_DIRECT file descriptor, or by * passing in the LO_FLAGS_DIRECT_IO flag from userspace. It will be silently * disabled when the device block size is too small or the offset is unaligned. * * loop_get_status will always report the effective LO_FLAGS_DIRECT_IO flag and * not the originally passed in one. */ static inline void loop_update_dio(struct loop_device *lo) { lockdep_assert_held(&lo->lo_mutex); WARN_ON_ONCE(lo->lo_state == Lo_bound && lo->lo_queue->mq_freeze_depth == 0); if ((lo->lo_flags & LO_FLAGS_DIRECT_IO) && !lo_can_use_dio(lo)) lo->lo_flags &= ~LO_FLAGS_DIRECT_IO; } /** * loop_set_size() - sets device size and notifies userspace * @lo: struct loop_device to set the size for * @size: new size of the loop device * * Callers must validate that the size passed into this function fits into * a sector_t, eg using loop_validate_size() */ static void loop_set_size(struct loop_device *lo, loff_t size) { if (!set_capacity_and_notify(lo->lo_disk, size)) kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE); } static void loop_clear_limits(struct loop_device *lo, int mode) { struct queue_limits lim = queue_limits_start_update(lo->lo_queue); if (mode & FALLOC_FL_ZERO_RANGE) lim.max_write_zeroes_sectors = 0; if (mode & FALLOC_FL_PUNCH_HOLE) { lim.max_hw_discard_sectors = 0; lim.discard_granularity = 0; } /* * XXX: this updates the queue limits without freezing the queue, which * is against the locking protocol and dangerous. 
But we can't just * freeze the queue as we're inside the ->queue_rq method here. So this * should move out into a workqueue unless we get the file operations to * advertise if they support specific fallocate operations. */ queue_limits_commit_update(lo->lo_queue, &lim); } static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos, int mode) { /* * We use fallocate to manipulate the space mappings used by the image * a.k.a. discard/zerorange. */ struct file *file = lo->lo_backing_file; int ret; mode |= FALLOC_FL_KEEP_SIZE; if (!bdev_max_discard_sectors(lo->lo_device)) return -EOPNOTSUPP; ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq)); if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP)) return -EIO; /* * We initially configure the limits in a hope that fallocate is * supported and clear them here if that turns out not to be true. */ if (unlikely(ret == -EOPNOTSUPP)) loop_clear_limits(lo, mode); return ret; } static int lo_req_flush(struct loop_device *lo, struct request *rq) { int ret = vfs_fsync(lo->lo_backing_file, 0); if (unlikely(ret && ret != -EINVAL)) ret = -EIO; return ret; } static void lo_complete_rq(struct request *rq) { struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); blk_status_t ret = BLK_STS_OK; if (cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) || req_op(rq) != REQ_OP_READ) { if (cmd->ret < 0) ret = errno_to_blk_status(cmd->ret); goto end_io; } /* * Short READ - if we got some data, advance our request and * retry it. If we got no data, end the rest with EIO. */ if (cmd->ret) { blk_update_request(rq, BLK_STS_OK, cmd->ret); cmd->ret = 0; blk_mq_requeue_request(rq, true); } else { struct bio *bio = rq->bio; while (bio) { zero_fill_bio(bio); bio = bio->bi_next; } ret = BLK_STS_IOERR; end_io: blk_mq_end_request(rq, ret); } } static void lo_rw_aio_do_completion(struct loop_cmd *cmd) { struct request *rq = blk_mq_rq_from_pdu(cmd); if (!atomic_dec_and_test(&cmd->ref)) return; kfree(cmd->bvec); cmd->bvec = NULL; if (req_op(rq) == REQ_OP_WRITE) kiocb_end_write(&cmd->iocb); if (likely(!blk_should_fake_timeout(rq->q))) blk_mq_complete_request(rq); } static void lo_rw_aio_complete(struct kiocb *iocb, long ret) { struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb); cmd->ret = ret; lo_rw_aio_do_completion(cmd); } static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, loff_t pos, int rw) { struct iov_iter iter; struct req_iterator rq_iter; struct bio_vec *bvec; struct request *rq = blk_mq_rq_from_pdu(cmd); struct bio *bio = rq->bio; struct file *file = lo->lo_backing_file; struct bio_vec tmp; unsigned int offset; unsigned int nr_bvec; int ret; nr_bvec = blk_rq_nr_bvec(rq); if (rq->bio != rq->biotail) { bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec), GFP_NOIO); if (!bvec) return -EIO; cmd->bvec = bvec; /* * The bios of the request may be started from the middle of * the 'bvec' because of bio splitting, so we can't directly * copy bio->bi_iov_vec to new bvec. The rq_for_each_bvec * API will take care of all details for us. 
*/ rq_for_each_bvec(tmp, rq, rq_iter) { *bvec = tmp; bvec++; } bvec = cmd->bvec; offset = 0; } else { /* * Same here, this bio may be started from the middle of the * 'bvec' because of bio splitting, so offset from the bvec * must be passed to iov iterator */ offset = bio->bi_iter.bi_bvec_done; bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); } atomic_set(&cmd->ref, 2); iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq)); iter.iov_offset = offset; cmd->iocb.ki_pos = pos; cmd->iocb.ki_filp = file; cmd->iocb.ki_ioprio = req_get_ioprio(rq); if (cmd->use_aio) { cmd->iocb.ki_complete = lo_rw_aio_complete; cmd->iocb.ki_flags = IOCB_DIRECT; } else { cmd->iocb.ki_complete = NULL; cmd->iocb.ki_flags = 0; } if (rw == ITER_SOURCE) { kiocb_start_write(&cmd->iocb); ret = file->f_op->write_iter(&cmd->iocb, &iter); } else ret = file->f_op->read_iter(&cmd->iocb, &iter); lo_rw_aio_do_completion(cmd); if (ret != -EIOCBQUEUED) lo_rw_aio_complete(&cmd->iocb, ret); return -EIOCBQUEUED; } static int do_req_filebacked(struct loop_device *lo, struct request *rq) { struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset; switch (req_op(rq)) { case REQ_OP_FLUSH: return lo_req_flush(lo, rq); case REQ_OP_WRITE_ZEROES: /* * If the caller doesn't want deallocation, call zeroout to * write zeroes the range. Otherwise, punch them out. */ return lo_fallocate(lo, rq, pos, (rq->cmd_flags & REQ_NOUNMAP) ? FALLOC_FL_ZERO_RANGE : FALLOC_FL_PUNCH_HOLE); case REQ_OP_DISCARD: return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE); case REQ_OP_WRITE: return lo_rw_aio(lo, cmd, pos, ITER_SOURCE); case REQ_OP_READ: return lo_rw_aio(lo, cmd, pos, ITER_DEST); default: WARN_ON_ONCE(1); return -EIO; } } static void loop_reread_partitions(struct loop_device *lo) { int rc; mutex_lock(&lo->lo_disk->open_mutex); rc = bdev_disk_changed(lo->lo_disk, false); mutex_unlock(&lo->lo_disk->open_mutex); if (rc) pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n", __func__, lo->lo_number, lo->lo_file_name, rc); } static unsigned int loop_query_min_dio_size(struct loop_device *lo) { struct file *file = lo->lo_backing_file; struct block_device *sb_bdev = file->f_mapping->host->i_sb->s_bdev; struct kstat st; /* * Use the minimal dio alignment of the file system if provided. */ if (!vfs_getattr(&file->f_path, &st, STATX_DIOALIGN, 0) && (st.result_mask & STATX_DIOALIGN)) return st.dio_offset_align; /* * In a perfect world this wouldn't be needed, but as of Linux 6.13 only * a handful of file systems support the STATX_DIOALIGN flag. */ if (sb_bdev) return bdev_logical_block_size(sb_bdev); return SECTOR_SIZE; } static inline int is_loop_device(struct file *file) { struct inode *i = file->f_mapping->host; return i && S_ISBLK(i->i_mode) && imajor(i) == LOOP_MAJOR; } static int loop_validate_file(struct file *file, struct block_device *bdev) { struct inode *inode = file->f_mapping->host; struct file *f = file; /* Avoid recursion */ while (is_loop_device(f)) { struct loop_device *l; lockdep_assert_held(&loop_validate_mutex); if (f->f_mapping->host->i_rdev == bdev->bd_dev) return -EBADF; l = I_BDEV(f->f_mapping->host)->bd_disk->private_data; if (l->lo_state != Lo_bound) return -EINVAL; /* Order wrt setting lo->lo_backing_file in loop_configure(). 
*/ rmb(); f = l->lo_backing_file; } if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) return -EINVAL; return 0; } static void loop_assign_backing_file(struct loop_device *lo, struct file *file) { lo->lo_backing_file = file; lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping); mapping_set_gfp_mask(file->f_mapping, lo->old_gfp_mask & ~(__GFP_IO | __GFP_FS)); if (lo->lo_backing_file->f_flags & O_DIRECT) lo->lo_flags |= LO_FLAGS_DIRECT_IO; lo->lo_min_dio_size = loop_query_min_dio_size(lo); } static int loop_check_backing_file(struct file *file) { if (!file->f_op->read_iter) return -EINVAL; if ((file->f_mode & FMODE_WRITE) && !file->f_op->write_iter) return -EINVAL; return 0; } /* * loop_change_fd switched the backing store of a loopback device to * a new file. This is useful for operating system installers to free up * the original file and in High Availability environments to switch to * an alternative location for the content in case of server meltdown. * This can only work if the loop device is used read-only, and if the * new backing store is the same size and type as the old backing store. */ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, unsigned int arg) { struct file *file = fget(arg); struct file *old_file; unsigned int memflags; int error; bool partscan; bool is_loop; if (!file) return -EBADF; error = loop_check_backing_file(file); if (error) { fput(file); return error; } /* suppress uevents while reconfiguring the device */ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1); is_loop = is_loop_device(file); error = loop_global_lock_killable(lo, is_loop); if (error) goto out_putf; error = -ENXIO; if (lo->lo_state != Lo_bound) goto out_err; /* the loop device has to be read-only */ error = -EINVAL; if (!(lo->lo_flags & LO_FLAGS_READ_ONLY)) goto out_err; error = loop_validate_file(file, bdev); if (error) goto out_err; old_file = lo->lo_backing_file; error = -EINVAL; /* size of the new backing store needs to be the same */ if (lo_calculate_size(lo, file) != lo_calculate_size(lo, old_file)) goto out_err; /* * We might switch to direct I/O mode for the loop device, write back * all dirty data the page cache now that so that the individual I/O * operations don't have to do that. */ vfs_fsync(file, 0); /* and ... switch */ disk_force_media_change(lo->lo_disk); memflags = blk_mq_freeze_queue(lo->lo_queue); mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask); loop_assign_backing_file(lo, file); loop_update_dio(lo); blk_mq_unfreeze_queue(lo->lo_queue, memflags); partscan = lo->lo_flags & LO_FLAGS_PARTSCAN; loop_global_unlock(lo, is_loop); /* * Flush loop_validate_file() before fput(), for l->lo_backing_file * might be pointing at old_file which might be the last reference. */ if (!is_loop) { mutex_lock(&loop_validate_mutex); mutex_unlock(&loop_validate_mutex); } /* * We must drop file reference outside of lo_mutex as dropping * the file ref can take open_mutex which creates circular locking * dependency. 
*/ fput(old_file); dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0); if (partscan) loop_reread_partitions(lo); error = 0; done: kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE); return error; out_err: loop_global_unlock(lo, is_loop); out_putf: fput(file); dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0); goto done; } /* loop sysfs attributes */ static ssize_t loop_attr_show(struct device *dev, char *page, ssize_t (*callback)(struct loop_device *, char *)) { struct gendisk *disk = dev_to_disk(dev); struct loop_device *lo = disk->private_data; return callback(lo, page); } #define LOOP_ATTR_RO(_name) \ static ssize_t loop_attr_##_name##_show(struct loop_device *, char *); \ static ssize_t loop_attr_do_show_##_name(struct device *d, \ struct device_attribute *attr, char *b) \ { \ return loop_attr_show(d, b, loop_attr_##_name##_show); \ } \ static struct device_attribute loop_attr_##_name = \ __ATTR(_name, 0444, loop_attr_do_show_##_name, NULL); static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf) { ssize_t ret; char *p = NULL; spin_lock_irq(&lo->lo_lock); if (lo->lo_backing_file) p = file_path(lo->lo_backing_file, buf, PAGE_SIZE - 1); spin_unlock_irq(&lo->lo_lock); if (IS_ERR_OR_NULL(p)) ret = PTR_ERR(p); else { ret = strlen(p); memmove(buf, p, ret); buf[ret++] = '\n'; buf[ret] = 0; } return ret; } static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf) { return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_offset); } static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf) { return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit); } static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf) { int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR); return sysfs_emit(buf, "%s\n", autoclear ? "1" : "0"); } static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf) { int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN); return sysfs_emit(buf, "%s\n", partscan ? "1" : "0"); } static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf) { int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO); return sysfs_emit(buf, "%s\n", dio ? "1" : "0"); } LOOP_ATTR_RO(backing_file); LOOP_ATTR_RO(offset); LOOP_ATTR_RO(sizelimit); LOOP_ATTR_RO(autoclear); LOOP_ATTR_RO(partscan); LOOP_ATTR_RO(dio); static struct attribute *loop_attrs[] = { &loop_attr_backing_file.attr, &loop_attr_offset.attr, &loop_attr_sizelimit.attr, &loop_attr_autoclear.attr, &loop_attr_partscan.attr, &loop_attr_dio.attr, NULL, }; static struct attribute_group loop_attribute_group = { .name = "loop", .attrs= loop_attrs, }; static void loop_sysfs_init(struct loop_device *lo) { lo->sysfs_inited = !sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj, &loop_attribute_group); } static void loop_sysfs_exit(struct loop_device *lo) { if (lo->sysfs_inited) sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj, &loop_attribute_group); } static void loop_get_discard_config(struct loop_device *lo, u32 *granularity, u32 *max_discard_sectors) { struct file *file = lo->lo_backing_file; struct inode *inode = file->f_mapping->host; struct kstatfs sbuf; /* * If the backing device is a block device, mirror its zeroing * capability. Set the discard sectors to the block device's zeroing * capabilities because loop discards result in blkdev_issue_zeroout(), * not blkdev_issue_discard(). This maintains consistent behavior with * file-backed loop devices: discarded regions read back as zero. 
*/ if (S_ISBLK(inode->i_mode)) { struct block_device *bdev = I_BDEV(inode); *max_discard_sectors = bdev_write_zeroes_sectors(bdev); *granularity = bdev_discard_granularity(bdev); /* * We use punch hole to reclaim the free space used by the * image a.k.a. discard. */ } else if (file->f_op->fallocate && !vfs_statfs(&file->f_path, &sbuf)) { *max_discard_sectors = UINT_MAX >> 9; *granularity = sbuf.f_bsize; } } struct loop_worker { struct rb_node rb_node; struct work_struct work; struct list_head cmd_list; struct list_head idle_list; struct loop_device *lo; struct cgroup_subsys_state *blkcg_css; unsigned long last_ran_at; }; static void loop_workfn(struct work_struct *work); #ifdef CONFIG_BLK_CGROUP static inline int queue_on_root_worker(struct cgroup_subsys_state *css) { return !css || css == blkcg_root_css; } #else static inline int queue_on_root_worker(struct cgroup_subsys_state *css) { return !css; } #endif static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd) { struct rb_node **node, *parent = NULL; struct loop_worker *cur_worker, *worker = NULL; struct work_struct *work; struct list_head *cmd_list; spin_lock_irq(&lo->lo_work_lock); if (queue_on_root_worker(cmd->blkcg_css)) goto queue_work; node = &lo->worker_tree.rb_node; while (*node) { parent = *node; cur_worker = container_of(*node, struct loop_worker, rb_node); if (cur_worker->blkcg_css == cmd->blkcg_css) { worker = cur_worker; break; } else if ((long)cur_worker->blkcg_css < (long)cmd->blkcg_css) { node = &(*node)->rb_left; } else { node = &(*node)->rb_right; } } if (worker) goto queue_work; worker = kzalloc(sizeof(struct loop_worker), GFP_NOWAIT); /* * In the event we cannot allocate a worker, just queue on the * rootcg worker and issue the I/O as the rootcg */ if (!worker) { cmd->blkcg_css = NULL; if (cmd->memcg_css) css_put(cmd->memcg_css); cmd->memcg_css = NULL; goto queue_work; } worker->blkcg_css = cmd->blkcg_css; css_get(worker->blkcg_css); INIT_WORK(&worker->work, loop_workfn); INIT_LIST_HEAD(&worker->cmd_list); INIT_LIST_HEAD(&worker->idle_list); worker->lo = lo; rb_link_node(&worker->rb_node, parent, node); rb_insert_color(&worker->rb_node, &lo->worker_tree); queue_work: if (worker) { /* * We need to remove from the idle list here while * holding the lock so that the idle timer doesn't * free the worker */ if (!list_empty(&worker->idle_list)) list_del_init(&worker->idle_list); work = &worker->work; cmd_list = &worker->cmd_list; } else { work = &lo->rootcg_work; cmd_list = &lo->rootcg_cmd_list; } list_add_tail(&cmd->list_entry, cmd_list); queue_work(lo->workqueue, work); spin_unlock_irq(&lo->lo_work_lock); } static void loop_set_timer(struct loop_device *lo) { timer_reduce(&lo->timer, jiffies + LOOP_IDLE_WORKER_TIMEOUT); } static void loop_free_idle_workers(struct loop_device *lo, bool delete_all) { struct loop_worker *pos, *worker; spin_lock_irq(&lo->lo_work_lock); list_for_each_entry_safe(worker, pos, &lo->idle_worker_list, idle_list) { if (!delete_all && time_is_after_jiffies(worker->last_ran_at + LOOP_IDLE_WORKER_TIMEOUT)) break; list_del(&worker->idle_list); rb_erase(&worker->rb_node, &lo->worker_tree); css_put(worker->blkcg_css); kfree(worker); } if (!list_empty(&lo->idle_worker_list)) loop_set_timer(lo); spin_unlock_irq(&lo->lo_work_lock); } static void loop_free_idle_workers_timer(struct timer_list *timer) { struct loop_device *lo = container_of(timer, struct loop_device, timer); return loop_free_idle_workers(lo, false); } /** * loop_set_status_from_info - configure device from loop_info * @lo: 
struct loop_device to configure * @info: struct loop_info64 to configure the device with * * Configures the loop device parameters according to the passed * in loop_info64 configuration. */ static int loop_set_status_from_info(struct loop_device *lo, const struct loop_info64 *info) { if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) return -EINVAL; switch (info->lo_encrypt_type) { case LO_CRYPT_NONE: break; case LO_CRYPT_XOR: pr_warn("support for the xor transformation has been removed.\n"); return -EINVAL; case LO_CRYPT_CRYPTOAPI: pr_warn("support for cryptoloop has been removed. Use dm-crypt instead.\n"); return -EINVAL; default: return -EINVAL; } /* Avoid assigning overflow values */ if (info->lo_offset > LLONG_MAX || info->lo_sizelimit > LLONG_MAX) return -EOVERFLOW; lo->lo_offset = info->lo_offset; lo->lo_sizelimit = info->lo_sizelimit; memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE); lo->lo_file_name[LO_NAME_SIZE-1] = 0; return 0; } static unsigned int loop_default_blocksize(struct loop_device *lo) { /* In case of direct I/O, match underlying minimum I/O size */ if (lo->lo_flags & LO_FLAGS_DIRECT_IO) return lo->lo_min_dio_size; return SECTOR_SIZE; } static void loop_update_limits(struct loop_device *lo, struct queue_limits *lim, unsigned int bsize) { struct file *file = lo->lo_backing_file; struct inode *inode = file->f_mapping->host; struct block_device *backing_bdev = NULL; u32 granularity = 0, max_discard_sectors = 0; if (S_ISBLK(inode->i_mode)) backing_bdev = I_BDEV(inode); else if (inode->i_sb->s_bdev) backing_bdev = inode->i_sb->s_bdev; if (!bsize) bsize = loop_default_blocksize(lo); loop_get_discard_config(lo, &granularity, &max_discard_sectors); lim->logical_block_size = bsize; lim->physical_block_size = bsize; lim->io_min = bsize; lim->features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_ROTATIONAL); if (file->f_op->fsync && !(lo->lo_flags & LO_FLAGS_READ_ONLY)) lim->features |= BLK_FEAT_WRITE_CACHE; if (backing_bdev && !bdev_nonrot(backing_bdev)) lim->features |= BLK_FEAT_ROTATIONAL; lim->max_hw_discard_sectors = max_discard_sectors; lim->max_write_zeroes_sectors = max_discard_sectors; if (max_discard_sectors) lim->discard_granularity = granularity; else lim->discard_granularity = 0; } static int loop_configure(struct loop_device *lo, blk_mode_t mode, struct block_device *bdev, const struct loop_config *config) { struct file *file = fget(config->fd); struct queue_limits lim; int error; loff_t size; bool partscan; bool is_loop; if (!file) return -EBADF; error = loop_check_backing_file(file); if (error) { fput(file); return error; } is_loop = is_loop_device(file); /* This is safe, since we have a reference from open(). */ __module_get(THIS_MODULE); /* * If we don't hold exclusive handle for the device, upgrade to it * here to avoid changing device under exclusive owner. 
*/ if (!(mode & BLK_OPEN_EXCL)) { error = bd_prepare_to_claim(bdev, loop_configure, NULL); if (error) goto out_putf; } error = loop_global_lock_killable(lo, is_loop); if (error) goto out_bdev; error = -EBUSY; if (lo->lo_state != Lo_unbound) goto out_unlock; error = loop_validate_file(file, bdev); if (error) goto out_unlock; if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) { error = -EINVAL; goto out_unlock; } error = loop_set_status_from_info(lo, &config->info); if (error) goto out_unlock; lo->lo_flags = config->info.lo_flags; if (!(file->f_mode & FMODE_WRITE) || !(mode & BLK_OPEN_WRITE) || !file->f_op->write_iter) lo->lo_flags |= LO_FLAGS_READ_ONLY; if (!lo->workqueue) { lo->workqueue = alloc_workqueue("loop%d", WQ_UNBOUND | WQ_FREEZABLE, 0, lo->lo_number); if (!lo->workqueue) { error = -ENOMEM; goto out_unlock; } } /* suppress uevents while reconfiguring the device */ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1); disk_force_media_change(lo->lo_disk); set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0); lo->lo_device = bdev; loop_assign_backing_file(lo, file); lim = queue_limits_start_update(lo->lo_queue); loop_update_limits(lo, &lim, config->block_size); /* No need to freeze the queue as the device isn't bound yet. */ error = queue_limits_commit_update(lo->lo_queue, &lim); if (error) goto out_unlock; /* * We might switch to direct I/O mode for the loop device, write back * all dirty data the page cache now that so that the individual I/O * operations don't have to do that. */ vfs_fsync(file, 0); loop_update_dio(lo); loop_sysfs_init(lo); size = lo_calculate_size(lo, file); loop_set_size(lo, size); /* Order wrt reading lo_state in loop_validate_file(). */ wmb(); WRITE_ONCE(lo->lo_state, Lo_bound); if (part_shift) lo->lo_flags |= LO_FLAGS_PARTSCAN; partscan = lo->lo_flags & LO_FLAGS_PARTSCAN; if (partscan) clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state); dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0); kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE); loop_global_unlock(lo, is_loop); if (partscan) loop_reread_partitions(lo); if (!(mode & BLK_OPEN_EXCL)) bd_abort_claiming(bdev, loop_configure); return 0; out_unlock: loop_global_unlock(lo, is_loop); out_bdev: if (!(mode & BLK_OPEN_EXCL)) bd_abort_claiming(bdev, loop_configure); out_putf: fput(file); /* This is safe: open() is still holding a reference. */ module_put(THIS_MODULE); return error; } static void __loop_clr_fd(struct loop_device *lo) { struct queue_limits lim; struct file *filp; gfp_t gfp = lo->old_gfp_mask; spin_lock_irq(&lo->lo_lock); filp = lo->lo_backing_file; lo->lo_backing_file = NULL; spin_unlock_irq(&lo->lo_lock); lo->lo_device = NULL; lo->lo_offset = 0; lo->lo_sizelimit = 0; memset(lo->lo_file_name, 0, LO_NAME_SIZE); /* * Reset the block size to the default. * * No queue freezing needed because this is called from the final * ->release call only, so there can't be any outstanding I/O. */ lim = queue_limits_start_update(lo->lo_queue); lim.logical_block_size = SECTOR_SIZE; lim.physical_block_size = SECTOR_SIZE; lim.io_min = SECTOR_SIZE; queue_limits_commit_update(lo->lo_queue, &lim); invalidate_disk(lo->lo_disk); loop_sysfs_exit(lo); /* let user-space know about this change */ kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE); mapping_set_gfp_mask(filp->f_mapping, gfp); /* This is safe: open() is still holding a reference. 
*/ module_put(THIS_MODULE); disk_force_media_change(lo->lo_disk); if (lo->lo_flags & LO_FLAGS_PARTSCAN) { int err; /* * open_mutex has been held already in release path, so don't * acquire it if this function is called in such case. * * If the reread partition isn't from release path, lo_refcnt * must be at least one and it can only become zero when the * current holder is released. */ err = bdev_disk_changed(lo->lo_disk, false); if (err) pr_warn("%s: partition scan of loop%d failed (rc=%d)\n", __func__, lo->lo_number, err); /* Device is gone, no point in returning error */ } /* * lo->lo_state is set to Lo_unbound here after above partscan has * finished. There cannot be anybody else entering __loop_clr_fd() as * Lo_rundown state protects us from all the other places trying to * change the 'lo' device. */ lo->lo_flags = 0; if (!part_shift) set_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state); mutex_lock(&lo->lo_mutex); WRITE_ONCE(lo->lo_state, Lo_unbound); mutex_unlock(&lo->lo_mutex); /* * Need not hold lo_mutex to fput backing file. Calling fput holding * lo_mutex triggers a circular lock dependency possibility warning as * fput can take open_mutex which is usually taken before lo_mutex. */ fput(filp); } static int loop_clr_fd(struct loop_device *lo) { int err; /* * Since lo_ioctl() is called without locks held, it is possible that * loop_configure()/loop_change_fd() and loop_clr_fd() run in parallel. * * Therefore, use global lock when setting Lo_rundown state in order to * make sure that loop_validate_file() will fail if the "struct file" * which loop_configure()/loop_change_fd() found via fget() was this * loop device. */ err = loop_global_lock_killable(lo, true); if (err) return err; if (lo->lo_state != Lo_bound) { loop_global_unlock(lo, true); return -ENXIO; } /* * Mark the device for removing the backing device on last close. * If we are the only opener, also switch the state to roundown here to * prevent new openers from coming in. */ lo->lo_flags |= LO_FLAGS_AUTOCLEAR; if (disk_openers(lo->lo_disk) == 1) WRITE_ONCE(lo->lo_state, Lo_rundown); loop_global_unlock(lo, true); return 0; } static int loop_set_status(struct loop_device *lo, blk_mode_t mode, struct block_device *bdev, const struct loop_info64 *info) { int err; bool partscan = false; bool size_changed = false; unsigned int memflags; /* * If we don't hold exclusive handle for the device, upgrade to it * here to avoid changing device under exclusive owner. 
*/ if (!(mode & BLK_OPEN_EXCL)) { err = bd_prepare_to_claim(bdev, loop_set_status, NULL); if (err) goto out_reread_partitions; } err = mutex_lock_killable(&lo->lo_mutex); if (err) goto out_abort_claiming; if (lo->lo_state != Lo_bound) { err = -ENXIO; goto out_unlock; } if (lo->lo_offset != info->lo_offset || lo->lo_sizelimit != info->lo_sizelimit) { size_changed = true; sync_blockdev(lo->lo_device); invalidate_bdev(lo->lo_device); } /* I/O needs to be drained before changing lo_offset or lo_sizelimit */ memflags = blk_mq_freeze_queue(lo->lo_queue); err = loop_set_status_from_info(lo, info); if (err) goto out_unfreeze; partscan = !(lo->lo_flags & LO_FLAGS_PARTSCAN) && (info->lo_flags & LO_FLAGS_PARTSCAN); lo->lo_flags &= ~LOOP_SET_STATUS_CLEARABLE_FLAGS; lo->lo_flags |= (info->lo_flags & LOOP_SET_STATUS_SETTABLE_FLAGS); /* update the direct I/O flag if lo_offset changed */ loop_update_dio(lo); out_unfreeze: blk_mq_unfreeze_queue(lo->lo_queue, memflags); if (partscan) clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state); if (!err && size_changed) { loff_t new_size = lo_calculate_size(lo, lo->lo_backing_file); loop_set_size(lo, new_size); } out_unlock: mutex_unlock(&lo->lo_mutex); out_abort_claiming: if (!(mode & BLK_OPEN_EXCL)) bd_abort_claiming(bdev, loop_set_status); out_reread_partitions: if (partscan) loop_reread_partitions(lo); return err; } static int loop_get_status(struct loop_device *lo, struct loop_info64 *info) { struct path path; struct kstat stat; int ret; ret = mutex_lock_killable(&lo->lo_mutex); if (ret) return ret; if (lo->lo_state != Lo_bound) { mutex_unlock(&lo->lo_mutex); return -ENXIO; } memset(info, 0, sizeof(*info)); info->lo_number = lo->lo_number; info->lo_offset = lo->lo_offset; info->lo_sizelimit = lo->lo_sizelimit; info->lo_flags = lo->lo_flags; memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE); /* Drop lo_mutex while we call into the filesystem. 
*/ path = lo->lo_backing_file->f_path; path_get(&path); mutex_unlock(&lo->lo_mutex); ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT); if (!ret) { info->lo_device = huge_encode_dev(stat.dev); info->lo_inode = stat.ino; info->lo_rdevice = huge_encode_dev(stat.rdev); } path_put(&path); return ret; } static void loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64) { memset(info64, 0, sizeof(*info64)); info64->lo_number = info->lo_number; info64->lo_device = info->lo_device; info64->lo_inode = info->lo_inode; info64->lo_rdevice = info->lo_rdevice; info64->lo_offset = info->lo_offset; info64->lo_sizelimit = 0; info64->lo_flags = info->lo_flags; memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE); } static int loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info) { memset(info, 0, sizeof(*info)); info->lo_number = info64->lo_number; info->lo_device = info64->lo_device; info->lo_inode = info64->lo_inode; info->lo_rdevice = info64->lo_rdevice; info->lo_offset = info64->lo_offset; info->lo_flags = info64->lo_flags; memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE); /* error in case values were truncated */ if (info->lo_device != info64->lo_device || info->lo_rdevice != info64->lo_rdevice || info->lo_inode != info64->lo_inode || info->lo_offset != info64->lo_offset) return -EOVERFLOW; return 0; } static int loop_set_status_old(struct loop_device *lo, blk_mode_t mode, struct block_device *bdev, const struct loop_info __user *arg) { struct loop_info info; struct loop_info64 info64; if (copy_from_user(&info, arg, sizeof (struct loop_info))) return -EFAULT; loop_info64_from_old(&info, &info64); return loop_set_status(lo, mode, bdev, &info64); } static int loop_set_status64(struct loop_device *lo, blk_mode_t mode, struct block_device *bdev, const struct loop_info64 __user *arg) { struct loop_info64 info64; if (copy_from_user(&info64, arg, sizeof (struct loop_info64))) return -EFAULT; return loop_set_status(lo, mode, bdev, &info64); } static int loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) { struct loop_info info; struct loop_info64 info64; int err; if (!arg) return -EINVAL; err = loop_get_status(lo, &info64); if (!err) err = loop_info64_to_old(&info64, &info); if (!err && copy_to_user(arg, &info, sizeof(info))) err = -EFAULT; return err; } static int loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) { struct loop_info64 info64; int err; if (!arg) return -EINVAL; err = loop_get_status(lo, &info64); if (!err && copy_to_user(arg, &info64, sizeof(info64))) err = -EFAULT; return err; } static int loop_set_capacity(struct loop_device *lo) { loff_t size; if (unlikely(lo->lo_state != Lo_bound)) return -ENXIO; size = lo_calculate_size(lo, lo->lo_backing_file); loop_set_size(lo, size); return 0; } static int loop_set_dio(struct loop_device *lo, unsigned long arg) { bool use_dio = !!arg; unsigned int memflags; if (lo->lo_state != Lo_bound) return -ENXIO; if (use_dio == !!(lo->lo_flags & LO_FLAGS_DIRECT_IO)) return 0; if (use_dio) { if (!lo_can_use_dio(lo)) return -EINVAL; /* flush dirty pages before starting to use direct I/O */ vfs_fsync(lo->lo_backing_file, 0); } memflags = blk_mq_freeze_queue(lo->lo_queue); if (use_dio) lo->lo_flags |= LO_FLAGS_DIRECT_IO; else lo->lo_flags &= ~LO_FLAGS_DIRECT_IO; blk_mq_unfreeze_queue(lo->lo_queue, memflags); return 0; } static int loop_set_block_size(struct loop_device *lo, blk_mode_t mode, struct block_device *bdev, unsigned long arg) { struct 
queue_limits lim; unsigned int memflags; int err = 0; /* * If we don't hold exclusive handle for the device, upgrade to it * here to avoid changing device under exclusive owner. */ if (!(mode & BLK_OPEN_EXCL)) { err = bd_prepare_to_claim(bdev, loop_set_block_size, NULL); if (err) return err; } err = mutex_lock_killable(&lo->lo_mutex); if (err) goto abort_claim; if (lo->lo_state != Lo_bound) { err = -ENXIO; goto unlock; } if (lo->lo_queue->limits.logical_block_size == arg) goto unlock; sync_blockdev(lo->lo_device); invalidate_bdev(lo->lo_device); lim = queue_limits_start_update(lo->lo_queue); loop_update_limits(lo, &lim, arg); memflags = blk_mq_freeze_queue(lo->lo_queue); err = queue_limits_commit_update(lo->lo_queue, &lim); loop_update_dio(lo); blk_mq_unfreeze_queue(lo->lo_queue, memflags); unlock: mutex_unlock(&lo->lo_mutex); abort_claim: if (!(mode & BLK_OPEN_EXCL)) bd_abort_claiming(bdev, loop_set_block_size); return err; } static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd, unsigned long arg) { int err; err = mutex_lock_killable(&lo->lo_mutex); if (err) return err; switch (cmd) { case LOOP_SET_CAPACITY: err = loop_set_capacity(lo); break; case LOOP_SET_DIRECT_IO: err = loop_set_dio(lo, arg); break; default: err = -EINVAL; } mutex_unlock(&lo->lo_mutex); return err; } static int lo_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { struct loop_device *lo = bdev->bd_disk->private_data; void __user *argp = (void __user *) arg; int err; switch (cmd) { case LOOP_SET_FD: { /* * Legacy case - pass in a zeroed out struct loop_config with * only the file descriptor set , which corresponds with the * default parameters we'd have used otherwise. */ struct loop_config config; memset(&config, 0, sizeof(config)); config.fd = arg; return loop_configure(lo, mode, bdev, &config); } case LOOP_CONFIGURE: { struct loop_config config; if (copy_from_user(&config, argp, sizeof(config))) return -EFAULT; return loop_configure(lo, mode, bdev, &config); } case LOOP_CHANGE_FD: return loop_change_fd(lo, bdev, arg); case LOOP_CLR_FD: return loop_clr_fd(lo); case LOOP_SET_STATUS: err = -EPERM; if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN)) err = loop_set_status_old(lo, mode, bdev, argp); break; case LOOP_GET_STATUS: return loop_get_status_old(lo, argp); case LOOP_SET_STATUS64: err = -EPERM; if ((mode & BLK_OPEN_WRITE) || capable(CAP_SYS_ADMIN)) err = loop_set_status64(lo, mode, bdev, argp); break; case LOOP_GET_STATUS64: return loop_get_status64(lo, argp); case LOOP_SET_BLOCK_SIZE: if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN)) return -EPERM; return loop_set_block_size(lo, mode, bdev, arg); case LOOP_SET_CAPACITY: case LOOP_SET_DIRECT_IO: if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN)) return -EPERM; fallthrough; default: err = lo_simple_ioctl(lo, cmd, arg); break; } return err; } #ifdef CONFIG_COMPAT struct compat_loop_info { compat_int_t lo_number; /* ioctl r/o */ compat_dev_t lo_device; /* ioctl r/o */ compat_ulong_t lo_inode; /* ioctl r/o */ compat_dev_t lo_rdevice; /* ioctl r/o */ compat_int_t lo_offset; compat_int_t lo_encrypt_type; /* obsolete, ignored */ compat_int_t lo_encrypt_key_size; /* ioctl w/o */ compat_int_t lo_flags; /* ioctl r/o */ char lo_name[LO_NAME_SIZE]; unsigned char lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */ compat_ulong_t lo_init[2]; char reserved[4]; }; /* * Transfer 32-bit compatibility structure in userspace to 64-bit loop info * - noinlined to reduce stack space usage in main part of driver */ static 
noinline int loop_info64_from_compat(const struct compat_loop_info __user *arg, struct loop_info64 *info64) { struct compat_loop_info info; if (copy_from_user(&info, arg, sizeof(info))) return -EFAULT; memset(info64, 0, sizeof(*info64)); info64->lo_number = info.lo_number; info64->lo_device = info.lo_device; info64->lo_inode = info.lo_inode; info64->lo_rdevice = info.lo_rdevice; info64->lo_offset = info.lo_offset; info64->lo_sizelimit = 0; info64->lo_flags = info.lo_flags; memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE); return 0; } /* * Transfer 64-bit loop info to 32-bit compatibility structure in userspace * - noinlined to reduce stack space usage in main part of driver */ static noinline int loop_info64_to_compat(const struct loop_info64 *info64, struct compat_loop_info __user *arg) { struct compat_loop_info info; memset(&info, 0, sizeof(info)); info.lo_number = info64->lo_number; info.lo_device = info64->lo_device; info.lo_inode = info64->lo_inode; info.lo_rdevice = info64->lo_rdevice; info.lo_offset = info64->lo_offset; info.lo_flags = info64->lo_flags; memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE); /* error in case values were truncated */ if (info.lo_device != info64->lo_device || info.lo_rdevice != info64->lo_rdevice || info.lo_inode != info64->lo_inode || info.lo_offset != info64->lo_offset) return -EOVERFLOW; if (copy_to_user(arg, &info, sizeof(info))) return -EFAULT; return 0; } static int loop_set_status_compat(struct loop_device *lo, blk_mode_t mode, struct block_device *bdev, const struct compat_loop_info __user *arg) { struct loop_info64 info64; int ret; ret = loop_info64_from_compat(arg, &info64); if (ret < 0) return ret; return loop_set_status(lo, mode, bdev, &info64); } static int loop_get_status_compat(struct loop_device *lo, struct compat_loop_info __user *arg) { struct loop_info64 info64; int err; if (!arg) return -EINVAL; err = loop_get_status(lo, &info64); if (!err) err = loop_info64_to_compat(&info64, arg); return err; } static int lo_compat_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { struct loop_device *lo = bdev->bd_disk->private_data; int err; switch(cmd) { case LOOP_SET_STATUS: err = loop_set_status_compat(lo, mode, bdev, (const struct compat_loop_info __user *)arg); break; case LOOP_GET_STATUS: err = loop_get_status_compat(lo, (struct compat_loop_info __user *)arg); break; case LOOP_SET_CAPACITY: case LOOP_CLR_FD: case LOOP_GET_STATUS64: case LOOP_SET_STATUS64: case LOOP_CONFIGURE: arg = (unsigned long) compat_ptr(arg); fallthrough; case LOOP_SET_FD: case LOOP_CHANGE_FD: case LOOP_SET_BLOCK_SIZE: case LOOP_SET_DIRECT_IO: err = lo_ioctl(bdev, mode, cmd, arg); break; default: err = -ENOIOCTLCMD; break; } return err; } #endif static int lo_open(struct gendisk *disk, blk_mode_t mode) { struct loop_device *lo = disk->private_data; int err; err = mutex_lock_killable(&lo->lo_mutex); if (err) return err; if (lo->lo_state == Lo_deleting || lo->lo_state == Lo_rundown) err = -ENXIO; mutex_unlock(&lo->lo_mutex); return err; } static void lo_release(struct gendisk *disk) { struct loop_device *lo = disk->private_data; bool need_clear = false; if (disk_openers(disk) > 0) return; /* * Clear the backing device information if this is the last close of * a device that's been marked for auto clear, or on which LOOP_CLR_FD * has been called. 
*/ mutex_lock(&lo->lo_mutex); if (lo->lo_state == Lo_bound && (lo->lo_flags & LO_FLAGS_AUTOCLEAR)) WRITE_ONCE(lo->lo_state, Lo_rundown); need_clear = (lo->lo_state == Lo_rundown); mutex_unlock(&lo->lo_mutex); if (need_clear) __loop_clr_fd(lo); } static void lo_free_disk(struct gendisk *disk) { struct loop_device *lo = disk->private_data; if (lo->workqueue) destroy_workqueue(lo->workqueue); loop_free_idle_workers(lo, true); timer_shutdown_sync(&lo->timer); mutex_destroy(&lo->lo_mutex); kfree(lo); } static const struct block_device_operations lo_fops = { .owner = THIS_MODULE, .open = lo_open, .release = lo_release, .ioctl = lo_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = lo_compat_ioctl, #endif .free_disk = lo_free_disk, }; /* * And now the modules code and kernel interface. */ /* * If max_loop is specified, create that many devices upfront. * This also becomes a hard limit. If max_loop is not specified, * the default isn't a hard limit (as before commit 85c50197716c * changed the default value from 0 for max_loop=0 reasons), just * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module * init time. Loop devices can be requested on-demand with the * /dev/loop-control interface, or be instantiated by accessing * a 'dead' device node. */ static int max_loop = CONFIG_BLK_DEV_LOOP_MIN_COUNT; #ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD static bool max_loop_specified; static int max_loop_param_set_int(const char *val, const struct kernel_param *kp) { int ret; ret = param_set_int(val, kp); if (ret < 0) return ret; max_loop_specified = true; return 0; } static const struct kernel_param_ops max_loop_param_ops = { .set = max_loop_param_set_int, .get = param_get_int, }; module_param_cb(max_loop, &max_loop_param_ops, &max_loop, 0444); MODULE_PARM_DESC(max_loop, "Maximum number of loop devices"); #else module_param(max_loop, int, 0444); MODULE_PARM_DESC(max_loop, "Initial number of loop devices"); #endif module_param(max_part, int, 0444); MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device"); static int hw_queue_depth = LOOP_DEFAULT_HW_Q_DEPTH; static int loop_set_hw_queue_depth(const char *s, const struct kernel_param *p) { int qd, ret; ret = kstrtoint(s, 0, &qd); if (ret < 0) return ret; if (qd < 1) return -EINVAL; hw_queue_depth = qd; return 0; } static const struct kernel_param_ops loop_hw_qdepth_param_ops = { .set = loop_set_hw_queue_depth, .get = param_get_int, }; device_param_cb(hw_queue_depth, &loop_hw_qdepth_param_ops, &hw_queue_depth, 0444); MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. 
Default: " __stringify(LOOP_DEFAULT_HW_Q_DEPTH)); MODULE_DESCRIPTION("Loopback device support"); MODULE_LICENSE("GPL"); MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR); static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { struct request *rq = bd->rq; struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq); struct loop_device *lo = rq->q->queuedata; blk_mq_start_request(rq); if (data_race(READ_ONCE(lo->lo_state)) != Lo_bound) return BLK_STS_IOERR; switch (req_op(rq)) { case REQ_OP_FLUSH: case REQ_OP_DISCARD: case REQ_OP_WRITE_ZEROES: cmd->use_aio = false; break; default: cmd->use_aio = lo->lo_flags & LO_FLAGS_DIRECT_IO; break; } /* always use the first bio's css */ cmd->blkcg_css = NULL; cmd->memcg_css = NULL; #ifdef CONFIG_BLK_CGROUP if (rq->bio) { cmd->blkcg_css = bio_blkcg_css(rq->bio); #ifdef CONFIG_MEMCG if (cmd->blkcg_css) { cmd->memcg_css = cgroup_get_e_css(cmd->blkcg_css->cgroup, &memory_cgrp_subsys); } #endif } #endif loop_queue_work(lo, cmd); return BLK_STS_OK; } static void loop_handle_cmd(struct loop_cmd *cmd) { struct cgroup_subsys_state *cmd_blkcg_css = cmd->blkcg_css; struct cgroup_subsys_state *cmd_memcg_css = cmd->memcg_css; struct request *rq = blk_mq_rq_from_pdu(cmd); const bool write = op_is_write(req_op(rq)); struct loop_device *lo = rq->q->queuedata; int ret = 0; struct mem_cgroup *old_memcg = NULL; if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) { ret = -EIO; goto failed; } /* We can block in this context, so ignore REQ_NOWAIT. */ if (rq->cmd_flags & REQ_NOWAIT) rq->cmd_flags &= ~REQ_NOWAIT; if (cmd_blkcg_css) kthread_associate_blkcg(cmd_blkcg_css); if (cmd_memcg_css) old_memcg = set_active_memcg( mem_cgroup_from_css(cmd_memcg_css)); /* * do_req_filebacked() may call blk_mq_complete_request() synchronously * or asynchronously if using aio. Hence, do not touch 'cmd' after * do_req_filebacked() has returned unless we are sure that 'cmd' has * not yet been completed. */ ret = do_req_filebacked(lo, rq); if (cmd_blkcg_css) kthread_associate_blkcg(NULL); if (cmd_memcg_css) { set_active_memcg(old_memcg); css_put(cmd_memcg_css); } failed: /* complete non-aio request */ if (ret != -EIOCBQUEUED) { if (ret == -EOPNOTSUPP) cmd->ret = ret; else cmd->ret = ret ? 
-EIO : 0; if (likely(!blk_should_fake_timeout(rq->q))) blk_mq_complete_request(rq); } } static void loop_process_work(struct loop_worker *worker, struct list_head *cmd_list, struct loop_device *lo) { int orig_flags = current->flags; struct loop_cmd *cmd; current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO; spin_lock_irq(&lo->lo_work_lock); while (!list_empty(cmd_list)) { cmd = container_of( cmd_list->next, struct loop_cmd, list_entry); list_del(cmd_list->next); spin_unlock_irq(&lo->lo_work_lock); loop_handle_cmd(cmd); cond_resched(); spin_lock_irq(&lo->lo_work_lock); } /* * We only add to the idle list if there are no pending cmds * *and* the worker will not run again which ensures that it * is safe to free any worker on the idle list */ if (worker && !work_pending(&worker->work)) { worker->last_ran_at = jiffies; list_add_tail(&worker->idle_list, &lo->idle_worker_list); loop_set_timer(lo); } spin_unlock_irq(&lo->lo_work_lock); current->flags = orig_flags; } static void loop_workfn(struct work_struct *work) { struct loop_worker *worker = container_of(work, struct loop_worker, work); loop_process_work(worker, &worker->cmd_list, worker->lo); } static void loop_rootcg_workfn(struct work_struct *work) { struct loop_device *lo = container_of(work, struct loop_device, rootcg_work); loop_process_work(NULL, &lo->rootcg_cmd_list, lo); } static const struct blk_mq_ops loop_mq_ops = { .queue_rq = loop_queue_rq, .complete = lo_complete_rq, }; static int loop_add(int i) { struct queue_limits lim = { /* * Random number picked from the historic block max_sectors cap. */ .max_hw_sectors = 2560u, }; struct loop_device *lo; struct gendisk *disk; int err; err = -ENOMEM; lo = kzalloc(sizeof(*lo), GFP_KERNEL); if (!lo) goto out; lo->worker_tree = RB_ROOT; INIT_LIST_HEAD(&lo->idle_worker_list); timer_setup(&lo->timer, loop_free_idle_workers_timer, TIMER_DEFERRABLE); WRITE_ONCE(lo->lo_state, Lo_unbound); err = mutex_lock_killable(&loop_ctl_mutex); if (err) goto out_free_dev; /* allocate id, if @id >= 0, we're requesting that specific id */ if (i >= 0) { err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL); if (err == -ENOSPC) err = -EEXIST; } else { err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL); } mutex_unlock(&loop_ctl_mutex); if (err < 0) goto out_free_dev; i = err; lo->tag_set.ops = &loop_mq_ops; lo->tag_set.nr_hw_queues = 1; lo->tag_set.queue_depth = hw_queue_depth; lo->tag_set.numa_node = NUMA_NO_NODE; lo->tag_set.cmd_size = sizeof(struct loop_cmd); lo->tag_set.flags = BLK_MQ_F_STACKING | BLK_MQ_F_NO_SCHED_BY_DEFAULT; lo->tag_set.driver_data = lo; err = blk_mq_alloc_tag_set(&lo->tag_set); if (err) goto out_free_idr; disk = lo->lo_disk = blk_mq_alloc_disk(&lo->tag_set, &lim, lo); if (IS_ERR(disk)) { err = PTR_ERR(disk); goto out_cleanup_tags; } lo->lo_queue = lo->lo_disk->queue; /* * Disable partition scanning by default. The in-kernel partition * scanning can be requested individually per-device during its * setup. Userspace can always add and remove partitions from all * devices. The needed partition minors are allocated from the * extended minor space, the main loop device numbers will continue * to match the loop minors, regardless of the number of partitions * used. * * If max_part is given, partition scanning is globally enabled for * all loop devices. The minors for the main loop devices will be * multiples of max_part. 
* * Note: Global-for-all-devices, set-only-at-init, read-only module * parameteters like 'max_loop' and 'max_part' make things needlessly * complicated, are too static, inflexible and may surprise * userspace tools. Parameters like this in general should be avoided. */ if (!part_shift) set_bit(GD_SUPPRESS_PART_SCAN, &disk->state); mutex_init(&lo->lo_mutex); lo->lo_number = i; spin_lock_init(&lo->lo_lock); spin_lock_init(&lo->lo_work_lock); INIT_WORK(&lo->rootcg_work, loop_rootcg_workfn); INIT_LIST_HEAD(&lo->rootcg_cmd_list); disk->major = LOOP_MAJOR; disk->first_minor = i << part_shift; disk->minors = 1 << part_shift; disk->fops = &lo_fops; disk->private_data = lo; disk->queue = lo->lo_queue; disk->events = DISK_EVENT_MEDIA_CHANGE; disk->event_flags = DISK_EVENT_FLAG_UEVENT; sprintf(disk->disk_name, "loop%d", i); /* Make this loop device reachable from pathname. */ err = add_disk(disk); if (err) goto out_cleanup_disk; /* Show this loop device. */ mutex_lock(&loop_ctl_mutex); lo->idr_visible = true; mutex_unlock(&loop_ctl_mutex); return i; out_cleanup_disk: put_disk(disk); out_cleanup_tags: blk_mq_free_tag_set(&lo->tag_set); out_free_idr: mutex_lock(&loop_ctl_mutex); idr_remove(&loop_index_idr, i); mutex_unlock(&loop_ctl_mutex); out_free_dev: kfree(lo); out: return err; } static void loop_remove(struct loop_device *lo) { /* Make this loop device unreachable from pathname. */ del_gendisk(lo->lo_disk); blk_mq_free_tag_set(&lo->tag_set); mutex_lock(&loop_ctl_mutex); idr_remove(&loop_index_idr, lo->lo_number); mutex_unlock(&loop_ctl_mutex); put_disk(lo->lo_disk); } #ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD static void loop_probe(dev_t dev) { int idx = MINOR(dev) >> part_shift; if (max_loop_specified && max_loop && idx >= max_loop) return; loop_add(idx); } #else #define loop_probe NULL #endif /* !CONFIG_BLOCK_LEGACY_AUTOLOAD */ static int loop_control_remove(int idx) { struct loop_device *lo; int ret; if (idx < 0) { pr_warn_once("deleting an unspecified loop device is not supported.\n"); return -EINVAL; } /* Hide this loop device for serialization. */ ret = mutex_lock_killable(&loop_ctl_mutex); if (ret) return ret; lo = idr_find(&loop_index_idr, idx); if (!lo || !lo->idr_visible) ret = -ENODEV; else lo->idr_visible = false; mutex_unlock(&loop_ctl_mutex); if (ret) return ret; /* Check whether this loop device can be removed. */ ret = mutex_lock_killable(&lo->lo_mutex); if (ret) goto mark_visible; if (lo->lo_state != Lo_unbound || disk_openers(lo->lo_disk) > 0) { mutex_unlock(&lo->lo_mutex); ret = -EBUSY; goto mark_visible; } /* Mark this loop device as no more bound, but not quite unbound yet */ WRITE_ONCE(lo->lo_state, Lo_deleting); mutex_unlock(&lo->lo_mutex); loop_remove(lo); return 0; mark_visible: /* Show this loop device again. */ mutex_lock(&loop_ctl_mutex); lo->idr_visible = true; mutex_unlock(&loop_ctl_mutex); return ret; } static int loop_control_get_free(int idx) { struct loop_device *lo; int id, ret; ret = mutex_lock_killable(&loop_ctl_mutex); if (ret) return ret; idr_for_each_entry(&loop_index_idr, lo, id) { /* * Hitting a race results in creating a new loop device * which is harmless. 
*/ if (lo->idr_visible && data_race(READ_ONCE(lo->lo_state)) == Lo_unbound) goto found; } mutex_unlock(&loop_ctl_mutex); return loop_add(-1); found: mutex_unlock(&loop_ctl_mutex); return id; } static long loop_control_ioctl(struct file *file, unsigned int cmd, unsigned long parm) { switch (cmd) { case LOOP_CTL_ADD: return loop_add(parm); case LOOP_CTL_REMOVE: return loop_control_remove(parm); case LOOP_CTL_GET_FREE: return loop_control_get_free(parm); default: return -ENOSYS; } } static const struct file_operations loop_ctl_fops = { .open = nonseekable_open, .unlocked_ioctl = loop_control_ioctl, .compat_ioctl = loop_control_ioctl, .owner = THIS_MODULE, .llseek = noop_llseek, }; static struct miscdevice loop_misc = { .minor = LOOP_CTRL_MINOR, .name = "loop-control", .fops = &loop_ctl_fops, }; MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR); MODULE_ALIAS("devname:loop-control"); static int __init loop_init(void) { int i; int err; part_shift = 0; if (max_part > 0) { part_shift = fls(max_part); /* * Adjust max_part according to part_shift as it is exported * to user space so that user can decide correct minor number * if [s]he want to create more devices. * * Note that -1 is required because partition 0 is reserved * for the whole disk. */ max_part = (1UL << part_shift) - 1; } if ((1UL << part_shift) > DISK_MAX_PARTS) { err = -EINVAL; goto err_out; } if (max_loop > 1UL << (MINORBITS - part_shift)) { err = -EINVAL; goto err_out; } err = misc_register(&loop_misc); if (err < 0) goto err_out; if (__register_blkdev(LOOP_MAJOR, "loop", loop_probe)) { err = -EIO; goto misc_out; } /* pre-create number of devices given by config or max_loop */ for (i = 0; i < max_loop; i++) loop_add(i); printk(KERN_INFO "loop: module loaded\n"); return 0; misc_out: misc_deregister(&loop_misc); err_out: return err; } static void __exit loop_exit(void) { struct loop_device *lo; int id; unregister_blkdev(LOOP_MAJOR, "loop"); misc_deregister(&loop_misc); /* * There is no need to use loop_ctl_mutex here, for nobody else can * access loop_index_idr when this module is unloading (unless forced * module unloading is requested). If this is not a clean unloading, * we have no means to avoid kernel crash. */ idr_for_each_entry(&loop_index_idr, lo, id) loop_remove(lo); idr_destroy(&loop_index_idr); } module_init(loop_init); module_exit(loop_exit); #ifndef MODULE static int __init max_loop_setup(char *str) { max_loop = simple_strtol(str, NULL, 0); #ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD max_loop_specified = true; #endif return 1; } __setup("max_loop=", max_loop_setup); #endif
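The ioctl plumbing above is easiest to see end to end from userspace: ask /dev/loop-control for a free minor with LOOP_CTL_GET_FREE, then bind a backing file to the resulting /dev/loopN with LOOP_CONFIGURE. The following is a minimal illustrative sketch, not part of the driver; error handling is abbreviated and the backing file name "disk.img" is a placeholder.

/* Hypothetical userspace sketch of the LOOP_CTL_GET_FREE + LOOP_CONFIGURE
 * path served by loop_control_ioctl() and lo_ioctl() above. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/loop.h>

int main(void)
{
	int ctl = open("/dev/loop-control", O_RDWR);
	int idx = ioctl(ctl, LOOP_CTL_GET_FREE);	/* allocate or reuse a free minor */
	char dev[32];
	int lo, backing;
	struct loop_config cfg;

	snprintf(dev, sizeof(dev), "/dev/loop%d", idx);
	lo = open(dev, O_RDWR);
	backing = open("disk.img", O_RDWR);		/* placeholder backing file */

	memset(&cfg, 0, sizeof(cfg));
	cfg.fd = backing;
	cfg.block_size = 4096;				/* logical block size, validated by the driver */
	cfg.info.lo_flags = LO_FLAGS_AUTOCLEAR;		/* detach on last close */

	if (ioctl(lo, LOOP_CONFIGURE, &cfg) < 0)	/* bind file and set status in one call */
		perror("LOOP_CONFIGURE");

	close(backing);
	close(lo);
	close(ctl);
	return 0;
}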
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_JUMP_LABEL_H
#define _ASM_X86_JUMP_LABEL_H

#define HAVE_JUMP_LABEL_BATCH

#include <asm/asm.h>
#include <asm/nops.h>

#ifndef __ASSEMBLER__

#include <linux/stringify.h>
#include <linux/types.h>

#define JUMP_TABLE_ENTRY(key, label)			\
	".pushsection __jump_table, \"aw\" \n\t"	\
	_ASM_ALIGN "\n\t"				\
	ANNOTATE_DATA_SPECIAL "\n"			\
	".long 1b - . \n\t"				\
	".long " label " - . \n\t"			\
	_ASM_PTR " " key " - . \n\t"			\
	".popsection \n\t"

/* This macro is also expanded on the Rust side. */
#ifdef CONFIG_HAVE_JUMP_LABEL_HACK

#define ARCH_STATIC_BRANCH_ASM(key, label)		\
	"1: jmp " label " # objtool NOPs this \n\t"	\
	JUMP_TABLE_ENTRY(key " + 2", label)

#else /* !CONFIG_HAVE_JUMP_LABEL_HACK */

#define ARCH_STATIC_BRANCH_ASM(key, label)		\
	"1: .byte " __stringify(BYTES_NOP5) "\n\t"	\
	JUMP_TABLE_ENTRY(key, label)

#endif /* CONFIG_HAVE_JUMP_LABEL_HACK */

static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch)
{
	asm goto(ARCH_STATIC_BRANCH_ASM("%c0 + %c1", "%l[l_yes]")
		 : : "i" (key), "i" (branch) : : l_yes);

	return false;

l_yes:
	return true;
}

static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch)
{
	asm goto("1:"
		 "jmp %l[l_yes]\n\t"
		 JUMP_TABLE_ENTRY("%c0 + %c1", "%l[l_yes]")
		 : : "i" (key), "i" (branch) : : l_yes);

	return false;

l_yes:
	return true;
}

extern int arch_jump_entry_size(struct jump_entry *entry);

#endif /* __ASSEMBLER__ */

#endif
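As a hedged illustration of how the macros above are consumed (the key and function names below are invented for the example): generic code declares a static key and branches on it through the static_branch helpers, which expand to arch_static_branch()/arch_static_branch_jump(); the branch site is the 5-byte NOP emitted by ARCH_STATIC_BRANCH_ASM until the key is enabled and the site is patched to a jump.

/* Illustrative only: a hypothetical consumer of the static-branch machinery. */
#include <linux/jump_label.h>
#include <linux/printk.h>

static DEFINE_STATIC_KEY_FALSE(example_feature_enabled);

void example_hot_path(void)
{
	/* Compiles to a NOP5 while the key is false; patched to a jump
	 * once the key is enabled. */
	if (static_branch_unlikely(&example_feature_enabled))
		pr_info("example feature hook hit\n");
}

void example_enable_feature(void)
{
	static_branch_enable(&example_feature_enabled);	/* patches every branch site */
}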
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Ceph cache definitions.
 *
 * Copyright (C) 2013 by Adfin Solutions, Inc. All Rights Reserved.
 * Written by Milosz Tanski (milosz@adfin.com)
 */

#include <linux/ceph/ceph_debug.h>
#include <linux/fs_context.h>
#include "super.h"
#include "cache.h"

void ceph_fscache_register_inode_cookie(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);

	/* No caching for filesystem? */
	if (!fsc->fscache)
		return;

	/* Regular files only */
	if (!S_ISREG(inode->i_mode))
		return;

	/* Only new inodes! */
	if (!(inode_state_read_once(inode) & I_NEW))
		return;

	WARN_ON_ONCE(ci->netfs.cache);

	ci->netfs.cache =
		fscache_acquire_cookie(fsc->fscache, 0,
				       &ci->i_vino, sizeof(ci->i_vino),
				       &ci->i_version, sizeof(ci->i_version),
				       i_size_read(inode));

	if (ci->netfs.cache)
		mapping_set_release_always(inode->i_mapping);
}

void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info *ci)
{
	fscache_relinquish_cookie(ceph_fscache_cookie(ci), false);
}

void ceph_fscache_use_cookie(struct inode *inode, bool will_modify)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	fscache_use_cookie(ceph_fscache_cookie(ci), will_modify);
}

void ceph_fscache_unuse_cookie(struct inode *inode, bool update)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (update) {
		loff_t i_size = i_size_read(inode);

		fscache_unuse_cookie(ceph_fscache_cookie(ci),
				     &ci->i_version, &i_size);
	} else {
		fscache_unuse_cookie(ceph_fscache_cookie(ci), NULL, NULL);
	}
}

void ceph_fscache_update(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	loff_t i_size = i_size_read(inode);

	fscache_update_cookie(ceph_fscache_cookie(ci), &ci->i_version, &i_size);
}

void ceph_fscache_invalidate(struct inode *inode, bool dio_write)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	fscache_invalidate(ceph_fscache_cookie(ci), &ci->i_version,
			   i_size_read(inode),
			   dio_write ? FSCACHE_INVAL_DIO_WRITE : 0);
}

int ceph_fscache_register_fs(struct ceph_fs_client* fsc, struct fs_context *fc)
{
	const struct ceph_fsid *fsid = &fsc->client->fsid;
	const char *fscache_uniq = fsc->mount_options->fscache_uniq;
	size_t uniq_len = fscache_uniq ? strlen(fscache_uniq) : 0;
	char *name;
	int err = 0;

	name = kasprintf(GFP_KERNEL, "ceph,%pU%s%s", fsid, uniq_len ? "," : "",
			 uniq_len ? fscache_uniq : "");
	if (!name)
		return -ENOMEM;

	fsc->fscache = fscache_acquire_volume(name, NULL, NULL, 0);
	if (IS_ERR_OR_NULL(fsc->fscache)) {
		errorfc(fc, "Unable to register fscache cookie for %s", name);
		err = fsc->fscache ? PTR_ERR(fsc->fscache) : -EOPNOTSUPP;
		fsc->fscache = NULL;
	}
	kfree(name);
	return err;
}

void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc)
{
	fscache_relinquish_volume(fsc->fscache, NULL, false);
}
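A brief sketch of how the helpers above are typically bracketed around cached I/O; the function below is hypothetical, assumes the declarations from super.h and cache.h, and its elided body stands in for the real netfs read/write path.

/* Hypothetical caller inside fs/ceph: pin the cookie for a modification,
 * then release it so fscache records the new size and version. */
#include "super.h"
#include "cache.h"

static void example_cached_write(struct inode *inode)
{
	ceph_fscache_use_cookie(inode, true);	/* will_modify = true */

	/* ... buffered write through the netfs library ... */

	ceph_fscache_unuse_cookie(inode, true);	/* update coherency data */
}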
// SPDX-License-Identifier: GPL-2.0-only /* * Simplified MAC Kernel (smack) security module * * This file contains the smack hook function implementations. * * Authors: * Casey Schaufler <casey@schaufler-ca.com> * Jarkko Sakkinen <jarkko.sakkinen@intel.com> * * Copyright (C) 2007 Casey Schaufler <casey@schaufler-ca.com> * Copyright (C) 2009 Hewlett-Packard Development Company, L.P. * Paul Moore <paul@paul-moore.com> * Copyright (C) 2010 Nokia Corporation * Copyright (C) 2011 Intel Corporation. */ #include <linux/xattr.h> #include <linux/pagemap.h> #include <linux/mount.h> #include <linux/stat.h> #include <linux/kd.h> #include <asm/ioctls.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/icmpv6.h> #include <linux/slab.h> #include <linux/mutex.h> #include <net/cipso_ipv4.h> #include <net/ip.h> #include <net/ipv6.h> #include <linux/audit.h> #include <linux/magic.h> #include <linux/dcache.h> #include <linux/personality.h> #include <linux/msg.h> #include <linux/shm.h> #include <uapi/linux/shm.h> #include <linux/binfmts.h> #include <linux/parser.h> #include <linux/fs_context.h> #include <linux/fs_parser.h> #include <linux/watch_queue.h> #include <linux/io_uring/cmd.h> #include <uapi/linux/lsm.h> #include "smack.h" #define TRANS_TRUE "TRUE" #define TRANS_TRUE_SIZE 4 #define SMK_CONNECTING 0 #define SMK_RECEIVING 1 #define SMK_SENDING 2 /* * Smack uses multiple xattrs.
* SMACK64 - for access control, * SMACK64TRANSMUTE - label initialization, * Not saved on files - SMACK64IPIN and SMACK64IPOUT, * Must be set explicitly - SMACK64EXEC and SMACK64MMAP */ #define SMACK_INODE_INIT_XATTRS 2 #ifdef SMACK_IPV6_PORT_LABELING static DEFINE_MUTEX(smack_ipv6_lock); static LIST_HEAD(smk_ipv6_port_list); #endif struct kmem_cache *smack_rule_cache; int smack_enabled __initdata; #define A(s) {"smack"#s, sizeof("smack"#s) - 1, Opt_##s} static struct { const char *name; int len; int opt; } smk_mount_opts[] = { {"smackfsdef", sizeof("smackfsdef") - 1, Opt_fsdefault}, A(fsdefault), A(fsfloor), A(fshat), A(fsroot), A(fstransmute) }; #undef A static int match_opt_prefix(char *s, int l, char **arg) { int i; for (i = 0; i < ARRAY_SIZE(smk_mount_opts); i++) { size_t len = smk_mount_opts[i].len; if (len > l || memcmp(s, smk_mount_opts[i].name, len)) continue; if (len == l || s[len] != '=') continue; *arg = s + len + 1; return smk_mount_opts[i].opt; } return Opt_error; } #ifdef CONFIG_SECURITY_SMACK_BRINGUP static char *smk_bu_mess[] = { "Bringup Error", /* Unused */ "Bringup", /* SMACK_BRINGUP_ALLOW */ "Unconfined Subject", /* SMACK_UNCONFINED_SUBJECT */ "Unconfined Object", /* SMACK_UNCONFINED_OBJECT */ }; static void smk_bu_mode(int mode, char *s) { smack_str_from_perm(s, mode); } #endif #ifdef CONFIG_SECURITY_SMACK_BRINGUP static int smk_bu_note(char *note, struct smack_known *sskp, struct smack_known *oskp, int mode, int rc) { char acc[SMK_NUM_ACCESS_TYPE + 1]; if (rc <= 0) return rc; if (rc > SMACK_UNCONFINED_OBJECT) rc = 0; smk_bu_mode(mode, acc); pr_info("Smack %s: (%s %s %s) %s\n", smk_bu_mess[rc], sskp->smk_known, oskp->smk_known, acc, note); return 0; } #else #define smk_bu_note(note, sskp, oskp, mode, RC) (RC) #endif #ifdef CONFIG_SECURITY_SMACK_BRINGUP static int smk_bu_current(char *note, struct smack_known *oskp, int mode, int rc) { struct task_smack *tsp = smack_cred(current_cred()); char acc[SMK_NUM_ACCESS_TYPE + 1]; if (rc <= 0) return rc; if (rc > SMACK_UNCONFINED_OBJECT) rc = 0; smk_bu_mode(mode, acc); pr_info("Smack %s: (%s %s %s) %s %s\n", smk_bu_mess[rc], tsp->smk_task->smk_known, oskp->smk_known, acc, current->comm, note); return 0; } #else #define smk_bu_current(note, oskp, mode, RC) (RC) #endif #ifdef CONFIG_SECURITY_SMACK_BRINGUP static int smk_bu_task(struct task_struct *otp, int mode, int rc) { struct task_smack *tsp = smack_cred(current_cred()); struct smack_known *smk_task = smk_of_task_struct_obj(otp); char acc[SMK_NUM_ACCESS_TYPE + 1]; if (rc <= 0) return rc; if (rc > SMACK_UNCONFINED_OBJECT) rc = 0; smk_bu_mode(mode, acc); pr_info("Smack %s: (%s %s %s) %s to %s\n", smk_bu_mess[rc], tsp->smk_task->smk_known, smk_task->smk_known, acc, current->comm, otp->comm); return 0; } #else #define smk_bu_task(otp, mode, RC) (RC) #endif #ifdef CONFIG_SECURITY_SMACK_BRINGUP static int smk_bu_inode(struct inode *inode, int mode, int rc) { struct task_smack *tsp = smack_cred(current_cred()); struct inode_smack *isp = smack_inode(inode); char acc[SMK_NUM_ACCESS_TYPE + 1]; if (isp->smk_flags & SMK_INODE_IMPURE) pr_info("Smack Unconfined Corruption: inode=(%s %ld) %s\n", inode->i_sb->s_id, inode->i_ino, current->comm); if (rc <= 0) return rc; if (rc > SMACK_UNCONFINED_OBJECT) rc = 0; if (rc == SMACK_UNCONFINED_SUBJECT && (mode & (MAY_WRITE | MAY_APPEND))) isp->smk_flags |= SMK_INODE_IMPURE; smk_bu_mode(mode, acc); pr_info("Smack %s: (%s %s %s) inode=(%s %ld) %s\n", smk_bu_mess[rc], tsp->smk_task->smk_known, isp->smk_inode->smk_known, acc, inode->i_sb->s_id, 
inode->i_ino, current->comm); return 0; } #else #define smk_bu_inode(inode, mode, RC) (RC) #endif #ifdef CONFIG_SECURITY_SMACK_BRINGUP static int smk_bu_file(struct file *file, int mode, int rc) { struct task_smack *tsp = smack_cred(current_cred()); struct smack_known *sskp = tsp->smk_task; struct inode *inode = file_inode(file); struct inode_smack *isp = smack_inode(inode); char acc[SMK_NUM_ACCESS_TYPE + 1]; if (isp->smk_flags & SMK_INODE_IMPURE) pr_info("Smack Unconfined Corruption: inode=(%s %ld) %s\n", inode->i_sb->s_id, inode->i_ino, current->comm); if (rc <= 0) return rc; if (rc > SMACK_UNCONFINED_OBJECT) rc = 0; smk_bu_mode(mode, acc); pr_info("Smack %s: (%s %s %s) file=(%s %ld %pD) %s\n", smk_bu_mess[rc], sskp->smk_known, smk_of_inode(inode)->smk_known, acc, inode->i_sb->s_id, inode->i_ino, file, current->comm); return 0; } #else #define smk_bu_file(file, mode, RC) (RC) #endif #ifdef CONFIG_SECURITY_SMACK_BRINGUP static int smk_bu_credfile(const struct cred *cred, struct file *file, int mode, int rc) { struct task_smack *tsp = smack_cred(cred); struct smack_known *sskp = tsp->smk_task; struct inode *inode = file_inode(file); struct inode_smack *isp = smack_inode(inode); char acc[SMK_NUM_ACCESS_TYPE + 1]; if (isp->smk_flags & SMK_INODE_IMPURE) pr_info("Smack Unconfined Corruption: inode=(%s %ld) %s\n", inode->i_sb->s_id, inode->i_ino, current->comm); if (rc <= 0) return rc; if (rc > SMACK_UNCONFINED_OBJECT) rc = 0; smk_bu_mode(mode, acc); pr_info("Smack %s: (%s %s %s) file=(%s %ld %pD) %s\n", smk_bu_mess[rc], sskp->smk_known, smk_of_inode(inode)->smk_known, acc, inode->i_sb->s_id, inode->i_ino, file, current->comm); return 0; } #else #define smk_bu_credfile(cred, file, mode, RC) (RC) #endif /** * smk_fetch - Fetch the smack label from a file. * @name: type of the label (attribute) * @ip: a pointer to the inode * @dp: a pointer to the dentry * * Returns a pointer to the master list entry for the Smack label, * NULL if there was no label to fetch, or an error code. 
*/ static struct smack_known *smk_fetch(const char *name, struct inode *ip, struct dentry *dp) { int rc; char *buffer; struct smack_known *skp = NULL; if (!(ip->i_opflags & IOP_XATTR)) return ERR_PTR(-EOPNOTSUPP); buffer = kzalloc(SMK_LONGLABEL, GFP_NOFS); if (buffer == NULL) return ERR_PTR(-ENOMEM); rc = __vfs_getxattr(dp, ip, name, buffer, SMK_LONGLABEL); if (rc < 0) skp = ERR_PTR(rc); else if (rc == 0) skp = NULL; else skp = smk_import_entry(buffer, rc); kfree(buffer); return skp; } /** * init_inode_smack - initialize an inode security blob * @inode: inode to extract the info from * @skp: a pointer to the Smack label entry to use in the blob * */ static void init_inode_smack(struct inode *inode, struct smack_known *skp) { struct inode_smack *isp = smack_inode(inode); isp->smk_inode = skp; isp->smk_flags = 0; } /** * init_task_smack - initialize a task security blob * @tsp: blob to initialize * @task: a pointer to the Smack label for the running task * @forked: a pointer to the Smack label for the forked task * */ static void init_task_smack(struct task_smack *tsp, struct smack_known *task, struct smack_known *forked) { tsp->smk_task = task; tsp->smk_forked = forked; INIT_LIST_HEAD(&tsp->smk_rules); INIT_LIST_HEAD(&tsp->smk_relabel); mutex_init(&tsp->smk_rules_lock); } /** * smk_copy_rules - copy a rule set * @nhead: new rules header pointer * @ohead: old rules header pointer * @gfp: type of the memory for the allocation * * Returns 0 on success, -ENOMEM on error */ static int smk_copy_rules(struct list_head *nhead, struct list_head *ohead, gfp_t gfp) { struct smack_rule *nrp; struct smack_rule *orp; int rc = 0; list_for_each_entry_rcu(orp, ohead, list) { nrp = kmem_cache_zalloc(smack_rule_cache, gfp); if (nrp == NULL) { rc = -ENOMEM; break; } *nrp = *orp; list_add_rcu(&nrp->list, nhead); } return rc; } /** * smk_copy_relabel - copy smk_relabel labels list * @nhead: new rules header pointer * @ohead: old rules header pointer * @gfp: type of the memory for the allocation * * Returns 0 on success, -ENOMEM on error */ static int smk_copy_relabel(struct list_head *nhead, struct list_head *ohead, gfp_t gfp) { struct smack_known_list_elem *nklep; struct smack_known_list_elem *oklep; list_for_each_entry(oklep, ohead, list) { nklep = kzalloc(sizeof(struct smack_known_list_elem), gfp); if (nklep == NULL) { smk_destroy_label_list(nhead); return -ENOMEM; } nklep->smk_label = oklep->smk_label; list_add(&nklep->list, nhead); } return 0; } /** * smk_ptrace_mode - helper function for converting PTRACE_MODE_* into MAY_* * @mode: input mode in form of PTRACE_MODE_* * * Returns a converted MAY_* mode usable by smack rules */ static inline unsigned int smk_ptrace_mode(unsigned int mode) { if (mode & PTRACE_MODE_ATTACH) return MAY_READWRITE; if (mode & PTRACE_MODE_READ) return MAY_READ; return 0; } /** * smk_ptrace_rule_check - helper for ptrace access * @tracer: tracer process * @tracee_known: label entry of the process that's about to be traced * @mode: ptrace attachment mode (PTRACE_MODE_*) * @func: name of the function that called us, used for audit * * Returns 0 on access granted, -error on error */ static int smk_ptrace_rule_check(struct task_struct *tracer, struct smack_known *tracee_known, unsigned int mode, const char *func) { int rc; struct smk_audit_info ad, *saip = NULL; struct task_smack *tsp; struct smack_known *tracer_known; const struct cred *tracercred; if ((mode & PTRACE_MODE_NOAUDIT) == 0) { smk_ad_init(&ad, func, LSM_AUDIT_DATA_TASK); smk_ad_setfield_u_tsk(&ad, tracer); saip = &ad; } 
rcu_read_lock(); tracercred = __task_cred(tracer); tsp = smack_cred(tracercred); tracer_known = smk_of_task(tsp); if ((mode & PTRACE_MODE_ATTACH) && (smack_ptrace_rule == SMACK_PTRACE_EXACT || smack_ptrace_rule == SMACK_PTRACE_DRACONIAN)) { if (tracer_known->smk_known == tracee_known->smk_known) rc = 0; else if (smack_ptrace_rule == SMACK_PTRACE_DRACONIAN) rc = -EACCES; else if (smack_privileged_cred(CAP_SYS_PTRACE, tracercred)) rc = 0; else rc = -EACCES; if (saip) smack_log(tracer_known->smk_known, tracee_known->smk_known, 0, rc, saip); rcu_read_unlock(); return rc; } /* In case of rule==SMACK_PTRACE_DEFAULT or mode==PTRACE_MODE_READ */ rc = smk_tskacc(tsp, tracee_known, smk_ptrace_mode(mode), saip); rcu_read_unlock(); return rc; } /* * LSM hooks. * We he, that is fun! */ /** * smack_ptrace_access_check - Smack approval on PTRACE_ATTACH * @ctp: child task pointer * @mode: ptrace attachment mode (PTRACE_MODE_*) * * Returns 0 if access is OK, an error code otherwise * * Do the capability checks. */ static int smack_ptrace_access_check(struct task_struct *ctp, unsigned int mode) { struct smack_known *skp; skp = smk_of_task_struct_obj(ctp); return smk_ptrace_rule_check(current, skp, mode, __func__); } /** * smack_ptrace_traceme - Smack approval on PTRACE_TRACEME * @ptp: parent task pointer * * Returns 0 if access is OK, an error code otherwise * * Do the capability checks, and require PTRACE_MODE_ATTACH. */ static int smack_ptrace_traceme(struct task_struct *ptp) { struct smack_known *skp; skp = smk_of_task(smack_cred(current_cred())); return smk_ptrace_rule_check(ptp, skp, PTRACE_MODE_ATTACH, __func__); } /** * smack_syslog - Smack approval on syslog * @typefrom_file: unused * * Returns 0 on success, error code otherwise. */ static int smack_syslog(int typefrom_file) { int rc = 0; struct smack_known *skp = smk_of_current(); if (smack_privileged(CAP_MAC_OVERRIDE)) return 0; if (smack_syslog_label != NULL && smack_syslog_label != skp) rc = -EACCES; return rc; } /* * Superblock Hooks. */ /** * smack_sb_alloc_security - allocate a superblock blob * @sb: the superblock getting the blob * * Returns 0 on success or -ENOMEM on error. */ static int smack_sb_alloc_security(struct super_block *sb) { struct superblock_smack *sbsp = smack_superblock(sb); sbsp->smk_root = &smack_known_floor; sbsp->smk_default = &smack_known_floor; sbsp->smk_floor = &smack_known_floor; sbsp->smk_hat = &smack_known_hat; /* * SMK_SB_INITIALIZED will be zero from kzalloc. 
*/ return 0; } struct smack_mnt_opts { const char *fsdefault; const char *fsfloor; const char *fshat; const char *fsroot; const char *fstransmute; }; static void smack_free_mnt_opts(void *mnt_opts) { kfree(mnt_opts); } static int smack_add_opt(int token, const char *s, void **mnt_opts) { struct smack_mnt_opts *opts = *mnt_opts; struct smack_known *skp; if (!opts) { opts = kzalloc(sizeof(struct smack_mnt_opts), GFP_KERNEL); if (!opts) return -ENOMEM; *mnt_opts = opts; } if (!s) return -ENOMEM; skp = smk_import_entry(s, 0); if (IS_ERR(skp)) return PTR_ERR(skp); switch (token) { case Opt_fsdefault: if (opts->fsdefault) goto out_opt_err; opts->fsdefault = skp->smk_known; break; case Opt_fsfloor: if (opts->fsfloor) goto out_opt_err; opts->fsfloor = skp->smk_known; break; case Opt_fshat: if (opts->fshat) goto out_opt_err; opts->fshat = skp->smk_known; break; case Opt_fsroot: if (opts->fsroot) goto out_opt_err; opts->fsroot = skp->smk_known; break; case Opt_fstransmute: if (opts->fstransmute) goto out_opt_err; opts->fstransmute = skp->smk_known; break; } return 0; out_opt_err: pr_warn("Smack: duplicate mount options\n"); return -EINVAL; } /** * smack_fs_context_submount - Initialise security data for a filesystem context * @fc: The filesystem context. * @reference: reference superblock * * Returns 0 on success or -ENOMEM on error. */ static int smack_fs_context_submount(struct fs_context *fc, struct super_block *reference) { struct superblock_smack *sbsp; struct smack_mnt_opts *ctx; struct inode_smack *isp; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; fc->security = ctx; sbsp = smack_superblock(reference); isp = smack_inode(reference->s_root->d_inode); if (sbsp->smk_default) { ctx->fsdefault = kstrdup(sbsp->smk_default->smk_known, GFP_KERNEL); if (!ctx->fsdefault) return -ENOMEM; } if (sbsp->smk_floor) { ctx->fsfloor = kstrdup(sbsp->smk_floor->smk_known, GFP_KERNEL); if (!ctx->fsfloor) return -ENOMEM; } if (sbsp->smk_hat) { ctx->fshat = kstrdup(sbsp->smk_hat->smk_known, GFP_KERNEL); if (!ctx->fshat) return -ENOMEM; } if (isp->smk_flags & SMK_INODE_TRANSMUTE) { if (sbsp->smk_root) { ctx->fstransmute = kstrdup(sbsp->smk_root->smk_known, GFP_KERNEL); if (!ctx->fstransmute) return -ENOMEM; } } return 0; } /** * smack_fs_context_dup - Duplicate the security data on fs_context duplication * @fc: The new filesystem context. * @src_fc: The source filesystem context being duplicated. * * Returns 0 on success or -ENOMEM on error. */ static int smack_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc) { struct smack_mnt_opts *dst, *src = src_fc->security; if (!src) return 0; fc->security = kzalloc(sizeof(struct smack_mnt_opts), GFP_KERNEL); if (!fc->security) return -ENOMEM; dst = fc->security; dst->fsdefault = src->fsdefault; dst->fsfloor = src->fsfloor; dst->fshat = src->fshat; dst->fsroot = src->fsroot; dst->fstransmute = src->fstransmute; return 0; } static const struct fs_parameter_spec smack_fs_parameters[] = { fsparam_string("smackfsdef", Opt_fsdefault), fsparam_string("smackfsdefault", Opt_fsdefault), fsparam_string("smackfsfloor", Opt_fsfloor), fsparam_string("smackfshat", Opt_fshat), fsparam_string("smackfsroot", Opt_fsroot), fsparam_string("smackfstransmute", Opt_fstransmute), {} }; /** * smack_fs_context_parse_param - Parse a single mount parameter * @fc: The new filesystem context being constructed. * @param: The parameter. * * Returns 0 on success, -ENOPARAM to pass the parameter on or anything else on * error. 
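 *
 * As a purely illustrative example (label and mount point made up), a
 * CAP_MAC_ADMIN mounter passing one of the parameters listed in
 * smack_fs_parameters above, e.g.
 *
 *	mount -t tmpfs -o smackfsdefault=MyLabel none /mnt/scratch
 *
 * would reach this hook once for the "smackfsdefault" parameter.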
*/ static int smack_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param) { struct fs_parse_result result; int opt, rc; opt = fs_parse(fc, smack_fs_parameters, param, &result); if (opt < 0) return opt; rc = smack_add_opt(opt, param->string, &fc->security); if (!rc) param->string = NULL; return rc; } static int smack_sb_eat_lsm_opts(char *options, void **mnt_opts) { char *from = options, *to = options; bool first = true; while (1) { char *next = strchr(from, ','); int token, len, rc; char *arg = NULL; if (next) len = next - from; else len = strlen(from); token = match_opt_prefix(from, len, &arg); if (token != Opt_error) { arg = kmemdup_nul(arg, from + len - arg, GFP_KERNEL); rc = smack_add_opt(token, arg, mnt_opts); kfree(arg); if (unlikely(rc)) { if (*mnt_opts) smack_free_mnt_opts(*mnt_opts); *mnt_opts = NULL; return rc; } } else { if (!first) { // copy with preceding comma from--; len++; } if (to != from) memmove(to, from, len); to += len; first = false; } if (!from[len]) break; from += len + 1; } *to = '\0'; return 0; } /** * smack_set_mnt_opts - set Smack specific mount options * @sb: the file system superblock * @mnt_opts: Smack mount options * @kern_flags: mount option from kernel space or user space * @set_kern_flags: where to store converted mount opts * * Returns 0 on success, an error code on failure * * Allow filesystems with binary mount data to explicitly set Smack mount * labels. */ static int smack_set_mnt_opts(struct super_block *sb, void *mnt_opts, unsigned long kern_flags, unsigned long *set_kern_flags) { struct dentry *root = sb->s_root; struct inode *inode = d_backing_inode(root); struct superblock_smack *sp = smack_superblock(sb); struct inode_smack *isp; struct smack_known *skp; struct smack_mnt_opts *opts = mnt_opts; bool transmute = false; if (sp->smk_flags & SMK_SB_INITIALIZED) return 0; if (!smack_privileged(CAP_MAC_ADMIN)) { /* * Unprivileged mounts don't get to specify Smack values. */ if (opts) return -EPERM; /* * Unprivileged mounts get root and default from the caller. */ skp = smk_of_current(); sp->smk_root = skp; sp->smk_default = skp; /* * For a handful of fs types with no user-controlled * backing store it's okay to trust security labels * in the filesystem. The rest are untrusted. */ if (sb->s_user_ns != &init_user_ns && sb->s_magic != SYSFS_MAGIC && sb->s_magic != TMPFS_MAGIC && sb->s_magic != RAMFS_MAGIC) { transmute = true; sp->smk_flags |= SMK_SB_UNTRUSTED; } } sp->smk_flags |= SMK_SB_INITIALIZED; if (opts) { if (opts->fsdefault) { skp = smk_import_entry(opts->fsdefault, 0); if (IS_ERR(skp)) return PTR_ERR(skp); sp->smk_default = skp; } if (opts->fsfloor) { skp = smk_import_entry(opts->fsfloor, 0); if (IS_ERR(skp)) return PTR_ERR(skp); sp->smk_floor = skp; } if (opts->fshat) { skp = smk_import_entry(opts->fshat, 0); if (IS_ERR(skp)) return PTR_ERR(skp); sp->smk_hat = skp; } if (opts->fsroot) { skp = smk_import_entry(opts->fsroot, 0); if (IS_ERR(skp)) return PTR_ERR(skp); sp->smk_root = skp; } if (opts->fstransmute) { skp = smk_import_entry(opts->fstransmute, 0); if (IS_ERR(skp)) return PTR_ERR(skp); sp->smk_root = skp; transmute = true; } } /* * Initialize the root inode. 
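	 * with the root label chosen above, and mark it as transmuting when
	 * an fstransmute option (or an untrusted unprivileged mount) asked
	 * for it.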
*/ init_inode_smack(inode, sp->smk_root); if (transmute) { isp = smack_inode(inode); isp->smk_flags |= SMK_INODE_TRANSMUTE; } return 0; } /** * smack_sb_statfs - Smack check on statfs * @dentry: identifies the file system in question * * Returns 0 if current can read the floor of the filesystem, * and error code otherwise */ static int smack_sb_statfs(struct dentry *dentry) { struct superblock_smack *sbp = smack_superblock(dentry->d_sb); int rc; struct smk_audit_info ad; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY); smk_ad_setfield_u_fs_path_dentry(&ad, dentry); rc = smk_curacc(sbp->smk_floor, MAY_READ, &ad); rc = smk_bu_current("statfs", sbp->smk_floor, MAY_READ, rc); return rc; } /* * BPRM hooks */ /** * smack_bprm_creds_for_exec - Update bprm->cred if needed for exec * @bprm: the exec information * * Returns 0 if it gets a blob, -EPERM if exec forbidden and -ENOMEM otherwise */ static int smack_bprm_creds_for_exec(struct linux_binprm *bprm) { struct inode *inode = file_inode(bprm->file); struct task_smack *bsp = smack_cred(bprm->cred); struct inode_smack *isp; struct superblock_smack *sbsp; int rc; isp = smack_inode(inode); if (isp->smk_task == NULL || isp->smk_task == bsp->smk_task) return 0; sbsp = smack_superblock(inode->i_sb); if ((sbsp->smk_flags & SMK_SB_UNTRUSTED) && isp->smk_task != sbsp->smk_root) return 0; if (bprm->unsafe & LSM_UNSAFE_PTRACE) { struct task_struct *tracer; rc = 0; rcu_read_lock(); tracer = ptrace_parent(current); if (likely(tracer != NULL)) rc = smk_ptrace_rule_check(tracer, isp->smk_task, PTRACE_MODE_ATTACH, __func__); rcu_read_unlock(); if (rc != 0) return rc; } if (bprm->unsafe & ~LSM_UNSAFE_PTRACE) return -EPERM; bsp->smk_task = isp->smk_task; bprm->per_clear |= PER_CLEAR_ON_SETID; /* Decide if this is a secure exec. */ if (bsp->smk_task != bsp->smk_forked) bprm->secureexec = 1; return 0; } /* * Inode hooks */ /** * smack_inode_alloc_security - allocate an inode blob * @inode: the inode in need of a blob * * Returns 0 */ static int smack_inode_alloc_security(struct inode *inode) { struct smack_known *skp = smk_of_current(); init_inode_smack(inode, skp); return 0; } /** * smk_rule_transmutes - does access rule for (subject,object) contain 't'? 
* @subject: a pointer to the subject's Smack label entry * @object: a pointer to the object's Smack label entry */ static bool smk_rule_transmutes(struct smack_known *subject, const struct smack_known *object) { int may; rcu_read_lock(); may = smk_access_entry(subject->smk_known, object->smk_known, &subject->smk_rules); rcu_read_unlock(); return (may > 0) && (may & MAY_TRANSMUTE); } static int xattr_dupval(struct xattr *xattrs, int *xattr_count, const char *name, const void *value, unsigned int vallen) { struct xattr * const xattr = lsm_get_xattr_slot(xattrs, xattr_count); if (!xattr) return 0; xattr->value = kmemdup(value, vallen, GFP_NOFS); if (!xattr->value) return -ENOMEM; xattr->value_len = vallen; xattr->name = name; return 0; } /** * smack_inode_init_security - copy out the smack from an inode * @inode: the newly created inode * @dir: containing directory object * @qstr: unused * @xattrs: where to put the attributes * @xattr_count: current number of LSM-provided xattrs (updated) * * Returns 0 if it all works out, -ENOMEM if there's no memory */ static int smack_inode_init_security(struct inode *inode, struct inode *dir, const struct qstr *qstr, struct xattr *xattrs, int *xattr_count) { struct task_smack *tsp = smack_cred(current_cred()); struct inode_smack * const issp = smack_inode(inode); struct smack_known *dsp = smk_of_inode(dir); int rc = 0; int transflag = 0; bool trans_cred; bool trans_rule; /* * UNIX domain sockets use lower level socket data. Let * UDS inode have fixed * label to keep smack_inode_permission() calm * when called from unix_find_bsd() */ if (S_ISSOCK(inode->i_mode)) { /* forced label, no need to save to xattrs */ issp->smk_inode = &smack_known_star; goto instant_inode; } /* * If equal, transmuting already occurred in * smack_dentry_create_files_as(). No need to check again. */ trans_cred = (tsp->smk_task == tsp->smk_transmuted); if (!trans_cred) trans_rule = smk_rule_transmutes(smk_of_task(tsp), dsp); /* * In addition to having smk_task equal to smk_transmuted, * if the access rule allows transmutation and the directory * requests transmutation then by all means transmute. * Mark the inode as changed. */ if (trans_cred || (trans_rule && smk_inode_transmutable(dir))) { /* * The caller of smack_dentry_create_files_as() * should have overridden the current cred, so the * inode label was already set correctly in * smack_inode_alloc_security(). 
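	 *
	 * Illustrative example (labels and rule are made up): a task
	 * labeled "Snap" with the rule "Snap Pop rwxat" creating a file in
	 * a transmuting directory labeled "Pop" yields a "Pop" inode
	 * rather than a "Snap" one, and a new subdirectory would also
	 * inherit the transmute flag.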
*/ if (!trans_cred) issp->smk_inode = dsp; if (S_ISDIR(inode->i_mode)) { transflag = SMK_INODE_TRANSMUTE; if (xattr_dupval(xattrs, xattr_count, XATTR_SMACK_TRANSMUTE, TRANS_TRUE, TRANS_TRUE_SIZE )) rc = -ENOMEM; } } if (rc == 0) if (xattr_dupval(xattrs, xattr_count, XATTR_SMACK_SUFFIX, issp->smk_inode->smk_known, strlen(issp->smk_inode->smk_known) )) rc = -ENOMEM; instant_inode: issp->smk_flags |= (SMK_INODE_INSTANT | transflag); return rc; } /** * smack_inode_link - Smack check on link * @old_dentry: the existing object * @dir: unused * @new_dentry: the new object * * Returns 0 if access is permitted, an error code otherwise */ static int smack_inode_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry) { struct smack_known *isp; struct smk_audit_info ad; int rc; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY); smk_ad_setfield_u_fs_path_dentry(&ad, old_dentry); isp = smk_of_inode(d_backing_inode(old_dentry)); rc = smk_curacc(isp, MAY_WRITE, &ad); rc = smk_bu_inode(d_backing_inode(old_dentry), MAY_WRITE, rc); if (rc == 0 && d_is_positive(new_dentry)) { isp = smk_of_inode(d_backing_inode(new_dentry)); smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry); rc = smk_curacc(isp, MAY_WRITE, &ad); rc = smk_bu_inode(d_backing_inode(new_dentry), MAY_WRITE, rc); } return rc; } /** * smack_inode_unlink - Smack check on inode deletion * @dir: containing directory object * @dentry: file to unlink * * Returns 0 if current can write the containing directory * and the object, error code otherwise */ static int smack_inode_unlink(struct inode *dir, struct dentry *dentry) { struct inode *ip = d_backing_inode(dentry); struct smk_audit_info ad; int rc; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY); smk_ad_setfield_u_fs_path_dentry(&ad, dentry); /* * You need write access to the thing you're unlinking */ rc = smk_curacc(smk_of_inode(ip), MAY_WRITE, &ad); rc = smk_bu_inode(ip, MAY_WRITE, rc); if (rc == 0) { /* * You also need write access to the containing directory */ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_INODE); smk_ad_setfield_u_fs_inode(&ad, dir); rc = smk_curacc(smk_of_inode(dir), MAY_WRITE, &ad); rc = smk_bu_inode(dir, MAY_WRITE, rc); } return rc; } /** * smack_inode_rmdir - Smack check on directory deletion * @dir: containing directory object * @dentry: directory to unlink * * Returns 0 if current can write the containing directory * and the directory, error code otherwise */ static int smack_inode_rmdir(struct inode *dir, struct dentry *dentry) { struct smk_audit_info ad; int rc; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY); smk_ad_setfield_u_fs_path_dentry(&ad, dentry); /* * You need write access to the thing you're removing */ rc = smk_curacc(smk_of_inode(d_backing_inode(dentry)), MAY_WRITE, &ad); rc = smk_bu_inode(d_backing_inode(dentry), MAY_WRITE, rc); if (rc == 0) { /* * You also need write access to the containing directory */ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_INODE); smk_ad_setfield_u_fs_inode(&ad, dir); rc = smk_curacc(smk_of_inode(dir), MAY_WRITE, &ad); rc = smk_bu_inode(dir, MAY_WRITE, rc); } return rc; } /** * smack_inode_rename - Smack check on rename * @old_inode: unused * @old_dentry: the old object * @new_inode: unused * @new_dentry: the new object * * Read and write access is required on both the old and * new directories. 
* * Returns 0 if access is permitted, an error code otherwise */ static int smack_inode_rename(struct inode *old_inode, struct dentry *old_dentry, struct inode *new_inode, struct dentry *new_dentry) { int rc; struct smack_known *isp; struct smk_audit_info ad; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY); smk_ad_setfield_u_fs_path_dentry(&ad, old_dentry); isp = smk_of_inode(d_backing_inode(old_dentry)); rc = smk_curacc(isp, MAY_READWRITE, &ad); rc = smk_bu_inode(d_backing_inode(old_dentry), MAY_READWRITE, rc); if (rc == 0 && d_is_positive(new_dentry)) { isp = smk_of_inode(d_backing_inode(new_dentry)); smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry); rc = smk_curacc(isp, MAY_READWRITE, &ad); rc = smk_bu_inode(d_backing_inode(new_dentry), MAY_READWRITE, rc); } return rc; } /** * smack_inode_permission - Smack version of permission() * @inode: the inode in question * @mask: the access requested * * This is the important Smack hook. * * Returns 0 if access is permitted, an error code otherwise */ static int smack_inode_permission(struct inode *inode, int mask) { struct superblock_smack *sbsp = smack_superblock(inode->i_sb); struct smk_audit_info ad; int no_block = mask & MAY_NOT_BLOCK; int rc; mask &= (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND); /* * No permission to check. Existence test. Yup, it's there. */ if (mask == 0) return 0; if (sbsp->smk_flags & SMK_SB_UNTRUSTED) { if (smk_of_inode(inode) != sbsp->smk_root) return -EACCES; } /* May be droppable after audit */ if (no_block) return -ECHILD; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_INODE); smk_ad_setfield_u_fs_inode(&ad, inode); rc = smk_curacc(smk_of_inode(inode), mask, &ad); rc = smk_bu_inode(inode, mask, rc); return rc; } /** * smack_inode_setattr - Smack check for setting attributes * @idmap: idmap of the mount * @dentry: the object * @iattr: for the force flag * * Returns 0 if access is permitted, an error code otherwise */ static int smack_inode_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *iattr) { struct smk_audit_info ad; int rc; /* * Need to allow for clearing the setuid bit. */ if (iattr->ia_valid & ATTR_FORCE) return 0; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY); smk_ad_setfield_u_fs_path_dentry(&ad, dentry); rc = smk_curacc(smk_of_inode(d_backing_inode(dentry)), MAY_WRITE, &ad); rc = smk_bu_inode(d_backing_inode(dentry), MAY_WRITE, rc); return rc; } /** * smack_inode_getattr - Smack check for getting attributes * @path: path to extract the info from * * Returns 0 if access is permitted, an error code otherwise */ static int smack_inode_getattr(const struct path *path) { struct smk_audit_info ad; struct inode *inode = d_backing_inode(path->dentry); int rc; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH); smk_ad_setfield_u_fs_path(&ad, *path); rc = smk_curacc(smk_of_inode(inode), MAY_READ, &ad); rc = smk_bu_inode(inode, MAY_READ, rc); return rc; } /** * smack_inode_xattr_skipcap - Skip the xattr capability checks? * @name: name of the xattr * * Returns 1 to indicate that Smack "owns" the access control rights to xattrs * named @name; the LSM layer should avoid enforcing any traditional * capability based access controls on this xattr. Returns 0 to indicate that * Smack does not "own" the access control rights to xattrs named @name and is * deferring to the LSM layer for further access controls, including capability * based controls. 
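 *
 * For context (example only; the path and label are made up), these are
 * the attributes a privileged user would set with something like
 *
 *	setfattr -n security.SMACK64 -v MyLabel /some/file
 *
 * where the security.SMACK64* names correspond to the XATTR_NAME_SMACK*
 * constants tested below.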
*/ static int smack_inode_xattr_skipcap(const char *name) { if (strncmp(name, XATTR_SMACK_SUFFIX, strlen(XATTR_SMACK_SUFFIX))) return 0; if (strcmp(name, XATTR_NAME_SMACK) == 0 || strcmp(name, XATTR_NAME_SMACKIPIN) == 0 || strcmp(name, XATTR_NAME_SMACKIPOUT) == 0 || strcmp(name, XATTR_NAME_SMACKEXEC) == 0 || strcmp(name, XATTR_NAME_SMACKMMAP) == 0 || strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) return 1; return 0; } /** * smack_inode_setxattr - Smack check for setting xattrs * @idmap: idmap of the mount * @dentry: the object * @name: name of the attribute * @value: value of the attribute * @size: size of the value * @flags: unused * * This protects the Smack attribute explicitly. * * Returns 0 if access is permitted, an error code otherwise */ static int smack_inode_setxattr(struct mnt_idmap *idmap, struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { struct smk_audit_info ad; struct smack_known *skp; int check_priv = 0; int check_import = 0; int check_star = 0; int rc = 0; umode_t const i_mode = d_backing_inode(dentry)->i_mode; /* * Check label validity here so import won't fail in post_setxattr */ if (strcmp(name, XATTR_NAME_SMACK) == 0) { /* * UDS inode has fixed label */ if (S_ISSOCK(i_mode)) { rc = -EINVAL; } else { check_priv = 1; check_import = 1; } } else if (strcmp(name, XATTR_NAME_SMACKIPIN) == 0 || strcmp(name, XATTR_NAME_SMACKIPOUT) == 0) { check_priv = 1; check_import = 1; } else if (strcmp(name, XATTR_NAME_SMACKEXEC) == 0 || strcmp(name, XATTR_NAME_SMACKMMAP) == 0) { check_priv = 1; check_import = 1; check_star = 1; } else if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) { check_priv = 1; if (!S_ISDIR(i_mode) || size != TRANS_TRUE_SIZE || strncmp(value, TRANS_TRUE, TRANS_TRUE_SIZE) != 0) rc = -EINVAL; } if (check_priv && !smack_privileged(CAP_MAC_ADMIN)) rc = -EPERM; if (rc == 0 && check_import) { skp = size ? smk_import_entry(value, size) : NULL; if (IS_ERR(skp)) rc = PTR_ERR(skp); else if (skp == NULL || (check_star && (skp == &smack_known_star || skp == &smack_known_web))) rc = -EINVAL; } smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY); smk_ad_setfield_u_fs_path_dentry(&ad, dentry); if (rc == 0) { rc = smk_curacc(smk_of_inode(d_backing_inode(dentry)), MAY_WRITE, &ad); rc = smk_bu_inode(d_backing_inode(dentry), MAY_WRITE, rc); } return rc; } /** * smack_inode_post_setxattr - Apply the Smack update approved above * @dentry: object * @name: attribute name * @value: attribute value * @size: attribute size * @flags: unused * * Set the pointer in the inode blob to the entry found * in the master label list. 
*/ static void smack_inode_post_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { struct smack_known *skp; struct inode_smack *isp = smack_inode(d_backing_inode(dentry)); if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) { isp->smk_flags |= SMK_INODE_TRANSMUTE; return; } if (strcmp(name, XATTR_NAME_SMACK) == 0) { skp = smk_import_entry(value, size); if (!IS_ERR(skp)) isp->smk_inode = skp; } else if (strcmp(name, XATTR_NAME_SMACKEXEC) == 0) { skp = smk_import_entry(value, size); if (!IS_ERR(skp)) isp->smk_task = skp; } else if (strcmp(name, XATTR_NAME_SMACKMMAP) == 0) { skp = smk_import_entry(value, size); if (!IS_ERR(skp)) isp->smk_mmap = skp; } return; } /** * smack_inode_getxattr - Smack check on getxattr * @dentry: the object * @name: unused * * Returns 0 if access is permitted, an error code otherwise */ static int smack_inode_getxattr(struct dentry *dentry, const char *name) { struct smk_audit_info ad; int rc; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY); smk_ad_setfield_u_fs_path_dentry(&ad, dentry); rc = smk_curacc(smk_of_inode(d_backing_inode(dentry)), MAY_READ, &ad); rc = smk_bu_inode(d_backing_inode(dentry), MAY_READ, rc); return rc; } /** * smack_inode_removexattr - Smack check on removexattr * @idmap: idmap of the mount * @dentry: the object * @name: name of the attribute * * Removing the Smack attribute requires CAP_MAC_ADMIN * * Returns 0 if access is permitted, an error code otherwise */ static int smack_inode_removexattr(struct mnt_idmap *idmap, struct dentry *dentry, const char *name) { struct inode_smack *isp; struct smk_audit_info ad; int rc = 0; if (strcmp(name, XATTR_NAME_SMACK) == 0 || strcmp(name, XATTR_NAME_SMACKIPIN) == 0 || strcmp(name, XATTR_NAME_SMACKIPOUT) == 0 || strcmp(name, XATTR_NAME_SMACKEXEC) == 0 || strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0 || strcmp(name, XATTR_NAME_SMACKMMAP) == 0) { if (!smack_privileged(CAP_MAC_ADMIN)) rc = -EPERM; } if (rc != 0) return rc; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY); smk_ad_setfield_u_fs_path_dentry(&ad, dentry); rc = smk_curacc(smk_of_inode(d_backing_inode(dentry)), MAY_WRITE, &ad); rc = smk_bu_inode(d_backing_inode(dentry), MAY_WRITE, rc); if (rc != 0) return rc; isp = smack_inode(d_backing_inode(dentry)); /* * Don't do anything special for these. 
* XATTR_NAME_SMACKIPIN * XATTR_NAME_SMACKIPOUT * XATTR_NAME_SMACK if S_ISSOCK (UDS inode has fixed label) */ if (strcmp(name, XATTR_NAME_SMACK) == 0) { if (!S_ISSOCK(d_backing_inode(dentry)->i_mode)) { struct super_block *sbp = dentry->d_sb; struct superblock_smack *sbsp = smack_superblock(sbp); isp->smk_inode = sbsp->smk_default; } } else if (strcmp(name, XATTR_NAME_SMACKEXEC) == 0) isp->smk_task = NULL; else if (strcmp(name, XATTR_NAME_SMACKMMAP) == 0) isp->smk_mmap = NULL; else if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) isp->smk_flags &= ~SMK_INODE_TRANSMUTE; return 0; } /** * smack_inode_set_acl - Smack check for setting posix acls * @idmap: idmap of the mnt this request came from * @dentry: the object * @acl_name: name of the posix acl * @kacl: the posix acls * * Returns 0 if access is permitted, an error code otherwise */ static int smack_inode_set_acl(struct mnt_idmap *idmap, struct dentry *dentry, const char *acl_name, struct posix_acl *kacl) { struct smk_audit_info ad; int rc; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY); smk_ad_setfield_u_fs_path_dentry(&ad, dentry); rc = smk_curacc(smk_of_inode(d_backing_inode(dentry)), MAY_WRITE, &ad); rc = smk_bu_inode(d_backing_inode(dentry), MAY_WRITE, rc); return rc; } /** * smack_inode_get_acl - Smack check for getting posix acls * @idmap: idmap of the mnt this request came from * @dentry: the object * @acl_name: name of the posix acl * * Returns 0 if access is permitted, an error code otherwise */ static int smack_inode_get_acl(struct mnt_idmap *idmap, struct dentry *dentry, const char *acl_name) { struct smk_audit_info ad; int rc; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY); smk_ad_setfield_u_fs_path_dentry(&ad, dentry); rc = smk_curacc(smk_of_inode(d_backing_inode(dentry)), MAY_READ, &ad); rc = smk_bu_inode(d_backing_inode(dentry), MAY_READ, rc); return rc; } /** * smack_inode_remove_acl - Smack check for removing posix acls * @idmap: idmap of the mnt this request came from * @dentry: the object * @acl_name: name of the posix acl * * Returns 0 if access is permitted, an error code otherwise */ static int smack_inode_remove_acl(struct mnt_idmap *idmap, struct dentry *dentry, const char *acl_name) { struct smk_audit_info ad; int rc; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY); smk_ad_setfield_u_fs_path_dentry(&ad, dentry); rc = smk_curacc(smk_of_inode(d_backing_inode(dentry)), MAY_WRITE, &ad); rc = smk_bu_inode(d_backing_inode(dentry), MAY_WRITE, rc); return rc; } /** * smack_inode_getsecurity - get smack xattrs * @idmap: idmap of the mount * @inode: the object * @name: attribute name * @buffer: where to put the result * @alloc: duplicate memory * * Returns the size of the attribute or an error code */ static int smack_inode_getsecurity(struct mnt_idmap *idmap, struct inode *inode, const char *name, void **buffer, bool alloc) { struct socket_smack *ssp; struct socket *sock; struct super_block *sbp; struct inode *ip = inode; struct smack_known *isp; struct inode_smack *ispp; size_t label_len; char *label = NULL; if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) { isp = smk_of_inode(inode); } else if (strcmp(name, XATTR_SMACK_TRANSMUTE) == 0) { ispp = smack_inode(inode); if (ispp->smk_flags & SMK_INODE_TRANSMUTE) label = TRANS_TRUE; else label = ""; } else { /* * The rest of the Smack xattrs are only on sockets.
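	 * SMACK64IPIN and SMACK64IPOUT live in the socket_smack blob rather
	 * than in the inode, so anything else is only meaningful when the
	 * inode belongs to sockfs.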
*/ sbp = ip->i_sb; if (sbp->s_magic != SOCKFS_MAGIC) return -EOPNOTSUPP; sock = SOCKET_I(ip); if (sock == NULL || sock->sk == NULL) return -EOPNOTSUPP; ssp = smack_sock(sock->sk); if (strcmp(name, XATTR_SMACK_IPIN) == 0) isp = ssp->smk_in; else if (strcmp(name, XATTR_SMACK_IPOUT) == 0) isp = ssp->smk_out; else return -EOPNOTSUPP; } if (!label) label = isp->smk_known; label_len = strlen(label); if (alloc) { *buffer = kstrdup(label, GFP_KERNEL); if (*buffer == NULL) return -ENOMEM; } return label_len; } /** * smack_inode_listsecurity - list the Smack attributes * @inode: the object * @buffer: where they go * @buffer_size: size of buffer */ static int smack_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size) { int len = sizeof(XATTR_NAME_SMACK); if (buffer != NULL && len <= buffer_size) memcpy(buffer, XATTR_NAME_SMACK, len); return len; } /** * smack_inode_getlsmprop - Extract inode's security id * @inode: inode to extract the info from * @prop: where result will be saved */ static void smack_inode_getlsmprop(struct inode *inode, struct lsm_prop *prop) { prop->smack.skp = smk_of_inode(inode); } /* * File Hooks */ /* * There is no smack_file_permission hook * * Should access checks be done on each read or write? * UNICOS and SELinux say yes. * Trusted Solaris, Trusted Irix, and just about everyone else says no. * * I'll say no for now. Smack does not do the frequent * label changing that SELinux does. */ /** * smack_file_alloc_security - assign a file security blob * @file: the object * * The security blob for a file is a pointer to the master * label list, so no allocation is done. * * f_security is the owner security information. It * isn't used on file access checks, it's for send_sigio. * * Returns 0 */ static int smack_file_alloc_security(struct file *file) { struct smack_known **blob = smack_file(file); *blob = smk_of_current(); return 0; } /** * smack_file_ioctl - Smack check on ioctls * @file: the object * @cmd: what to do * @arg: unused * * Relies heavily on the correct use of the ioctl command conventions. * * Returns 0 if allowed, error code otherwise */ static int smack_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int rc = 0; struct smk_audit_info ad; struct inode *inode = file_inode(file); if (unlikely(IS_PRIVATE(inode))) return 0; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH); smk_ad_setfield_u_fs_path(&ad, file->f_path); if (_IOC_DIR(cmd) & _IOC_WRITE) { rc = smk_curacc(smk_of_inode(inode), MAY_WRITE, &ad); rc = smk_bu_file(file, MAY_WRITE, rc); } if (rc == 0 && (_IOC_DIR(cmd) & _IOC_READ)) { rc = smk_curacc(smk_of_inode(inode), MAY_READ, &ad); rc = smk_bu_file(file, MAY_READ, rc); } return rc; } /** * smack_file_lock - Smack check on file locking * @file: the object * @cmd: unused * * Returns 0 if current has lock access, error code otherwise */ static int smack_file_lock(struct file *file, unsigned int cmd) { struct smk_audit_info ad; int rc; struct inode *inode = file_inode(file); if (unlikely(IS_PRIVATE(inode))) return 0; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH); smk_ad_setfield_u_fs_path(&ad, file->f_path); rc = smk_curacc(smk_of_inode(inode), MAY_LOCK, &ad); rc = smk_bu_file(file, MAY_LOCK, rc); return rc; } /** * smack_file_fcntl - Smack check on fcntl * @file: the object * @cmd: what action to check * @arg: unused * * Generally these operations are harmless. * File locking operations present an obvious mechanism * for passing information, so they require write access. 
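 * (F_SETLK and F_SETLKW are checked with the dedicated MAY_LOCK access
 * mode below; F_SETOWN and F_SETSIG need write access.)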
* * Returns 0 if current has access, error code otherwise */ static int smack_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg) { struct smk_audit_info ad; int rc = 0; struct inode *inode = file_inode(file); if (unlikely(IS_PRIVATE(inode))) return 0; switch (cmd) { case F_GETLK: break; case F_SETLK: case F_SETLKW: smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH); smk_ad_setfield_u_fs_path(&ad, file->f_path); rc = smk_curacc(smk_of_inode(inode), MAY_LOCK, &ad); rc = smk_bu_file(file, MAY_LOCK, rc); break; case F_SETOWN: case F_SETSIG: smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH); smk_ad_setfield_u_fs_path(&ad, file->f_path); rc = smk_curacc(smk_of_inode(inode), MAY_WRITE, &ad); rc = smk_bu_file(file, MAY_WRITE, rc); break; default: break; } return rc; } /** * smack_mmap_file - Check permissions for a mmap operation. * @file: contains the file structure for file to map (may be NULL). * @reqprot: contains the protection requested by the application. * @prot: contains the protection that will be applied by the kernel. * @flags: contains the operational flags. * * The @file may be NULL, e.g. if mapping anonymous memory. * * Return 0 if permission is granted. */ static int smack_mmap_file(struct file *file, unsigned long reqprot, unsigned long prot, unsigned long flags) { struct smack_known *skp; struct smack_known *mkp; struct smack_rule *srp; struct task_smack *tsp; struct smack_known *okp; struct inode_smack *isp; struct superblock_smack *sbsp; int may; int mmay; int tmay; int rc; if (file == NULL) return 0; if (unlikely(IS_PRIVATE(file_inode(file)))) return 0; isp = smack_inode(file_inode(file)); if (isp->smk_mmap == NULL) return 0; sbsp = smack_superblock(file_inode(file)->i_sb); if (sbsp->smk_flags & SMK_SB_UNTRUSTED && isp->smk_mmap != sbsp->smk_root) return -EACCES; mkp = isp->smk_mmap; tsp = smack_cred(current_cred()); skp = smk_of_current(); rc = 0; rcu_read_lock(); /* * For each Smack rule associated with the subject * label verify that the SMACK64MMAP also has access * to that rule's object label. */ list_for_each_entry_rcu(srp, &skp->smk_rules, list) { okp = srp->smk_object; /* * Matching labels always allows access. */ if (mkp->smk_known == okp->smk_known) continue; /* * If there is a matching local rule take * that into account as well. */ may = smk_access_entry(srp->smk_subject->smk_known, okp->smk_known, &tsp->smk_rules); if (may == -ENOENT) may = srp->smk_access; else may &= srp->smk_access; /* * If may is zero the SMACK64MMAP subject can't * possibly have less access. */ if (may == 0) continue; /* * Fetch the global list entry. * If there isn't one a SMACK64MMAP subject * can't have as much access as current. */ mmay = smk_access_entry(mkp->smk_known, okp->smk_known, &mkp->smk_rules); if (mmay == -ENOENT) { rc = -EACCES; break; } /* * If there is a local entry it modifies the * potential access, too. */ tmay = smk_access_entry(mkp->smk_known, okp->smk_known, &tsp->smk_rules); if (tmay != -ENOENT) mmay &= tmay; /* * If there is any access available to current that is * not available to a SMACK64MMAP subject * deny access. 
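		 *
		 * Worked example (hypothetical rights): if current may "rw"
		 * the object but the SMACK64MMAP label may only "r", then
		 * may | mmay != mmay and the mmap is refused.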
*/ if ((may | mmay) != mmay) { rc = -EACCES; break; } } rcu_read_unlock(); return rc; } /** * smack_file_set_fowner - set the file security blob value * @file: object in question * */ static void smack_file_set_fowner(struct file *file) { struct smack_known **blob = smack_file(file); *blob = smk_of_current(); } /** * smack_file_send_sigiotask - Smack on sigio * @tsk: The target task * @fown: the object the signal come from * @signum: unused * * Allow a privileged task to get signals even if it shouldn't * * Returns 0 if a subject with the object's smack could * write to the task, an error code otherwise. */ static int smack_file_send_sigiotask(struct task_struct *tsk, struct fown_struct *fown, int signum) { struct smack_known **blob; struct smack_known *skp; struct smack_known *tkp = smk_of_task(smack_cred(tsk->cred)); const struct cred *tcred; struct file *file; int rc; struct smk_audit_info ad; /* * struct fown_struct is never outside the context of a struct file */ file = fown->file; /* we don't log here as rc can be overridden */ blob = smack_file(file); skp = *blob; rc = smk_access(skp, tkp, MAY_DELIVER, NULL); rc = smk_bu_note("sigiotask", skp, tkp, MAY_DELIVER, rc); rcu_read_lock(); tcred = __task_cred(tsk); if (rc != 0 && smack_privileged_cred(CAP_MAC_OVERRIDE, tcred)) rc = 0; rcu_read_unlock(); smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK); smk_ad_setfield_u_tsk(&ad, tsk); smack_log(skp->smk_known, tkp->smk_known, MAY_DELIVER, rc, &ad); return rc; } /** * smack_file_receive - Smack file receive check * @file: the object * * Returns 0 if current has access, error code otherwise */ static int smack_file_receive(struct file *file) { int rc; int may = 0; struct smk_audit_info ad; struct inode *inode = file_inode(file); struct socket *sock; struct task_smack *tsp; struct socket_smack *ssp; if (unlikely(IS_PRIVATE(inode))) return 0; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH); smk_ad_setfield_u_fs_path(&ad, file->f_path); if (inode->i_sb->s_magic == SOCKFS_MAGIC) { sock = SOCKET_I(inode); ssp = smack_sock(sock->sk); tsp = smack_cred(current_cred()); /* * If the receiving process can't write to the * passed socket or if the passed socket can't * write to the receiving process don't accept * the passed socket. */ rc = smk_access(tsp->smk_task, ssp->smk_out, MAY_WRITE, &ad); rc = smk_bu_file(file, may, rc); if (rc < 0) return rc; rc = smk_access(ssp->smk_in, tsp->smk_task, MAY_WRITE, &ad); rc = smk_bu_file(file, may, rc); return rc; } /* * This code relies on bitmasks. */ if (file->f_mode & FMODE_READ) may = MAY_READ; if (file->f_mode & FMODE_WRITE) may |= MAY_WRITE; rc = smk_curacc(smk_of_inode(inode), may, &ad); rc = smk_bu_file(file, may, rc); return rc; } /** * smack_file_open - Smack dentry open processing * @file: the object * * Set the security blob in the file structure. * Allow the open only if the task has read access. There are * many read operations (e.g. fstat) that you can do with an * fd even if you have the file open write-only. 
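 * The check is made with the opener's credentials (file->f_cred), not
 * necessarily those of current.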
* * Returns 0 if current has access, error code otherwise */ static int smack_file_open(struct file *file) { struct task_smack *tsp = smack_cred(file->f_cred); struct inode *inode = file_inode(file); struct smk_audit_info ad; int rc; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH); smk_ad_setfield_u_fs_path(&ad, file->f_path); rc = smk_tskacc(tsp, smk_of_inode(inode), MAY_READ, &ad); rc = smk_bu_credfile(file->f_cred, file, MAY_READ, rc); return rc; } /* * Task hooks */ /** * smack_cred_alloc_blank - "allocate" blank task-level security credentials * @cred: the new credentials * @gfp: the atomicity of any memory allocations * * Prepare a blank set of credentials for modification. This must allocate all * the memory the LSM module might require such that cred_transfer() can * complete without error. */ static int smack_cred_alloc_blank(struct cred *cred, gfp_t gfp) { init_task_smack(smack_cred(cred), NULL, NULL); return 0; } /** * smack_cred_free - "free" task-level security credentials * @cred: the credentials in question * */ static void smack_cred_free(struct cred *cred) { struct task_smack *tsp = smack_cred(cred); struct smack_rule *rp; struct list_head *l; struct list_head *n; smk_destroy_label_list(&tsp->smk_relabel); list_for_each_safe(l, n, &tsp->smk_rules) { rp = list_entry(l, struct smack_rule, list); list_del(&rp->list); kmem_cache_free(smack_rule_cache, rp); } } /** * smack_cred_prepare - prepare new set of credentials for modification * @new: the new credentials * @old: the original credentials * @gfp: the atomicity of any memory allocations * * Prepare a new set of credentials for modification. */ static int smack_cred_prepare(struct cred *new, const struct cred *old, gfp_t gfp) { struct task_smack *old_tsp = smack_cred(old); struct task_smack *new_tsp = smack_cred(new); int rc; init_task_smack(new_tsp, old_tsp->smk_task, old_tsp->smk_task); rc = smk_copy_rules(&new_tsp->smk_rules, &old_tsp->smk_rules, gfp); if (rc != 0) return rc; rc = smk_copy_relabel(&new_tsp->smk_relabel, &old_tsp->smk_relabel, gfp); return rc; } /** * smack_cred_transfer - Transfer the old credentials to the new credentials * @new: the new credentials * @old: the original credentials * * Fill in a set of blank credentials from another set of credentials. */ static void smack_cred_transfer(struct cred *new, const struct cred *old) { struct task_smack *old_tsp = smack_cred(old); struct task_smack *new_tsp = smack_cred(new); init_task_smack(new_tsp, old_tsp->smk_task, old_tsp->smk_task); } /** * smack_cred_getsecid - get the secid corresponding to a creds structure * @cred: the object creds * @secid: where to put the result * * Sets the secid to contain a u32 version of the smack label. */ static void smack_cred_getsecid(const struct cred *cred, u32 *secid) { struct smack_known *skp; rcu_read_lock(); skp = smk_of_task(smack_cred(cred)); *secid = skp->smk_secid; rcu_read_unlock(); } /** * smack_cred_getlsmprop - get the Smack label for a creds structure * @cred: the object creds * @prop: where to put the data * * Sets the Smack part of the ref */ static void smack_cred_getlsmprop(const struct cred *cred, struct lsm_prop *prop) { rcu_read_lock(); prop->smack.skp = smk_of_task(smack_cred(cred)); rcu_read_unlock(); } /** * smack_kernel_act_as - Set the subjective context in a set of credentials * @new: points to the set of credentials to be modified. * @secid: specifies the security ID to be set * * Set the security data for a kernel service. 
*/ static int smack_kernel_act_as(struct cred *new, u32 secid) { struct task_smack *new_tsp = smack_cred(new); new_tsp->smk_task = smack_from_secid(secid); return 0; } /** * smack_kernel_create_files_as - Set the file creation label in a set of creds * @new: points to the set of credentials to be modified * @inode: points to the inode to use as a reference * * Set the file creation context in a set of credentials to the same * as the objective context of the specified inode */ static int smack_kernel_create_files_as(struct cred *new, struct inode *inode) { struct inode_smack *isp = smack_inode(inode); struct task_smack *tsp = smack_cred(new); tsp->smk_forked = isp->smk_inode; tsp->smk_task = tsp->smk_forked; return 0; } /** * smk_curacc_on_task - helper to log task related access * @p: the task object * @access: the access requested * @caller: name of the calling function for audit * * Return 0 if access is permitted */ static int smk_curacc_on_task(struct task_struct *p, int access, const char *caller) { struct smk_audit_info ad; struct smack_known *skp = smk_of_task_struct_obj(p); int rc; smk_ad_init(&ad, caller, LSM_AUDIT_DATA_TASK); smk_ad_setfield_u_tsk(&ad, p); rc = smk_curacc(skp, access, &ad); rc = smk_bu_task(p, access, rc); return rc; } /** * smack_task_setpgid - Smack check on setting pgid * @p: the task object * @pgid: unused * * Return 0 if write access is permitted */ static int smack_task_setpgid(struct task_struct *p, pid_t pgid) { return smk_curacc_on_task(p, MAY_WRITE, __func__); } /** * smack_task_getpgid - Smack access check for getpgid * @p: the object task * * Returns 0 if current can read the object task, error code otherwise */ static int smack_task_getpgid(struct task_struct *p) { return smk_curacc_on_task(p, MAY_READ, __func__); } /** * smack_task_getsid - Smack access check for getsid * @p: the object task * * Returns 0 if current can read the object task, error code otherwise */ static int smack_task_getsid(struct task_struct *p) { return smk_curacc_on_task(p, MAY_READ, __func__); } /** * smack_current_getlsmprop_subj - get the subjective secid of the current task * @prop: where to put the result * * Sets the secid to contain a u32 version of the task's subjective smack label. */ static void smack_current_getlsmprop_subj(struct lsm_prop *prop) { prop->smack.skp = smk_of_current(); } /** * smack_task_getlsmprop_obj - get the objective data of the task * @p: the task * @prop: where to put the result * * Sets the secid to contain a u32 version of the task's objective smack label. 
*/ static void smack_task_getlsmprop_obj(struct task_struct *p, struct lsm_prop *prop) { prop->smack.skp = smk_of_task_struct_obj(p); } /** * smack_task_setnice - Smack check on setting nice * @p: the task object * @nice: unused * * Return 0 if write access is permitted */ static int smack_task_setnice(struct task_struct *p, int nice) { return smk_curacc_on_task(p, MAY_WRITE, __func__); } /** * smack_task_setioprio - Smack check on setting ioprio * @p: the task object * @ioprio: unused * * Return 0 if write access is permitted */ static int smack_task_setioprio(struct task_struct *p, int ioprio) { return smk_curacc_on_task(p, MAY_WRITE, __func__); } /** * smack_task_getioprio - Smack check on reading ioprio * @p: the task object * * Return 0 if read access is permitted */ static int smack_task_getioprio(struct task_struct *p) { return smk_curacc_on_task(p, MAY_READ, __func__); } /** * smack_task_setscheduler - Smack check on setting scheduler * @p: the task object * * Return 0 if write access is permitted */ static int smack_task_setscheduler(struct task_struct *p) { return smk_curacc_on_task(p, MAY_WRITE, __func__); } /** * smack_task_getscheduler - Smack check on reading scheduler * @p: the task object * * Return 0 if read access is permitted */ static int smack_task_getscheduler(struct task_struct *p) { return smk_curacc_on_task(p, MAY_READ, __func__); } /** * smack_task_movememory - Smack check on moving memory * @p: the task object * * Return 0 if write access is permitted */ static int smack_task_movememory(struct task_struct *p) { return smk_curacc_on_task(p, MAY_WRITE, __func__); } /** * smack_task_kill - Smack check on signal delivery * @p: the task object * @info: unused * @sig: the signal number (0 is an existence test and is always allowed) * @cred: identifies the cred to use in lieu of current's * * Return 0 if write access is permitted * */ static int smack_task_kill(struct task_struct *p, struct kernel_siginfo *info, int sig, const struct cred *cred) { struct smk_audit_info ad; struct smack_known *skp; struct smack_known *tkp = smk_of_task_struct_obj(p); int rc; if (!sig) return 0; /* null signal; existence test */ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK); smk_ad_setfield_u_tsk(&ad, p); /* * Sending a signal requires that the sender * can write the receiver. */ if (cred == NULL) { rc = smk_curacc(tkp, MAY_DELIVER, &ad); rc = smk_bu_task(p, MAY_DELIVER, rc); return rc; } /* * If the cred isn't NULL we're dealing with some USB IO * specific behavior. This is not clean. For one thing * we can't take privilege into account. */ skp = smk_of_task(smack_cred(cred)); rc = smk_access(skp, tkp, MAY_DELIVER, &ad); rc = smk_bu_note("USB signal", skp, tkp, MAY_DELIVER, rc); return rc; } /** * smack_task_to_inode - copy task smack into the inode blob * @p: task to copy from * @inode: inode to copy to * * Sets the smack pointer in the inode security blob */ static void smack_task_to_inode(struct task_struct *p, struct inode *inode) { struct inode_smack *isp = smack_inode(inode); struct smack_known *skp = smk_of_task_struct_obj(p); isp->smk_inode = skp; isp->smk_flags |= SMK_INODE_INSTANT; } /* * Socket hooks.
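 *
 * Each sock carries a socket_smack blob holding an inbound label
 * (smk_in), an outbound label (smk_out) and the most recent peer label
 * (smk_packet, reported through SO_PEERSEC); the hooks below keep those
 * fields in step with the xattrs, the netlabel state and the peer.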
*/ /** * smack_sk_alloc_security - Allocate a socket blob * @sk: the socket * @family: unused * @gfp_flags: memory allocation flags * * Assign Smack pointers to current * * Returns 0 on success, -ENOMEM is there's no memory */ static int smack_sk_alloc_security(struct sock *sk, int family, gfp_t gfp_flags) { struct smack_known *skp = smk_of_current(); struct socket_smack *ssp = smack_sock(sk); /* * Sockets created by kernel threads receive web label. */ if (unlikely(current->flags & PF_KTHREAD)) { ssp->smk_in = &smack_known_web; ssp->smk_out = &smack_known_web; } else { ssp->smk_in = skp; ssp->smk_out = skp; } ssp->smk_packet = NULL; return 0; } #ifdef SMACK_IPV6_PORT_LABELING /** * smack_sk_free_security - Free a socket blob * @sk: the socket * * Clears the blob pointer */ static void smack_sk_free_security(struct sock *sk) { struct smk_port_label *spp; if (sk->sk_family == PF_INET6) { rcu_read_lock(); list_for_each_entry_rcu(spp, &smk_ipv6_port_list, list) { if (spp->smk_sock != sk) continue; spp->smk_can_reuse = 1; break; } rcu_read_unlock(); } } #endif /** * smack_sk_clone_security - Copy security context * @sk: the old socket * @newsk: the new socket * * Copy the security context of the old socket pointer to the cloned */ static void smack_sk_clone_security(const struct sock *sk, struct sock *newsk) { struct socket_smack *ssp_old = smack_sock(sk); struct socket_smack *ssp_new = smack_sock(newsk); *ssp_new = *ssp_old; } /** * smack_ipv4host_label - check host based restrictions * @sip: the object end * * looks for host based access restrictions * * This version will only be appropriate for really small sets of single label * hosts. The caller is responsible for ensuring that the RCU read lock is * taken before calling this function. * * Returns the label of the far end or NULL if it's not special. */ static struct smack_known *smack_ipv4host_label(struct sockaddr_in *sip) { struct smk_net4addr *snp; struct in_addr *siap = &sip->sin_addr; if (siap->s_addr == 0) return NULL; list_for_each_entry_rcu(snp, &smk_net4addr_list, list) /* * we break after finding the first match because * the list is sorted from longest to shortest mask * so we have found the most specific match */ if (snp->smk_host.s_addr == (siap->s_addr & snp->smk_mask.s_addr)) return snp->smk_label; return NULL; } #if IS_ENABLED(CONFIG_IPV6) /* * smk_ipv6_localhost - Check for local ipv6 host address * @sip: the address * * Returns boolean true if this is the localhost address */ static bool smk_ipv6_localhost(struct sockaddr_in6 *sip) { __be16 *be16p = (__be16 *)&sip->sin6_addr; __be32 *be32p = (__be32 *)&sip->sin6_addr; if (be32p[0] == 0 && be32p[1] == 0 && be32p[2] == 0 && be16p[6] == 0 && ntohs(be16p[7]) == 1) return true; return false; } /** * smack_ipv6host_label - check host based restrictions * @sip: the object end * * looks for host based access restrictions * * This version will only be appropriate for really small sets of single label * hosts. The caller is responsible for ensuring that the RCU read lock is * taken before calling this function. * * Returns the label of the far end or NULL if it's not special. */ static struct smack_known *smack_ipv6host_label(struct sockaddr_in6 *sip) { struct smk_net6addr *snp; struct in6_addr *sap = &sip->sin6_addr; int i; int found = 0; /* * It's local. Don't look for a host label. */ if (smk_ipv6_localhost(sip)) return NULL; list_for_each_entry_rcu(snp, &smk_net6addr_list, list) { /* * If the label is NULL the entry has * been renounced. Ignore it. 
*/ if (snp->smk_label == NULL) continue; /* * we break after finding the first match because * the list is sorted from longest to shortest mask * so we have found the most specific match */ for (found = 1, i = 0; i < 8; i++) { if ((sap->s6_addr16[i] & snp->smk_mask.s6_addr16[i]) != snp->smk_host.s6_addr16[i]) { found = 0; break; } } if (found) return snp->smk_label; } return NULL; } #endif /* CONFIG_IPV6 */ /** * smack_netlbl_add - Set the secattr on a socket * @sk: the socket * * Attach the outbound smack value (smk_out) to the socket. * * Returns 0 on success or an error code */ static int smack_netlbl_add(struct sock *sk) { struct socket_smack *ssp = smack_sock(sk); struct smack_known *skp = ssp->smk_out; int rc; local_bh_disable(); bh_lock_sock_nested(sk); rc = netlbl_sock_setattr(sk, sk->sk_family, &skp->smk_netlabel, netlbl_sk_lock_check(sk)); switch (rc) { case 0: ssp->smk_state = SMK_NETLBL_LABELED; break; case -EDESTADDRREQ: ssp->smk_state = SMK_NETLBL_REQSKB; rc = 0; break; } bh_unlock_sock(sk); local_bh_enable(); return rc; } /** * smack_netlbl_delete - Remove the secattr from a socket * @sk: the socket * * Remove the outbound smack value from a socket */ static void smack_netlbl_delete(struct sock *sk) { struct socket_smack *ssp = smack_sock(sk); /* * Take the label off the socket if one is set. */ if (ssp->smk_state != SMK_NETLBL_LABELED) return; local_bh_disable(); bh_lock_sock_nested(sk); netlbl_sock_delattr(sk); bh_unlock_sock(sk); local_bh_enable(); ssp->smk_state = SMK_NETLBL_UNLABELED; } /** * smk_ipv4_check - Perform IPv4 host access checks * @sk: the socket * @sap: the destination address * * Set the correct secattr for the given socket based on the destination * address and perform any outbound access checks needed. * * Returns 0 on success or an error code. * */ static int smk_ipv4_check(struct sock *sk, struct sockaddr_in *sap) { struct smack_known *skp; int rc = 0; struct smack_known *hkp; struct socket_smack *ssp = smack_sock(sk); struct smk_audit_info ad; rcu_read_lock(); hkp = smack_ipv4host_label(sap); if (hkp != NULL) { #ifdef CONFIG_AUDIT struct lsm_network_audit net; smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net); ad.a.u.net->family = sap->sin_family; ad.a.u.net->dport = sap->sin_port; ad.a.u.net->v4info.daddr = sap->sin_addr.s_addr; #endif skp = ssp->smk_out; rc = smk_access(skp, hkp, MAY_WRITE, &ad); rc = smk_bu_note("IPv4 host check", skp, hkp, MAY_WRITE, rc); /* * Clear the socket netlabel if it's set. 
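		 * A destination on the single-label host list needs no
		 * CIPSO/netlabel attribute on the wire, so once the write
		 * check passes the netlabel attribute is dropped from the
		 * socket.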
*/ if (!rc) smack_netlbl_delete(sk); } rcu_read_unlock(); return rc; } #if IS_ENABLED(CONFIG_IPV6) /** * smk_ipv6_check - check Smack access * @subject: subject Smack label * @object: object Smack label * @address: address * @act: the action being taken * * Check an IPv6 access */ static int smk_ipv6_check(struct smack_known *subject, struct smack_known *object, struct sockaddr_in6 *address, int act) { #ifdef CONFIG_AUDIT struct lsm_network_audit net; #endif struct smk_audit_info ad; int rc; #ifdef CONFIG_AUDIT smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net); ad.a.u.net->family = PF_INET6; ad.a.u.net->dport = address->sin6_port; if (act == SMK_RECEIVING) ad.a.u.net->v6info.saddr = address->sin6_addr; else ad.a.u.net->v6info.daddr = address->sin6_addr; #endif rc = smk_access(subject, object, MAY_WRITE, &ad); rc = smk_bu_note("IPv6 check", subject, object, MAY_WRITE, rc); return rc; } #endif /* CONFIG_IPV6 */ #ifdef SMACK_IPV6_PORT_LABELING /** * smk_ipv6_port_label - Smack port access table management * @sock: socket * @address: address * * Create or update the port list entry */ static void smk_ipv6_port_label(struct socket *sock, struct sockaddr *address) { struct sock *sk = sock->sk; struct sockaddr_in6 *addr6; struct socket_smack *ssp = smack_sock(sock->sk); struct smk_port_label *spp; unsigned short port = 0; if (address == NULL) { /* * This operation is changing the Smack information * on the bound socket. Take the changes to the port * as well. */ rcu_read_lock(); list_for_each_entry_rcu(spp, &smk_ipv6_port_list, list) { if (sk != spp->smk_sock) continue; spp->smk_in = ssp->smk_in; spp->smk_out = ssp->smk_out; rcu_read_unlock(); return; } /* * A NULL address is only used for updating existing * bound entries. If there isn't one, it's OK. */ rcu_read_unlock(); return; } addr6 = (struct sockaddr_in6 *)address; port = ntohs(addr6->sin6_port); /* * This is a special case that is safely ignored. */ if (port == 0) return; /* * Look for an existing port list entry. * This is an indication that a port is getting reused. */ rcu_read_lock(); list_for_each_entry_rcu(spp, &smk_ipv6_port_list, list) { if (spp->smk_port != port || spp->smk_sock_type != sock->type) continue; if (spp->smk_can_reuse != 1) { rcu_read_unlock(); return; } spp->smk_port = port; spp->smk_sock = sk; spp->smk_in = ssp->smk_in; spp->smk_out = ssp->smk_out; spp->smk_can_reuse = 0; rcu_read_unlock(); return; } rcu_read_unlock(); /* * A new port entry is required. */ spp = kzalloc(sizeof(*spp), GFP_KERNEL); if (spp == NULL) return; spp->smk_port = port; spp->smk_sock = sk; spp->smk_in = ssp->smk_in; spp->smk_out = ssp->smk_out; spp->smk_sock_type = sock->type; spp->smk_can_reuse = 0; mutex_lock(&smack_ipv6_lock); list_add_rcu(&spp->list, &smk_ipv6_port_list); mutex_unlock(&smack_ipv6_lock); return; } /** * smk_ipv6_port_check - check Smack port access * @sk: socket * @address: address * @act: the action being taken * * Create or update the port list entry */ static int smk_ipv6_port_check(struct sock *sk, struct sockaddr_in6 *address, int act) { struct smk_port_label *spp; struct socket_smack *ssp = smack_sock(sk); struct smack_known *skp = NULL; unsigned short port; struct smack_known *object; if (act == SMK_RECEIVING) { skp = smack_ipv6host_label(address); object = ssp->smk_in; } else { skp = ssp->smk_out; object = smack_ipv6host_label(address); } /* * The other end is a single label host. 
*/ if (skp != NULL && object != NULL) return smk_ipv6_check(skp, object, address, act); if (skp == NULL) skp = smack_net_ambient; if (object == NULL) object = smack_net_ambient; /* * It's remote, so port lookup does no good. */ if (!smk_ipv6_localhost(address)) return smk_ipv6_check(skp, object, address, act); /* * It's local so the send check has to have passed. */ if (act == SMK_RECEIVING) return 0; port = ntohs(address->sin6_port); rcu_read_lock(); list_for_each_entry_rcu(spp, &smk_ipv6_port_list, list) { if (spp->smk_port != port || spp->smk_sock_type != sk->sk_type) continue; object = spp->smk_in; if (act == SMK_CONNECTING) ssp->smk_packet = spp->smk_out; break; } rcu_read_unlock(); return smk_ipv6_check(skp, object, address, act); } #endif /** * smack_inode_setsecurity - set smack xattrs * @inode: the object * @name: attribute name * @value: attribute value * @size: size of the attribute * @flags: unused * * Sets the named attribute in the appropriate blob * * Returns 0 on success, or an error code */ static int smack_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags) { struct smack_known *skp; struct inode_smack *nsp = smack_inode(inode); struct socket_smack *ssp; struct socket *sock; int rc = 0; if (value == NULL || size > SMK_LONGLABEL || size == 0) return -EINVAL; if (strcmp(name, XATTR_SMACK_TRANSMUTE) == 0) { if (!S_ISDIR(inode->i_mode) || size != TRANS_TRUE_SIZE || strncmp(value, TRANS_TRUE, TRANS_TRUE_SIZE) != 0) return -EINVAL; nsp->smk_flags |= SMK_INODE_TRANSMUTE; return 0; } skp = smk_import_entry(value, size); if (IS_ERR(skp)) return PTR_ERR(skp); if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) { nsp->smk_inode = skp; nsp->smk_flags |= SMK_INODE_INSTANT; return 0; } /* * The rest of the Smack xattrs are only on sockets. */ if (inode->i_sb->s_magic != SOCKFS_MAGIC) return -EOPNOTSUPP; sock = SOCKET_I(inode); if (sock == NULL || sock->sk == NULL) return -EOPNOTSUPP; ssp = smack_sock(sock->sk); if (strcmp(name, XATTR_SMACK_IPIN) == 0) ssp->smk_in = skp; else if (strcmp(name, XATTR_SMACK_IPOUT) == 0) { ssp->smk_out = skp; if (sock->sk->sk_family == PF_INET) { rc = smack_netlbl_add(sock->sk); if (rc != 0) printk(KERN_WARNING "Smack: \"%s\" netlbl error %d.\n", __func__, -rc); } } else return -EOPNOTSUPP; #ifdef SMACK_IPV6_PORT_LABELING if (sock->sk->sk_family == PF_INET6) smk_ipv6_port_label(sock, NULL); #endif return 0; } /** * smack_socket_post_create - finish socket setup * @sock: the socket * @family: protocol family * @type: unused * @protocol: unused * @kern: unused * * Sets the netlabel information on the socket * * Returns 0 on success, and error code otherwise */ static int smack_socket_post_create(struct socket *sock, int family, int type, int protocol, int kern) { struct socket_smack *ssp; if (sock->sk == NULL) return 0; /* * Sockets created by kernel threads receive web label. */ if (unlikely(current->flags & PF_KTHREAD)) { ssp = smack_sock(sock->sk); ssp->smk_in = &smack_known_web; ssp->smk_out = &smack_known_web; } if (family != PF_INET) return 0; /* * Set the outbound netlbl. 
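	 * i.e. attach the netlabel (CIPSO) attribute for smk_out to the
	 * newly created socket.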
*/ return smack_netlbl_add(sock->sk); } /** * smack_socket_socketpair - create socket pair * @socka: one socket * @sockb: another socket * * Cross reference the peer labels for SO_PEERSEC * * Returns 0 */ static int smack_socket_socketpair(struct socket *socka, struct socket *sockb) { struct socket_smack *asp = smack_sock(socka->sk); struct socket_smack *bsp = smack_sock(sockb->sk); asp->smk_packet = bsp->smk_out; bsp->smk_packet = asp->smk_out; return 0; } #ifdef SMACK_IPV6_PORT_LABELING /** * smack_socket_bind - record port binding information. * @sock: the socket * @address: the port address * @addrlen: size of the address * * Records the label bound to a port. * * Returns 0 on success, and error code otherwise */ static int smack_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen) { if (sock->sk != NULL && sock->sk->sk_family == PF_INET6) { if (addrlen < SIN6_LEN_RFC2133 || address->sa_family != AF_INET6) return -EINVAL; smk_ipv6_port_label(sock, address); } return 0; } #endif /* SMACK_IPV6_PORT_LABELING */ /** * smack_socket_connect - connect access check * @sock: the socket * @sap: the other end * @addrlen: size of sap * * Verifies that a connection may be possible * * Returns 0 on success, and error code otherwise */ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap, int addrlen) { int rc = 0; if (sock->sk == NULL) return 0; if (sock->sk->sk_family != PF_INET && (!IS_ENABLED(CONFIG_IPV6) || sock->sk->sk_family != PF_INET6)) return 0; if (addrlen < offsetofend(struct sockaddr, sa_family)) return 0; #if IS_ENABLED(CONFIG_IPV6) if (sap->sa_family == AF_INET6) { struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap; struct smack_known *rsp = NULL; if (addrlen < SIN6_LEN_RFC2133) return 0; if (__is_defined(SMACK_IPV6_SECMARK_LABELING)) rsp = smack_ipv6host_label(sip); if (rsp != NULL) { struct socket_smack *ssp = smack_sock(sock->sk); rc = smk_ipv6_check(ssp->smk_out, rsp, sip, SMK_CONNECTING); } #ifdef SMACK_IPV6_PORT_LABELING rc = smk_ipv6_port_check(sock->sk, sip, SMK_CONNECTING); #endif return rc; } #endif /* CONFIG_IPV6 */ if (sap->sa_family != AF_INET || addrlen < sizeof(struct sockaddr_in)) return 0; rc = smk_ipv4_check(sock->sk, (struct sockaddr_in *)sap); return rc; } /** * smack_flags_to_may - convert S_ to MAY_ values * @flags: the S_ value * * Returns the equivalent MAY_ value */ static int smack_flags_to_may(int flags) { int may = 0; if (flags & S_IRUGO) may |= MAY_READ; if (flags & S_IWUGO) may |= MAY_WRITE; if (flags & S_IXUGO) may |= MAY_EXEC; return may; } /** * smack_msg_msg_alloc_security - Set the security blob for msg_msg * @msg: the object * * Returns 0 */ static int smack_msg_msg_alloc_security(struct msg_msg *msg) { struct smack_known **blob = smack_msg_msg(msg); *blob = smk_of_current(); return 0; } /** * smack_of_ipc - the smack pointer for the ipc * @isp: the object * * Returns a pointer to the smack value */ static struct smack_known *smack_of_ipc(struct kern_ipc_perm *isp) { struct smack_known **blob = smack_ipc(isp); return *blob; } /** * smack_ipc_alloc_security - Set the security blob for ipc * @isp: the object * * Returns 0 */ static int smack_ipc_alloc_security(struct kern_ipc_perm *isp) { struct smack_known **blob = smack_ipc(isp); *blob = smk_of_current(); return 0; } /** * smk_curacc_shm : check if current has access on shm * @isp : the object * @access : access requested * * Returns 0 if current has the requested access, error code otherwise */ static int smk_curacc_shm(struct kern_ipc_perm *isp, int 
access) { struct smack_known *ssp = smack_of_ipc(isp); struct smk_audit_info ad; int rc; #ifdef CONFIG_AUDIT smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_IPC); ad.a.u.ipc_id = isp->id; #endif rc = smk_curacc(ssp, access, &ad); rc = smk_bu_current("shm", ssp, access, rc); return rc; } /** * smack_shm_associate - Smack access check for shm * @isp: the object * @shmflg: access requested * * Returns 0 if current has the requested access, error code otherwise */ static int smack_shm_associate(struct kern_ipc_perm *isp, int shmflg) { int may; may = smack_flags_to_may(shmflg); return smk_curacc_shm(isp, may); } /** * smack_shm_shmctl - Smack access check for shm * @isp: the object * @cmd: what it wants to do * * Returns 0 if current has the requested access, error code otherwise */ static int smack_shm_shmctl(struct kern_ipc_perm *isp, int cmd) { int may; switch (cmd) { case IPC_STAT: case SHM_STAT: case SHM_STAT_ANY: may = MAY_READ; break; case IPC_SET: case SHM_LOCK: case SHM_UNLOCK: case IPC_RMID: may = MAY_READWRITE; break; case IPC_INFO: case SHM_INFO: /* * System level information. */ return 0; default: return -EINVAL; } return smk_curacc_shm(isp, may); } /** * smack_shm_shmat - Smack access for shmat * @isp: the object * @shmaddr: unused * @shmflg: access requested * * Returns 0 if current has the requested access, error code otherwise */ static int smack_shm_shmat(struct kern_ipc_perm *isp, char __user *shmaddr, int shmflg) { int may; may = smack_flags_to_may(shmflg); return smk_curacc_shm(isp, may); } /** * smk_curacc_sem : check if current has access on sem * @isp : the object * @access : access requested * * Returns 0 if current has the requested access, error code otherwise */ static int smk_curacc_sem(struct kern_ipc_perm *isp, int access) { struct smack_known *ssp = smack_of_ipc(isp); struct smk_audit_info ad; int rc; #ifdef CONFIG_AUDIT smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_IPC); ad.a.u.ipc_id = isp->id; #endif rc = smk_curacc(ssp, access, &ad); rc = smk_bu_current("sem", ssp, access, rc); return rc; } /** * smack_sem_associate - Smack access check for sem * @isp: the object * @semflg: access requested * * Returns 0 if current has the requested access, error code otherwise */ static int smack_sem_associate(struct kern_ipc_perm *isp, int semflg) { int may; may = smack_flags_to_may(semflg); return smk_curacc_sem(isp, may); } /** * smack_sem_semctl - Smack access check for sem * @isp: the object * @cmd: what it wants to do * * Returns 0 if current has the requested access, error code otherwise */ static int smack_sem_semctl(struct kern_ipc_perm *isp, int cmd) { int may; switch (cmd) { case GETPID: case GETNCNT: case GETZCNT: case GETVAL: case GETALL: case IPC_STAT: case SEM_STAT: case SEM_STAT_ANY: may = MAY_READ; break; case SETVAL: case SETALL: case IPC_RMID: case IPC_SET: may = MAY_READWRITE; break; case IPC_INFO: case SEM_INFO: /* * System level information */ return 0; default: return -EINVAL; } return smk_curacc_sem(isp, may); } /** * smack_sem_semop - Smack checks of semaphore operations * @isp: the object * @sops: unused * @nsops: unused * @alter: unused * * Treated as read and write in all cases. 
* * Returns 0 if access is allowed, error code otherwise */ static int smack_sem_semop(struct kern_ipc_perm *isp, struct sembuf *sops, unsigned nsops, int alter) { return smk_curacc_sem(isp, MAY_READWRITE); } /** * smk_curacc_msq : helper to check if current has access on msq * @isp : the msq * @access : access requested * * return 0 if current has access, error otherwise */ static int smk_curacc_msq(struct kern_ipc_perm *isp, int access) { struct smack_known *msp = smack_of_ipc(isp); struct smk_audit_info ad; int rc; #ifdef CONFIG_AUDIT smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_IPC); ad.a.u.ipc_id = isp->id; #endif rc = smk_curacc(msp, access, &ad); rc = smk_bu_current("msq", msp, access, rc); return rc; } /** * smack_msg_queue_associate - Smack access check for msg_queue * @isp: the object * @msqflg: access requested * * Returns 0 if current has the requested access, error code otherwise */ static int smack_msg_queue_associate(struct kern_ipc_perm *isp, int msqflg) { int may; may = smack_flags_to_may(msqflg); return smk_curacc_msq(isp, may); } /** * smack_msg_queue_msgctl - Smack access check for msg_queue * @isp: the object * @cmd: what it wants to do * * Returns 0 if current has the requested access, error code otherwise */ static int smack_msg_queue_msgctl(struct kern_ipc_perm *isp, int cmd) { int may; switch (cmd) { case IPC_STAT: case MSG_STAT: case MSG_STAT_ANY: may = MAY_READ; break; case IPC_SET: case IPC_RMID: may = MAY_READWRITE; break; case IPC_INFO: case MSG_INFO: /* * System level information */ return 0; default: return -EINVAL; } return smk_curacc_msq(isp, may); } /** * smack_msg_queue_msgsnd - Smack access check for msg_queue * @isp: the object * @msg: unused * @msqflg: access requested * * Returns 0 if current has the requested access, error code otherwise */ static int smack_msg_queue_msgsnd(struct kern_ipc_perm *isp, struct msg_msg *msg, int msqflg) { int may; may = smack_flags_to_may(msqflg); return smk_curacc_msq(isp, may); } /** * smack_msg_queue_msgrcv - Smack access check for msg_queue * @isp: the object * @msg: unused * @target: unused * @type: unused * @mode: unused * * Returns 0 if current has read and write access, error code otherwise */ static int smack_msg_queue_msgrcv(struct kern_ipc_perm *isp, struct msg_msg *msg, struct task_struct *target, long type, int mode) { return smk_curacc_msq(isp, MAY_READWRITE); } /** * smack_ipc_permission - Smack access for ipc_permission() * @ipp: the object permissions * @flag: access requested * * Returns 0 if current has read and write access, error code otherwise */ static int smack_ipc_permission(struct kern_ipc_perm *ipp, short flag) { struct smack_known **blob = smack_ipc(ipp); struct smack_known *iskp = *blob; int may = smack_flags_to_may(flag); struct smk_audit_info ad; int rc; #ifdef CONFIG_AUDIT smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_IPC); ad.a.u.ipc_id = ipp->id; #endif rc = smk_curacc(iskp, may, &ad); rc = smk_bu_current("svipc", iskp, may, rc); return rc; } /** * smack_ipc_getlsmprop - Extract smack security data * @ipp: the object permissions * @prop: where result will be saved */ static void smack_ipc_getlsmprop(struct kern_ipc_perm *ipp, struct lsm_prop *prop) { struct smack_known **iskpp = smack_ipc(ipp); prop->smack.skp = *iskpp; } /** * smack_d_instantiate - Make sure the blob is correct on an inode * @opt_dentry: dentry where inode will be attached * @inode: the object * * Set the inode's security blob if it hasn't been done already. 
*/ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode) { struct super_block *sbp; struct superblock_smack *sbsp; struct inode_smack *isp; struct smack_known *skp; struct smack_known *ckp = smk_of_current(); struct smack_known *final; char trattr[TRANS_TRUE_SIZE]; int transflag = 0; int rc; struct dentry *dp; if (inode == NULL) return; isp = smack_inode(inode); /* * If the inode is already instantiated * take the quick way out */ if (isp->smk_flags & SMK_INODE_INSTANT) return; sbp = inode->i_sb; sbsp = smack_superblock(sbp); /* * We're going to use the superblock default label * if there's no label on the file. */ final = sbsp->smk_default; /* * If this is the root inode the superblock * may be in the process of initialization. * If that is the case use the root value out * of the superblock. */ if (opt_dentry->d_parent == opt_dentry) { switch (sbp->s_magic) { case CGROUP_SUPER_MAGIC: case CGROUP2_SUPER_MAGIC: /* * The cgroup filesystem is never mounted, * so there's no opportunity to set the mount * options. */ sbsp->smk_root = &smack_known_star; sbsp->smk_default = &smack_known_star; isp->smk_inode = sbsp->smk_root; break; case TMPFS_MAGIC: /* * What about shmem/tmpfs anonymous files with dentry * obtained from d_alloc_pseudo()? */ isp->smk_inode = smk_of_current(); break; case PIPEFS_MAGIC: isp->smk_inode = smk_of_current(); break; case SOCKFS_MAGIC: /* * Socket access is controlled by the socket * structures associated with the task involved. */ isp->smk_inode = &smack_known_star; break; default: isp->smk_inode = sbsp->smk_root; break; } isp->smk_flags |= SMK_INODE_INSTANT; return; } /* * This is pretty hackish. * Casey says that we shouldn't have to do * file system specific code, but it does help * with keeping it simple. */ switch (sbp->s_magic) { case SMACK_MAGIC: case CGROUP_SUPER_MAGIC: case CGROUP2_SUPER_MAGIC: /* * Casey says that it's a little embarrassing * that the smack file system doesn't do * extended attributes. * * Cgroupfs is special */ final = &smack_known_star; break; case DEVPTS_SUPER_MAGIC: /* * devpts seems content with the label of the task. * Programs that change smack have to treat the * pty with respect. */ final = ckp; break; case PROC_SUPER_MAGIC: /* * Casey says procfs appears not to care. * The superblock default suffices. */ break; case TMPFS_MAGIC: /* * Device labels should come from the filesystem, * but watch out, because they're volitile, * getting recreated on every reboot. */ final = &smack_known_star; /* * If a smack value has been set we want to use it, * but since tmpfs isn't giving us the opportunity * to set mount options simulate setting the * superblock default. */ fallthrough; default: /* * This isn't an understood special case. * Get the value from the xattr. */ /* * UDS inode has fixed label (*) */ if (S_ISSOCK(inode->i_mode)) { final = &smack_known_star; break; } /* * No xattr support means, alas, no SMACK label. * Use the aforeapplied default. * It would be curious if the label of the task * does not match that assigned. */ if (!(inode->i_opflags & IOP_XATTR)) break; /* * Get the dentry for xattr. */ dp = dget(opt_dentry); skp = smk_fetch(XATTR_NAME_SMACK, inode, dp); if (!IS_ERR_OR_NULL(skp)) final = skp; /* * Transmuting directory */ if (S_ISDIR(inode->i_mode)) { /* * If this is a new directory and the label was * transmuted when the inode was initialized * set the transmute attribute on the directory * and mark the inode. * * If there is a transmute attribute on the * directory mark the inode. 
*/ rc = __vfs_getxattr(dp, inode, XATTR_NAME_SMACKTRANSMUTE, trattr, TRANS_TRUE_SIZE); if (rc >= 0 && strncmp(trattr, TRANS_TRUE, TRANS_TRUE_SIZE) != 0) rc = -EINVAL; if (rc >= 0) transflag = SMK_INODE_TRANSMUTE; } /* * Don't let the exec or mmap label be "*" or "@". */ skp = smk_fetch(XATTR_NAME_SMACKEXEC, inode, dp); if (IS_ERR(skp) || skp == &smack_known_star || skp == &smack_known_web) skp = NULL; isp->smk_task = skp; skp = smk_fetch(XATTR_NAME_SMACKMMAP, inode, dp); if (IS_ERR(skp) || skp == &smack_known_star || skp == &smack_known_web) skp = NULL; isp->smk_mmap = skp; dput(dp); break; } if (final == NULL) isp->smk_inode = ckp; else isp->smk_inode = final; isp->smk_flags |= (SMK_INODE_INSTANT | transflag); return; } /** * smack_getselfattr - Smack current process attribute * @attr: which attribute to fetch * @ctx: buffer to receive the result * @size: available size in, actual size out * @flags: reserved, currently zero * * Fill the passed user space @ctx with the details of the requested * attribute. * * Returns the number of attributes on success, an error code otherwise. * There will only ever be one attribute. */ static int smack_getselfattr(unsigned int attr, struct lsm_ctx __user *ctx, u32 *size, u32 flags) { int rc; struct smack_known *skp; if (attr != LSM_ATTR_CURRENT) return -EOPNOTSUPP; skp = smk_of_current(); rc = lsm_fill_user_ctx(ctx, size, skp->smk_known, strlen(skp->smk_known) + 1, LSM_ID_SMACK, 0); return (!rc ? 1 : rc); } /** * smack_getprocattr - Smack process attribute access * @p: the object task * @name: the name of the attribute in /proc/.../attr * @value: where to put the result * * Places a copy of the task Smack into value * * Returns the length of the smack label or an error code */ static int smack_getprocattr(struct task_struct *p, const char *name, char **value) { struct smack_known *skp = smk_of_task_struct_obj(p); char *cp; int slen; if (strcmp(name, "current") != 0) return -EINVAL; cp = kstrdup(skp->smk_known, GFP_KERNEL); if (cp == NULL) return -ENOMEM; slen = strlen(cp); *value = cp; return slen; } /** * do_setattr - Smack process attribute setting * @attr: the ID of the attribute * @value: the value to set * @size: the size of the value * * Sets the Smack value of the task. Only setting self * is permitted and only with privilege * * Returns zero on success or an error code */ static int do_setattr(unsigned int attr, void *value, size_t size) { struct task_smack *tsp = smack_cred(current_cred()); struct cred *new; struct smack_known *skp; int label_len; /* * let unprivileged user validate input, check permissions later */ if (value == NULL || size == 0 || size >= SMK_LONGLABEL) return -EINVAL; label_len = smk_parse_label_len(value, size); if (label_len < 0 || label_len != size) return -EINVAL; /* * No process is ever allowed the web ("@") label * and the star ("*") label. 
*/ if (label_len == 1 /* '@', '*' */) { const char c = *(const char *)value; if (c == *smack_known_web.smk_known || c == *smack_known_star.smk_known) return -EPERM; } if (!smack_privileged(CAP_MAC_ADMIN)) { const struct smack_known_list_elem *sklep; list_for_each_entry(sklep, &tsp->smk_relabel, list) { const char *cp = sklep->smk_label->smk_known; if (strlen(cp) == label_len && strncmp(cp, value, label_len) == 0) goto in_relabel; } return -EPERM; in_relabel: ; } skp = smk_import_valid_label(value, label_len, GFP_KERNEL); if (IS_ERR(skp)) return PTR_ERR(skp); new = prepare_creds(); if (new == NULL) return -ENOMEM; tsp = smack_cred(new); tsp->smk_task = skp; /* * process can change its label only once */ smk_destroy_label_list(&tsp->smk_relabel); commit_creds(new); return 0; } /** * smack_setselfattr - Set a Smack process attribute * @attr: which attribute to set * @ctx: buffer containing the data * @size: size of @ctx * @flags: reserved, must be zero * * Fill the passed user space @ctx with the details of the requested * attribute. * * Returns 0 on success, an error code otherwise. */ static int smack_setselfattr(unsigned int attr, struct lsm_ctx *ctx, u32 size, u32 flags) { if (attr != LSM_ATTR_CURRENT) return -EOPNOTSUPP; if (ctx->flags) return -EINVAL; /* * string must have \0 terminator, included in ctx->ctx * (see description of struct lsm_ctx) */ if (ctx->ctx_len == 0) return -EINVAL; if (ctx->ctx[ctx->ctx_len - 1] != '\0') return -EINVAL; /* * other do_setattr() caller, smack_setprocattr(), * does not count \0 into size, so * decreasing length by 1 to accommodate the divergence. */ return do_setattr(attr, ctx->ctx, ctx->ctx_len - 1); } /** * smack_setprocattr - Smack process attribute setting * @name: the name of the attribute in /proc/.../attr * @value: the value to set * @size: the size of the value * * Sets the Smack value of the task. Only setting self * is permitted and only with privilege * * Returns the size of the input value or an error code */ static int smack_setprocattr(const char *name, void *value, size_t size) { size_t realsize = size; unsigned int attr = lsm_name_to_attr(name); switch (attr) { case LSM_ATTR_UNDEF: return -EINVAL; default: return -EOPNOTSUPP; case LSM_ATTR_CURRENT: ; } /* * The value for the "current" attribute is the label * followed by one of the 4 trailers: none, \0, \n, \n\0 * * I.e. following inputs are accepted as 3-characters long label "foo": * * "foo" (3 characters) * "foo\0" (4 characters) * "foo\n" (4 characters) * "foo\n\0" (5 characters) */ if (realsize && (((const char *)value)[realsize - 1] == '\0')) --realsize; if (realsize && (((const char *)value)[realsize - 1] == '\n')) --realsize; return do_setattr(attr, value, realsize) ? 
: size; } /** * smack_unix_stream_connect - Smack access on UDS * @sock: one sock * @other: the other sock * @newsk: unused * * Return 0 if a subject with the smack of sock could access * an object with the smack of other, otherwise an error code */ static int smack_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk) { struct smack_known *skp; struct smack_known *okp; struct socket_smack *ssp = smack_sock(sock); struct socket_smack *osp = smack_sock(other); struct socket_smack *nsp = smack_sock(newsk); struct smk_audit_info ad; int rc = 0; #ifdef CONFIG_AUDIT struct lsm_network_audit net; #endif if (!smack_privileged(CAP_MAC_OVERRIDE)) { skp = ssp->smk_out; okp = osp->smk_in; #ifdef CONFIG_AUDIT smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net); smk_ad_setfield_u_net_sk(&ad, other); #endif rc = smk_access(skp, okp, MAY_WRITE, &ad); rc = smk_bu_note("UDS connect", skp, okp, MAY_WRITE, rc); if (rc == 0) { okp = osp->smk_out; skp = ssp->smk_in; rc = smk_access(okp, skp, MAY_WRITE, &ad); rc = smk_bu_note("UDS connect", okp, skp, MAY_WRITE, rc); } } if (rc == 0) { /* * Cross reference the peer labels for SO_PEERSEC. */ nsp->smk_packet = ssp->smk_out; ssp->smk_packet = osp->smk_out; /* * new/child/established socket must inherit listening socket labels */ nsp->smk_out = osp->smk_out; nsp->smk_in = osp->smk_in; } return rc; } /** * smack_unix_may_send - Smack access on UDS * @sock: one socket * @other: the other socket * * Return 0 if a subject with the smack of sock could access * an object with the smack of other, otherwise an error code */ static int smack_unix_may_send(struct socket *sock, struct socket *other) { struct socket_smack *ssp = smack_sock(sock->sk); struct socket_smack *osp = smack_sock(other->sk); struct smk_audit_info ad; int rc; #ifdef CONFIG_AUDIT struct lsm_network_audit net; smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net); smk_ad_setfield_u_net_sk(&ad, other->sk); #endif if (smack_privileged(CAP_MAC_OVERRIDE)) return 0; rc = smk_access(ssp->smk_out, osp->smk_in, MAY_WRITE, &ad); rc = smk_bu_note("UDS send", ssp->smk_out, osp->smk_in, MAY_WRITE, rc); return rc; } /** * smack_socket_sendmsg - Smack check based on destination host * @sock: the socket * @msg: the message * @size: the size of the message * * Return 0 if the current subject can write to the destination host. * For IPv4 this is only a question if the destination is a single label host. * For IPv6 this is a check against the label of the port. 
*/ static int smack_socket_sendmsg(struct socket *sock, struct msghdr *msg, int size) { struct sockaddr_in *sip = (struct sockaddr_in *) msg->msg_name; #if IS_ENABLED(CONFIG_IPV6) struct sockaddr_in6 *sap = (struct sockaddr_in6 *) msg->msg_name; #endif #ifdef SMACK_IPV6_SECMARK_LABELING struct socket_smack *ssp = smack_sock(sock->sk); struct smack_known *rsp; #endif int rc = 0; /* * Perfectly reasonable for this to be NULL */ if (sip == NULL) return 0; switch (sock->sk->sk_family) { case AF_INET: if (msg->msg_namelen < sizeof(struct sockaddr_in) || sip->sin_family != AF_INET) return -EINVAL; rc = smk_ipv4_check(sock->sk, sip); break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: if (msg->msg_namelen < SIN6_LEN_RFC2133 || sap->sin6_family != AF_INET6) return -EINVAL; #ifdef SMACK_IPV6_SECMARK_LABELING rsp = smack_ipv6host_label(sap); if (rsp != NULL) rc = smk_ipv6_check(ssp->smk_out, rsp, sap, SMK_CONNECTING); #endif #ifdef SMACK_IPV6_PORT_LABELING rc = smk_ipv6_port_check(sock->sk, sap, SMK_SENDING); #endif #endif /* IS_ENABLED(CONFIG_IPV6) */ break; } return rc; } /** * smack_from_secattr - Convert a netlabel attr.mls.lvl/attr.mls.cat pair to smack * @sap: netlabel secattr * @ssp: socket security information * * Returns a pointer to a Smack label entry found on the label list. */ static struct smack_known *smack_from_secattr(struct netlbl_lsm_secattr *sap, struct socket_smack *ssp) { struct smack_known *skp; int found = 0; int acat; int kcat; /* * Netlabel found it in the cache. */ if ((sap->flags & NETLBL_SECATTR_CACHE) != 0) return (struct smack_known *)sap->cache->data; if ((sap->flags & NETLBL_SECATTR_SECID) != 0) /* * Looks like a fallback, which gives us a secid. */ return smack_from_secid(sap->attr.secid); if ((sap->flags & NETLBL_SECATTR_MLS_LVL) != 0) { /* * Looks like a CIPSO packet. * If there are flags but no level netlabel isn't * behaving the way we expect it to. * * Look it up in the label table * Without guidance regarding the smack value * for the packet fall back on the network * ambient value. */ rcu_read_lock(); list_for_each_entry_rcu(skp, &smack_known_list, list) { if (sap->attr.mls.lvl != skp->smk_netlabel.attr.mls.lvl) continue; /* * Compare the catsets. Use the netlbl APIs. */ if ((sap->flags & NETLBL_SECATTR_MLS_CAT) == 0) { if ((skp->smk_netlabel.flags & NETLBL_SECATTR_MLS_CAT) == 0) found = 1; break; } for (acat = -1, kcat = -1; acat == kcat; ) { acat = netlbl_catmap_walk(sap->attr.mls.cat, acat + 1); kcat = netlbl_catmap_walk( skp->smk_netlabel.attr.mls.cat, kcat + 1); if (acat < 0 || kcat < 0) break; } if (acat == kcat) { found = 1; break; } } rcu_read_unlock(); if (found) return skp; if (ssp != NULL && ssp->smk_in == &smack_known_star) return &smack_known_web; return &smack_known_star; } /* * Without guidance regarding the smack value * for the packet fall back on the network * ambient value. 
*/ return smack_net_ambient; } #if IS_ENABLED(CONFIG_IPV6) static int smk_skb_to_addr_ipv6(struct sk_buff *skb, struct sockaddr_in6 *sip) { u8 nexthdr; int offset; int proto = -EINVAL; struct ipv6hdr _ipv6h; struct ipv6hdr *ip6; __be16 frag_off; struct tcphdr _tcph, *th; struct udphdr _udph, *uh; sip->sin6_port = 0; offset = skb_network_offset(skb); ip6 = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h); if (ip6 == NULL) return -EINVAL; sip->sin6_addr = ip6->saddr; nexthdr = ip6->nexthdr; offset += sizeof(_ipv6h); offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off); if (offset < 0) return -EINVAL; proto = nexthdr; switch (proto) { case IPPROTO_TCP: th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph); if (th != NULL) sip->sin6_port = th->source; break; case IPPROTO_UDP: case IPPROTO_UDPLITE: uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph); if (uh != NULL) sip->sin6_port = uh->source; break; } return proto; } #endif /* CONFIG_IPV6 */ /** * smack_from_skb - Smack data from the secmark in an skb * @skb: packet * * Returns smack_known of the secmark or NULL if that won't work. */ #ifdef CONFIG_NETWORK_SECMARK static struct smack_known *smack_from_skb(struct sk_buff *skb) { if (skb == NULL || skb->secmark == 0) return NULL; return smack_from_secid(skb->secmark); } #else static inline struct smack_known *smack_from_skb(struct sk_buff *skb) { return NULL; } #endif /** * smack_from_netlbl - Smack data from the IP options in an skb * @sk: socket data came in on * @family: address family * @skb: packet * * Find the Smack label in the IP options. If it hasn't been * added to the netlabel cache, add it here. * * Returns smack_known of the IP options or NULL if that won't work. */ static struct smack_known *smack_from_netlbl(const struct sock *sk, u16 family, struct sk_buff *skb) { struct netlbl_lsm_secattr secattr; struct socket_smack *ssp = NULL; struct smack_known *skp = NULL; netlbl_secattr_init(&secattr); if (sk) ssp = smack_sock(sk); if (netlbl_skbuff_getattr(skb, family, &secattr) == 0) { skp = smack_from_secattr(&secattr, ssp); if (secattr.flags & NETLBL_SECATTR_CACHEABLE) netlbl_cache_add(skb, family, &skp->smk_netlabel); } netlbl_secattr_destroy(&secattr); return skp; } /** * smack_socket_sock_rcv_skb - Smack packet delivery access check * @sk: socket * @skb: packet * * Returns 0 if the packet should be delivered, an error code otherwise */ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) { struct socket_smack *ssp = smack_sock(sk); struct smack_known *skp = NULL; int rc = 0; struct smk_audit_info ad; u16 family = sk->sk_family; #ifdef CONFIG_AUDIT struct lsm_network_audit net; #endif #if IS_ENABLED(CONFIG_IPV6) struct sockaddr_in6 sadd; int proto; if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP)) family = PF_INET; #endif /* CONFIG_IPV6 */ switch (family) { case PF_INET: /* * If there is a secmark use it rather than the CIPSO label. * If there is no secmark fall back to CIPSO. * The secmark is assumed to reflect policy better. */ skp = smack_from_skb(skb); if (skp == NULL) { skp = smack_from_netlbl(sk, family, skb); if (skp == NULL) skp = smack_net_ambient; } #ifdef CONFIG_AUDIT smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net); ad.a.u.net->family = family; ad.a.u.net->netif = skb->skb_iif; ipv4_skb_to_auditdata(skb, &ad.a, NULL); #endif /* * Receiving a packet requires that the other end * be able to write here. Read access is not required. * This is the simplest possible security model * for networking. 
*/ rc = smk_access(skp, ssp->smk_in, MAY_WRITE, &ad); rc = smk_bu_note("IPv4 delivery", skp, ssp->smk_in, MAY_WRITE, rc); if (rc != 0) netlbl_skbuff_err(skb, family, rc, 0); break; #if IS_ENABLED(CONFIG_IPV6) case PF_INET6: proto = smk_skb_to_addr_ipv6(skb, &sadd); if (proto != IPPROTO_UDP && proto != IPPROTO_UDPLITE && proto != IPPROTO_TCP) break; #ifdef SMACK_IPV6_SECMARK_LABELING skp = smack_from_skb(skb); if (skp == NULL) { if (smk_ipv6_localhost(&sadd)) break; skp = smack_ipv6host_label(&sadd); if (skp == NULL) skp = smack_net_ambient; } #ifdef CONFIG_AUDIT smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net); ad.a.u.net->family = family; ad.a.u.net->netif = skb->skb_iif; ipv6_skb_to_auditdata(skb, &ad.a, NULL); #endif /* CONFIG_AUDIT */ rc = smk_access(skp, ssp->smk_in, MAY_WRITE, &ad); rc = smk_bu_note("IPv6 delivery", skp, ssp->smk_in, MAY_WRITE, rc); #endif /* SMACK_IPV6_SECMARK_LABELING */ #ifdef SMACK_IPV6_PORT_LABELING rc = smk_ipv6_port_check(sk, &sadd, SMK_RECEIVING); #endif /* SMACK_IPV6_PORT_LABELING */ if (rc != 0) icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADM_PROHIBITED, 0); break; #endif /* CONFIG_IPV6 */ } return rc; } /** * smack_socket_getpeersec_stream - pull in packet label * @sock: the socket * @optval: user's destination * @optlen: size thereof * @len: max thereof * * returns zero on success, an error code otherwise */ static int smack_socket_getpeersec_stream(struct socket *sock, sockptr_t optval, sockptr_t optlen, unsigned int len) { struct socket_smack *ssp; char *rcp = ""; u32 slen = 1; int rc = 0; ssp = smack_sock(sock->sk); if (ssp->smk_packet != NULL) { rcp = ssp->smk_packet->smk_known; slen = strlen(rcp) + 1; } if (slen > len) { rc = -ERANGE; goto out_len; } if (copy_to_sockptr(optval, rcp, slen)) rc = -EFAULT; out_len: if (copy_to_sockptr(optlen, &slen, sizeof(slen))) rc = -EFAULT; return rc; } /** * smack_socket_getpeersec_dgram - pull in packet label * @sock: the peer socket * @skb: packet data * @secid: pointer to where to put the secid of the packet * * Sets the netlabel socket state on sk from parent */ static int smack_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid) { struct socket_smack *ssp = NULL; struct smack_known *skp; struct sock *sk = NULL; int family = PF_UNSPEC; u32 s = 0; /* 0 is the invalid secid */ if (skb != NULL) { if (skb->protocol == htons(ETH_P_IP)) family = PF_INET; #if IS_ENABLED(CONFIG_IPV6) else if (skb->protocol == htons(ETH_P_IPV6)) family = PF_INET6; #endif /* CONFIG_IPV6 */ } if (family == PF_UNSPEC && sock != NULL) family = sock->sk->sk_family; switch (family) { case PF_UNIX: ssp = smack_sock(sock->sk); s = ssp->smk_out->smk_secid; break; case PF_INET: skp = smack_from_skb(skb); if (skp) { s = skp->smk_secid; break; } /* * Translate what netlabel gave us. 
*/ if (sock != NULL) sk = sock->sk; skp = smack_from_netlbl(sk, family, skb); if (skp != NULL) s = skp->smk_secid; break; case PF_INET6: #ifdef SMACK_IPV6_SECMARK_LABELING skp = smack_from_skb(skb); if (skp) s = skp->smk_secid; #endif break; } *secid = s; if (s == 0) return -EINVAL; return 0; } /** * smack_inet_conn_request - Smack access check on connect * @sk: socket involved * @skb: packet * @req: unused * * Returns 0 if a task with the packet label could write to * the socket, otherwise an error code */ static int smack_inet_conn_request(const struct sock *sk, struct sk_buff *skb, struct request_sock *req) { u16 family = sk->sk_family; struct smack_known *skp; struct socket_smack *ssp = smack_sock(sk); struct sockaddr_in addr; struct iphdr *hdr; struct smack_known *hskp; int rc; struct smk_audit_info ad; #ifdef CONFIG_AUDIT struct lsm_network_audit net; #endif #if IS_ENABLED(CONFIG_IPV6) if (family == PF_INET6) { /* * Handle mapped IPv4 packets arriving * via IPv6 sockets. Don't set up netlabel * processing on IPv6. */ if (skb->protocol == htons(ETH_P_IP)) family = PF_INET; else return 0; } #endif /* CONFIG_IPV6 */ /* * If there is a secmark use it rather than the CIPSO label. * If there is no secmark fall back to CIPSO. * The secmark is assumed to reflect policy better. */ skp = smack_from_skb(skb); if (skp == NULL) { skp = smack_from_netlbl(sk, family, skb); if (skp == NULL) skp = &smack_known_huh; } #ifdef CONFIG_AUDIT smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net); ad.a.u.net->family = family; ad.a.u.net->netif = skb->skb_iif; ipv4_skb_to_auditdata(skb, &ad.a, NULL); #endif /* * Receiving a packet requires that the other end be able to write * here. Read access is not required. */ rc = smk_access(skp, ssp->smk_in, MAY_WRITE, &ad); rc = smk_bu_note("IPv4 connect", skp, ssp->smk_in, MAY_WRITE, rc); if (rc != 0) return rc; /* * Save the peer's label in the request_sock so we can later setup * smk_packet in the child socket so that SO_PEERCRED can report it. */ req->peer_secid = skp->smk_secid; /* * We need to decide if we want to label the incoming connection here * if we do we only need to label the request_sock and the stack will * propagate the wire-label to the sock when it is created. */ hdr = ip_hdr(skb); addr.sin_addr.s_addr = hdr->saddr; rcu_read_lock(); hskp = smack_ipv4host_label(&addr); rcu_read_unlock(); if (hskp == NULL) rc = netlbl_req_setattr(req, &ssp->smk_out->smk_netlabel); else netlbl_req_delattr(req); return rc; } /** * smack_inet_csk_clone - Copy the connection information to the new socket * @sk: the new socket * @req: the connection's request_sock * * Transfer the connection's peer label to the newly created socket. */ static void smack_inet_csk_clone(struct sock *sk, const struct request_sock *req) { struct socket_smack *ssp = smack_sock(sk); struct smack_known *skp; if (req->peer_secid != 0) { skp = smack_from_secid(req->peer_secid); ssp->smk_packet = skp; } else ssp->smk_packet = NULL; } /* * Key management security hooks * * Casey has not tested key support very heavily. * The permission check is most likely too restrictive. * If you care about keys please have a look. 
*/ #ifdef CONFIG_KEYS /** * smack_key_alloc - Set the key security blob * @key: object * @cred: the credentials to use * @flags: unused * * No allocation required * * Returns 0 */ static int smack_key_alloc(struct key *key, const struct cred *cred, unsigned long flags) { struct smack_known **blob = smack_key(key); struct smack_known *skp = smk_of_task(smack_cred(cred)); *blob = skp; return 0; } /** * smack_key_permission - Smack access on a key * @key_ref: gets to the object * @cred: the credentials to use * @need_perm: requested key permission * * Return 0 if the task has read and write to the object, * an error code otherwise */ static int smack_key_permission(key_ref_t key_ref, const struct cred *cred, enum key_need_perm need_perm) { struct smack_known **blob; struct smack_known *skp; struct key *keyp; struct smk_audit_info ad; struct smack_known *tkp = smk_of_task(smack_cred(cred)); int request = 0; int rc; /* * Validate requested permissions */ switch (need_perm) { case KEY_NEED_READ: case KEY_NEED_SEARCH: case KEY_NEED_VIEW: request |= MAY_READ; break; case KEY_NEED_WRITE: case KEY_NEED_LINK: case KEY_NEED_SETATTR: request |= MAY_WRITE; break; case KEY_NEED_UNSPECIFIED: case KEY_NEED_UNLINK: case KEY_SYSADMIN_OVERRIDE: case KEY_AUTHTOKEN_OVERRIDE: case KEY_DEFER_PERM_CHECK: return 0; default: return -EINVAL; } keyp = key_ref_to_ptr(key_ref); if (keyp == NULL) return -EINVAL; /* * If the key hasn't been initialized give it access so that * it may do so. */ blob = smack_key(keyp); skp = *blob; if (skp == NULL) return 0; /* * This should not occur */ if (tkp == NULL) return -EACCES; if (smack_privileged(CAP_MAC_OVERRIDE)) return 0; #ifdef CONFIG_AUDIT smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_KEY); ad.a.u.key_struct.key = keyp->serial; ad.a.u.key_struct.key_desc = keyp->description; #endif rc = smk_access(tkp, skp, request, &ad); rc = smk_bu_note("key access", tkp, skp, request, rc); return rc; } /* * smack_key_getsecurity - Smack label tagging the key * @key points to the key to be queried * @_buffer points to a pointer that should be set to point to the * resulting string (if no label or an error occurs). * Return the length of the string (including terminating NUL) or -ve if * an error. * May also return 0 (and a NULL buffer pointer) if there is no label. */ static int smack_key_getsecurity(struct key *key, char **_buffer) { struct smack_known **blob = smack_key(key); struct smack_known *skp = *blob; size_t length; char *copy; if (skp == NULL) { *_buffer = NULL; return 0; } copy = kstrdup(skp->smk_known, GFP_KERNEL); if (copy == NULL) return -ENOMEM; length = strlen(copy) + 1; *_buffer = copy; return length; } #ifdef CONFIG_KEY_NOTIFICATIONS /** * smack_watch_key - Smack access to watch a key for notifications. * @key: The key to be watched * * Return 0 if the @watch->cred has permission to read from the key object and * an error otherwise. 
*/ static int smack_watch_key(struct key *key) { struct smk_audit_info ad; struct smack_known *tkp = smk_of_current(); struct smack_known **blob = smack_key(key); int rc; /* * This should not occur */ if (tkp == NULL) return -EACCES; if (smack_privileged_cred(CAP_MAC_OVERRIDE, current_cred())) return 0; #ifdef CONFIG_AUDIT smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_KEY); ad.a.u.key_struct.key = key->serial; ad.a.u.key_struct.key_desc = key->description; #endif rc = smk_access(tkp, *blob, MAY_READ, &ad); rc = smk_bu_note("key watch", tkp, *blob, MAY_READ, rc); return rc; } #endif /* CONFIG_KEY_NOTIFICATIONS */ #endif /* CONFIG_KEYS */ #ifdef CONFIG_WATCH_QUEUE /** * smack_post_notification - Smack access to post a notification to a queue * @w_cred: The credentials of the watcher. * @cred: The credentials of the event source (may be NULL). * @n: The notification message to be posted. */ static int smack_post_notification(const struct cred *w_cred, const struct cred *cred, struct watch_notification *n) { struct smk_audit_info ad; struct smack_known *subj, *obj; int rc; /* Always let maintenance notifications through. */ if (n->type == WATCH_TYPE_META) return 0; if (!cred) return 0; subj = smk_of_task(smack_cred(cred)); obj = smk_of_task(smack_cred(w_cred)); smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NOTIFICATION); rc = smk_access(subj, obj, MAY_WRITE, &ad); rc = smk_bu_note("notification", subj, obj, MAY_WRITE, rc); return rc; } #endif /* CONFIG_WATCH_QUEUE */ /* * Smack Audit hooks * * Audit requires a unique representation of each Smack specific * rule. This unique representation is used to distinguish the * object to be audited from remaining kernel objects and also * works as a glue between the audit hooks. * * Since repository entries are added but never deleted, we'll use * the smack_known label address related to the given audit rule as * the needed unique representation. This also better fits the smack * model where nearly everything is a label. */ #ifdef CONFIG_AUDIT /** * smack_audit_rule_init - Initialize a smack audit rule * @field: audit rule fields given from user-space (audit.h) * @op: required testing operator (=, !=, >, <, ...) * @rulestr: smack label to be audited * @vrule: pointer to save our own audit rule representation * @gfp: type of the memory for the allocation * * Prepare to audit cases where (@field @op @rulestr) is true. * The label to be audited is created if necessary. */ static int smack_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule, gfp_t gfp) { struct smack_known *skp; char **rule = (char **)vrule; *rule = NULL; if (field != AUDIT_SUBJ_USER && field != AUDIT_OBJ_USER) return -EINVAL; if (op != Audit_equal && op != Audit_not_equal) return -EINVAL; skp = smk_import_entry(rulestr, 0); if (IS_ERR(skp)) return PTR_ERR(skp); *rule = skp->smk_known; return 0; } /** * smack_audit_rule_known - Distinguish Smack audit rules * @krule: rule of interest, in Audit kernel representation format * * This is used to filter Smack rules from remaining Audit ones. * If it's proved that this rule belongs to us, the * audit_rule_match hook will be called to do the final judgement. */ static int smack_audit_rule_known(struct audit_krule *krule) { struct audit_field *f; int i; for (i = 0; i < krule->field_count; i++) { f = &krule->fields[i]; if (f->type == AUDIT_SUBJ_USER || f->type == AUDIT_OBJ_USER) return 1; } return 0; } /** * smack_audit_rule_match - Audit given object ? 
* @prop: security id for identifying the object to test * @field: audit rule flags given from user-space * @op: required testing operator * @vrule: smack internal rule presentation * * The core Audit hook. It's used to take the decision of * whether to audit or not to audit a given object. */ static int smack_audit_rule_match(struct lsm_prop *prop, u32 field, u32 op, void *vrule) { struct smack_known *skp = prop->smack.skp; char *rule = vrule; if (unlikely(!rule)) { WARN_ONCE(1, "Smack: missing rule\n"); return -ENOENT; } if (field != AUDIT_SUBJ_USER && field != AUDIT_OBJ_USER) return 0; /* * No need to do string comparisons. If a match occurs, * both pointers will point to the same smack_known * label. */ if (op == Audit_equal) return (rule == skp->smk_known); if (op == Audit_not_equal) return (rule != skp->smk_known); return 0; } /* * There is no need for a smack_audit_rule_free hook. * No memory was allocated. */ #endif /* CONFIG_AUDIT */ /** * smack_ismaclabel - check if xattr @name references a smack MAC label * @name: Full xattr name to check. */ static int smack_ismaclabel(const char *name) { return (strcmp(name, XATTR_SMACK_SUFFIX) == 0); } /** * smack_to_secctx - fill a lsm_context * @skp: Smack label * @cp: destination * * Fill the passed @cp and return the length of the string */ static int smack_to_secctx(struct smack_known *skp, struct lsm_context *cp) { int len = strlen(skp->smk_known); if (cp) { cp->context = skp->smk_known; cp->len = len; cp->id = LSM_ID_SMACK; } return len; } /** * smack_secid_to_secctx - return the smack label for a secid * @secid: incoming integer * @cp: destination * * Exists for networking code. */ static int smack_secid_to_secctx(u32 secid, struct lsm_context *cp) { return smack_to_secctx(smack_from_secid(secid), cp); } /** * smack_lsmprop_to_secctx - return the smack label * @prop: includes incoming Smack data * @cp: destination * * Exists for audit code. */ static int smack_lsmprop_to_secctx(struct lsm_prop *prop, struct lsm_context *cp) { return smack_to_secctx(prop->smack.skp, cp); } /** * smack_secctx_to_secid - return the secid for a smack label * @secdata: smack label * @seclen: how long result is * @secid: outgoing integer * * Exists for audit and networking code. */ static int smack_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid) { struct smack_known *skp = smk_find_entry(secdata); if (skp) *secid = skp->smk_secid; else *secid = 0; return 0; } /* * There used to be a smack_release_secctx hook * that did nothing back when hooks were in a vector. * Now that there's a list such a hook adds cost. */ static int smack_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen) { /* * UDS inode has fixed label. Ignore nfs label. 
*/ if (S_ISSOCK(inode->i_mode)) return 0; return smack_inode_setsecurity(inode, XATTR_SMACK_SUFFIX, ctx, ctxlen, 0); } static int smack_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen) { return __vfs_setxattr_locked(&nop_mnt_idmap, dentry, XATTR_NAME_SMACK, ctx, ctxlen, 0, NULL); } static int smack_inode_getsecctx(struct inode *inode, struct lsm_context *cp) { struct smack_known *skp = smk_of_inode(inode); cp->context = skp->smk_known; cp->len = strlen(skp->smk_known); cp->id = LSM_ID_SMACK; return 0; } static int smack_inode_copy_up(struct dentry *dentry, struct cred **new) { struct task_smack *tsp; struct smack_known *skp; struct inode_smack *isp; struct cred *new_creds = *new; if (new_creds == NULL) { new_creds = prepare_creds(); if (new_creds == NULL) return -ENOMEM; } tsp = smack_cred(new_creds); /* * Get label from overlay inode and set it in create_sid */ isp = smack_inode(d_inode(dentry)); skp = isp->smk_inode; tsp->smk_task = skp; *new = new_creds; return 0; } static int smack_inode_copy_up_xattr(struct dentry *src, const char *name) { /* * Return -ECANCELED if this is the smack access Smack attribute. */ if (!strcmp(name, XATTR_NAME_SMACK)) return -ECANCELED; return -EOPNOTSUPP; } static int smack_dentry_create_files_as(struct dentry *dentry, int mode, const struct qstr *name, const struct cred *old, struct cred *new) { struct task_smack *otsp = smack_cred(old); struct task_smack *ntsp = smack_cred(new); struct inode_smack *isp; /* * Use the process credential unless all of * the transmuting criteria are met */ ntsp->smk_task = otsp->smk_task; /* * the attribute of the containing directory */ isp = smack_inode(d_inode(dentry->d_parent)); if (isp->smk_flags & SMK_INODE_TRANSMUTE) { /* * If the directory is transmuting and the rule * providing access is transmuting use the containing * directory label instead of the process label. */ if (smk_rule_transmutes(otsp->smk_task, isp->smk_inode)) { ntsp->smk_task = isp->smk_inode; ntsp->smk_transmuted = ntsp->smk_task; } } return 0; } #ifdef CONFIG_IO_URING /** * smack_uring_override_creds - Is io_uring cred override allowed? * @new: the target creds * * Check to see if the current task is allowed to override it's credentials * to service an io_uring operation. */ static int smack_uring_override_creds(const struct cred *new) { struct task_smack *tsp = smack_cred(current_cred()); struct task_smack *nsp = smack_cred(new); /* * Allow the degenerate case where the new Smack value is * the same as the current Smack value. */ if (tsp->smk_task == nsp->smk_task) return 0; if (smack_privileged_cred(CAP_MAC_OVERRIDE, current_cred())) return 0; return -EPERM; } /** * smack_uring_sqpoll - check if a io_uring polling thread can be created * * Check to see if the current task is allowed to create a new io_uring * kernel polling thread. */ static int smack_uring_sqpoll(void) { if (smack_privileged_cred(CAP_MAC_ADMIN, current_cred())) return 0; return -EPERM; } /** * smack_uring_cmd - check on file operations for io_uring * @ioucmd: the command in question * * Make a best guess about whether a io_uring "command" should * be allowed. Use the same logic used for determining if the * file could be opened for read in the absence of better criteria. 
*/ static int smack_uring_cmd(struct io_uring_cmd *ioucmd) { struct file *file = ioucmd->file; struct smk_audit_info ad; struct task_smack *tsp; struct inode *inode; int rc; if (!file) return -EINVAL; tsp = smack_cred(file->f_cred); inode = file_inode(file); smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH); smk_ad_setfield_u_fs_path(&ad, file->f_path); rc = smk_tskacc(tsp, smk_of_inode(inode), MAY_READ, &ad); rc = smk_bu_credfile(file->f_cred, file, MAY_READ, rc); return rc; } #endif /* CONFIG_IO_URING */ struct lsm_blob_sizes smack_blob_sizes __ro_after_init = { .lbs_cred = sizeof(struct task_smack), .lbs_file = sizeof(struct smack_known *), .lbs_inode = sizeof(struct inode_smack), .lbs_ipc = sizeof(struct smack_known *), .lbs_key = sizeof(struct smack_known *), .lbs_msg_msg = sizeof(struct smack_known *), .lbs_sock = sizeof(struct socket_smack), .lbs_superblock = sizeof(struct superblock_smack), .lbs_xattr_count = SMACK_INODE_INIT_XATTRS, }; static const struct lsm_id smack_lsmid = { .name = "smack", .id = LSM_ID_SMACK, }; static struct security_hook_list smack_hooks[] __ro_after_init = { LSM_HOOK_INIT(ptrace_access_check, smack_ptrace_access_check), LSM_HOOK_INIT(ptrace_traceme, smack_ptrace_traceme), LSM_HOOK_INIT(syslog, smack_syslog), LSM_HOOK_INIT(fs_context_submount, smack_fs_context_submount), LSM_HOOK_INIT(fs_context_dup, smack_fs_context_dup), LSM_HOOK_INIT(fs_context_parse_param, smack_fs_context_parse_param), LSM_HOOK_INIT(sb_alloc_security, smack_sb_alloc_security), LSM_HOOK_INIT(sb_free_mnt_opts, smack_free_mnt_opts), LSM_HOOK_INIT(sb_eat_lsm_opts, smack_sb_eat_lsm_opts), LSM_HOOK_INIT(sb_statfs, smack_sb_statfs), LSM_HOOK_INIT(sb_set_mnt_opts, smack_set_mnt_opts), LSM_HOOK_INIT(bprm_creds_for_exec, smack_bprm_creds_for_exec), LSM_HOOK_INIT(inode_alloc_security, smack_inode_alloc_security), LSM_HOOK_INIT(inode_init_security, smack_inode_init_security), LSM_HOOK_INIT(inode_link, smack_inode_link), LSM_HOOK_INIT(inode_unlink, smack_inode_unlink), LSM_HOOK_INIT(inode_rmdir, smack_inode_rmdir), LSM_HOOK_INIT(inode_rename, smack_inode_rename), LSM_HOOK_INIT(inode_permission, smack_inode_permission), LSM_HOOK_INIT(inode_setattr, smack_inode_setattr), LSM_HOOK_INIT(inode_getattr, smack_inode_getattr), LSM_HOOK_INIT(inode_xattr_skipcap, smack_inode_xattr_skipcap), LSM_HOOK_INIT(inode_setxattr, smack_inode_setxattr), LSM_HOOK_INIT(inode_post_setxattr, smack_inode_post_setxattr), LSM_HOOK_INIT(inode_getxattr, smack_inode_getxattr), LSM_HOOK_INIT(inode_removexattr, smack_inode_removexattr), LSM_HOOK_INIT(inode_set_acl, smack_inode_set_acl), LSM_HOOK_INIT(inode_get_acl, smack_inode_get_acl), LSM_HOOK_INIT(inode_remove_acl, smack_inode_remove_acl), LSM_HOOK_INIT(inode_getsecurity, smack_inode_getsecurity), LSM_HOOK_INIT(inode_setsecurity, smack_inode_setsecurity), LSM_HOOK_INIT(inode_listsecurity, smack_inode_listsecurity), LSM_HOOK_INIT(inode_getlsmprop, smack_inode_getlsmprop), LSM_HOOK_INIT(file_alloc_security, smack_file_alloc_security), LSM_HOOK_INIT(file_ioctl, smack_file_ioctl), LSM_HOOK_INIT(file_ioctl_compat, smack_file_ioctl), LSM_HOOK_INIT(file_lock, smack_file_lock), LSM_HOOK_INIT(file_fcntl, smack_file_fcntl), LSM_HOOK_INIT(mmap_file, smack_mmap_file), LSM_HOOK_INIT(mmap_addr, cap_mmap_addr), LSM_HOOK_INIT(file_set_fowner, smack_file_set_fowner), LSM_HOOK_INIT(file_send_sigiotask, smack_file_send_sigiotask), LSM_HOOK_INIT(file_receive, smack_file_receive), LSM_HOOK_INIT(file_open, smack_file_open), LSM_HOOK_INIT(cred_alloc_blank, smack_cred_alloc_blank), 
LSM_HOOK_INIT(cred_free, smack_cred_free), LSM_HOOK_INIT(cred_prepare, smack_cred_prepare), LSM_HOOK_INIT(cred_transfer, smack_cred_transfer), LSM_HOOK_INIT(cred_getsecid, smack_cred_getsecid), LSM_HOOK_INIT(cred_getlsmprop, smack_cred_getlsmprop), LSM_HOOK_INIT(kernel_act_as, smack_kernel_act_as), LSM_HOOK_INIT(kernel_create_files_as, smack_kernel_create_files_as), LSM_HOOK_INIT(task_setpgid, smack_task_setpgid), LSM_HOOK_INIT(task_getpgid, smack_task_getpgid), LSM_HOOK_INIT(task_getsid, smack_task_getsid), LSM_HOOK_INIT(current_getlsmprop_subj, smack_current_getlsmprop_subj), LSM_HOOK_INIT(task_getlsmprop_obj, smack_task_getlsmprop_obj), LSM_HOOK_INIT(task_setnice, smack_task_setnice), LSM_HOOK_INIT(task_setioprio, smack_task_setioprio), LSM_HOOK_INIT(task_getioprio, smack_task_getioprio), LSM_HOOK_INIT(task_setscheduler, smack_task_setscheduler), LSM_HOOK_INIT(task_getscheduler, smack_task_getscheduler), LSM_HOOK_INIT(task_movememory, smack_task_movememory), LSM_HOOK_INIT(task_kill, smack_task_kill), LSM_HOOK_INIT(task_to_inode, smack_task_to_inode), LSM_HOOK_INIT(ipc_permission, smack_ipc_permission), LSM_HOOK_INIT(ipc_getlsmprop, smack_ipc_getlsmprop), LSM_HOOK_INIT(msg_msg_alloc_security, smack_msg_msg_alloc_security), LSM_HOOK_INIT(msg_queue_alloc_security, smack_ipc_alloc_security), LSM_HOOK_INIT(msg_queue_associate, smack_msg_queue_associate), LSM_HOOK_INIT(msg_queue_msgctl, smack_msg_queue_msgctl), LSM_HOOK_INIT(msg_queue_msgsnd, smack_msg_queue_msgsnd), LSM_HOOK_INIT(msg_queue_msgrcv, smack_msg_queue_msgrcv), LSM_HOOK_INIT(shm_alloc_security, smack_ipc_alloc_security), LSM_HOOK_INIT(shm_associate, smack_shm_associate), LSM_HOOK_INIT(shm_shmctl, smack_shm_shmctl), LSM_HOOK_INIT(shm_shmat, smack_shm_shmat), LSM_HOOK_INIT(sem_alloc_security, smack_ipc_alloc_security), LSM_HOOK_INIT(sem_associate, smack_sem_associate), LSM_HOOK_INIT(sem_semctl, smack_sem_semctl), LSM_HOOK_INIT(sem_semop, smack_sem_semop), LSM_HOOK_INIT(d_instantiate, smack_d_instantiate), LSM_HOOK_INIT(getselfattr, smack_getselfattr), LSM_HOOK_INIT(setselfattr, smack_setselfattr), LSM_HOOK_INIT(getprocattr, smack_getprocattr), LSM_HOOK_INIT(setprocattr, smack_setprocattr), LSM_HOOK_INIT(unix_stream_connect, smack_unix_stream_connect), LSM_HOOK_INIT(unix_may_send, smack_unix_may_send), LSM_HOOK_INIT(socket_post_create, smack_socket_post_create), LSM_HOOK_INIT(socket_socketpair, smack_socket_socketpair), #ifdef SMACK_IPV6_PORT_LABELING LSM_HOOK_INIT(socket_bind, smack_socket_bind), #endif LSM_HOOK_INIT(socket_connect, smack_socket_connect), LSM_HOOK_INIT(socket_sendmsg, smack_socket_sendmsg), LSM_HOOK_INIT(socket_sock_rcv_skb, smack_socket_sock_rcv_skb), LSM_HOOK_INIT(socket_getpeersec_stream, smack_socket_getpeersec_stream), LSM_HOOK_INIT(socket_getpeersec_dgram, smack_socket_getpeersec_dgram), LSM_HOOK_INIT(sk_alloc_security, smack_sk_alloc_security), #ifdef SMACK_IPV6_PORT_LABELING LSM_HOOK_INIT(sk_free_security, smack_sk_free_security), #endif LSM_HOOK_INIT(sk_clone_security, smack_sk_clone_security), LSM_HOOK_INIT(inet_conn_request, smack_inet_conn_request), LSM_HOOK_INIT(inet_csk_clone, smack_inet_csk_clone), /* key management security hooks */ #ifdef CONFIG_KEYS LSM_HOOK_INIT(key_alloc, smack_key_alloc), LSM_HOOK_INIT(key_permission, smack_key_permission), LSM_HOOK_INIT(key_getsecurity, smack_key_getsecurity), #ifdef CONFIG_KEY_NOTIFICATIONS LSM_HOOK_INIT(watch_key, smack_watch_key), #endif #endif /* CONFIG_KEYS */ #ifdef CONFIG_WATCH_QUEUE LSM_HOOK_INIT(post_notification, smack_post_notification), #endif /* 
Audit hooks */ #ifdef CONFIG_AUDIT LSM_HOOK_INIT(audit_rule_init, smack_audit_rule_init), LSM_HOOK_INIT(audit_rule_known, smack_audit_rule_known), LSM_HOOK_INIT(audit_rule_match, smack_audit_rule_match), #endif /* CONFIG_AUDIT */ LSM_HOOK_INIT(ismaclabel, smack_ismaclabel), LSM_HOOK_INIT(secid_to_secctx, smack_secid_to_secctx), LSM_HOOK_INIT(lsmprop_to_secctx, smack_lsmprop_to_secctx), LSM_HOOK_INIT(secctx_to_secid, smack_secctx_to_secid), LSM_HOOK_INIT(inode_notifysecctx, smack_inode_notifysecctx), LSM_HOOK_INIT(inode_setsecctx, smack_inode_setsecctx), LSM_HOOK_INIT(inode_getsecctx, smack_inode_getsecctx), LSM_HOOK_INIT(inode_copy_up, smack_inode_copy_up), LSM_HOOK_INIT(inode_copy_up_xattr, smack_inode_copy_up_xattr), LSM_HOOK_INIT(dentry_create_files_as, smack_dentry_create_files_as), #ifdef CONFIG_IO_URING LSM_HOOK_INIT(uring_override_creds, smack_uring_override_creds), LSM_HOOK_INIT(uring_sqpoll, smack_uring_sqpoll), LSM_HOOK_INIT(uring_cmd, smack_uring_cmd), #endif }; static __init void init_smack_known_list(void) { /* * Initialize rule list locks */ mutex_init(&smack_known_huh.smk_rules_lock); mutex_init(&smack_known_hat.smk_rules_lock); mutex_init(&smack_known_floor.smk_rules_lock); mutex_init(&smack_known_star.smk_rules_lock); mutex_init(&smack_known_web.smk_rules_lock); /* * Initialize rule lists */ INIT_LIST_HEAD(&smack_known_huh.smk_rules); INIT_LIST_HEAD(&smack_known_hat.smk_rules); INIT_LIST_HEAD(&smack_known_star.smk_rules); INIT_LIST_HEAD(&smack_known_floor.smk_rules); INIT_LIST_HEAD(&smack_known_web.smk_rules); /* * Create the known labels list */ smk_insert_entry(&smack_known_huh); smk_insert_entry(&smack_known_hat); smk_insert_entry(&smack_known_star); smk_insert_entry(&smack_known_floor); smk_insert_entry(&smack_known_web); } /** * smack_init - initialize the smack system * * Returns 0 on success, -ENOMEM is there's no memory */ static __init int smack_init(void) { struct cred *cred = (struct cred *) current->cred; struct task_smack *tsp; smack_rule_cache = KMEM_CACHE(smack_rule, 0); if (!smack_rule_cache) return -ENOMEM; /* * Set the security state for the initial task. */ tsp = smack_cred(cred); init_task_smack(tsp, &smack_known_floor, &smack_known_floor); /* * Register with LSM */ security_add_hooks(smack_hooks, ARRAY_SIZE(smack_hooks), &smack_lsmid); smack_enabled = 1; pr_info("Smack: Initializing.\n"); #ifdef CONFIG_SECURITY_SMACK_NETFILTER pr_info("Smack: Netfilter enabled.\n"); #endif #ifdef SMACK_IPV6_PORT_LABELING pr_info("Smack: IPv6 port labeling enabled.\n"); #endif #ifdef SMACK_IPV6_SECMARK_LABELING pr_info("Smack: IPv6 Netfilter enabled.\n"); #endif /* initialize the smack_known_list */ init_smack_known_list(); /* Inform the audit system that secctx is used */ audit_cfg_lsm(&smack_lsmid, AUDIT_CFG_LSM_SECCTX_SUBJECT | AUDIT_CFG_LSM_SECCTX_OBJECT); return 0; } int __init smack_initcall(void) { int rc_fs = init_smk_fs(); int rc_nf = smack_nf_ip_init(); return rc_fs ? rc_fs : rc_nf; } /* * Smack requires early initialization in order to label * all processes and objects when they are created. */ DEFINE_LSM(smack) = { .id = &smack_lsmid, .flags = LSM_FLAG_LEGACY_MAJOR | LSM_FLAG_EXCLUSIVE, .blobs = &smack_blob_sizes, .init = smack_init, .initcall_device = smack_initcall, };
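/*
 * Illustrative sketch (not part of the original kernel sources): a minimal
 * user-space view of the "current" process attribute handled by
 * smack_getprocattr()/smack_setprocattr() above. It assumes the conventional
 * Smack procfs interface at /proc/self/attr/current; the helper names
 * read_self_label() and write_self_label() are hypothetical. As documented
 * above, a written label may carry a trailing '\n' and/or '\0', and changing
 * the label requires CAP_MAC_ADMIN (or an entry in the caller's relabel list).
 */
#include <stdio.h>

/* Read the calling process's Smack label into buf (hypothetical helper). */
static int read_self_label(char *buf, size_t len)
{
	FILE *f = fopen("/proc/self/attr/current", "r");
	size_t n;

	if (!f)
		return -1;
	n = fread(buf, 1, len - 1, f);
	fclose(f);
	buf[n] = '\0';		/* label is returned without a newline */
	return 0;
}

/* Request a new label; "label" and "label\n" are both accepted forms. */
static int write_self_label(const char *label)
{
	FILE *f = fopen("/proc/self/attr/current", "w");
	int rc;

	if (!f)
		return -1;
	rc = fprintf(f, "%s\n", label) < 0 ? -1 : 0;
	if (fclose(f) != 0)
		rc = -1;
	return rc;
}

int main(void)
{
	char label[256];

	if (read_self_label(label, sizeof(label)) == 0)
		printf("current Smack label: %s\n", label);

	/* Expected to fail with EPERM unless the caller is privileged. */
	if (write_self_label("Example") != 0)
		perror("relabel");
	return 0;
}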
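/*
 * Illustrative sketch (not part of the original kernel sources): why
 * smack_audit_rule_match() can compare label pointers instead of strings.
 * Smack interns every label once via smk_import_entry(), so two references
 * to the same label text always resolve to the same struct smack_known.
 * The toy intern table below mirrors that idea in plain user-space C; the
 * toy_* names are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_label {
	struct toy_label *next;
	char *name;
};

static struct toy_label *toy_labels;

/* Return the canonical entry for name, creating it on first use. */
static struct toy_label *toy_import(const char *name)
{
	struct toy_label *lp;

	for (lp = toy_labels; lp; lp = lp->next)
		if (strcmp(lp->name, name) == 0)
			return lp;

	lp = malloc(sizeof(*lp));
	if (!lp)
		return NULL;
	lp->name = strdup(name);
	lp->next = toy_labels;
	toy_labels = lp;
	return lp;
}

int main(void)
{
	struct toy_label *rule = toy_import("System");
	struct toy_label *subject = toy_import("System");
	struct toy_label *other = toy_import("User");

	/* Same text -> same entry, so a pointer test is sufficient. */
	printf("rule == subject: %d\n", rule == subject);
	printf("rule == other:   %d\n", rule == other);
	return 0;
}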
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *	Declarations of AX.25 type objects.
 *
 *	Alan Cox (GW4PTS)	10/11/93
 */
#ifndef _AX25_H
#define _AX25_H

#include <linux/ax25.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/refcount.h>
#include <net/neighbour.h>
#include <net/sock.h>
#include <linux/seq_file.h>

#define	AX25_T1CLAMPLO			1
#define	AX25_T1CLAMPHI			(30 * HZ)

#define	AX25_BPQ_HEADER_LEN		16
#define	AX25_KISS_HEADER_LEN		1

#define	AX25_HEADER_LEN			17
#define	AX25_ADDR_LEN			7
#define	AX25_DIGI_HEADER_LEN		(AX25_MAX_DIGIS * AX25_ADDR_LEN)
#define	AX25_MAX_HEADER_LEN		(AX25_HEADER_LEN + AX25_DIGI_HEADER_LEN)

/* AX.25 Protocol IDs */
#define AX25_P_ROSE			0x01
#define AX25_P_VJCOMP			0x06	/* Compressed TCP/IP packet   */
						/* Van Jacobson (RFC 1144)    */
#define AX25_P_VJUNCOMP			0x07	/* Uncompressed TCP/IP packet */
						/* Van Jacobson (RFC 1144)    */
#define AX25_P_SEGMENT			0x08	/* Segmentation fragment      */
#define AX25_P_TEXNET			0xc3	/* TEXTNET datagram protocol  */
#define AX25_P_LQ			0xc4	/* Link Quality Protocol      */
#define AX25_P_ATALK			0xca	/* Appletalk                  */
#define AX25_P_ATALK_ARP		0xcb	/* Appletalk ARP              */
#define AX25_P_IP			0xcc	/* ARPA Internet Protocol     */
#define AX25_P_ARP			0xcd	/* ARPA Address Resolution    */
#define AX25_P_FLEXNET			0xce	/* FlexNet                    */
#define AX25_P_NETROM			0xcf	/* NET/ROM                    */
#define	AX25_P_TEXT			0xF0	/* No layer 3 protocol impl.
*/ /* AX.25 Segment control values */ #define AX25_SEG_REM 0x7F #define AX25_SEG_FIRST 0x80 #define AX25_CBIT 0x80 /* Command/Response bit */ #define AX25_EBIT 0x01 /* HDLC Address Extension bit */ #define AX25_HBIT 0x80 /* Has been repeated bit */ #define AX25_SSSID_SPARE 0x60 /* Unused bits in SSID for standard AX.25 */ #define AX25_ESSID_SPARE 0x20 /* Unused bits in SSID for extended AX.25 */ #define AX25_DAMA_FLAG 0x20 /* Well, it is *NOT* unused! (dl1bke 951121 */ #define AX25_COND_ACK_PENDING 0x01 #define AX25_COND_REJECT 0x02 #define AX25_COND_PEER_RX_BUSY 0x04 #define AX25_COND_OWN_RX_BUSY 0x08 #define AX25_COND_DAMA_MODE 0x10 #ifndef _LINUX_NETDEVICE_H #include <linux/netdevice.h> #endif /* Upper sub-layer (LAPB) definitions */ /* Control field templates */ #define AX25_I 0x00 /* Information frames */ #define AX25_S 0x01 /* Supervisory frames */ #define AX25_RR 0x01 /* Receiver ready */ #define AX25_RNR 0x05 /* Receiver not ready */ #define AX25_REJ 0x09 /* Reject */ #define AX25_U 0x03 /* Unnumbered frames */ #define AX25_SABM 0x2f /* Set Asynchronous Balanced Mode */ #define AX25_SABME 0x6f /* Set Asynchronous Balanced Mode Extended */ #define AX25_DISC 0x43 /* Disconnect */ #define AX25_DM 0x0f /* Disconnected mode */ #define AX25_UA 0x63 /* Unnumbered acknowledge */ #define AX25_FRMR 0x87 /* Frame reject */ #define AX25_UI 0x03 /* Unnumbered information */ #define AX25_XID 0xaf /* Exchange information */ #define AX25_TEST 0xe3 /* Test */ #define AX25_PF 0x10 /* Poll/final bit for standard AX.25 */ #define AX25_EPF 0x01 /* Poll/final bit for extended AX.25 */ #define AX25_ILLEGAL 0x100 /* Impossible to be a real frame type */ #define AX25_POLLOFF 0 #define AX25_POLLON 1 /* AX25 L2 C-bit */ #define AX25_COMMAND 1 #define AX25_RESPONSE 2 /* Define Link State constants. 
*/ enum { AX25_STATE_0, /* Listening */ AX25_STATE_1, /* SABM sent */ AX25_STATE_2, /* DISC sent */ AX25_STATE_3, /* Established */ AX25_STATE_4 /* Recovery */ }; #define AX25_MODULUS 8 /* Standard AX.25 modulus */ #define AX25_EMODULUS 128 /* Extended AX.25 modulus */ enum { AX25_PROTO_STD_SIMPLEX, AX25_PROTO_STD_DUPLEX, #ifdef CONFIG_AX25_DAMA_SLAVE AX25_PROTO_DAMA_SLAVE, #ifdef CONFIG_AX25_DAMA_MASTER AX25_PROTO_DAMA_MASTER, #define AX25_PROTO_MAX AX25_PROTO_DAMA_MASTER #endif #endif __AX25_PROTO_MAX, AX25_PROTO_MAX = __AX25_PROTO_MAX -1 }; enum { AX25_VALUES_IPDEFMODE, /* 0=DG 1=VC */ AX25_VALUES_AXDEFMODE, /* 0=Normal 1=Extended Seq Nos */ AX25_VALUES_BACKOFF, /* 0=None 1=Linear 2=Exponential */ AX25_VALUES_CONMODE, /* Allow connected modes - 0=No 1=no "PID text" 2=all PIDs */ AX25_VALUES_WINDOW, /* Default window size for standard AX.25 */ AX25_VALUES_EWINDOW, /* Default window size for extended AX.25 */ AX25_VALUES_T1, /* Default T1 timeout value */ AX25_VALUES_T2, /* Default T2 timeout value */ AX25_VALUES_T3, /* Default T3 timeout value */ AX25_VALUES_IDLE, /* Connected mode idle timer */ AX25_VALUES_N2, /* Default N2 value */ AX25_VALUES_PACLEN, /* AX.25 MTU */ AX25_VALUES_PROTOCOL, /* Std AX.25, DAMA Slave, DAMA Master */ #ifdef CONFIG_AX25_DAMA_SLAVE AX25_VALUES_DS_TIMEOUT, /* DAMA Slave timeout */ #endif AX25_MAX_VALUES /* THIS MUST REMAIN THE LAST ENTRY OF THIS LIST */ }; #define AX25_DEF_IPDEFMODE 0 /* Datagram */ #define AX25_DEF_AXDEFMODE 0 /* Normal */ #define AX25_DEF_BACKOFF 1 /* Linear backoff */ #define AX25_DEF_CONMODE 2 /* Connected mode allowed */ #define AX25_DEF_WINDOW 2 /* Window=2 */ #define AX25_DEF_EWINDOW 32 /* Module-128 Window=32 */ #define AX25_DEF_T1 10000 /* T1=10s */ #define AX25_DEF_T2 3000 /* T2=3s */ #define AX25_DEF_T3 300000 /* T3=300s */ #define AX25_DEF_N2 10 /* N2=10 */ #define AX25_DEF_IDLE 0 /* Idle=None */ #define AX25_DEF_PACLEN 256 /* Paclen=256 */ #define AX25_DEF_PROTOCOL AX25_PROTO_STD_SIMPLEX /* Standard AX.25 */ #define AX25_DEF_DS_TIMEOUT 180000 /* DAMA timeout 3 minutes */ typedef struct ax25_uid_assoc { struct hlist_node uid_node; refcount_t refcount; kuid_t uid; ax25_address call; } ax25_uid_assoc; #define ax25_uid_for_each(__ax25, list) \ hlist_for_each_entry(__ax25, list, uid_node) #define ax25_uid_hold(ax25) \ refcount_inc(&((ax25)->refcount)) static inline void ax25_uid_put(ax25_uid_assoc *assoc) { if (refcount_dec_and_test(&assoc->refcount)) { kfree(assoc); } } typedef struct { ax25_address calls[AX25_MAX_DIGIS]; unsigned char repeated[AX25_MAX_DIGIS]; unsigned char ndigi; signed char lastrepeat; } ax25_digi; typedef struct ax25_route { struct ax25_route *next; ax25_address callsign; struct net_device *dev; ax25_digi *digipeat; char ip_mode; } ax25_route; void __ax25_put_route(ax25_route *ax25_rt); extern rwlock_t ax25_route_lock; static inline void ax25_route_lock_use(void) { read_lock(&ax25_route_lock); } static inline void ax25_route_lock_unuse(void) { read_unlock(&ax25_route_lock); } typedef struct { char slave; /* slave_mode? */ struct timer_list slave_timer; /* timeout timer */ unsigned short slave_timeout; /* when? 
*/ } ax25_dama_info; struct ctl_table; typedef struct ax25_dev { struct list_head list; struct net_device *dev; netdevice_tracker dev_tracker; struct net_device *forward; struct ctl_table_header *sysheader; int values[AX25_MAX_VALUES]; #if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER) ax25_dama_info dama; #endif refcount_t refcount; bool device_up; struct rcu_head rcu; } ax25_dev; typedef struct ax25_cb { struct hlist_node ax25_node; ax25_address source_addr, dest_addr; ax25_digi *digipeat; ax25_dev *ax25_dev; netdevice_tracker dev_tracker; unsigned char iamdigi; unsigned char state, modulus, pidincl; unsigned short vs, vr, va; unsigned char condition, backoff; unsigned char n2, n2count; struct timer_list t1timer, t2timer, t3timer, idletimer; unsigned long t1, t2, t3, idle, rtt; unsigned short paclen, fragno, fraglen; struct sk_buff_head write_queue; struct sk_buff_head reseq_queue; struct sk_buff_head ack_queue; struct sk_buff_head frag_queue; unsigned char window; struct timer_list timer, dtimer; struct sock *sk; /* Backlink to socket */ refcount_t refcount; } ax25_cb; struct ax25_sock { struct sock sk; struct ax25_cb *cb; }; #define ax25_sk(ptr) container_of_const(ptr, struct ax25_sock, sk) static inline struct ax25_cb *sk_to_ax25(const struct sock *sk) { return ax25_sk(sk)->cb; } #define ax25_for_each(__ax25, list) \ hlist_for_each_entry(__ax25, list, ax25_node) #define ax25_cb_hold(__ax25) \ refcount_inc(&((__ax25)->refcount)) static __inline__ void ax25_cb_put(ax25_cb *ax25) { if (refcount_dec_and_test(&ax25->refcount)) { kfree(ax25->digipeat); kfree(ax25); } } static inline void ax25_dev_hold(ax25_dev *ax25_dev) { refcount_inc(&ax25_dev->refcount); } static inline void ax25_dev_put(ax25_dev *ax25_dev) { if (refcount_dec_and_test(&ax25_dev->refcount)) kfree_rcu(ax25_dev, rcu); } static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev) { skb->dev = dev; skb_reset_mac_header(skb); skb->pkt_type = PACKET_HOST; return htons(ETH_P_AX25); } /* af_ax25.c */ extern struct hlist_head ax25_list; extern spinlock_t ax25_list_lock; void ax25_cb_add(ax25_cb *); struct sock *ax25_find_listener(ax25_address *, int, struct net_device *, int); struct sock *ax25_get_socket(ax25_address *, ax25_address *, int); ax25_cb *ax25_find_cb(const ax25_address *, ax25_address *, ax25_digi *, struct net_device *); void ax25_send_to_raw(ax25_address *, struct sk_buff *, int); void ax25_destroy_socket(ax25_cb *); ax25_cb * __must_check ax25_create_cb(void); void ax25_fillin_cb(ax25_cb *, ax25_dev *); struct sock *ax25_make_new(struct sock *, struct ax25_dev *); /* ax25_addr.c */ extern const ax25_address ax25_bcast; extern const ax25_address ax25_defaddr; extern const ax25_address null_ax25_address; char *ax2asc(char *buf, const ax25_address *); void asc2ax(ax25_address *addr, const char *callsign); int ax25cmp(const ax25_address *, const ax25_address *); int ax25digicmp(const ax25_digi *, const ax25_digi *); const unsigned char *ax25_addr_parse(const unsigned char *, int, ax25_address *, ax25_address *, ax25_digi *, int *, int *); int ax25_addr_build(unsigned char *, const ax25_address *, const ax25_address *, const ax25_digi *, int, int); int ax25_addr_size(const ax25_digi *); void ax25_digi_invert(const ax25_digi *, ax25_digi *); /* ax25_dev.c */ extern spinlock_t ax25_dev_lock; #if IS_ENABLED(CONFIG_AX25) static inline ax25_dev *ax25_dev_ax25dev(const struct net_device *dev) { return rcu_dereference_rtnl(dev->ax25_ptr); } #endif ax25_dev 
*ax25_addr_ax25dev(ax25_address *); void ax25_dev_device_up(struct net_device *); void ax25_dev_device_down(struct net_device *); int ax25_fwd_ioctl(unsigned int, struct ax25_fwd_struct *); struct net_device *ax25_fwd_dev(struct net_device *); void ax25_dev_free(void); /* ax25_ds_in.c */ int ax25_ds_frame_in(ax25_cb *, struct sk_buff *, int); /* ax25_ds_subr.c */ void ax25_ds_nr_error_recovery(ax25_cb *); void ax25_ds_enquiry_response(ax25_cb *); void ax25_ds_establish_data_link(ax25_cb *); void ax25_dev_dama_off(ax25_dev *); void ax25_dama_on(ax25_cb *); void ax25_dama_off(ax25_cb *); /* ax25_ds_timer.c */ void ax25_ds_setup_timer(ax25_dev *); void ax25_ds_set_timer(ax25_dev *); void ax25_ds_del_timer(ax25_dev *); void ax25_ds_timer(ax25_cb *); void ax25_ds_t1_timeout(ax25_cb *); void ax25_ds_heartbeat_expiry(ax25_cb *); void ax25_ds_t3timer_expiry(ax25_cb *); void ax25_ds_idletimer_expiry(ax25_cb *); /* ax25_iface.c */ struct ax25_protocol { struct ax25_protocol *next; unsigned int pid; int (*func)(struct sk_buff *, ax25_cb *); }; void ax25_register_pid(struct ax25_protocol *ap); void ax25_protocol_release(unsigned int); struct ax25_linkfail { struct hlist_node lf_node; void (*func)(ax25_cb *, int); }; void ax25_linkfail_register(struct ax25_linkfail *lf); void ax25_linkfail_release(struct ax25_linkfail *lf); int __must_check ax25_listen_register(const ax25_address *, struct net_device *); void ax25_listen_release(const ax25_address *, struct net_device *); int(*ax25_protocol_function(unsigned int))(struct sk_buff *, ax25_cb *); int ax25_listen_mine(const ax25_address *, struct net_device *); void ax25_link_failed(ax25_cb *, int); int ax25_protocol_is_registered(unsigned int); /* ax25_in.c */ int ax25_rx_iframe(ax25_cb *, struct sk_buff *); int ax25_kiss_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); /* ax25_ip.c */ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb); extern const struct header_ops ax25_header_ops; /* ax25_out.c */ ax25_cb *ax25_send_frame(struct sk_buff *, int, const ax25_address *, ax25_address *, ax25_digi *, struct net_device *); void ax25_output(ax25_cb *, int, struct sk_buff *); void ax25_kick(ax25_cb *); void ax25_transmit_buffer(ax25_cb *, struct sk_buff *, int); void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev); int ax25_check_iframes_acked(ax25_cb *, unsigned short); /* ax25_route.c */ void ax25_rt_device_down(struct net_device *); int ax25_rt_ioctl(unsigned int, void __user *); extern const struct seq_operations ax25_rt_seqops; ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev); struct sk_buff *ax25_rt_build_path(struct sk_buff *, ax25_address *, ax25_address *, ax25_digi *); void ax25_rt_free(void); /* ax25_std_in.c */ int ax25_std_frame_in(ax25_cb *, struct sk_buff *, int); /* ax25_std_subr.c */ void ax25_std_nr_error_recovery(ax25_cb *); void ax25_std_establish_data_link(ax25_cb *); void ax25_std_transmit_enquiry(ax25_cb *); void ax25_std_enquiry_response(ax25_cb *); void ax25_std_timeout_response(ax25_cb *); /* ax25_std_timer.c */ void ax25_std_heartbeat_expiry(ax25_cb *); void ax25_std_t1timer_expiry(ax25_cb *); void ax25_std_t2timer_expiry(ax25_cb *); void ax25_std_t3timer_expiry(ax25_cb *); void ax25_std_idletimer_expiry(ax25_cb *); /* ax25_subr.c */ void ax25_clear_queues(ax25_cb *); void ax25_frames_acked(ax25_cb *, unsigned short); void ax25_requeue_frames(ax25_cb *); int ax25_validate_nr(ax25_cb *, unsigned short); int ax25_decode(ax25_cb *, struct sk_buff *, int *, int *, int *); 
void ax25_send_control(ax25_cb *, int, int, int); void ax25_return_dm(struct net_device *, ax25_address *, ax25_address *, ax25_digi *); void ax25_calculate_t1(ax25_cb *); void ax25_calculate_rtt(ax25_cb *); void ax25_disconnect(ax25_cb *, int); /* ax25_timer.c */ void ax25_setup_timers(ax25_cb *); void ax25_start_heartbeat(ax25_cb *); void ax25_start_t1timer(ax25_cb *); void ax25_start_t2timer(ax25_cb *); void ax25_start_t3timer(ax25_cb *); void ax25_start_idletimer(ax25_cb *); void ax25_stop_heartbeat(ax25_cb *); void ax25_stop_t1timer(ax25_cb *); void ax25_stop_t2timer(ax25_cb *); void ax25_stop_t3timer(ax25_cb *); void ax25_stop_idletimer(ax25_cb *); int ax25_t1timer_running(ax25_cb *); unsigned long ax25_display_timer(struct timer_list *); /* ax25_uid.c */ extern int ax25_uid_policy; ax25_uid_assoc *ax25_findbyuid(kuid_t); int __must_check ax25_uid_ioctl(int, struct sockaddr_ax25 *); extern const struct seq_operations ax25_uid_seqops; void ax25_uid_free(void); /* sysctl_net_ax25.c */ #ifdef CONFIG_SYSCTL int ax25_register_dev_sysctl(ax25_dev *ax25_dev); void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev); #else static inline int ax25_register_dev_sysctl(ax25_dev *ax25_dev) { return 0; } static inline void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev) {} #endif /* CONFIG_SYSCTL */ #endif
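For context, ax25_type_trans() declared above is the helper a hamradio netdevice driver calls on its receive path before handing an AX.25 frame up the stack. The following is a minimal, hypothetical rx-path sketch, assuming a driver-supplied buffer; mydev_rx() and its arguments are illustrative only, and real drivers (e.g. the KISS line discipline) add their own framing and error handling.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/ax25.h>

/* Hypothetical helper: wrap a received AX.25 frame in an skb and pass it up. */
static void mydev_rx(struct net_device *dev, const unsigned char *frame,
		     size_t len)
{
	struct sk_buff *skb = dev_alloc_skb(len);

	if (!skb)
		return;		/* drop the frame on allocation failure */

	skb_put_data(skb, frame, len);
	/* ax25_type_trans() sets skb->dev, resets the MAC header and
	 * marks the packet PACKET_HOST, returning htons(ETH_P_AX25). */
	skb->protocol = ax25_type_trans(skb, dev);
	netif_rx(skb);
}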
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>

static struct ax25_protocol *protocol_list;
static DEFINE_RWLOCK(protocol_list_lock);

static HLIST_HEAD(ax25_linkfail_list);
static DEFINE_SPINLOCK(linkfail_lock);

static struct listen_struct {
	struct listen_struct *next;
	ax25_address  callsign;
	struct net_device *dev;
} *listen_list = NULL;
static DEFINE_SPINLOCK(listen_lock);

/*
 * Do not register the internal protocols AX25_P_TEXT, AX25_P_SEGMENT,
 * AX25_P_IP or AX25_P_ARP ...
*/ void ax25_register_pid(struct ax25_protocol *ap) { write_lock_bh(&protocol_list_lock); ap->next = protocol_list; protocol_list = ap; write_unlock_bh(&protocol_list_lock); } EXPORT_SYMBOL_GPL(ax25_register_pid); void ax25_protocol_release(unsigned int pid) { struct ax25_protocol *protocol; write_lock_bh(&protocol_list_lock); protocol = protocol_list; if (protocol == NULL) goto out; if (protocol->pid == pid) { protocol_list = protocol->next; goto out; } while (protocol != NULL && protocol->next != NULL) { if (protocol->next->pid == pid) { protocol->next = protocol->next->next; goto out; } protocol = protocol->next; } out: write_unlock_bh(&protocol_list_lock); } EXPORT_SYMBOL(ax25_protocol_release); void ax25_linkfail_register(struct ax25_linkfail *lf) { spin_lock_bh(&linkfail_lock); hlist_add_head(&lf->lf_node, &ax25_linkfail_list); spin_unlock_bh(&linkfail_lock); } EXPORT_SYMBOL(ax25_linkfail_register); void ax25_linkfail_release(struct ax25_linkfail *lf) { spin_lock_bh(&linkfail_lock); hlist_del_init(&lf->lf_node); spin_unlock_bh(&linkfail_lock); } EXPORT_SYMBOL(ax25_linkfail_release); int ax25_listen_register(const ax25_address *callsign, struct net_device *dev) { struct listen_struct *listen; if (ax25_listen_mine(callsign, dev)) return 0; if ((listen = kmalloc(sizeof(*listen), GFP_ATOMIC)) == NULL) return -ENOMEM; listen->callsign = *callsign; listen->dev = dev; spin_lock_bh(&listen_lock); listen->next = listen_list; listen_list = listen; spin_unlock_bh(&listen_lock); return 0; } EXPORT_SYMBOL(ax25_listen_register); void ax25_listen_release(const ax25_address *callsign, struct net_device *dev) { struct listen_struct *s, *listen; spin_lock_bh(&listen_lock); listen = listen_list; if (listen == NULL) { spin_unlock_bh(&listen_lock); return; } if (ax25cmp(&listen->callsign, callsign) == 0 && listen->dev == dev) { listen_list = listen->next; spin_unlock_bh(&listen_lock); kfree(listen); return; } while (listen != NULL && listen->next != NULL) { if (ax25cmp(&listen->next->callsign, callsign) == 0 && listen->next->dev == dev) { s = listen->next; listen->next = listen->next->next; spin_unlock_bh(&listen_lock); kfree(s); return; } listen = listen->next; } spin_unlock_bh(&listen_lock); } EXPORT_SYMBOL(ax25_listen_release); int (*ax25_protocol_function(unsigned int pid))(struct sk_buff *, ax25_cb *) { int (*res)(struct sk_buff *, ax25_cb *) = NULL; struct ax25_protocol *protocol; read_lock(&protocol_list_lock); for (protocol = protocol_list; protocol != NULL; protocol = protocol->next) if (protocol->pid == pid) { res = protocol->func; break; } read_unlock(&protocol_list_lock); return res; } int ax25_listen_mine(const ax25_address *callsign, struct net_device *dev) { struct listen_struct *listen; spin_lock_bh(&listen_lock); for (listen = listen_list; listen != NULL; listen = listen->next) if (ax25cmp(&listen->callsign, callsign) == 0 && (listen->dev == dev || listen->dev == NULL)) { spin_unlock_bh(&listen_lock); return 1; } spin_unlock_bh(&listen_lock); return 0; } void ax25_link_failed(ax25_cb *ax25, int reason) { struct ax25_linkfail *lf; spin_lock_bh(&linkfail_lock); hlist_for_each_entry(lf, &ax25_linkfail_list, lf_node) lf->func(ax25, reason); spin_unlock_bh(&linkfail_lock); } int ax25_protocol_is_registered(unsigned int pid) { struct ax25_protocol *protocol; int res = 0; read_lock_bh(&protocol_list_lock); for (protocol = protocol_list; protocol != NULL; protocol = protocol->next) if (protocol->pid == pid) { res = 1; break; } read_unlock_bh(&protocol_list_lock); return res; }
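A usage sketch of the registration API above, with hypothetical myproto_* names: a layer-3 protocol hooks its receive routine into AX.25 with ax25_register_pid() and detaches with ax25_protocol_release(). The PID chosen here and the return-value convention of the callback are only meant to show the shape of the calls and should be checked against ax25_in.c before relying on them.

#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/ax25.h>

/* Hypothetical receive handler for frames carrying our PID. */
static int myproto_rcv(struct sk_buff *skb, ax25_cb *ax25)
{
	/* Illustrative only: claim the frame and free it. */
	kfree_skb(skb);
	return 1;
}

static struct ax25_protocol myproto_pid = {
	.pid	= AX25_P_NETROM,	/* example value; not one of the internal PIDs noted above */
	.func	= myproto_rcv,
};

static int __init myproto_init(void)
{
	ax25_register_pid(&myproto_pid);
	return 0;
}

static void __exit myproto_exit(void)
{
	ax25_protocol_release(AX25_P_NETROM);
}

module_init(myproto_init);
module_exit(myproto_exit);
MODULE_LICENSE("GPL");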
// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2009 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

/*
 * lowcomms.c
 *
 * This is the "low-level" comms layer.
 *
 * It is responsible for sending/receiving messages
 * from other nodes in the cluster.
 *
 * Cluster nodes are referred to by their nodeids. nodeids are
 * simply 32 bit numbers to the locking module - if they need to
 * be expanded for the cluster infrastructure then that is its
 * responsibility. It is this layer's
 * responsibility to resolve these into IP addresses or
 * whatever it needs for inter-node communication.
 *
 * The comms level is two kernel threads that deal mainly with
 * the receiving of messages from other nodes and passing them
 * up to the mid-level comms layer (which understands the
 * message format) for execution by the locking core, and
 * a send thread which does all the setting up of connections
 * to remote nodes and the sending of data. Threads are not allowed
 * to send their own data because it may cause them to wait in times
 * of high load. Also, this way, the sending thread can collect together
 * messages bound for one node and send them in one block.
 *
 * lowcomms will choose to use either TCP or SCTP as its transport layer
 * depending on the configuration variable 'protocol'. This should be set
 * to 0 (default) for TCP or 1 for SCTP.
It should be configured using a * cluster-wide mechanism as it must be the same on all nodes of the cluster * for the DLM to function. * */ #include <asm/ioctls.h> #include <net/sock.h> #include <net/tcp.h> #include <linux/pagemap.h> #include <linux/file.h> #include <linux/mutex.h> #include <linux/sctp.h> #include <linux/slab.h> #include <net/sctp/sctp.h> #include <net/ipv6.h> #include <trace/events/dlm.h> #include <trace/events/sock.h> #include "dlm_internal.h" #include "lowcomms.h" #include "midcomms.h" #include "memory.h" #include "config.h" #define DLM_SHUTDOWN_WAIT_TIMEOUT msecs_to_jiffies(5000) #define DLM_MAX_PROCESS_BUFFERS 24 #define NEEDED_RMEM (4*1024*1024) struct connection { struct socket *sock; /* NULL if not connected */ uint32_t nodeid; /* So we know who we are in the list */ /* this semaphore is used to allow parallel recv/send in read * lock mode. When we release a sock we need to held the write lock. * * However this is locking code and not nice. When we remove the * othercon handling we can look into other mechanism to synchronize * io handling to call sock_release() at the right time. */ struct rw_semaphore sock_lock; unsigned long flags; #define CF_APP_LIMITED 0 #define CF_RECV_PENDING 1 #define CF_SEND_PENDING 2 #define CF_RECV_INTR 3 #define CF_IO_STOP 4 #define CF_IS_OTHERCON 5 struct list_head writequeue; /* List of outgoing writequeue_entries */ spinlock_t writequeue_lock; int retries; struct hlist_node list; /* due some connect()/accept() races we currently have this cross over * connection attempt second connection for one node. * * There is a solution to avoid the race by introducing a connect * rule as e.g. our_nodeid > nodeid_to_connect who is allowed to * connect. Otherside can connect but will only be considered that * the other side wants to have a reconnect. * * However changing to this behaviour will break backwards compatible. * In a DLM protocol major version upgrade we should remove this! 
*/ struct connection *othercon; struct work_struct rwork; /* receive worker */ struct work_struct swork; /* send worker */ wait_queue_head_t shutdown_wait; unsigned char rx_leftover_buf[DLM_MAX_SOCKET_BUFSIZE]; int rx_leftover; int mark; int addr_count; int curr_addr_index; struct sockaddr_storage addr[DLM_MAX_ADDR_COUNT]; spinlock_t addrs_lock; struct rcu_head rcu; }; #define sock2con(x) ((struct connection *)(x)->sk_user_data) struct listen_connection { struct socket *sock; struct work_struct rwork; }; #define DLM_WQ_REMAIN_BYTES(e) (PAGE_SIZE - e->end) #define DLM_WQ_LENGTH_BYTES(e) (e->end - e->offset) /* An entry waiting to be sent */ struct writequeue_entry { struct list_head list; struct page *page; int offset; int len; int end; int users; bool dirty; struct connection *con; struct list_head msgs; struct kref ref; }; struct dlm_msg { struct writequeue_entry *entry; struct dlm_msg *orig_msg; bool retransmit; void *ppc; int len; int idx; /* new()/commit() idx exchange */ struct list_head list; struct kref ref; }; struct processqueue_entry { unsigned char *buf; int nodeid; int buflen; struct list_head list; }; struct dlm_proto_ops { bool try_new_addr; const char *name; int proto; int how; void (*sockopts)(struct socket *sock); int (*bind)(struct socket *sock); int (*listen_validate)(void); void (*listen_sockopts)(struct socket *sock); int (*listen_bind)(struct socket *sock); }; static struct listen_sock_callbacks { void (*sk_error_report)(struct sock *); void (*sk_data_ready)(struct sock *); void (*sk_state_change)(struct sock *); void (*sk_write_space)(struct sock *); } listen_sock; static struct listen_connection listen_con; static struct sockaddr_storage dlm_local_addr[DLM_MAX_ADDR_COUNT]; static int dlm_local_count; /* Work queues */ static struct workqueue_struct *io_workqueue; static struct workqueue_struct *process_workqueue; static struct hlist_head connection_hash[CONN_HASH_SIZE]; static DEFINE_SPINLOCK(connections_lock); DEFINE_STATIC_SRCU(connections_srcu); static const struct dlm_proto_ops *dlm_proto_ops; #define DLM_IO_SUCCESS 0 #define DLM_IO_END 1 #define DLM_IO_EOF 2 #define DLM_IO_RESCHED 3 #define DLM_IO_FLUSH 4 static void process_recv_sockets(struct work_struct *work); static void process_send_sockets(struct work_struct *work); static void process_dlm_messages(struct work_struct *work); static DECLARE_WORK(process_work, process_dlm_messages); static DEFINE_SPINLOCK(processqueue_lock); static bool process_dlm_messages_pending; static DECLARE_WAIT_QUEUE_HEAD(processqueue_wq); static atomic_t processqueue_count; static LIST_HEAD(processqueue); bool dlm_lowcomms_is_running(void) { return !!listen_con.sock; } static void lowcomms_queue_swork(struct connection *con) { assert_spin_locked(&con->writequeue_lock); if (!test_bit(CF_IO_STOP, &con->flags) && !test_bit(CF_APP_LIMITED, &con->flags) && !test_and_set_bit(CF_SEND_PENDING, &con->flags)) queue_work(io_workqueue, &con->swork); } static void lowcomms_queue_rwork(struct connection *con) { #ifdef CONFIG_LOCKDEP WARN_ON_ONCE(!lockdep_sock_is_held(con->sock->sk)); #endif if (!test_bit(CF_IO_STOP, &con->flags) && !test_and_set_bit(CF_RECV_PENDING, &con->flags)) queue_work(io_workqueue, &con->rwork); } static void writequeue_entry_ctor(void *data) { struct writequeue_entry *entry = data; INIT_LIST_HEAD(&entry->msgs); } struct kmem_cache *dlm_lowcomms_writequeue_cache_create(void) { return kmem_cache_create("dlm_writequeue", sizeof(struct writequeue_entry), 0, 0, writequeue_entry_ctor); } struct kmem_cache 
*dlm_lowcomms_msg_cache_create(void) { return KMEM_CACHE(dlm_msg, 0); } /* need to held writequeue_lock */ static struct writequeue_entry *con_next_wq(struct connection *con) { struct writequeue_entry *e; e = list_first_entry_or_null(&con->writequeue, struct writequeue_entry, list); /* if len is zero nothing is to send, if there are users filling * buffers we wait until the users are done so we can send more. */ if (!e || e->users || e->len == 0) return NULL; return e; } static struct connection *__find_con(int nodeid, int r) { struct connection *con; hlist_for_each_entry_rcu(con, &connection_hash[r], list) { if (con->nodeid == nodeid) return con; } return NULL; } static void dlm_con_init(struct connection *con, int nodeid) { con->nodeid = nodeid; init_rwsem(&con->sock_lock); INIT_LIST_HEAD(&con->writequeue); spin_lock_init(&con->writequeue_lock); INIT_WORK(&con->swork, process_send_sockets); INIT_WORK(&con->rwork, process_recv_sockets); spin_lock_init(&con->addrs_lock); init_waitqueue_head(&con->shutdown_wait); } /* * If 'allocation' is zero then we don't attempt to create a new * connection structure for this node. */ static struct connection *nodeid2con(int nodeid, gfp_t alloc) { struct connection *con, *tmp; int r; r = nodeid_hash(nodeid); con = __find_con(nodeid, r); if (con || !alloc) return con; con = kzalloc(sizeof(*con), alloc); if (!con) return NULL; dlm_con_init(con, nodeid); spin_lock(&connections_lock); /* Because multiple workqueues/threads calls this function it can * race on multiple cpu's. Instead of locking hot path __find_con() * we just check in rare cases of recently added nodes again * under protection of connections_lock. If this is the case we * abort our connection creation and return the existing connection. */ tmp = __find_con(nodeid, r); if (tmp) { spin_unlock(&connections_lock); kfree(con); return tmp; } hlist_add_head_rcu(&con->list, &connection_hash[r]); spin_unlock(&connections_lock); return con; } static int addr_compare(const struct sockaddr_storage *x, const struct sockaddr_storage *y) { switch (x->ss_family) { case AF_INET: { struct sockaddr_in *sinx = (struct sockaddr_in *)x; struct sockaddr_in *siny = (struct sockaddr_in *)y; if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr) return 0; if (sinx->sin_port != siny->sin_port) return 0; break; } case AF_INET6: { struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x; struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y; if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr)) return 0; if (sinx->sin6_port != siny->sin6_port) return 0; break; } default: return 0; } return 1; } static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out, struct sockaddr *sa_out, bool try_new_addr, unsigned int *mark) { struct sockaddr_storage sas; struct connection *con; int idx; if (!dlm_local_count) return -1; idx = srcu_read_lock(&connections_srcu); con = nodeid2con(nodeid, 0); if (!con) { srcu_read_unlock(&connections_srcu, idx); return -ENOENT; } spin_lock(&con->addrs_lock); if (!con->addr_count) { spin_unlock(&con->addrs_lock); srcu_read_unlock(&connections_srcu, idx); return -ENOENT; } memcpy(&sas, &con->addr[con->curr_addr_index], sizeof(struct sockaddr_storage)); if (try_new_addr) { con->curr_addr_index++; if (con->curr_addr_index == con->addr_count) con->curr_addr_index = 0; } *mark = con->mark; spin_unlock(&con->addrs_lock); if (sas_out) memcpy(sas_out, &sas, sizeof(struct sockaddr_storage)); if (!sa_out) { srcu_read_unlock(&connections_srcu, idx); return 0; } if (dlm_local_addr[0].ss_family == AF_INET) 
{ struct sockaddr_in *in4 = (struct sockaddr_in *) &sas; struct sockaddr_in *ret4 = (struct sockaddr_in *) sa_out; ret4->sin_addr.s_addr = in4->sin_addr.s_addr; } else { struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &sas; struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) sa_out; ret6->sin6_addr = in6->sin6_addr; } srcu_read_unlock(&connections_srcu, idx); return 0; } static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid, unsigned int *mark) { struct connection *con; int i, idx, addr_i; idx = srcu_read_lock(&connections_srcu); for (i = 0; i < CONN_HASH_SIZE; i++) { hlist_for_each_entry_rcu(con, &connection_hash[i], list) { WARN_ON_ONCE(!con->addr_count); spin_lock(&con->addrs_lock); for (addr_i = 0; addr_i < con->addr_count; addr_i++) { if (addr_compare(&con->addr[addr_i], addr)) { *nodeid = con->nodeid; *mark = con->mark; spin_unlock(&con->addrs_lock); srcu_read_unlock(&connections_srcu, idx); return 0; } } spin_unlock(&con->addrs_lock); } } srcu_read_unlock(&connections_srcu, idx); return -ENOENT; } static bool dlm_lowcomms_con_has_addr(const struct connection *con, const struct sockaddr_storage *addr) { int i; for (i = 0; i < con->addr_count; i++) { if (addr_compare(&con->addr[i], addr)) return true; } return false; } int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr) { struct connection *con; bool ret; int idx; idx = srcu_read_lock(&connections_srcu); con = nodeid2con(nodeid, GFP_NOFS); if (!con) { srcu_read_unlock(&connections_srcu, idx); return -ENOMEM; } spin_lock(&con->addrs_lock); if (!con->addr_count) { memcpy(&con->addr[0], addr, sizeof(*addr)); con->addr_count = 1; con->mark = dlm_config.ci_mark; spin_unlock(&con->addrs_lock); srcu_read_unlock(&connections_srcu, idx); return 0; } ret = dlm_lowcomms_con_has_addr(con, addr); if (ret) { spin_unlock(&con->addrs_lock); srcu_read_unlock(&connections_srcu, idx); return -EEXIST; } if (con->addr_count >= DLM_MAX_ADDR_COUNT) { spin_unlock(&con->addrs_lock); srcu_read_unlock(&connections_srcu, idx); return -ENOSPC; } memcpy(&con->addr[con->addr_count++], addr, sizeof(*addr)); srcu_read_unlock(&connections_srcu, idx); spin_unlock(&con->addrs_lock); return 0; } /* Data available on socket or listen socket received a connect */ static void lowcomms_data_ready(struct sock *sk) { struct connection *con = sock2con(sk); trace_sk_data_ready(sk); set_bit(CF_RECV_INTR, &con->flags); lowcomms_queue_rwork(con); } static void lowcomms_write_space(struct sock *sk) { struct connection *con = sock2con(sk); clear_bit(SOCK_NOSPACE, &con->sock->flags); spin_lock_bh(&con->writequeue_lock); if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) { con->sock->sk->sk_write_pending--; clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags); } lowcomms_queue_swork(con); spin_unlock_bh(&con->writequeue_lock); } static void lowcomms_state_change(struct sock *sk) { /* SCTP layer is not calling sk_data_ready when the connection * is done, so we catch the signal through here. 
*/ if (sk->sk_shutdown & RCV_SHUTDOWN) lowcomms_data_ready(sk); } static void lowcomms_listen_data_ready(struct sock *sk) { trace_sk_data_ready(sk); queue_work(io_workqueue, &listen_con.rwork); } int dlm_lowcomms_connect_node(int nodeid) { struct connection *con; int idx; idx = srcu_read_lock(&connections_srcu); con = nodeid2con(nodeid, 0); if (WARN_ON_ONCE(!con)) { srcu_read_unlock(&connections_srcu, idx); return -ENOENT; } down_read(&con->sock_lock); if (!con->sock) { spin_lock_bh(&con->writequeue_lock); lowcomms_queue_swork(con); spin_unlock_bh(&con->writequeue_lock); } up_read(&con->sock_lock); srcu_read_unlock(&connections_srcu, idx); cond_resched(); return 0; } int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark) { struct connection *con; int idx; idx = srcu_read_lock(&connections_srcu); con = nodeid2con(nodeid, 0); if (!con) { srcu_read_unlock(&connections_srcu, idx); return -ENOENT; } spin_lock(&con->addrs_lock); con->mark = mark; spin_unlock(&con->addrs_lock); srcu_read_unlock(&connections_srcu, idx); return 0; } static void lowcomms_error_report(struct sock *sk) { struct connection *con = sock2con(sk); struct inet_sock *inet; inet = inet_sk(sk); switch (sk->sk_family) { case AF_INET: printk_ratelimited(KERN_ERR "dlm: node %d: socket error " "sending to node %d at %pI4, dport %d, " "sk_err=%d/%d\n", dlm_our_nodeid(), con->nodeid, &inet->inet_daddr, ntohs(inet->inet_dport), sk->sk_err, READ_ONCE(sk->sk_err_soft)); break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: printk_ratelimited(KERN_ERR "dlm: node %d: socket error " "sending to node %d at %pI6c, " "dport %d, sk_err=%d/%d\n", dlm_our_nodeid(), con->nodeid, &sk->sk_v6_daddr, ntohs(inet->inet_dport), sk->sk_err, READ_ONCE(sk->sk_err_soft)); break; #endif default: printk_ratelimited(KERN_ERR "dlm: node %d: socket error " "invalid socket family %d set, " "sk_err=%d/%d\n", dlm_our_nodeid(), sk->sk_family, sk->sk_err, READ_ONCE(sk->sk_err_soft)); break; } dlm_midcomms_unack_msg_resend(con->nodeid); listen_sock.sk_error_report(sk); } static void restore_callbacks(struct sock *sk) { #ifdef CONFIG_LOCKDEP WARN_ON_ONCE(!lockdep_sock_is_held(sk)); #endif sk->sk_user_data = NULL; sk->sk_data_ready = listen_sock.sk_data_ready; sk->sk_state_change = listen_sock.sk_state_change; sk->sk_write_space = listen_sock.sk_write_space; sk->sk_error_report = listen_sock.sk_error_report; } /* Make a socket active */ static void add_sock(struct socket *sock, struct connection *con) { struct sock *sk = sock->sk; lock_sock(sk); con->sock = sock; sk->sk_user_data = con; sk->sk_data_ready = lowcomms_data_ready; sk->sk_write_space = lowcomms_write_space; if (dlm_config.ci_protocol == DLM_PROTO_SCTP) sk->sk_state_change = lowcomms_state_change; sk->sk_allocation = GFP_NOFS; sk->sk_use_task_frag = false; sk->sk_error_report = lowcomms_error_report; release_sock(sk); } /* Add the port number to an IPv6 or 4 sockaddr and return the address length */ static void make_sockaddr(struct sockaddr_storage *saddr, __be16 port, int *addr_len) { saddr->ss_family = dlm_local_addr[0].ss_family; if (saddr->ss_family == AF_INET) { struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr; in4_addr->sin_port = port; *addr_len = sizeof(struct sockaddr_in); memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero)); } else { struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr; in6_addr->sin6_port = port; *addr_len = sizeof(struct sockaddr_in6); } memset((char *)saddr + *addr_len, 0, sizeof(struct sockaddr_storage) - *addr_len); } static void 
dlm_page_release(struct kref *kref) { struct writequeue_entry *e = container_of(kref, struct writequeue_entry, ref); __free_page(e->page); dlm_free_writequeue(e); } static void dlm_msg_release(struct kref *kref) { struct dlm_msg *msg = container_of(kref, struct dlm_msg, ref); kref_put(&msg->entry->ref, dlm_page_release); dlm_free_msg(msg); } static void free_entry(struct writequeue_entry *e) { struct dlm_msg *msg, *tmp; list_for_each_entry_safe(msg, tmp, &e->msgs, list) { if (msg->orig_msg) { msg->orig_msg->retransmit = false; kref_put(&msg->orig_msg->ref, dlm_msg_release); } list_del(&msg->list); kref_put(&msg->ref, dlm_msg_release); } list_del(&e->list); kref_put(&e->ref, dlm_page_release); } static void dlm_close_sock(struct socket **sock) { lock_sock((*sock)->sk); restore_callbacks((*sock)->sk); release_sock((*sock)->sk); sock_release(*sock); *sock = NULL; } static void allow_connection_io(struct connection *con) { if (con->othercon) clear_bit(CF_IO_STOP, &con->othercon->flags); clear_bit(CF_IO_STOP, &con->flags); } static void stop_connection_io(struct connection *con) { if (con->othercon) stop_connection_io(con->othercon); spin_lock_bh(&con->writequeue_lock); set_bit(CF_IO_STOP, &con->flags); spin_unlock_bh(&con->writequeue_lock); down_write(&con->sock_lock); if (con->sock) { lock_sock(con->sock->sk); restore_callbacks(con->sock->sk); release_sock(con->sock->sk); } up_write(&con->sock_lock); cancel_work_sync(&con->swork); cancel_work_sync(&con->rwork); } /* Close a remote connection and tidy up */ static void close_connection(struct connection *con, bool and_other) { struct writequeue_entry *e; if (con->othercon && and_other) close_connection(con->othercon, false); down_write(&con->sock_lock); if (!con->sock) { up_write(&con->sock_lock); return; } dlm_close_sock(&con->sock); /* if we send a writequeue entry only a half way, we drop the * whole entry because reconnection and that we not start of the * middle of a msg which will confuse the other end. * * we can always drop messages because retransmits, but what we * cannot allow is to transmit half messages which may be processed * at the other side. * * our policy is to start on a clean state when disconnects, we don't * know what's send/received on transport layer in this case. 
*/ spin_lock_bh(&con->writequeue_lock); if (!list_empty(&con->writequeue)) { e = list_first_entry(&con->writequeue, struct writequeue_entry, list); if (e->dirty) free_entry(e); } spin_unlock_bh(&con->writequeue_lock); con->rx_leftover = 0; con->retries = 0; clear_bit(CF_APP_LIMITED, &con->flags); clear_bit(CF_RECV_PENDING, &con->flags); clear_bit(CF_SEND_PENDING, &con->flags); up_write(&con->sock_lock); } static void shutdown_connection(struct connection *con, bool and_other) { int ret; if (con->othercon && and_other) shutdown_connection(con->othercon, false); flush_workqueue(io_workqueue); down_read(&con->sock_lock); /* nothing to shutdown */ if (!con->sock) { up_read(&con->sock_lock); return; } ret = kernel_sock_shutdown(con->sock, dlm_proto_ops->how); up_read(&con->sock_lock); if (ret) { log_print("Connection %p failed to shutdown: %d will force close", con, ret); goto force_close; } else { ret = wait_event_timeout(con->shutdown_wait, !con->sock, DLM_SHUTDOWN_WAIT_TIMEOUT); if (ret == 0) { log_print("Connection %p shutdown timed out, will force close", con); goto force_close; } } return; force_close: close_connection(con, false); } static struct processqueue_entry *new_processqueue_entry(int nodeid, int buflen) { struct processqueue_entry *pentry; pentry = kmalloc(sizeof(*pentry), GFP_NOFS); if (!pentry) return NULL; pentry->buf = kmalloc(buflen, GFP_NOFS); if (!pentry->buf) { kfree(pentry); return NULL; } pentry->nodeid = nodeid; return pentry; } static void free_processqueue_entry(struct processqueue_entry *pentry) { kfree(pentry->buf); kfree(pentry); } static void process_dlm_messages(struct work_struct *work) { struct processqueue_entry *pentry; spin_lock_bh(&processqueue_lock); pentry = list_first_entry_or_null(&processqueue, struct processqueue_entry, list); if (WARN_ON_ONCE(!pentry)) { process_dlm_messages_pending = false; spin_unlock_bh(&processqueue_lock); return; } list_del(&pentry->list); if (atomic_dec_and_test(&processqueue_count)) wake_up(&processqueue_wq); spin_unlock_bh(&processqueue_lock); for (;;) { dlm_process_incoming_buffer(pentry->nodeid, pentry->buf, pentry->buflen); free_processqueue_entry(pentry); spin_lock_bh(&processqueue_lock); pentry = list_first_entry_or_null(&processqueue, struct processqueue_entry, list); if (!pentry) { process_dlm_messages_pending = false; spin_unlock_bh(&processqueue_lock); break; } list_del(&pentry->list); if (atomic_dec_and_test(&processqueue_count)) wake_up(&processqueue_wq); spin_unlock_bh(&processqueue_lock); } } /* Data received from remote end */ static int receive_from_sock(struct connection *con, int buflen) { struct processqueue_entry *pentry; int ret, buflen_real; struct msghdr msg; struct kvec iov; pentry = new_processqueue_entry(con->nodeid, buflen); if (!pentry) return DLM_IO_RESCHED; memcpy(pentry->buf, con->rx_leftover_buf, con->rx_leftover); /* calculate new buffer parameter regarding last receive and * possible leftover bytes */ iov.iov_base = pentry->buf + con->rx_leftover; iov.iov_len = buflen - con->rx_leftover; memset(&msg, 0, sizeof(msg)); msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL; clear_bit(CF_RECV_INTR, &con->flags); again: ret = kernel_recvmsg(con->sock, &msg, &iov, 1, iov.iov_len, msg.msg_flags); trace_dlm_recv(con->nodeid, ret); if (ret == -EAGAIN) { lock_sock(con->sock->sk); if (test_and_clear_bit(CF_RECV_INTR, &con->flags)) { release_sock(con->sock->sk); goto again; } clear_bit(CF_RECV_PENDING, &con->flags); release_sock(con->sock->sk); free_processqueue_entry(pentry); return DLM_IO_END; } else if (ret == 
0) { /* close will clear CF_RECV_PENDING */ free_processqueue_entry(pentry); return DLM_IO_EOF; } else if (ret < 0) { free_processqueue_entry(pentry); return ret; } /* new buflen according readed bytes and leftover from last receive */ buflen_real = ret + con->rx_leftover; ret = dlm_validate_incoming_buffer(con->nodeid, pentry->buf, buflen_real); if (ret < 0) { free_processqueue_entry(pentry); return ret; } pentry->buflen = ret; /* calculate leftover bytes from process and put it into begin of * the receive buffer, so next receive we have the full message * at the start address of the receive buffer. */ con->rx_leftover = buflen_real - ret; memmove(con->rx_leftover_buf, pentry->buf + ret, con->rx_leftover); spin_lock_bh(&processqueue_lock); ret = atomic_inc_return(&processqueue_count); list_add_tail(&pentry->list, &processqueue); if (!process_dlm_messages_pending) { process_dlm_messages_pending = true; queue_work(process_workqueue, &process_work); } spin_unlock_bh(&processqueue_lock); if (ret > DLM_MAX_PROCESS_BUFFERS) return DLM_IO_FLUSH; return DLM_IO_SUCCESS; } /* Listening socket is busy, accept a connection */ static int accept_from_sock(void) { struct sockaddr_storage peeraddr; int len, idx, result, nodeid; struct connection *newcon; struct socket *newsock; unsigned int mark; result = kernel_accept(listen_con.sock, &newsock, O_NONBLOCK); if (result == -EAGAIN) return DLM_IO_END; else if (result < 0) goto accept_err; /* Get the connected socket's peer */ memset(&peeraddr, 0, sizeof(peeraddr)); len = newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr, 2); if (len < 0) { result = -ECONNABORTED; goto accept_err; } /* Get the new node's NODEID */ make_sockaddr(&peeraddr, 0, &len); if (addr_to_nodeid(&peeraddr, &nodeid, &mark)) { switch (peeraddr.ss_family) { case AF_INET: { struct sockaddr_in *sin = (struct sockaddr_in *)&peeraddr; log_print("connect from non cluster IPv4 node %pI4", &sin->sin_addr); break; } #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: { struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&peeraddr; log_print("connect from non cluster IPv6 node %pI6c", &sin6->sin6_addr); break; } #endif default: log_print("invalid family from non cluster node"); break; } sock_release(newsock); return -1; } log_print("got connection from %d", nodeid); /* Check to see if we already have a connection to this node. This * could happen if the two nodes initiate a connection at roughly * the same time and the connections cross on the wire. 
* In this case we store the incoming one in "othercon" */ idx = srcu_read_lock(&connections_srcu); newcon = nodeid2con(nodeid, 0); if (WARN_ON_ONCE(!newcon)) { srcu_read_unlock(&connections_srcu, idx); result = -ENOENT; goto accept_err; } sock_set_mark(newsock->sk, mark); down_write(&newcon->sock_lock); if (newcon->sock) { struct connection *othercon = newcon->othercon; if (!othercon) { othercon = kzalloc(sizeof(*othercon), GFP_NOFS); if (!othercon) { log_print("failed to allocate incoming socket"); up_write(&newcon->sock_lock); srcu_read_unlock(&connections_srcu, idx); result = -ENOMEM; goto accept_err; } dlm_con_init(othercon, nodeid); lockdep_set_subclass(&othercon->sock_lock, 1); newcon->othercon = othercon; set_bit(CF_IS_OTHERCON, &othercon->flags); } else { /* close other sock con if we have something new */ close_connection(othercon, false); } down_write(&othercon->sock_lock); add_sock(newsock, othercon); /* check if we receved something while adding */ lock_sock(othercon->sock->sk); lowcomms_queue_rwork(othercon); release_sock(othercon->sock->sk); up_write(&othercon->sock_lock); } else { /* accept copies the sk after we've saved the callbacks, so we don't want to save them a second time or comm errors will result in calling sk_error_report recursively. */ add_sock(newsock, newcon); /* check if we receved something while adding */ lock_sock(newcon->sock->sk); lowcomms_queue_rwork(newcon); release_sock(newcon->sock->sk); } up_write(&newcon->sock_lock); srcu_read_unlock(&connections_srcu, idx); return DLM_IO_SUCCESS; accept_err: if (newsock) sock_release(newsock); return result; } /* * writequeue_entry_complete - try to delete and free write queue entry * @e: write queue entry to try to delete * @completed: bytes completed * * writequeue_lock must be held. 
*/ static void writequeue_entry_complete(struct writequeue_entry *e, int completed) { e->offset += completed; e->len -= completed; /* signal that page was half way transmitted */ e->dirty = true; if (e->len == 0 && e->users == 0) free_entry(e); } /* * sctp_bind_addrs - bind a SCTP socket to all our addresses */ static int sctp_bind_addrs(struct socket *sock, __be16 port) { struct sockaddr_storage localaddr; struct sockaddr_unsized *addr = (struct sockaddr_unsized *)&localaddr; int i, addr_len, result = 0; for (i = 0; i < dlm_local_count; i++) { memcpy(&localaddr, &dlm_local_addr[i], sizeof(localaddr)); make_sockaddr(&localaddr, port, &addr_len); if (!i) result = kernel_bind(sock, addr, addr_len); else result = sock_bind_add(sock->sk, addr, addr_len); if (result < 0) { log_print("Can't bind to %d addr number %d, %d.\n", port, i + 1, result); break; } } return result; } /* Get local addresses */ static void init_local(void) { struct sockaddr_storage sas; int i; dlm_local_count = 0; for (i = 0; i < DLM_MAX_ADDR_COUNT; i++) { if (dlm_our_addr(&sas, i)) break; memcpy(&dlm_local_addr[dlm_local_count++], &sas, sizeof(sas)); } } static struct writequeue_entry *new_writequeue_entry(struct connection *con) { struct writequeue_entry *entry; entry = dlm_allocate_writequeue(); if (!entry) return NULL; entry->page = alloc_page(GFP_ATOMIC | __GFP_ZERO); if (!entry->page) { dlm_free_writequeue(entry); return NULL; } entry->offset = 0; entry->len = 0; entry->end = 0; entry->dirty = false; entry->con = con; entry->users = 1; kref_init(&entry->ref); return entry; } static struct writequeue_entry *new_wq_entry(struct connection *con, int len, char **ppc, void (*cb)(void *data), void *data) { struct writequeue_entry *e; spin_lock_bh(&con->writequeue_lock); if (!list_empty(&con->writequeue)) { e = list_last_entry(&con->writequeue, struct writequeue_entry, list); if (DLM_WQ_REMAIN_BYTES(e) >= len) { kref_get(&e->ref); *ppc = page_address(e->page) + e->end; if (cb) cb(data); e->end += len; e->users++; goto out; } } e = new_writequeue_entry(con); if (!e) goto out; kref_get(&e->ref); *ppc = page_address(e->page); e->end += len; if (cb) cb(data); list_add_tail(&e->list, &con->writequeue); out: spin_unlock_bh(&con->writequeue_lock); return e; }; static struct dlm_msg *dlm_lowcomms_new_msg_con(struct connection *con, int len, char **ppc, void (*cb)(void *data), void *data) { struct writequeue_entry *e; struct dlm_msg *msg; msg = dlm_allocate_msg(); if (!msg) return NULL; kref_init(&msg->ref); e = new_wq_entry(con, len, ppc, cb, data); if (!e) { dlm_free_msg(msg); return NULL; } msg->retransmit = false; msg->orig_msg = NULL; msg->ppc = *ppc; msg->len = len; msg->entry = e; return msg; } /* avoid false positive for nodes_srcu, unlock happens in * dlm_lowcomms_commit_msg which is a must call if success */ #ifndef __CHECKER__ struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, char **ppc, void (*cb)(void *data), void *data) { struct connection *con; struct dlm_msg *msg; int idx; if (len > DLM_MAX_SOCKET_BUFSIZE || len < sizeof(struct dlm_header)) { BUILD_BUG_ON(PAGE_SIZE < DLM_MAX_SOCKET_BUFSIZE); log_print("failed to allocate a buffer of size %d", len); WARN_ON_ONCE(1); return NULL; } idx = srcu_read_lock(&connections_srcu); con = nodeid2con(nodeid, 0); if (WARN_ON_ONCE(!con)) { srcu_read_unlock(&connections_srcu, idx); return NULL; } msg = dlm_lowcomms_new_msg_con(con, len, ppc, cb, data); if (!msg) { srcu_read_unlock(&connections_srcu, idx); return NULL; } /* for dlm_lowcomms_commit_msg() */ kref_get(&msg->ref); 
/* we assume if successful commit must called */ msg->idx = idx; return msg; } #endif static void _dlm_lowcomms_commit_msg(struct dlm_msg *msg) { struct writequeue_entry *e = msg->entry; struct connection *con = e->con; int users; spin_lock_bh(&con->writequeue_lock); kref_get(&msg->ref); list_add(&msg->list, &e->msgs); users = --e->users; if (users) goto out; e->len = DLM_WQ_LENGTH_BYTES(e); lowcomms_queue_swork(con); out: spin_unlock_bh(&con->writequeue_lock); return; } /* avoid false positive for nodes_srcu, lock was happen in * dlm_lowcomms_new_msg */ #ifndef __CHECKER__ void dlm_lowcomms_commit_msg(struct dlm_msg *msg) { _dlm_lowcomms_commit_msg(msg); srcu_read_unlock(&connections_srcu, msg->idx); /* because dlm_lowcomms_new_msg() */ kref_put(&msg->ref, dlm_msg_release); } #endif void dlm_lowcomms_put_msg(struct dlm_msg *msg) { kref_put(&msg->ref, dlm_msg_release); } /* does not held connections_srcu, usage lowcomms_error_report only */ int dlm_lowcomms_resend_msg(struct dlm_msg *msg) { struct dlm_msg *msg_resend; char *ppc; if (msg->retransmit) return 1; msg_resend = dlm_lowcomms_new_msg_con(msg->entry->con, msg->len, &ppc, NULL, NULL); if (!msg_resend) return -ENOMEM; msg->retransmit = true; kref_get(&msg->ref); msg_resend->orig_msg = msg; memcpy(ppc, msg->ppc, msg->len); _dlm_lowcomms_commit_msg(msg_resend); dlm_lowcomms_put_msg(msg_resend); return 0; } /* Send a message */ static int send_to_sock(struct connection *con) { struct writequeue_entry *e; struct bio_vec bvec; struct msghdr msg = { .msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT | MSG_NOSIGNAL, }; int len, offset, ret; spin_lock_bh(&con->writequeue_lock); e = con_next_wq(con); if (!e) { clear_bit(CF_SEND_PENDING, &con->flags); spin_unlock_bh(&con->writequeue_lock); return DLM_IO_END; } len = e->len; offset = e->offset; WARN_ON_ONCE(len == 0 && e->users == 0); spin_unlock_bh(&con->writequeue_lock); bvec_set_page(&bvec, e->page, len, offset); iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len); ret = sock_sendmsg(con->sock, &msg); trace_dlm_send(con->nodeid, ret); if (ret == -EAGAIN || ret == 0) { lock_sock(con->sock->sk); spin_lock_bh(&con->writequeue_lock); if (test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) && !test_and_set_bit(CF_APP_LIMITED, &con->flags)) { /* Notify TCP that we're limited by the * application window size. 
*/ set_bit(SOCK_NOSPACE, &con->sock->sk->sk_socket->flags); con->sock->sk->sk_write_pending++; clear_bit(CF_SEND_PENDING, &con->flags); spin_unlock_bh(&con->writequeue_lock); release_sock(con->sock->sk); /* wait for write_space() event */ return DLM_IO_END; } spin_unlock_bh(&con->writequeue_lock); release_sock(con->sock->sk); return DLM_IO_RESCHED; } else if (ret < 0) { return ret; } spin_lock_bh(&con->writequeue_lock); writequeue_entry_complete(e, ret); spin_unlock_bh(&con->writequeue_lock); return DLM_IO_SUCCESS; } static void clean_one_writequeue(struct connection *con) { struct writequeue_entry *e, *safe; spin_lock_bh(&con->writequeue_lock); list_for_each_entry_safe(e, safe, &con->writequeue, list) { free_entry(e); } spin_unlock_bh(&con->writequeue_lock); } static void connection_release(struct rcu_head *rcu) { struct connection *con = container_of(rcu, struct connection, rcu); WARN_ON_ONCE(!list_empty(&con->writequeue)); WARN_ON_ONCE(con->sock); kfree(con); } /* Called from recovery when it knows that a node has left the cluster */ int dlm_lowcomms_close(int nodeid) { struct connection *con; int idx; log_print("closing connection to node %d", nodeid); idx = srcu_read_lock(&connections_srcu); con = nodeid2con(nodeid, 0); if (WARN_ON_ONCE(!con)) { srcu_read_unlock(&connections_srcu, idx); return -ENOENT; } stop_connection_io(con); log_print("io handling for node: %d stopped", nodeid); close_connection(con, true); spin_lock(&connections_lock); hlist_del_rcu(&con->list); spin_unlock(&connections_lock); clean_one_writequeue(con); call_srcu(&connections_srcu, &con->rcu, connection_release); if (con->othercon) { clean_one_writequeue(con->othercon); call_srcu(&connections_srcu, &con->othercon->rcu, connection_release); } srcu_read_unlock(&connections_srcu, idx); /* for debugging we print when we are done to compare with other * messages in between. This function need to be correctly synchronized * with io handling */ log_print("closing connection to node %d done", nodeid); return 0; } /* Receive worker function */ static void process_recv_sockets(struct work_struct *work) { struct connection *con = container_of(work, struct connection, rwork); int ret, buflen; down_read(&con->sock_lock); if (!con->sock) { up_read(&con->sock_lock); return; } buflen = READ_ONCE(dlm_config.ci_buffer_size); do { ret = receive_from_sock(con, buflen); } while (ret == DLM_IO_SUCCESS); up_read(&con->sock_lock); switch (ret) { case DLM_IO_END: /* CF_RECV_PENDING cleared */ break; case DLM_IO_EOF: close_connection(con, false); wake_up(&con->shutdown_wait); /* CF_RECV_PENDING cleared */ break; case DLM_IO_FLUSH: /* we can't flush the process_workqueue here because a * WQ_MEM_RECLAIM workequeue can occurr a deadlock for a non * WQ_MEM_RECLAIM workqueue such as process_workqueue. Instead * we have a waitqueue to wait until all messages are * processed. * * This handling is only necessary to backoff the sender and * not queue all messages from the socket layer into DLM * processqueue. When DLM is capable to parse multiple messages * on an e.g. per socket basis this handling can might be * removed. Especially in a message burst we are too slow to * process messages and the queue will fill up memory. 
*/ wait_event(processqueue_wq, !atomic_read(&processqueue_count)); fallthrough; case DLM_IO_RESCHED: cond_resched(); queue_work(io_workqueue, &con->rwork); /* CF_RECV_PENDING not cleared */ break; default: if (ret < 0) { if (test_bit(CF_IS_OTHERCON, &con->flags)) { close_connection(con, false); } else { spin_lock_bh(&con->writequeue_lock); lowcomms_queue_swork(con); spin_unlock_bh(&con->writequeue_lock); } /* CF_RECV_PENDING cleared for othercon * we trigger send queue if not already done * and process_send_sockets will handle it */ break; } WARN_ON_ONCE(1); break; } } static void process_listen_recv_socket(struct work_struct *work) { int ret; if (WARN_ON_ONCE(!listen_con.sock)) return; do { ret = accept_from_sock(); } while (ret == DLM_IO_SUCCESS); if (ret < 0) log_print("critical error accepting connection: %d", ret); } static int dlm_connect(struct connection *con) { struct sockaddr_storage addr; int result, addr_len; struct socket *sock; unsigned int mark; memset(&addr, 0, sizeof(addr)); result = nodeid_to_addr(con->nodeid, &addr, NULL, dlm_proto_ops->try_new_addr, &mark); if (result < 0) { log_print("no address for nodeid %d", con->nodeid); return result; } /* Create a socket to communicate with */ result = sock_create_kern(&init_net, dlm_local_addr[0].ss_family, SOCK_STREAM, dlm_proto_ops->proto, &sock); if (result < 0) return result; sock_set_mark(sock->sk, mark); dlm_proto_ops->sockopts(sock); result = dlm_proto_ops->bind(sock); if (result < 0) { sock_release(sock); return result; } add_sock(sock, con); log_print_ratelimited("connecting to %d", con->nodeid); make_sockaddr(&addr, dlm_config.ci_tcp_port, &addr_len); result = kernel_connect(sock, (struct sockaddr_unsized *)&addr, addr_len, 0); switch (result) { case -EINPROGRESS: /* not an error */ fallthrough; case 0: break; default: if (result < 0) dlm_close_sock(&con->sock); break; } return result; } /* Send worker function */ static void process_send_sockets(struct work_struct *work) { struct connection *con = container_of(work, struct connection, swork); int ret; WARN_ON_ONCE(test_bit(CF_IS_OTHERCON, &con->flags)); down_read(&con->sock_lock); if (!con->sock) { up_read(&con->sock_lock); down_write(&con->sock_lock); if (!con->sock) { ret = dlm_connect(con); switch (ret) { case 0: break; default: /* CF_SEND_PENDING not cleared */ up_write(&con->sock_lock); log_print("connect to node %d try %d error %d", con->nodeid, con->retries++, ret); msleep(1000); /* For now we try forever to reconnect. In * future we should send a event to cluster * manager to fence itself after certain amount * of retries. 
*/ queue_work(io_workqueue, &con->swork); return; } } downgrade_write(&con->sock_lock); } do { ret = send_to_sock(con); } while (ret == DLM_IO_SUCCESS); up_read(&con->sock_lock); switch (ret) { case DLM_IO_END: /* CF_SEND_PENDING cleared */ break; case DLM_IO_RESCHED: /* CF_SEND_PENDING not cleared */ cond_resched(); queue_work(io_workqueue, &con->swork); break; default: if (ret < 0) { close_connection(con, false); /* CF_SEND_PENDING cleared */ spin_lock_bh(&con->writequeue_lock); lowcomms_queue_swork(con); spin_unlock_bh(&con->writequeue_lock); break; } WARN_ON_ONCE(1); break; } } static void work_stop(void) { if (io_workqueue) { destroy_workqueue(io_workqueue); io_workqueue = NULL; } if (process_workqueue) { destroy_workqueue(process_workqueue); process_workqueue = NULL; } } static int work_start(void) { io_workqueue = alloc_workqueue("dlm_io", WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 0); if (!io_workqueue) { log_print("can't start dlm_io"); return -ENOMEM; } process_workqueue = alloc_workqueue("dlm_process", WQ_HIGHPRI | WQ_BH | WQ_PERCPU, 0); if (!process_workqueue) { log_print("can't start dlm_process"); destroy_workqueue(io_workqueue); io_workqueue = NULL; return -ENOMEM; } return 0; } void dlm_lowcomms_shutdown(void) { struct connection *con; int i, idx; /* stop lowcomms_listen_data_ready calls */ lock_sock(listen_con.sock->sk); listen_con.sock->sk->sk_data_ready = listen_sock.sk_data_ready; release_sock(listen_con.sock->sk); cancel_work_sync(&listen_con.rwork); dlm_close_sock(&listen_con.sock); idx = srcu_read_lock(&connections_srcu); for (i = 0; i < CONN_HASH_SIZE; i++) { hlist_for_each_entry_rcu(con, &connection_hash[i], list) { shutdown_connection(con, true); stop_connection_io(con); flush_workqueue(process_workqueue); close_connection(con, true); clean_one_writequeue(con); if (con->othercon) clean_one_writequeue(con->othercon); allow_connection_io(con); } } srcu_read_unlock(&connections_srcu, idx); } void dlm_lowcomms_stop(void) { work_stop(); dlm_proto_ops = NULL; } static int dlm_listen_for_all(void) { struct socket *sock; int result; log_print("Using %s for communications", dlm_proto_ops->name); result = dlm_proto_ops->listen_validate(); if (result < 0) return result; result = sock_create_kern(&init_net, dlm_local_addr[0].ss_family, SOCK_STREAM, dlm_proto_ops->proto, &sock); if (result < 0) { log_print("Can't create comms socket: %d", result); return result; } sock_set_mark(sock->sk, dlm_config.ci_mark); dlm_proto_ops->listen_sockopts(sock); result = dlm_proto_ops->listen_bind(sock); if (result < 0) goto out; lock_sock(sock->sk); listen_sock.sk_data_ready = sock->sk->sk_data_ready; listen_sock.sk_write_space = sock->sk->sk_write_space; listen_sock.sk_error_report = sock->sk->sk_error_report; listen_sock.sk_state_change = sock->sk->sk_state_change; listen_con.sock = sock; sock->sk->sk_allocation = GFP_NOFS; sock->sk->sk_use_task_frag = false; sock->sk->sk_data_ready = lowcomms_listen_data_ready; release_sock(sock->sk); result = sock->ops->listen(sock, 128); if (result < 0) { dlm_close_sock(&listen_con.sock); return result; } return 0; out: sock_release(sock); return result; } static int dlm_tcp_bind(struct socket *sock) { struct sockaddr_storage src_addr; int result, addr_len; /* Bind to our cluster-known address connecting to avoid * routing problems. 
*/ memcpy(&src_addr, &dlm_local_addr[0], sizeof(src_addr)); make_sockaddr(&src_addr, 0, &addr_len); result = kernel_bind(sock, (struct sockaddr_unsized *)&src_addr, addr_len); if (result < 0) { /* This *may* not indicate a critical error */ log_print("could not bind for connect: %d", result); } return 0; } static int dlm_tcp_listen_validate(void) { /* We don't support multi-homed hosts */ if (dlm_local_count > 1) { log_print("Detect multi-homed hosts but use only the first IP address."); log_print("Try SCTP, if you want to enable multi-link."); } return 0; } static void dlm_tcp_sockopts(struct socket *sock) { /* Turn off Nagle's algorithm */ tcp_sock_set_nodelay(sock->sk); } static void dlm_tcp_listen_sockopts(struct socket *sock) { dlm_tcp_sockopts(sock); sock_set_reuseaddr(sock->sk); } static int dlm_tcp_listen_bind(struct socket *sock) { int addr_len; /* Bind to our port */ make_sockaddr(&dlm_local_addr[0], dlm_config.ci_tcp_port, &addr_len); return kernel_bind(sock, (struct sockaddr_unsized *)&dlm_local_addr[0], addr_len); } static const struct dlm_proto_ops dlm_tcp_ops = { .name = "TCP", .proto = IPPROTO_TCP, .how = SHUT_WR, .sockopts = dlm_tcp_sockopts, .bind = dlm_tcp_bind, .listen_validate = dlm_tcp_listen_validate, .listen_sockopts = dlm_tcp_listen_sockopts, .listen_bind = dlm_tcp_listen_bind, }; static int dlm_sctp_bind(struct socket *sock) { return sctp_bind_addrs(sock, 0); } static int dlm_sctp_listen_validate(void) { if (!IS_ENABLED(CONFIG_IP_SCTP)) { log_print("SCTP is not enabled by this kernel"); return -EOPNOTSUPP; } request_module("sctp"); return 0; } static int dlm_sctp_bind_listen(struct socket *sock) { return sctp_bind_addrs(sock, dlm_config.ci_tcp_port); } static void dlm_sctp_sockopts(struct socket *sock) { /* Turn off Nagle's algorithm */ sctp_sock_set_nodelay(sock->sk); sock_set_rcvbuf(sock->sk, NEEDED_RMEM); } static const struct dlm_proto_ops dlm_sctp_ops = { .name = "SCTP", .proto = IPPROTO_SCTP, .how = SHUT_RDWR, .try_new_addr = true, .sockopts = dlm_sctp_sockopts, .bind = dlm_sctp_bind, .listen_validate = dlm_sctp_listen_validate, .listen_sockopts = dlm_sctp_sockopts, .listen_bind = dlm_sctp_bind_listen, }; int dlm_lowcomms_start(void) { int error; init_local(); if (!dlm_local_count) { error = -ENOTCONN; log_print("no local IP address has been set"); goto fail; } error = work_start(); if (error) goto fail; /* Start listening */ switch (dlm_config.ci_protocol) { case DLM_PROTO_TCP: dlm_proto_ops = &dlm_tcp_ops; break; case DLM_PROTO_SCTP: dlm_proto_ops = &dlm_sctp_ops; break; default: log_print("Invalid protocol identifier %d set", dlm_config.ci_protocol); error = -EINVAL; goto fail_proto_ops; } error = dlm_listen_for_all(); if (error) goto fail_listen; return 0; fail_listen: dlm_proto_ops = NULL; fail_proto_ops: work_stop(); fail: return error; } void dlm_lowcomms_init(void) { int i; for (i = 0; i < CONN_HASH_SIZE; i++) INIT_HLIST_HEAD(&connection_hash[i]); INIT_WORK(&listen_con.rwork, process_listen_recv_socket); } void dlm_lowcomms_exit(void) { struct connection *con; int i, idx; idx = srcu_read_lock(&connections_srcu); for (i = 0; i < CONN_HASH_SIZE; i++) { hlist_for_each_entry_rcu(con, &connection_hash[i], list) { spin_lock(&connections_lock); hlist_del_rcu(&con->list); spin_unlock(&connections_lock); if (con->othercon) call_srcu(&connections_srcu, &con->othercon->rcu, connection_release); call_srcu(&connections_srcu, &con->rcu, connection_release); } } srcu_read_unlock(&connections_srcu, idx); }
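The code above defines the sender-side message API built on the per-connection writequeue: dlm_lowcomms_new_msg() reserves space on the target node's writequeue while holding an SRCU read lock, dlm_lowcomms_commit_msg() marks the entry ready for send_to_sock() and drops that lock, and dlm_lowcomms_put_msg() releases the caller's reference. The following is a minimal sketch of a hypothetical caller, not code from this file; the function name, the payload copy, and the include are illustrative assumptions (the real callers sit in the DLM midcomms layer, which is not shown here).

#include <linux/errno.h>
#include <linux/string.h>

#include "lowcomms.h"	/* assumed header declaring the API defined above */

/* Hypothetical caller: send 'len' opaque bytes to 'nodeid'.  Per the checks
 * in dlm_lowcomms_new_msg(), 'len' must be at least sizeof(struct dlm_header)
 * and at most DLM_MAX_SOCKET_BUFSIZE.
 */
static int example_send_to_node(int nodeid, const void *payload, int len)
{
	struct dlm_msg *msg;
	char *buf;

	/* Reserve space on nodeid's writequeue; 'buf' points into the page
	 * that send_to_sock() will later hand to sock_sendmsg().
	 */
	msg = dlm_lowcomms_new_msg(nodeid, len, &buf, NULL, NULL);
	if (!msg)
		return -ENOMEM;

	memcpy(buf, payload, len);

	/* Queue the entry for transmission and release the SRCU read-side
	 * section taken by dlm_lowcomms_new_msg().
	 */
	dlm_lowcomms_commit_msg(msg);

	/* Drop the reference handed back by dlm_lowcomms_new_msg(). */
	dlm_lowcomms_put_msg(msg);
	return 0;
}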
// SPDX-License-Identifier: GPL-2.0-only /* * 32bit Socket syscall emulation. Based on arch/sparc64/kernel/sys_sparc32.c. * * Copyright (C) 2000 VA Linux Co * Copyright (C) 2000 Don Dugger <n0ano@valinux.com> * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com> * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 2000 Hewlett-Packard Co.
* Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com> * Copyright (C) 2000,2001 Andi Kleen, SuSE Labs */ #include <linux/kernel.h> #include <linux/gfp.h> #include <linux/fs.h> #include <linux/types.h> #include <linux/file.h> #include <linux/icmpv6.h> #include <linux/socket.h> #include <linux/syscalls.h> #include <linux/filter.h> #include <linux/compat.h> #include <linux/security.h> #include <linux/audit.h> #include <linux/export.h> #include <net/scm.h> #include <net/sock.h> #include <net/ip.h> #include <net/ipv6.h> #include <linux/uaccess.h> #include <net/compat.h> int __get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr *msg, struct sockaddr __user **save_addr) { ssize_t err; kmsg->msg_flags = msg->msg_flags; kmsg->msg_namelen = msg->msg_namelen; if (!msg->msg_name) kmsg->msg_namelen = 0; if (kmsg->msg_namelen < 0) return -EINVAL; if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) kmsg->msg_namelen = sizeof(struct sockaddr_storage); kmsg->msg_control_is_user = true; kmsg->msg_get_inq = 0; kmsg->msg_control_user = compat_ptr(msg->msg_control); kmsg->msg_controllen = msg->msg_controllen; if (save_addr) *save_addr = compat_ptr(msg->msg_name); if (msg->msg_name && kmsg->msg_namelen) { if (!save_addr) { err = move_addr_to_kernel(compat_ptr(msg->msg_name), kmsg->msg_namelen, kmsg->msg_name); if (err < 0) return err; } } else { kmsg->msg_name = NULL; kmsg->msg_namelen = 0; } if (msg->msg_iovlen > UIO_MAXIOV) return -EMSGSIZE; kmsg->msg_iocb = NULL; kmsg->msg_ubuf = NULL; return 0; } int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg, struct sockaddr __user **save_addr, struct iovec **iov) { struct compat_msghdr msg; ssize_t err; if (copy_from_user(&msg, umsg, sizeof(*umsg))) return -EFAULT; err = __get_compat_msghdr(kmsg, &msg, save_addr); if (err) return err; err = import_iovec(save_addr ? ITER_DEST : ITER_SOURCE, compat_ptr(msg.msg_iov), msg.msg_iovlen, UIO_FASTIOV, iov, &kmsg->msg_iter); return err < 0 ? err : 0; } /* Bleech... */ #define CMSG_COMPAT_ALIGN(len) ALIGN((len), sizeof(s32)) #define CMSG_COMPAT_DATA(cmsg) \ ((void __user *)((char __user *)(cmsg) + sizeof(struct compat_cmsghdr))) #define CMSG_COMPAT_SPACE(len) \ (sizeof(struct compat_cmsghdr) + CMSG_COMPAT_ALIGN(len)) #define CMSG_COMPAT_LEN(len) \ (sizeof(struct compat_cmsghdr) + (len)) #define CMSG_COMPAT_FIRSTHDR(msg) \ (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \ (struct compat_cmsghdr __user *)((msg)->msg_control_user) : \ (struct compat_cmsghdr __user *)NULL) #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \ ((ucmlen) >= sizeof(struct compat_cmsghdr) && \ (ucmlen) <= (unsigned long) \ ((mhdr)->msg_controllen - \ ((char __user *)(ucmsg) - (char __user *)(mhdr)->msg_control_user))) static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg, struct compat_cmsghdr __user *cmsg, int cmsg_len) { char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len); if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control_user) > msg->msg_controllen) return NULL; return (struct compat_cmsghdr __user *)ptr; } /* There is a lot of hair here because the alignment rules (and * thus placement) of cmsg headers and length are different for * 32-bit apps. 
-DaveM */ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk, unsigned char *stackbuf, int stackbuf_size) { struct compat_cmsghdr __user *ucmsg; struct cmsghdr *kcmsg, *kcmsg_base; compat_size_t ucmlen; __kernel_size_t kcmlen, tmp; int err = -EFAULT; BUILD_BUG_ON(sizeof(struct compat_cmsghdr) != CMSG_COMPAT_ALIGN(sizeof(struct compat_cmsghdr))); kcmlen = 0; kcmsg_base = kcmsg = (struct cmsghdr *)stackbuf; ucmsg = CMSG_COMPAT_FIRSTHDR(kmsg); while (ucmsg != NULL) { if (get_user(ucmlen, &ucmsg->cmsg_len)) return -EFAULT; /* Catch bogons. */ if (!CMSG_COMPAT_OK(ucmlen, ucmsg, kmsg)) return -EINVAL; tmp = ((ucmlen - sizeof(*ucmsg)) + sizeof(struct cmsghdr)); tmp = CMSG_ALIGN(tmp); kcmlen += tmp; ucmsg = cmsg_compat_nxthdr(kmsg, ucmsg, ucmlen); } if (kcmlen == 0) return -EINVAL; /* The kcmlen holds the 64-bit version of the control length. * It may not be modified as we do not stick it into the kmsg * until we have successfully copied over all of the data * from the user. */ if (kcmlen > stackbuf_size) kcmsg_base = kcmsg = sock_kmalloc(sk, kcmlen, GFP_KERNEL); if (kcmsg == NULL) return -ENOMEM; /* Now copy them over neatly. */ memset(kcmsg, 0, kcmlen); ucmsg = CMSG_COMPAT_FIRSTHDR(kmsg); while (ucmsg != NULL) { struct compat_cmsghdr cmsg; if (copy_from_user(&cmsg, ucmsg, sizeof(cmsg))) goto Efault; if (!CMSG_COMPAT_OK(cmsg.cmsg_len, ucmsg, kmsg)) goto Einval; tmp = ((cmsg.cmsg_len - sizeof(*ucmsg)) + sizeof(struct cmsghdr)); if ((char *)kcmsg_base + kcmlen - (char *)kcmsg < CMSG_ALIGN(tmp)) goto Einval; kcmsg->cmsg_len = tmp; kcmsg->cmsg_level = cmsg.cmsg_level; kcmsg->cmsg_type = cmsg.cmsg_type; tmp = CMSG_ALIGN(tmp); if (copy_from_user(CMSG_DATA(kcmsg), CMSG_COMPAT_DATA(ucmsg), (cmsg.cmsg_len - sizeof(*ucmsg)))) goto Efault; /* Advance. */ kcmsg = (struct cmsghdr *)((char *)kcmsg + tmp); ucmsg = cmsg_compat_nxthdr(kmsg, ucmsg, cmsg.cmsg_len); } /* * check the length of messages copied in is the same as the * what we get from the first loop */ if ((char *)kcmsg - (char *)kcmsg_base != kcmlen) goto Einval; /* Ok, looks like we made it. Hook it up and return success. */ kmsg->msg_control_is_user = false; kmsg->msg_control = kcmsg_base; kmsg->msg_controllen = kcmlen; return 0; Einval: err = -EINVAL; Efault: if (kcmsg_base != (struct cmsghdr *)stackbuf) sock_kfree_s(sk, kcmsg_base, kcmlen); return err; } int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data) { struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control_user; struct compat_cmsghdr cmhdr; struct old_timeval32 ctv; struct old_timespec32 cts[3]; int cmlen; if (cm == NULL || kmsg->msg_controllen < sizeof(*cm)) { kmsg->msg_flags |= MSG_CTRUNC; return 0; /* XXX: return error? check spec. */ } if (!COMPAT_USE_64BIT_TIME) { if (level == SOL_SOCKET && type == SO_TIMESTAMP_OLD) { struct __kernel_old_timeval *tv = (struct __kernel_old_timeval *)data; ctv.tv_sec = tv->tv_sec; ctv.tv_usec = tv->tv_usec; data = &ctv; len = sizeof(ctv); } if (level == SOL_SOCKET && (type == SO_TIMESTAMPNS_OLD || type == SO_TIMESTAMPING_OLD)) { int count = type == SO_TIMESTAMPNS_OLD ? 
1 : 3; int i; struct __kernel_old_timespec *ts = data; for (i = 0; i < count; i++) { cts[i].tv_sec = ts[i].tv_sec; cts[i].tv_nsec = ts[i].tv_nsec; } data = &cts; len = sizeof(cts[0]) * count; } } cmlen = CMSG_COMPAT_LEN(len); if (kmsg->msg_controllen < cmlen) { kmsg->msg_flags |= MSG_CTRUNC; cmlen = kmsg->msg_controllen; } cmhdr.cmsg_level = level; cmhdr.cmsg_type = type; cmhdr.cmsg_len = cmlen; if (copy_to_user(cm, &cmhdr, sizeof cmhdr)) return -EFAULT; if (copy_to_user(CMSG_COMPAT_DATA(cm), data, cmlen - sizeof(struct compat_cmsghdr))) return -EFAULT; cmlen = CMSG_COMPAT_SPACE(len); if (kmsg->msg_controllen < cmlen) cmlen = kmsg->msg_controllen; kmsg->msg_control_user += cmlen; kmsg->msg_controllen -= cmlen; return 0; } static int scm_max_fds_compat(struct msghdr *msg) { if (msg->msg_controllen <= sizeof(struct compat_cmsghdr)) return 0; return (msg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int); } void scm_detach_fds_compat(struct msghdr *msg, struct scm_cookie *scm) { struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *)msg->msg_control_user; unsigned int o_flags = (msg->msg_flags & MSG_CMSG_CLOEXEC) ? O_CLOEXEC : 0; int fdmax = min_t(int, scm_max_fds_compat(msg), scm->fp->count); int __user *cmsg_data = CMSG_COMPAT_DATA(cm); int err = 0, i; for (i = 0; i < fdmax; i++) { err = scm_recv_one_fd(scm->fp->fp[i], cmsg_data + i, o_flags); if (err < 0) break; } if (i > 0) { int cmlen = CMSG_COMPAT_LEN(i * sizeof(int)); err = put_user(SOL_SOCKET, &cm->cmsg_level); if (!err) err = put_user(SCM_RIGHTS, &cm->cmsg_type); if (!err) err = put_user(cmlen, &cm->cmsg_len); if (!err) { cmlen = CMSG_COMPAT_SPACE(i * sizeof(int)); if (msg->msg_controllen < cmlen) cmlen = msg->msg_controllen; msg->msg_control_user += cmlen; msg->msg_controllen -= cmlen; } } if (i < scm->fp->count || (scm->fp->count && fdmax <= 0)) msg->msg_flags |= MSG_CTRUNC; /* * All of the files that fit in the message have had their usage counts * incremented, so we just free the list. 
*/ __scm_destroy(scm); } /* Argument list sizes for compat_sys_socketcall */ #define AL(x) ((x) * sizeof(u32)) static unsigned char nas[21] = { AL(0), AL(3), AL(3), AL(3), AL(2), AL(3), AL(3), AL(3), AL(4), AL(4), AL(4), AL(6), AL(6), AL(2), AL(5), AL(5), AL(3), AL(3), AL(4), AL(5), AL(4) }; #undef AL static inline long __compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags) { return __sys_sendmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT, false); } COMPAT_SYSCALL_DEFINE3(sendmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags) { return __compat_sys_sendmsg(fd, msg, flags); } static inline long __compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags) { return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, flags | MSG_CMSG_COMPAT, false); } COMPAT_SYSCALL_DEFINE4(sendmmsg, int, fd, struct compat_mmsghdr __user *, mmsg, unsigned int, vlen, unsigned int, flags) { return __compat_sys_sendmmsg(fd, mmsg, vlen, flags); } static inline long __compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags) { return __sys_recvmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT, false); } COMPAT_SYSCALL_DEFINE3(recvmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags) { return __compat_sys_recvmsg(fd, msg, flags); } static inline long __compat_sys_recvfrom(int fd, void __user *buf, compat_size_t len, unsigned int flags, struct sockaddr __user *addr, int __user *addrlen) { return __sys_recvfrom(fd, buf, len, flags | MSG_CMSG_COMPAT, addr, addrlen); } COMPAT_SYSCALL_DEFINE4(recv, int, fd, void __user *, buf, compat_size_t, len, unsigned int, flags) { return __compat_sys_recvfrom(fd, buf, len, flags, NULL, NULL); } COMPAT_SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, buf, compat_size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int __user *, addrlen) { return __compat_sys_recvfrom(fd, buf, len, flags, addr, addrlen); } COMPAT_SYSCALL_DEFINE5(recvmmsg_time64, int, fd, struct compat_mmsghdr __user *, mmsg, unsigned int, vlen, unsigned int, flags, struct __kernel_timespec __user *, timeout) { return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, flags | MSG_CMSG_COMPAT, timeout, NULL); } #ifdef CONFIG_COMPAT_32BIT_TIME COMPAT_SYSCALL_DEFINE5(recvmmsg_time32, int, fd, struct compat_mmsghdr __user *, mmsg, unsigned int, vlen, unsigned int, flags, struct old_timespec32 __user *, timeout) { return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, flags | MSG_CMSG_COMPAT, NULL, timeout); } #endif COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args) { u32 a[AUDITSC_ARGS]; unsigned int len; u32 a0, a1; int ret; if (call < SYS_SOCKET || call > SYS_SENDMMSG) return -EINVAL; len = nas[call]; if (len > sizeof(a)) return -EINVAL; if (copy_from_user(a, args, len)) return -EFAULT; ret = audit_socketcall_compat(len / sizeof(a[0]), a); if (ret) return ret; a0 = a[0]; a1 = a[1]; switch (call) { case SYS_SOCKET: ret = __sys_socket(a0, a1, a[2]); break; case SYS_BIND: ret = __sys_bind(a0, compat_ptr(a1), a[2]); break; case SYS_CONNECT: ret = __sys_connect(a0, compat_ptr(a1), a[2]); break; case SYS_LISTEN: ret = __sys_listen(a0, a1); break; case SYS_ACCEPT: ret = __sys_accept4(a0, compat_ptr(a1), compat_ptr(a[2]), 0); break; case SYS_GETSOCKNAME: ret = __sys_getsockname(a0, compat_ptr(a1), compat_ptr(a[2]), 0); break; case SYS_GETPEERNAME: ret = __sys_getsockname(a0, compat_ptr(a1), compat_ptr(a[2]), 1); break; 
case SYS_SOCKETPAIR: ret = __sys_socketpair(a0, a1, a[2], compat_ptr(a[3])); break; case SYS_SEND: ret = __sys_sendto(a0, compat_ptr(a1), a[2], a[3], NULL, 0); break; case SYS_SENDTO: ret = __sys_sendto(a0, compat_ptr(a1), a[2], a[3], compat_ptr(a[4]), a[5]); break; case SYS_RECV: ret = __compat_sys_recvfrom(a0, compat_ptr(a1), a[2], a[3], NULL, NULL); break; case SYS_RECVFROM: ret = __compat_sys_recvfrom(a0, compat_ptr(a1), a[2], a[3], compat_ptr(a[4]), compat_ptr(a[5])); break; case SYS_SHUTDOWN: ret = __sys_shutdown(a0, a1); break; case SYS_SETSOCKOPT: ret = __sys_setsockopt(a0, a1, a[2], compat_ptr(a[3]), a[4]); break; case SYS_GETSOCKOPT: ret = __sys_getsockopt(a0, a1, a[2], compat_ptr(a[3]), compat_ptr(a[4])); break; case SYS_SENDMSG: ret = __compat_sys_sendmsg(a0, compat_ptr(a1), a[2]); break; case SYS_SENDMMSG: ret = __compat_sys_sendmmsg(a0, compat_ptr(a1), a[2], a[3]); break; case SYS_RECVMSG: ret = __compat_sys_recvmsg(a0, compat_ptr(a1), a[2]); break; case SYS_RECVMMSG: ret = __sys_recvmmsg(a0, compat_ptr(a1), a[2], a[3] | MSG_CMSG_COMPAT, NULL, compat_ptr(a[4])); break; case SYS_ACCEPT4: ret = __sys_accept4(a0, compat_ptr(a1), compat_ptr(a[2]), a[3]); break; default: ret = -EINVAL; break; } return ret; }
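cmsghdr_from_user_compat_to_kern() above exists because a 32-bit process lays out control messages with a 12-byte header aligned to sizeof(s32), while the native 64-bit kernel expects a 16-byte header aligned to sizeof(long), so the user buffer must be repacked rather than reinterpreted in place. The standalone, userspace-style sketch below only illustrates that size difference for a small payload; the struct layouts assume a typical LP64 kernel with ILP32 compat userspace and are not the kernel's actual definitions.

#include <stdio.h>
#include <stdint.h>

struct compat_cmsghdr_example {	/* shape of what 32-bit userspace writes */
	uint32_t cmsg_len;
	int32_t  cmsg_level;
	int32_t  cmsg_type;
};

struct native_cmsghdr_example {	/* shape of what the 64-bit kernel consumes */
	uint64_t cmsg_len;
	int32_t  cmsg_level;
	int32_t  cmsg_type;
};

#define ALIGN_TO(len, a)	(((len) + (a) - 1) & ~((size_t)(a) - 1))
#define COMPAT_SPACE(len)	(sizeof(struct compat_cmsghdr_example) + ALIGN_TO(len, 4))
#define NATIVE_SPACE(len)	(sizeof(struct native_cmsghdr_example) + ALIGN_TO(len, 8))

int main(void)
{
	size_t payload = sizeof(int);	/* e.g. one SCM_RIGHTS file descriptor */

	/* Prints 16 vs 24 bytes on the assumed layouts: same payload, but
	 * different header size and alignment, hence the repacking loop above.
	 */
	printf("compat space for %zu-byte payload: %zu bytes\n",
	       payload, COMPAT_SPACE(payload));
	printf("native space for %zu-byte payload: %zu bytes\n",
	       payload, NATIVE_SPACE(payload));
	return 0;
}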
// SPDX-License-Identifier: GPL-2.0-only /* * drivers/acpi/device_sysfs.c - ACPI device sysfs attributes and modalias. * * Copyright (C) 2015, Intel Corp. * Author: Mika Westerberg <mika.westerberg@linux.intel.com> * Author: Rafael J.
Wysocki <rafael.j.wysocki@intel.com> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/acpi.h> #include <linux/device.h> #include <linux/export.h> #include <linux/nls.h> #include "internal.h" static ssize_t acpi_object_path(acpi_handle handle, char *buf) { struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL}; int result; result = acpi_get_name(handle, ACPI_FULL_PATHNAME, &path); if (result) return result; result = sprintf(buf, "%s\n", (char *)path.pointer); kfree(path.pointer); return result; } struct acpi_data_node_attr { struct attribute attr; ssize_t (*show)(struct acpi_data_node *, char *); ssize_t (*store)(struct acpi_data_node *, const char *, size_t count); }; #define DATA_NODE_ATTR(_name) \ static struct acpi_data_node_attr data_node_##_name = \ __ATTR(_name, 0444, data_node_show_##_name, NULL) static ssize_t data_node_show_path(struct acpi_data_node *dn, char *buf) { return dn->handle ? acpi_object_path(dn->handle, buf) : 0; } DATA_NODE_ATTR(path); static struct attribute *acpi_data_node_default_attrs[] = { &data_node_path.attr, NULL }; ATTRIBUTE_GROUPS(acpi_data_node_default); #define to_data_node(k) container_of(k, struct acpi_data_node, kobj) #define to_attr(a) container_of(a, struct acpi_data_node_attr, attr) static ssize_t acpi_data_node_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct acpi_data_node *dn = to_data_node(kobj); struct acpi_data_node_attr *dn_attr = to_attr(attr); return dn_attr->show ? dn_attr->show(dn, buf) : -ENXIO; } static const struct sysfs_ops acpi_data_node_sysfs_ops = { .show = acpi_data_node_attr_show, }; static void acpi_data_node_release(struct kobject *kobj) { struct acpi_data_node *dn = to_data_node(kobj); complete(&dn->kobj_done); } static const struct kobj_type acpi_data_node_ktype = { .sysfs_ops = &acpi_data_node_sysfs_ops, .default_groups = acpi_data_node_default_groups, .release = acpi_data_node_release, }; static void acpi_expose_nondev_subnodes(struct kobject *kobj, struct acpi_device_data *data) { struct list_head *list = &data->subnodes; struct acpi_data_node *dn; if (list_empty(list)) return; list_for_each_entry(dn, list, sibling) { int ret; init_completion(&dn->kobj_done); ret = kobject_init_and_add(&dn->kobj, &acpi_data_node_ktype, kobj, "%s", dn->name); if (!ret) acpi_expose_nondev_subnodes(&dn->kobj, &dn->data); else if (dn->handle) acpi_handle_err(dn->handle, "Failed to expose (%d)\n", ret); } } static void acpi_hide_nondev_subnodes(struct acpi_device_data *data) { struct list_head *list = &data->subnodes; struct acpi_data_node *dn; if (list_empty(list)) return; list_for_each_entry_reverse(dn, list, sibling) { acpi_hide_nondev_subnodes(&dn->data); kobject_put(&dn->kobj); } } /** * create_pnp_modalias - Create hid/cid(s) string for modalias and uevent * @acpi_dev: ACPI device object. * @modalias: Buffer to print into. * @size: Size of the buffer. * * Creates hid/cid(s) string needed for modalias and uevent * e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get: * char *modalias: "acpi:IBM0001:ACPI0001" * Return: 0: no _HID and no _CID * -EINVAL: output error * -ENOMEM: output is truncated */ static int create_pnp_modalias(const struct acpi_device *acpi_dev, char *modalias, int size) { int len; int count; struct acpi_hardware_id *id; /* Avoid unnecessarily loading modules for non present devices. 
*/ if (!acpi_device_is_present(acpi_dev)) return 0; /* * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the * device's list. */ count = 0; list_for_each_entry(id, &acpi_dev->pnp.ids, list) if (strcmp(id->id, ACPI_DT_NAMESPACE_HID)) count++; if (!count) return 0; len = snprintf(modalias, size, "acpi:"); if (len >= size) return -ENOMEM; size -= len; list_for_each_entry(id, &acpi_dev->pnp.ids, list) { if (!strcmp(id->id, ACPI_DT_NAMESPACE_HID)) continue; count = snprintf(&modalias[len], size, "%s:", id->id); if (count >= size) return -ENOMEM; len += count; size -= count; } return len; } /** * create_of_modalias - Creates DT compatible string for modalias and uevent * @acpi_dev: ACPI device object. * @modalias: Buffer to print into. * @size: Size of the buffer. * * Expose DT compatible modalias as of:NnameTCcompatible. This function should * only be called for devices having ACPI_DT_NAMESPACE_HID in their list of * ACPI/PNP IDs. */ static int create_of_modalias(const struct acpi_device *acpi_dev, char *modalias, int size) { struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; const union acpi_object *of_compatible, *obj; acpi_status status; int len, count; int i, nval; char *c; status = acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf); if (ACPI_FAILURE(status)) return -ENODEV; /* DT strings are all in lower case */ for (c = buf.pointer; *c != '\0'; c++) *c = tolower(*c); len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer); ACPI_FREE(buf.pointer); if (len >= size) return -ENOMEM; size -= len; of_compatible = acpi_dev->data.of_compatible; if (of_compatible->type == ACPI_TYPE_PACKAGE) { nval = of_compatible->package.count; obj = of_compatible->package.elements; } else { /* Must be ACPI_TYPE_STRING. */ nval = 1; obj = of_compatible; } for (i = 0; i < nval; i++, obj++) { count = snprintf(&modalias[len], size, "C%s", obj->string.pointer); if (count >= size) return -ENOMEM; len += count; size -= count; } return len; } int __acpi_device_uevent_modalias(const struct acpi_device *adev, struct kobj_uevent_env *env) { int len; if (!adev) return -ENODEV; if (list_empty(&adev->pnp.ids)) return 0; if (add_uevent_var(env, "MODALIAS=")) return -ENOMEM; if (adev->data.of_compatible) len = create_of_modalias(adev, &env->buf[env->buflen - 1], sizeof(env->buf) - env->buflen); else len = create_pnp_modalias(adev, &env->buf[env->buflen - 1], sizeof(env->buf) - env->buflen); if (len < 0) return len; env->buflen += len; return 0; } /** * acpi_device_uevent_modalias - uevent modalias for ACPI-enumerated devices. * @dev: Struct device to get ACPI device node. * @env: Environment variables of the kobject uevent. * * Create the uevent modalias field for ACPI-enumerated devices. * * Because other buses do not support ACPI HIDs & CIDs, e.g. for a device with * hid:IBM0001 and cid:ACPI0001 you get: "acpi:IBM0001:ACPI0001". 
*/ int acpi_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env) { return __acpi_device_uevent_modalias(acpi_companion_match(dev), env); } EXPORT_SYMBOL_GPL(acpi_device_uevent_modalias); static int __acpi_device_modalias(const struct acpi_device *adev, char *buf, int size) { int len, count; if (!adev) return -ENODEV; if (list_empty(&adev->pnp.ids)) return 0; len = create_pnp_modalias(adev, buf, size - 1); if (len < 0) { return len; } else if (len > 0) { buf[len++] = '\n'; size -= len; } if (!adev->data.of_compatible) return len; count = create_of_modalias(adev, buf + len, size - 1); if (count < 0) { return count; } else if (count > 0) { len += count; buf[len++] = '\n'; } return len; } /** * acpi_device_modalias - modalias sysfs attribute for ACPI-enumerated devices. * @dev: Struct device to get ACPI device node. * @buf: The buffer to save pnp_modalias and of_modalias. * @size: Size of buffer. * * Create the modalias sysfs attribute for ACPI-enumerated devices. * * Because other buses do not support ACPI HIDs & CIDs, e.g. for a device with * hid:IBM0001 and cid:ACPI0001 you get: "acpi:IBM0001:ACPI0001". */ int acpi_device_modalias(struct device *dev, char *buf, int size) { return __acpi_device_modalias(acpi_companion_match(dev), buf, size); } EXPORT_SYMBOL_GPL(acpi_device_modalias); static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { return __acpi_device_modalias(to_acpi_device(dev), buf, 1024); } static DEVICE_ATTR_RO(modalias); static ssize_t real_power_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *adev = to_acpi_device(dev); int state; int ret; ret = acpi_device_get_power(adev, &state); if (ret) return ret; return sprintf(buf, "%s\n", acpi_power_state_string(state)); } static DEVICE_ATTR_RO(real_power_state); static ssize_t power_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *adev = to_acpi_device(dev); return sprintf(buf, "%s\n", acpi_power_state_string(adev->power.state)); } static DEVICE_ATTR_RO(power_state); static ssize_t eject_store(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct acpi_device *acpi_device = to_acpi_device(d); acpi_object_type not_used; acpi_status status; if (!count || buf[0] != '1') return -EINVAL; if ((!acpi_device->handler || !acpi_device->handler->hotplug.enabled) && !d->driver) return -ENODEV; status = acpi_get_type(acpi_device->handle, &not_used); if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable) return -ENODEV; acpi_dev_get(acpi_device); status = acpi_hotplug_schedule(acpi_device, ACPI_OST_EC_OSPM_EJECT); if (ACPI_SUCCESS(status)) return count; acpi_dev_put(acpi_device); acpi_evaluate_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT, ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL); return status == AE_NO_MEMORY ? 
-ENOMEM : -EAGAIN; } static DEVICE_ATTR_WO(eject); static ssize_t hid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev)); } static DEVICE_ATTR_RO(hid); static ssize_t uid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); return sprintf(buf, "%s\n", acpi_device_uid(acpi_dev)); } static DEVICE_ATTR_RO(uid); static ssize_t adr_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); if (acpi_dev->pnp.bus_address > U32_MAX) return sprintf(buf, "0x%016llx\n", acpi_dev->pnp.bus_address); else return sprintf(buf, "0x%08llx\n", acpi_dev->pnp.bus_address); } static DEVICE_ATTR_RO(adr); static ssize_t path_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); return acpi_object_path(acpi_dev->handle, buf); } static DEVICE_ATTR_RO(path); /* sysfs file that shows description text from the ACPI _STR method */ static ssize_t description_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; union acpi_object *str_obj; acpi_status status; int result; status = acpi_evaluate_object_typed(acpi_dev->handle, "_STR", NULL, &buffer, ACPI_TYPE_BUFFER); if (ACPI_FAILURE(status)) return -EIO; str_obj = buffer.pointer; /* * The _STR object contains a Unicode identifier for a device. * We need to convert to utf-8 so it can be displayed. */ result = utf16s_to_utf8s( (wchar_t *)str_obj->buffer.pointer, str_obj->buffer.length, UTF16_LITTLE_ENDIAN, buf, PAGE_SIZE - 1); buf[result++] = '\n'; ACPI_FREE(str_obj); return result; } static DEVICE_ATTR_RO(description); static ssize_t sun_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); acpi_status status; unsigned long long sun; status = acpi_evaluate_integer(acpi_dev->handle, "_SUN", NULL, &sun); if (ACPI_FAILURE(status)) return -EIO; return sprintf(buf, "%llu\n", sun); } static DEVICE_ATTR_RO(sun); static ssize_t hrv_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); acpi_status status; unsigned long long hrv; status = acpi_evaluate_integer(acpi_dev->handle, "_HRV", NULL, &hrv); if (ACPI_FAILURE(status)) return -EIO; return sprintf(buf, "%llu\n", hrv); } static DEVICE_ATTR_RO(hrv); static ssize_t status_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); acpi_status status; unsigned long long sta; status = acpi_evaluate_integer(acpi_dev->handle, "_STA", NULL, &sta); if (ACPI_FAILURE(status)) return -EIO; return sprintf(buf, "%llu\n", sta); } static DEVICE_ATTR_RO(status); static struct attribute *acpi_attrs[] = { &dev_attr_path.attr, &dev_attr_hid.attr, &dev_attr_modalias.attr, &dev_attr_description.attr, &dev_attr_adr.attr, &dev_attr_uid.attr, &dev_attr_sun.attr, &dev_attr_hrv.attr, &dev_attr_status.attr, &dev_attr_eject.attr, &dev_attr_power_state.attr, &dev_attr_real_power_state.attr, NULL }; static bool acpi_show_attr(struct acpi_device *dev, const struct device_attribute *attr) { /* * Devices gotten from FADT don't have a "path" attribute */ if (attr == &dev_attr_path) return dev->handle; if (attr == &dev_attr_hid || 
attr == &dev_attr_modalias) return !list_empty(&dev->pnp.ids); if (attr == &dev_attr_description) return acpi_has_method(dev->handle, "_STR"); if (attr == &dev_attr_adr) return dev->pnp.type.bus_address; if (attr == &dev_attr_uid) return acpi_device_uid(dev); if (attr == &dev_attr_sun) return acpi_has_method(dev->handle, "_SUN"); if (attr == &dev_attr_hrv) return acpi_has_method(dev->handle, "_HRV"); if (attr == &dev_attr_status) return acpi_has_method(dev->handle, "_STA"); /* * If device has _EJ0, 'eject' file is created that is used to trigger * hot-removal function from userland. */ if (attr == &dev_attr_eject) return acpi_has_method(dev->handle, "_EJ0"); if (attr == &dev_attr_power_state) return dev->flags.power_manageable; if (attr == &dev_attr_real_power_state) return dev->flags.power_manageable && dev->power.flags.power_resources; dev_warn_once(&dev->dev, "Unexpected attribute: %s\n", attr->attr.name); return false; } static umode_t acpi_attr_is_visible(struct kobject *kobj, struct attribute *attr, int attrno) { struct acpi_device *dev = to_acpi_device(kobj_to_dev(kobj)); if (acpi_show_attr(dev, container_of(attr, struct device_attribute, attr))) return attr->mode; else return 0; } static const struct attribute_group acpi_group = { .attrs = acpi_attrs, .is_visible = acpi_attr_is_visible, }; const struct attribute_group *acpi_groups[] = { &acpi_group, NULL }; /** * acpi_device_setup_files - Create sysfs attributes of an ACPI device. * @dev: ACPI device object. */ void acpi_device_setup_files(struct acpi_device *dev) { acpi_expose_nondev_subnodes(&dev->dev.kobj, &dev->data); } /** * acpi_device_remove_files - Remove sysfs attributes of an ACPI device. * @dev: ACPI device object. */ void acpi_device_remove_files(struct acpi_device *dev) { acpi_hide_nondev_subnodes(&dev->data); }
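The modalias code above emits either a PNP-style string ("acpi:" followed by each _HID/_CID and a trailing ':') or a DT-style "of:N...T...C..." string when ACPI_DT_NAMESPACE_HID is present. The userspace-style sketch below mirrors the PNP variant's string building and its truncation convention (an snprintf() result greater than or equal to the remaining space is treated as out of buffer, where the kernel code returns -ENOMEM); the IDs and the helper name are hypothetical.

#include <stdio.h>

static int build_pnp_modalias(const char *const *ids, int nids,
			      char *modalias, int size)
{
	int len, count, i;

	len = snprintf(modalias, size, "acpi:");
	if (len >= size)
		return -1;		/* create_pnp_modalias() returns -ENOMEM here */
	size -= len;

	for (i = 0; i < nids; i++) {
		count = snprintf(&modalias[len], size, "%s:", ids[i]);
		if (count >= size)
			return -1;
		len += count;
		size -= count;
	}
	return len;
}

int main(void)
{
	const char *ids[] = { "IBM0001", "ACPI0001" };	/* hypothetical _HID/_CID */
	char buf[64];

	if (build_pnp_modalias(ids, 2, buf, sizeof(buf)) > 0)
		printf("%s\n", buf);	/* prints: acpi:IBM0001:ACPI0001: */
	return 0;
}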
// SPDX-License-Identifier: GPL-2.0-only /* * Unified UUID/GUID definition * * Copyright (C) 2009, 2016 Intel Corp. * Huang Ying <ying.huang@intel.com> */ #include <linux/kernel.h> #include <linux/ctype.h> #include <linux/errno.h> #include <linux/export.h> #include <linux/uuid.h> #include <linux/random.h> const guid_t guid_null; EXPORT_SYMBOL(guid_null); const uuid_t uuid_null; EXPORT_SYMBOL(uuid_null); const u8 guid_index[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15}; const u8 uuid_index[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15}; /** * generate_random_uuid - generate a random UUID * @uuid: where to put the generated UUID * * Random UUID interface * * Used to create a Boot ID or a filesystem UUID/GUID, but can be * useful for other kernel drivers. */ void generate_random_uuid(unsigned char uuid[16]) { get_random_bytes(uuid, 16); /* Set UUID version to 4 --- truly random generation */ uuid[6] = (uuid[6] & 0x0F) | 0x40; /* Set the UUID variant to DCE */ uuid[8] = (uuid[8] & 0x3F) | 0x80; } EXPORT_SYMBOL(generate_random_uuid); void generate_random_guid(unsigned char guid[16]) { get_random_bytes(guid, 16); /* Set GUID version to 4 --- truly random generation */ guid[7] = (guid[7] & 0x0F) | 0x40; /* Set the GUID variant to DCE */ guid[8] = (guid[8] & 0x3F) | 0x80; } EXPORT_SYMBOL(generate_random_guid); static void __uuid_gen_common(__u8 b[16]) { get_random_bytes(b, 16); /* reversion 0b10 */ b[8] = (b[8] & 0x3F) | 0x80; } void guid_gen(guid_t *lu) { __uuid_gen_common(lu->b); /* version 4 : random generation */ lu->b[7] = (lu->b[7] & 0x0F) | 0x40; } EXPORT_SYMBOL_GPL(guid_gen); void uuid_gen(uuid_t *bu) { __uuid_gen_common(bu->b); /* version 4 : random generation */ bu->b[6] = (bu->b[6] & 0x0F) | 0x40; } EXPORT_SYMBOL_GPL(uuid_gen); /** * uuid_is_valid - checks if a UUID string is valid * @uuid: UUID string to check * * Description: * It checks if the UUID string is following the format: * xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx * * where x is a hex digit. * * Return: true if input is valid UUID string. */ bool uuid_is_valid(const char *uuid) { unsigned int i; for (i = 0; i < UUID_STRING_LEN; i++) { if (i == 8 || i == 13 || i == 18 || i == 23) { if (uuid[i] != '-') return false; } else if (!isxdigit(uuid[i])) { return false; } } return true; } EXPORT_SYMBOL(uuid_is_valid); static int __uuid_parse(const char *uuid, __u8 b[16], const u8 ei[16]) { static const u8 si[16] = {0,2,4,6,9,11,14,16,19,21,24,26,28,30,32,34}; unsigned int i; if (!uuid_is_valid(uuid)) return -EINVAL; for (i = 0; i < 16; i++) { int hi = hex_to_bin(uuid[si[i] + 0]); int lo = hex_to_bin(uuid[si[i] + 1]); b[ei[i]] = (hi << 4) | lo; } return 0; } int guid_parse(const char *uuid, guid_t *u) { return __uuid_parse(uuid, u->b, guid_index); } EXPORT_SYMBOL(guid_parse); int uuid_parse(const char *uuid, uuid_t *u) { return __uuid_parse(uuid, u->b, uuid_index); } EXPORT_SYMBOL(uuid_parse);
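guid_parse() and uuid_parse() above share __uuid_parse() and differ only in the destination index table: guid_index byte-swaps the first three fields (the little/mixed-endian GUID layout), while uuid_index stores the bytes in string order. The standalone, userspace-style sketch below replays that table-driven placement on a fixed string so the two byte orders can be compared; hex_to_bin() is reimplemented locally for illustration and the helper names are not from the kernel.

#include <stdio.h>
#include <ctype.h>

static const unsigned char guid_index[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15};
static const unsigned char uuid_index[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};

static int hex_to_bin(char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	c = (char)tolower(c);
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	return -1;
}

static void parse(const char *s, unsigned char b[16], const unsigned char ei[16])
{
	/* offsets of the 16 byte pairs in "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" */
	static const unsigned char si[16] = {0,2,4,6,9,11,14,16,19,21,24,26,28,30,32,34};
	int i;

	for (i = 0; i < 16; i++)
		b[ei[i]] = (unsigned char)((hex_to_bin(s[si[i]]) << 4) | hex_to_bin(s[si[i] + 1]));
}

int main(void)
{
	const char *s = "00112233-4455-6677-8899-aabbccddeeff";
	unsigned char g[16], u[16];
	int i;

	parse(s, g, guid_index);	/* GUID: first three fields byte-swapped */
	parse(s, u, uuid_index);	/* UUID: bytes kept in string order */

	printf("guid bytes:");
	for (i = 0; i < 16; i++)
		printf(" %02x", g[i]);
	printf("\nuuid bytes:");
	for (i = 0; i < 16; i++)
		printf(" %02x", u[i]);
	printf("\n");
	return 0;
}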
#undef TRACE_SYSTEM #define TRACE_SYSTEM neigh #if !defined(_TRACE_NEIGH_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_NEIGH_H #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/tracepoint.h> #include <net/neighbour.h> #define neigh_state_str(state) \ __print_symbolic(state, \ { NUD_INCOMPLETE, "incomplete" }, \ { NUD_REACHABLE, "reachable" }, \ { NUD_STALE, "stale" }, \ { NUD_DELAY, "delay" }, \ { NUD_PROBE, "probe" }, \ { NUD_FAILED, "failed" }, \ { NUD_NOARP, "noarp" }, \ { NUD_PERMANENT, "permanent"}) TRACE_EVENT(neigh_create, TP_PROTO(struct neigh_table *tbl, struct net_device *dev, const void *pkey, const struct neighbour *n, bool exempt_from_gc), TP_ARGS(tbl, dev, pkey, n, exempt_from_gc), TP_STRUCT__entry( __field(u32, family) __string(dev, dev ? dev->name : "NULL") __field(int, entries) __field(u8, created) __field(u8, gc_exempt) __array(u8, primary_key4, 4) __array(u8, primary_key6, 16) ), TP_fast_assign( __be32 *p32; __entry->family = tbl->family; __assign_str(dev); __entry->entries = atomic_read(&tbl->gc_entries); __entry->created = n != NULL; __entry->gc_exempt = exempt_from_gc; p32 = (__be32 *)__entry->primary_key4; if (tbl->family == AF_INET) *p32 = *(__be32 *)pkey; else *p32 = 0; #if IS_ENABLED(CONFIG_IPV6) if (tbl->family == AF_INET6) { struct in6_addr *pin6; pin6 = (struct in6_addr *)__entry->primary_key6; *pin6 = *(struct in6_addr *)pkey; } #endif ), TP_printk("family %d dev %s entries %d primary_key4 %pI4 primary_key6 %pI6c created %d gc_exempt %d", __entry->family, __get_str(dev), __entry->entries, __entry->primary_key4, __entry->primary_key6, __entry->created, __entry->gc_exempt) ); TRACE_EVENT(neigh_update, TP_PROTO(struct neighbour *n, const u8 *lladdr, u8 new, u32 flags, u32 nlmsg_pid), TP_ARGS(n, lladdr, new, flags, nlmsg_pid), TP_STRUCT__entry( __field(u32, family)