54 54 54 48 54 51 51 51 52 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 | // SPDX-License-Identifier: (GPL-2.0-only OR Apache-2.0) /* * Generic implementation of the BLAKE2b digest algorithm. Based on the BLAKE2b * reference implementation, but it has been heavily modified for use in the * kernel. The reference implementation was: * * Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under * the terms of the CC0, the OpenSSL Licence, or the Apache Public License * 2.0, at your option. The terms of these licenses can be found at: * * - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0 * - OpenSSL license : https://www.openssl.org/source/license.html * - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0 * * More information about BLAKE2 can be found at https://blake2.net. */ #include <asm/unaligned.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/bitops.h> #include <crypto/internal/blake2b.h> #include <crypto/internal/hash.h> static const u8 blake2b_sigma[12][16] = { { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }, { 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 }, { 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 }, { 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 }, { 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 }, { 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 }, { 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 }, { 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 }, { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 }, { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 } }; static void blake2b_increment_counter(struct blake2b_state *S, const u64 inc) { S->t[0] += inc; S->t[1] += (S->t[0] < inc); } #define G(r,i,a,b,c,d) \ do { \ a = a + b + m[blake2b_sigma[r][2*i+0]]; \ d = ror64(d ^ a, 32); \ c = c + d; \ b = ror64(b ^ c, 24); \ a = a + b + m[blake2b_sigma[r][2*i+1]]; \ d = ror64(d ^ a, 16); \ c = c + d; \ b = ror64(b ^ c, 63); \ } while (0) #define ROUND(r) \ do { \ G(r,0,v[ 0],v[ 4],v[ 8],v[12]); \ G(r,1,v[ 1],v[ 5],v[ 9],v[13]); \ G(r,2,v[ 2],v[ 6],v[10],v[14]); \ G(r,3,v[ 3],v[ 7],v[11],v[15]); \ G(r,4,v[ 0],v[ 5],v[10],v[15]); \ G(r,5,v[ 1],v[ 6],v[11],v[12]); \ G(r,6,v[ 2],v[ 7],v[ 8],v[13]); \ G(r,7,v[ 3],v[ 4],v[ 9],v[14]); \ } while (0) static void blake2b_compress_one_generic(struct blake2b_state *S, const u8 block[BLAKE2B_BLOCK_SIZE]) { u64 m[16]; u64 v[16]; size_t i; for (i = 0; i < 16; ++i) m[i] = get_unaligned_le64(block + i * sizeof(m[i])); for (i = 0; i < 8; ++i) v[i] = S->h[i]; v[ 8] = BLAKE2B_IV0; v[ 9] = BLAKE2B_IV1; v[10] = BLAKE2B_IV2; v[11] = BLAKE2B_IV3; v[12] = BLAKE2B_IV4 ^ S->t[0]; v[13] = BLAKE2B_IV5 ^ S->t[1]; v[14] = BLAKE2B_IV6 ^ S->f[0]; v[15] = BLAKE2B_IV7 ^ S->f[1]; ROUND(0); ROUND(1); ROUND(2); ROUND(3); ROUND(4); ROUND(5); 
ROUND(6); ROUND(7); ROUND(8); ROUND(9); ROUND(10); ROUND(11); #ifdef CONFIG_CC_IS_CLANG #pragma nounroll /* https://bugs.llvm.org/show_bug.cgi?id=45803 */ #endif for (i = 0; i < 8; ++i) S->h[i] = S->h[i] ^ v[i] ^ v[i + 8]; } #undef G #undef ROUND void blake2b_compress_generic(struct blake2b_state *state, const u8 *block, size_t nblocks, u32 inc) { do { blake2b_increment_counter(state, inc); blake2b_compress_one_generic(state, block); block += BLAKE2B_BLOCK_SIZE; } while (--nblocks); } EXPORT_SYMBOL(blake2b_compress_generic); static int crypto_blake2b_update_generic(struct shash_desc *desc, const u8 *in, unsigned int inlen) { return crypto_blake2b_update(desc, in, inlen, blake2b_compress_generic); } static int crypto_blake2b_final_generic(struct shash_desc *desc, u8 *out) { return crypto_blake2b_final(desc, out, blake2b_compress_generic); } #define BLAKE2B_ALG(name, driver_name, digest_size) \ { \ .base.cra_name = name, \ .base.cra_driver_name = driver_name, \ .base.cra_priority = 100, \ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, \ .base.cra_blocksize = BLAKE2B_BLOCK_SIZE, \ .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), \ .base.cra_module = THIS_MODULE, \ .digestsize = digest_size, \ .setkey = crypto_blake2b_setkey, \ .init = crypto_blake2b_init, \ .update = crypto_blake2b_update_generic, \ .final = crypto_blake2b_final_generic, \ .descsize = sizeof(struct blake2b_state), \ } static struct shash_alg blake2b_algs[] = { BLAKE2B_ALG("blake2b-160", "blake2b-160-generic", BLAKE2B_160_HASH_SIZE), BLAKE2B_ALG("blake2b-256", "blake2b-256-generic", BLAKE2B_256_HASH_SIZE), BLAKE2B_ALG("blake2b-384", "blake2b-384-generic", BLAKE2B_384_HASH_SIZE), BLAKE2B_ALG("blake2b-512", "blake2b-512-generic", BLAKE2B_512_HASH_SIZE), }; static int __init blake2b_mod_init(void) { return crypto_register_shashes(blake2b_algs, ARRAY_SIZE(blake2b_algs)); } static void __exit blake2b_mod_fini(void) { crypto_unregister_shashes(blake2b_algs, ARRAY_SIZE(blake2b_algs)); } subsys_initcall(blake2b_mod_init); module_exit(blake2b_mod_fini); MODULE_AUTHOR("David Sterba <kdave@kernel.org>"); MODULE_DESCRIPTION("BLAKE2b generic implementation"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CRYPTO("blake2b-160"); MODULE_ALIAS_CRYPTO("blake2b-160-generic"); MODULE_ALIAS_CRYPTO("blake2b-256"); MODULE_ALIAS_CRYPTO("blake2b-256-generic"); MODULE_ALIAS_CRYPTO("blake2b-384"); MODULE_ALIAS_CRYPTO("blake2b-384-generic"); MODULE_ALIAS_CRYPTO("blake2b-512"); MODULE_ALIAS_CRYPTO("blake2b-512-generic"); |
1 1 1 1 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 | // SPDX-License-Identifier: GPL-2.0+ /* * spcp8x5 USB to serial adaptor driver * * Copyright (C) 2010-2013 Johan Hovold (jhovold@gmail.com) * Copyright (C) 2006 Linxb (xubin.lin@worldplus.com.cn) * Copyright (C) 2006 S1 Corp. 
* * Original driver for 2.6.10 pl2303 driver by * Greg Kroah-Hartman (greg@kroah.com) * Changes for 2.6.20 by Harald Klein <hari@vt100.at> */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/usb.h> #include <linux/usb/serial.h> #define DRIVER_DESC "SPCP8x5 USB to serial adaptor driver" #define SPCP825_QUIRK_NO_UART_STATUS 0x01 #define SPCP825_QUIRK_NO_WORK_MODE 0x02 #define SPCP8x5_007_VID 0x04FC #define SPCP8x5_007_PID 0x0201 #define SPCP8x5_008_VID 0x04fc #define SPCP8x5_008_PID 0x0235 #define SPCP8x5_PHILIPS_VID 0x0471 #define SPCP8x5_PHILIPS_PID 0x081e #define SPCP8x5_INTERMATIC_VID 0x04FC #define SPCP8x5_INTERMATIC_PID 0x0204 #define SPCP8x5_835_VID 0x04fc #define SPCP8x5_835_PID 0x0231 static const struct usb_device_id id_table[] = { { USB_DEVICE(SPCP8x5_PHILIPS_VID , SPCP8x5_PHILIPS_PID)}, { USB_DEVICE(SPCP8x5_INTERMATIC_VID, SPCP8x5_INTERMATIC_PID)}, { USB_DEVICE(SPCP8x5_835_VID, SPCP8x5_835_PID)}, { USB_DEVICE(SPCP8x5_008_VID, SPCP8x5_008_PID)}, { USB_DEVICE(SPCP8x5_007_VID, SPCP8x5_007_PID), .driver_info = SPCP825_QUIRK_NO_UART_STATUS | SPCP825_QUIRK_NO_WORK_MODE }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table); struct spcp8x5_usb_ctrl_arg { u8 type; u8 cmd; u8 cmd_type; u16 value; u16 index; u16 length; }; /* spcp8x5 spec register define */ #define MCR_CONTROL_LINE_RTS 0x02 #define MCR_CONTROL_LINE_DTR 0x01 #define MCR_DTR 0x01 #define MCR_RTS 0x02 #define MSR_STATUS_LINE_DCD 0x80 #define MSR_STATUS_LINE_RI 0x40 #define MSR_STATUS_LINE_DSR 0x20 #define MSR_STATUS_LINE_CTS 0x10 /* verdor command here , we should define myself */ #define SET_DEFAULT 0x40 #define SET_DEFAULT_TYPE 0x20 #define SET_UART_FORMAT 0x40 #define SET_UART_FORMAT_TYPE 0x21 #define SET_UART_FORMAT_SIZE_5 0x00 #define SET_UART_FORMAT_SIZE_6 0x01 #define SET_UART_FORMAT_SIZE_7 0x02 #define SET_UART_FORMAT_SIZE_8 0x03 #define SET_UART_FORMAT_STOP_1 0x00 #define SET_UART_FORMAT_STOP_2 0x04 #define SET_UART_FORMAT_PAR_NONE 0x00 #define SET_UART_FORMAT_PAR_ODD 0x10 #define SET_UART_FORMAT_PAR_EVEN 0x30 #define SET_UART_FORMAT_PAR_MASK 0xD0 #define SET_UART_FORMAT_PAR_SPACE 0x90 #define GET_UART_STATUS_TYPE 0xc0 #define GET_UART_STATUS 0x22 #define GET_UART_STATUS_MSR 0x06 #define SET_UART_STATUS 0x40 #define SET_UART_STATUS_TYPE 0x23 #define SET_UART_STATUS_MCR 0x0004 #define SET_UART_STATUS_MCR_DTR 0x01 #define SET_UART_STATUS_MCR_RTS 0x02 #define SET_UART_STATUS_MCR_LOOP 0x10 #define SET_WORKING_MODE 0x40 #define SET_WORKING_MODE_TYPE 0x24 #define SET_WORKING_MODE_U2C 0x00 #define SET_WORKING_MODE_RS485 0x01 #define SET_WORKING_MODE_PDMA 0x02 #define SET_WORKING_MODE_SPP 0x03 #define SET_FLOWCTL_CHAR 0x40 #define SET_FLOWCTL_CHAR_TYPE 0x25 #define GET_VERSION 0xc0 #define GET_VERSION_TYPE 0x26 #define SET_REGISTER 0x40 #define SET_REGISTER_TYPE 0x27 #define GET_REGISTER 0xc0 #define GET_REGISTER_TYPE 0x28 #define SET_RAM 0x40 #define SET_RAM_TYPE 0x31 #define GET_RAM 0xc0 #define GET_RAM_TYPE 0x32 /* how come ??? 
*/ #define UART_STATE 0x08 #define UART_STATE_TRANSIENT_MASK 0x75 #define UART_DCD 0x01 #define UART_DSR 0x02 #define UART_BREAK_ERROR 0x04 #define UART_RING 0x08 #define UART_FRAME_ERROR 0x10 #define UART_PARITY_ERROR 0x20 #define UART_OVERRUN_ERROR 0x40 #define UART_CTS 0x80 struct spcp8x5_private { unsigned quirks; spinlock_t lock; u8 line_control; }; static int spcp8x5_probe(struct usb_serial *serial, const struct usb_device_id *id) { usb_set_serial_data(serial, (void *)id); return 0; } static int spcp8x5_port_probe(struct usb_serial_port *port) { const struct usb_device_id *id = usb_get_serial_data(port->serial); struct spcp8x5_private *priv; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; spin_lock_init(&priv->lock); priv->quirks = id->driver_info; usb_set_serial_port_data(port, priv); port->port.drain_delay = 256; return 0; } static void spcp8x5_port_remove(struct usb_serial_port *port) { struct spcp8x5_private *priv; priv = usb_get_serial_port_data(port); kfree(priv); } static int spcp8x5_set_ctrl_line(struct usb_serial_port *port, u8 mcr) { struct spcp8x5_private *priv = usb_get_serial_port_data(port); struct usb_device *dev = port->serial->dev; int retval; if (priv->quirks & SPCP825_QUIRK_NO_UART_STATUS) return -EPERM; retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), SET_UART_STATUS_TYPE, SET_UART_STATUS, mcr, 0x04, NULL, 0, 100); if (retval != 0) { dev_err(&port->dev, "failed to set control lines: %d\n", retval); } return retval; } static int spcp8x5_get_msr(struct usb_serial_port *port, u8 *status) { struct spcp8x5_private *priv = usb_get_serial_port_data(port); struct usb_device *dev = port->serial->dev; u8 *buf; int ret; if (priv->quirks & SPCP825_QUIRK_NO_UART_STATUS) return -EPERM; buf = kzalloc(1, GFP_KERNEL); if (!buf) return -ENOMEM; ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), GET_UART_STATUS, GET_UART_STATUS_TYPE, 0, GET_UART_STATUS_MSR, buf, 1, 100); if (ret < 1) { dev_err(&port->dev, "failed to get modem status: %d\n", ret); if (ret >= 0) ret = -EIO; goto out; } dev_dbg(&port->dev, "0xc0:0x22:0:6 %d - 0x02%x\n", ret, *buf); *status = *buf; ret = 0; out: kfree(buf); return ret; } static void spcp8x5_set_work_mode(struct usb_serial_port *port, u16 value, u16 index) { struct spcp8x5_private *priv = usb_get_serial_port_data(port); struct usb_device *dev = port->serial->dev; int ret; if (priv->quirks & SPCP825_QUIRK_NO_WORK_MODE) return; ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), SET_WORKING_MODE_TYPE, SET_WORKING_MODE, value, index, NULL, 0, 100); dev_dbg(&port->dev, "value = %#x , index = %#x\n", value, index); if (ret < 0) dev_err(&port->dev, "failed to set work mode: %d\n", ret); } static int spcp8x5_carrier_raised(struct usb_serial_port *port) { u8 msr; int ret; ret = spcp8x5_get_msr(port, &msr); if (ret || msr & MSR_STATUS_LINE_DCD) return 1; return 0; } static void spcp8x5_dtr_rts(struct usb_serial_port *port, int on) { struct spcp8x5_private *priv = usb_get_serial_port_data(port); unsigned long flags; u8 control; spin_lock_irqsave(&priv->lock, flags); if (on) priv->line_control = MCR_CONTROL_LINE_DTR | MCR_CONTROL_LINE_RTS; else priv->line_control &= ~ (MCR_CONTROL_LINE_DTR | MCR_CONTROL_LINE_RTS); control = priv->line_control; spin_unlock_irqrestore(&priv->lock, flags); spcp8x5_set_ctrl_line(port, control); } static void spcp8x5_init_termios(struct tty_struct *tty) { tty_encode_baud_rate(tty, 115200, 115200); } static void spcp8x5_set_termios(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios 
*old_termios) { struct usb_serial *serial = port->serial; struct spcp8x5_private *priv = usb_get_serial_port_data(port); unsigned long flags; unsigned int cflag = tty->termios.c_cflag; unsigned short uartdata; unsigned char buf[2] = {0, 0}; int baud; int i; u8 control; /* check that they really want us to change something */ if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios)) return; /* set DTR/RTS active */ spin_lock_irqsave(&priv->lock, flags); control = priv->line_control; if (old_termios && (old_termios->c_cflag & CBAUD) == B0) { priv->line_control |= MCR_DTR; if (!(old_termios->c_cflag & CRTSCTS)) priv->line_control |= MCR_RTS; } if (control != priv->line_control) { control = priv->line_control; spin_unlock_irqrestore(&priv->lock, flags); spcp8x5_set_ctrl_line(port, control); } else { spin_unlock_irqrestore(&priv->lock, flags); } /* Set Baud Rate */ baud = tty_get_baud_rate(tty); switch (baud) { case 300: buf[0] = 0x00; break; case 600: buf[0] = 0x01; break; case 1200: buf[0] = 0x02; break; case 2400: buf[0] = 0x03; break; case 4800: buf[0] = 0x04; break; case 9600: buf[0] = 0x05; break; case 19200: buf[0] = 0x07; break; case 38400: buf[0] = 0x09; break; case 57600: buf[0] = 0x0a; break; case 115200: buf[0] = 0x0b; break; case 230400: buf[0] = 0x0c; break; case 460800: buf[0] = 0x0d; break; case 921600: buf[0] = 0x0e; break; /* case 1200000: buf[0] = 0x0f; break; */ /* case 2400000: buf[0] = 0x10; break; */ case 3000000: buf[0] = 0x11; break; /* case 6000000: buf[0] = 0x12; break; */ case 0: case 1000000: buf[0] = 0x0b; break; default: dev_err(&port->dev, "unsupported baudrate, using 9600\n"); } /* Set Data Length : 00:5bit, 01:6bit, 10:7bit, 11:8bit */ switch (cflag & CSIZE) { case CS5: buf[1] |= SET_UART_FORMAT_SIZE_5; break; case CS6: buf[1] |= SET_UART_FORMAT_SIZE_6; break; case CS7: buf[1] |= SET_UART_FORMAT_SIZE_7; break; default: case CS8: buf[1] |= SET_UART_FORMAT_SIZE_8; break; } /* Set Stop bit2 : 0:1bit 1:2bit */ buf[1] |= (cflag & CSTOPB) ? SET_UART_FORMAT_STOP_2 : SET_UART_FORMAT_STOP_1; /* Set Parity bit3-4 01:Odd 11:Even */ if (cflag & PARENB) { buf[1] |= (cflag & PARODD) ? 
SET_UART_FORMAT_PAR_ODD : SET_UART_FORMAT_PAR_EVEN ; } else { buf[1] |= SET_UART_FORMAT_PAR_NONE; } uartdata = buf[0] | buf[1]<<8; i = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), SET_UART_FORMAT_TYPE, SET_UART_FORMAT, uartdata, 0, NULL, 0, 100); if (i < 0) dev_err(&port->dev, "Set UART format %#x failed (error = %d)\n", uartdata, i); dev_dbg(&port->dev, "0x21:0x40:0:0 %d\n", i); if (cflag & CRTSCTS) { /* enable hardware flow control */ spcp8x5_set_work_mode(port, 0x000a, SET_WORKING_MODE_U2C); } } static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct spcp8x5_private *priv = usb_get_serial_port_data(port); int ret; usb_clear_halt(serial->dev, port->write_urb->pipe); usb_clear_halt(serial->dev, port->read_urb->pipe); ret = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), 0x09, 0x00, 0x01, 0x00, NULL, 0x00, 100); if (ret) return ret; spcp8x5_set_ctrl_line(port, priv->line_control); if (tty) spcp8x5_set_termios(tty, port, NULL); return usb_serial_generic_open(tty, port); } static int spcp8x5_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; struct spcp8x5_private *priv = usb_get_serial_port_data(port); unsigned long flags; u8 control; spin_lock_irqsave(&priv->lock, flags); if (set & TIOCM_RTS) priv->line_control |= MCR_RTS; if (set & TIOCM_DTR) priv->line_control |= MCR_DTR; if (clear & TIOCM_RTS) priv->line_control &= ~MCR_RTS; if (clear & TIOCM_DTR) priv->line_control &= ~MCR_DTR; control = priv->line_control; spin_unlock_irqrestore(&priv->lock, flags); return spcp8x5_set_ctrl_line(port, control); } static int spcp8x5_tiocmget(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct spcp8x5_private *priv = usb_get_serial_port_data(port); unsigned long flags; unsigned int mcr; u8 status; unsigned int result; result = spcp8x5_get_msr(port, &status); if (result) return result; spin_lock_irqsave(&priv->lock, flags); mcr = priv->line_control; spin_unlock_irqrestore(&priv->lock, flags); result = ((mcr & MCR_DTR) ? TIOCM_DTR : 0) | ((mcr & MCR_RTS) ? TIOCM_RTS : 0) | ((status & MSR_STATUS_LINE_CTS) ? TIOCM_CTS : 0) | ((status & MSR_STATUS_LINE_DSR) ? TIOCM_DSR : 0) | ((status & MSR_STATUS_LINE_RI) ? TIOCM_RI : 0) | ((status & MSR_STATUS_LINE_DCD) ? TIOCM_CD : 0); return result; } static struct usb_serial_driver spcp8x5_device = { .driver = { .owner = THIS_MODULE, .name = "SPCP8x5", }, .id_table = id_table, .num_ports = 1, .num_bulk_in = 1, .num_bulk_out = 1, .open = spcp8x5_open, .dtr_rts = spcp8x5_dtr_rts, .carrier_raised = spcp8x5_carrier_raised, .set_termios = spcp8x5_set_termios, .init_termios = spcp8x5_init_termios, .tiocmget = spcp8x5_tiocmget, .tiocmset = spcp8x5_tiocmset, .probe = spcp8x5_probe, .port_probe = spcp8x5_port_probe, .port_remove = spcp8x5_port_remove, }; static struct usb_serial_driver * const serial_drivers[] = { &spcp8x5_device, NULL }; module_usb_serial_driver(serial_drivers, id_table); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); |
7 7 4 2 4 6 1 6 1 2 2 1 3 3 5 6 7 7 6 1 2 4 3 3 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 | // SPDX-License-Identifier: GPL-2.0-or-later /* * Glue Code for assembler optimized version of 3DES * * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> * * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by: * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> */ #include <crypto/algapi.h> #include <crypto/des.h> #include <crypto/internal/skcipher.h> #include <linux/crypto.h> #include <linux/init.h> #include <linux/module.h> #include <linux/types.h> struct des3_ede_x86_ctx { struct des3_ede_ctx enc; struct des3_ede_ctx dec; }; /* regular block cipher functions */ asmlinkage void des3_ede_x86_64_crypt_blk(const u32 *expkey, u8 *dst, const u8 *src); /* 3-way parallel cipher functions */ asmlinkage void des3_ede_x86_64_crypt_blk_3way(const u32 *expkey, u8 *dst, const u8 *src); static inline void des3_ede_enc_blk(struct des3_ede_x86_ctx *ctx, u8 *dst, const u8 *src) { u32 *enc_ctx = ctx->enc.expkey; des3_ede_x86_64_crypt_blk(enc_ctx, dst, src); } static inline void des3_ede_dec_blk(struct des3_ede_x86_ctx *ctx, u8 *dst, const u8 *src) { u32 *dec_ctx = ctx->dec.expkey; des3_ede_x86_64_crypt_blk(dec_ctx, dst, src); } static inline void des3_ede_enc_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst, const u8 *src) { u32 *enc_ctx = ctx->enc.expkey; des3_ede_x86_64_crypt_blk_3way(enc_ctx, dst, src); } static inline void des3_ede_dec_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst, const u8 *src) { u32 *dec_ctx = ctx->dec.expkey; des3_ede_x86_64_crypt_blk_3way(dec_ctx, dst, src); } static void des3_ede_x86_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { des3_ede_enc_blk(crypto_tfm_ctx(tfm), dst, src); } static void des3_ede_x86_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { des3_ede_dec_blk(crypto_tfm_ctx(tfm), dst, src); } static int ecb_crypt(struct skcipher_request *req, const u32 *expkey) { const unsigned int bsize = DES3_EDE_BLOCK_SIZE; struct skcipher_walk walk; unsigned int nbytes; int err; err = skcipher_walk_virt(&walk, req, false); 
while ((nbytes = walk.nbytes)) { u8 *wsrc = walk.src.virt.addr; u8 *wdst = walk.dst.virt.addr; /* Process four block batch */ if (nbytes >= bsize * 3) { do { des3_ede_x86_64_crypt_blk_3way(expkey, wdst, wsrc); wsrc += bsize * 3; wdst += bsize * 3; nbytes -= bsize * 3; } while (nbytes >= bsize * 3); if (nbytes < bsize) goto done; } /* Handle leftovers */ do { des3_ede_x86_64_crypt_blk(expkey, wdst, wsrc); wsrc += bsize; wdst += bsize; nbytes -= bsize; } while (nbytes >= bsize); done: err = skcipher_walk_done(&walk, nbytes); } return err; } static int ecb_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm); return ecb_crypt(req, ctx->enc.expkey); } static int ecb_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm); return ecb_crypt(req, ctx->dec.expkey); } static unsigned int __cbc_encrypt(struct des3_ede_x86_ctx *ctx, struct skcipher_walk *walk) { unsigned int bsize = DES3_EDE_BLOCK_SIZE; unsigned int nbytes = walk->nbytes; u64 *src = (u64 *)walk->src.virt.addr; u64 *dst = (u64 *)walk->dst.virt.addr; u64 *iv = (u64 *)walk->iv; do { *dst = *src ^ *iv; des3_ede_enc_blk(ctx, (u8 *)dst, (u8 *)dst); iv = dst; src += 1; dst += 1; nbytes -= bsize; } while (nbytes >= bsize); *(u64 *)walk->iv = *iv; return nbytes; } static int cbc_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; unsigned int nbytes; int err; err = skcipher_walk_virt(&walk, req, false); while ((nbytes = walk.nbytes)) { nbytes = __cbc_encrypt(ctx, &walk); err = skcipher_walk_done(&walk, nbytes); } return err; } static unsigned int __cbc_decrypt(struct des3_ede_x86_ctx *ctx, struct skcipher_walk *walk) { unsigned int bsize = DES3_EDE_BLOCK_SIZE; unsigned int nbytes = walk->nbytes; u64 *src = (u64 *)walk->src.virt.addr; u64 *dst = (u64 *)walk->dst.virt.addr; u64 ivs[3 - 1]; u64 last_iv; /* Start of the last block. 
*/ src += nbytes / bsize - 1; dst += nbytes / bsize - 1; last_iv = *src; /* Process four block batch */ if (nbytes >= bsize * 3) { do { nbytes -= bsize * 3 - bsize; src -= 3 - 1; dst -= 3 - 1; ivs[0] = src[0]; ivs[1] = src[1]; des3_ede_dec_blk_3way(ctx, (u8 *)dst, (u8 *)src); dst[1] ^= ivs[0]; dst[2] ^= ivs[1]; nbytes -= bsize; if (nbytes < bsize) goto done; *dst ^= *(src - 1); src -= 1; dst -= 1; } while (nbytes >= bsize * 3); } /* Handle leftovers */ for (;;) { des3_ede_dec_blk(ctx, (u8 *)dst, (u8 *)src); nbytes -= bsize; if (nbytes < bsize) break; *dst ^= *(src - 1); src -= 1; dst -= 1; } done: *dst ^= *(u64 *)walk->iv; *(u64 *)walk->iv = last_iv; return nbytes; } static int cbc_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; unsigned int nbytes; int err; err = skcipher_walk_virt(&walk, req, false); while ((nbytes = walk.nbytes)) { nbytes = __cbc_decrypt(ctx, &walk); err = skcipher_walk_done(&walk, nbytes); } return err; } static int des3_ede_x86_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { struct des3_ede_x86_ctx *ctx = crypto_tfm_ctx(tfm); u32 i, j, tmp; int err; err = des3_ede_expand_key(&ctx->enc, key, keylen); if (err == -ENOKEY) { if (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) err = -EINVAL; else err = 0; } if (err) { memset(ctx, 0, sizeof(*ctx)); return err; } /* Fix encryption context for this implementation and form decryption * context. */ j = DES3_EDE_EXPKEY_WORDS - 2; for (i = 0; i < DES3_EDE_EXPKEY_WORDS; i += 2, j -= 2) { tmp = ror32(ctx->enc.expkey[i + 1], 4); ctx->enc.expkey[i + 1] = tmp; ctx->dec.expkey[j + 0] = ctx->enc.expkey[i + 0]; ctx->dec.expkey[j + 1] = tmp; } return 0; } static int des3_ede_x86_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { return des3_ede_x86_setkey(&tfm->base, key, keylen); } static struct crypto_alg des3_ede_cipher = { .cra_name = "des3_ede", .cra_driver_name = "des3_ede-asm", .cra_priority = 200, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct des3_ede_x86_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = DES3_EDE_KEY_SIZE, .cia_max_keysize = DES3_EDE_KEY_SIZE, .cia_setkey = des3_ede_x86_setkey, .cia_encrypt = des3_ede_x86_encrypt, .cia_decrypt = des3_ede_x86_decrypt, } } }; static struct skcipher_alg des3_ede_skciphers[] = { { .base.cra_name = "ecb(des3_ede)", .base.cra_driver_name = "ecb-des3_ede-asm", .base.cra_priority = 300, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct des3_ede_x86_ctx), .base.cra_module = THIS_MODULE, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .setkey = des3_ede_x86_setkey_skcipher, .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, }, { .base.cra_name = "cbc(des3_ede)", .base.cra_driver_name = "cbc-des3_ede-asm", .base.cra_priority = 300, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct des3_ede_x86_ctx), .base.cra_module = THIS_MODULE, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE, .setkey = des3_ede_x86_setkey_skcipher, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, } }; static bool is_blacklisted_cpu(void) { if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) return false; if (boot_cpu_data.x86 == 0x0f) { /* * On Pentium 4, des3_ede-x86_64 is slower than generic C * 
implementation because use of 64bit rotates (which are really * slow on P4). Therefore blacklist P4s. */ return true; } return false; } static int force; module_param(force, int, 0); MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist"); static int __init des3_ede_x86_init(void) { int err; if (!force && is_blacklisted_cpu()) { pr_info("des3_ede-x86_64: performance on this CPU would be suboptimal: disabling des3_ede-x86_64.\n"); return -ENODEV; } err = crypto_register_alg(&des3_ede_cipher); if (err) return err; err = crypto_register_skciphers(des3_ede_skciphers, ARRAY_SIZE(des3_ede_skciphers)); if (err) crypto_unregister_alg(&des3_ede_cipher); return err; } static void __exit des3_ede_x86_fini(void) { crypto_unregister_alg(&des3_ede_cipher); crypto_unregister_skciphers(des3_ede_skciphers, ARRAY_SIZE(des3_ede_skciphers)); } module_init(des3_ede_x86_init); module_exit(des3_ede_x86_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Triple DES EDE Cipher Algorithm, asm optimized"); MODULE_ALIAS_CRYPTO("des3_ede"); MODULE_ALIAS_CRYPTO("des3_ede-asm"); MODULE_AUTHOR("Jussi Kivilinna <jussi.kivilinna@iki.fi>"); |
1548 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 | // SPDX-License-Identifier: GPL-2.0-only /* * "security" table * * This is for use by Mandatory Access Control (MAC) security models, * which need to be able to manage security policy in separate context * to DAC. * * Based on iptable_mangle.c * * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling * Copyright (C) 2000-2004 Netfilter Core Team <coreteam <at> netfilter.org> * Copyright (C) 2008 Red Hat, Inc., James Morris <jmorris <at> redhat.com> */ #include <linux/module.h> #include <linux/netfilter_ipv4/ip_tables.h> #include <linux/slab.h> #include <net/ip.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("James Morris <jmorris <at> redhat.com>"); MODULE_DESCRIPTION("iptables security table, for MAC rules"); #define SECURITY_VALID_HOOKS (1 << NF_INET_LOCAL_IN) | \ (1 << NF_INET_FORWARD) | \ (1 << NF_INET_LOCAL_OUT) static const struct xt_table security_table = { .name = "security", .valid_hooks = SECURITY_VALID_HOOKS, .me = THIS_MODULE, .af = NFPROTO_IPV4, .priority = NF_IP_PRI_SECURITY, }; static unsigned int iptable_security_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { return ipt_do_table(skb, state, priv); } static struct nf_hook_ops *sectbl_ops __read_mostly; static int iptable_security_table_init(struct net *net) { struct ipt_replace *repl; int ret; repl = ipt_alloc_initial_table(&security_table); if (repl == NULL) return -ENOMEM; ret = ipt_register_table(net, &security_table, repl, sectbl_ops); kfree(repl); return ret; } static void __net_exit iptable_security_net_pre_exit(struct net *net) { ipt_unregister_table_pre_exit(net, "security"); } static void __net_exit iptable_security_net_exit(struct net *net) { ipt_unregister_table_exit(net, "security"); } static struct pernet_operations iptable_security_net_ops = { .pre_exit = iptable_security_net_pre_exit, .exit = iptable_security_net_exit, }; static int __init iptable_security_init(void) { int ret = xt_register_template(&security_table, iptable_security_table_init); if (ret < 0) return ret; sectbl_ops = xt_hook_ops_alloc(&security_table, iptable_security_hook); if (IS_ERR(sectbl_ops)) { xt_unregister_template(&security_table); return PTR_ERR(sectbl_ops); } ret = register_pernet_subsys(&iptable_security_net_ops); if (ret < 0) { xt_unregister_template(&security_table); kfree(sectbl_ops); return ret; } return ret; } static void __exit iptable_security_fini(void) { unregister_pernet_subsys(&iptable_security_net_ops); kfree(sectbl_ops); xt_unregister_template(&security_table); } module_init(iptable_security_init); module_exit(iptable_security_fini); |
11 1 1 2 3 4 2 2 2 2 2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 | // SPDX-License-Identifier: GPL-2.0-or-later /* * mpls tunnels An implementation mpls tunnels using the light weight tunnel * infrastructure * * Authors: Roopa Prabhu, <roopa@cumulusnetworks.com> */ #include <linux/types.h> #include <linux/skbuff.h> #include <linux/net.h> #include <linux/module.h> #include <linux/mpls.h> #include <linux/vmalloc.h> #include <net/ip.h> #include <net/dst.h> #include <net/lwtunnel.h> #include <net/netevent.h> #include <net/netns/generic.h> #include <net/ip6_fib.h> #include <net/route.h> #include <net/mpls_iptunnel.h> #include <linux/mpls_iptunnel.h> #include "internal.h" static const struct nla_policy mpls_iptunnel_policy[MPLS_IPTUNNEL_MAX + 1] = { [MPLS_IPTUNNEL_DST] = { .len = sizeof(u32) }, [MPLS_IPTUNNEL_TTL] = { .type = NLA_U8 }, }; static unsigned int mpls_encap_size(struct mpls_iptunnel_encap *en) { /* The size of the layer 2.5 labels to be added for this route */ return en->labels * sizeof(struct mpls_shim_hdr); } static int mpls_xmit(struct sk_buff *skb) { struct mpls_iptunnel_encap *tun_encap_info; struct mpls_shim_hdr *hdr; struct net_device *out_dev; unsigned int hh_len; unsigned int new_header_size; unsigned int mtu; struct dst_entry *dst = skb_dst(skb); struct rtable *rt = NULL; struct rt6_info *rt6 = NULL; struct mpls_dev *out_mdev; struct net *net; int err = 0; bool bos; int i; unsigned int ttl; /* Find the output device */ out_dev = dst->dev; net = dev_net(out_dev); skb_orphan(skb); if (!mpls_output_possible(out_dev) || !dst->lwtstate || skb_warn_if_lro(skb)) goto drop; skb_forward_csum(skb); tun_encap_info = mpls_lwtunnel_encap(dst->lwtstate); /* Obtain the ttl using the following set of rules. 
* * LWT ttl propagation setting: * - disabled => use default TTL value from LWT * - enabled => use TTL value from IPv4/IPv6 header * - default => * Global ttl propagation setting: * - disabled => use default TTL value from global setting * - enabled => use TTL value from IPv4/IPv6 header */ if (dst->ops->family == AF_INET) { if (tun_encap_info->ttl_propagate == MPLS_TTL_PROP_DISABLED) ttl = tun_encap_info->default_ttl; else if (tun_encap_info->ttl_propagate == MPLS_TTL_PROP_DEFAULT && !net->mpls.ip_ttl_propagate) ttl = net->mpls.default_ttl; else ttl = ip_hdr(skb)->ttl; rt = (struct rtable *)dst; } else if (dst->ops->family == AF_INET6) { if (tun_encap_info->ttl_propagate == MPLS_TTL_PROP_DISABLED) ttl = tun_encap_info->default_ttl; else if (tun_encap_info->ttl_propagate == MPLS_TTL_PROP_DEFAULT && !net->mpls.ip_ttl_propagate) ttl = net->mpls.default_ttl; else ttl = ipv6_hdr(skb)->hop_limit; rt6 = (struct rt6_info *)dst; } else { goto drop; } /* Verify the destination can hold the packet */ new_header_size = mpls_encap_size(tun_encap_info); mtu = mpls_dev_mtu(out_dev); if (mpls_pkt_too_big(skb, mtu - new_header_size)) goto drop; hh_len = LL_RESERVED_SPACE(out_dev); if (!out_dev->header_ops) hh_len = 0; /* Ensure there is enough space for the headers in the skb */ if (skb_cow(skb, hh_len + new_header_size)) goto drop; skb_set_inner_protocol(skb, skb->protocol); skb_reset_inner_network_header(skb); skb_push(skb, new_header_size); skb_reset_network_header(skb); skb->dev = out_dev; skb->protocol = htons(ETH_P_MPLS_UC); /* Push the new labels */ hdr = mpls_hdr(skb); bos = true; for (i = tun_encap_info->labels - 1; i >= 0; i--) { hdr[i] = mpls_entry_encode(tun_encap_info->label[i], ttl, 0, bos); bos = false; } mpls_stats_inc_outucastpkts(out_dev, skb); if (rt) { if (rt->rt_gw_family == AF_INET6) err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt->rt_gw6, skb); else err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gw4, skb); } else if (rt6) { if (ipv6_addr_v4mapped(&rt6->rt6i_gateway)) { /* 6PE (RFC 4798) */ err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt6->rt6i_gateway.s6_addr32[3], skb); } else err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt6->rt6i_gateway, skb); } if (err) net_dbg_ratelimited("%s: packet transmission failed: %d\n", __func__, err); return LWTUNNEL_XMIT_DONE; drop: out_mdev = out_dev ? 
mpls_dev_get(out_dev) : NULL; if (out_mdev) MPLS_INC_STATS(out_mdev, tx_errors); kfree_skb(skb); return -EINVAL; } static int mpls_build_state(struct net *net, struct nlattr *nla, unsigned int family, const void *cfg, struct lwtunnel_state **ts, struct netlink_ext_ack *extack) { struct mpls_iptunnel_encap *tun_encap_info; struct nlattr *tb[MPLS_IPTUNNEL_MAX + 1]; struct lwtunnel_state *newts; u8 n_labels; int ret; ret = nla_parse_nested_deprecated(tb, MPLS_IPTUNNEL_MAX, nla, mpls_iptunnel_policy, extack); if (ret < 0) return ret; if (!tb[MPLS_IPTUNNEL_DST]) { NL_SET_ERR_MSG(extack, "MPLS_IPTUNNEL_DST attribute is missing"); return -EINVAL; } /* determine number of labels */ if (nla_get_labels(tb[MPLS_IPTUNNEL_DST], MAX_NEW_LABELS, &n_labels, NULL, extack)) return -EINVAL; newts = lwtunnel_state_alloc(struct_size(tun_encap_info, label, n_labels)); if (!newts) return -ENOMEM; tun_encap_info = mpls_lwtunnel_encap(newts); ret = nla_get_labels(tb[MPLS_IPTUNNEL_DST], n_labels, &tun_encap_info->labels, tun_encap_info->label, extack); if (ret) goto errout; tun_encap_info->ttl_propagate = MPLS_TTL_PROP_DEFAULT; if (tb[MPLS_IPTUNNEL_TTL]) { tun_encap_info->default_ttl = nla_get_u8(tb[MPLS_IPTUNNEL_TTL]); /* TTL 0 implies propagate from IP header */ tun_encap_info->ttl_propagate = tun_encap_info->default_ttl ? MPLS_TTL_PROP_DISABLED : MPLS_TTL_PROP_ENABLED; } newts->type = LWTUNNEL_ENCAP_MPLS; newts->flags |= LWTUNNEL_STATE_XMIT_REDIRECT; newts->headroom = mpls_encap_size(tun_encap_info); *ts = newts; return 0; errout: kfree(newts); *ts = NULL; return ret; } static int mpls_fill_encap_info(struct sk_buff *skb, struct lwtunnel_state *lwtstate) { struct mpls_iptunnel_encap *tun_encap_info; tun_encap_info = mpls_lwtunnel_encap(lwtstate); if (nla_put_labels(skb, MPLS_IPTUNNEL_DST, tun_encap_info->labels, tun_encap_info->label)) goto nla_put_failure; if (tun_encap_info->ttl_propagate != MPLS_TTL_PROP_DEFAULT && nla_put_u8(skb, MPLS_IPTUNNEL_TTL, tun_encap_info->default_ttl)) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; } static int mpls_encap_nlsize(struct lwtunnel_state *lwtstate) { struct mpls_iptunnel_encap *tun_encap_info; int nlsize; tun_encap_info = mpls_lwtunnel_encap(lwtstate); nlsize = nla_total_size(tun_encap_info->labels * 4); if (tun_encap_info->ttl_propagate != MPLS_TTL_PROP_DEFAULT) nlsize += nla_total_size(1); return nlsize; } static int mpls_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b) { struct mpls_iptunnel_encap *a_hdr = mpls_lwtunnel_encap(a); struct mpls_iptunnel_encap *b_hdr = mpls_lwtunnel_encap(b); int l; if (a_hdr->labels != b_hdr->labels || a_hdr->ttl_propagate != b_hdr->ttl_propagate || a_hdr->default_ttl != b_hdr->default_ttl) return 1; for (l = 0; l < a_hdr->labels; l++) if (a_hdr->label[l] != b_hdr->label[l]) return 1; return 0; } static const struct lwtunnel_encap_ops mpls_iptun_ops = { .build_state = mpls_build_state, .xmit = mpls_xmit, .fill_encap = mpls_fill_encap_info, .get_encap_size = mpls_encap_nlsize, .cmp_encap = mpls_encap_cmp, .owner = THIS_MODULE, }; static int __init mpls_iptunnel_init(void) { return lwtunnel_encap_add_ops(&mpls_iptun_ops, LWTUNNEL_ENCAP_MPLS); } module_init(mpls_iptunnel_init); static void __exit mpls_iptunnel_exit(void) { lwtunnel_encap_del_ops(&mpls_iptun_ops, LWTUNNEL_ENCAP_MPLS); } module_exit(mpls_iptunnel_exit); MODULE_ALIAS_RTNL_LWT(MPLS); MODULE_SOFTDEP("post: mpls_gso"); MODULE_DESCRIPTION("MultiProtocol Label Switching IP Tunnels"); MODULE_LICENSE("GPL v2"); |
12 1 3 1 9 5 1 16 1 2 13 14 3 2 2 7 11 78 76 58 11 4 21 22 22 28 2 2 2 32 32 32 31 74 73 54 27 74 74 74 67 9 73 1 68 6 68 6 74 27 26 25 1 23 4 53 1 66 5 70 1 64 8 66 5 3 68 62 7 56 5 4 1 4 1 5 3 2 3 3 3 5 2 1 3 1 2 1 5 2 6 5 10 7 3 3 3 1 2 19 19 1 1 4 14 14 1 1 13 11 3 2 28 62 16 83 28 55 3 2 78 78 19 57 76 120 121 95 37 28 20 7 7 1 7 7 20 15 7 1 58 58 1 4 54 54 44 18 43 1 45 46 40 11 10 33 1 23 10 1 9 27 26 10 17 1 3 1 14 2 9 3 3 3 3 3 29 1 28 9 7 2 27 69 19 19 4 4 2 3 1 4 5 10 5 7 12 12 11 2 2 2 2 2 10 10 10 10 10 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 
785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 
1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 | // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */ #include <linux/bpf.h> #include <linux/btf_ids.h> #include <linux/filter.h> #include <linux/errno.h> #include <linux/file.h> #include <linux/net.h> #include <linux/workqueue.h> #include <linux/skmsg.h> #include <linux/list.h> #include <linux/jhash.h> #include <linux/sock_diag.h> #include <net/udp.h> struct bpf_stab { struct bpf_map map; struct sock **sks; struct sk_psock_progs progs; raw_spinlock_t lock; }; #define SOCK_CREATE_FLAG_MASK \ (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY) static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, struct bpf_prog *old, u32 which); static struct sk_psock_progs *sock_map_progs(struct bpf_map *map); static struct bpf_map *sock_map_alloc(union bpf_attr *attr) { struct bpf_stab *stab; if (!capable(CAP_NET_ADMIN)) return ERR_PTR(-EPERM); if (attr->max_entries == 0 || attr->key_size != 4 || (attr->value_size != sizeof(u32) && attr->value_size != sizeof(u64)) || attr->map_flags & ~SOCK_CREATE_FLAG_MASK) return ERR_PTR(-EINVAL); stab = kzalloc(sizeof(*stab), GFP_USER | __GFP_ACCOUNT); if (!stab) return ERR_PTR(-ENOMEM); bpf_map_init_from_attr(&stab->map, attr); raw_spin_lock_init(&stab->lock); stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries * sizeof(struct sock *), stab->map.numa_node); if (!stab->sks) { kfree(stab); return ERR_PTR(-ENOMEM); } return &stab->map; } int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog) { u32 ufd = attr->target_fd; struct bpf_map *map; struct fd f; int ret; if (attr->attach_flags || attr->replace_bpf_fd) return -EINVAL; f = fdget(ufd); map = __bpf_map_get(f); if (IS_ERR(map)) return PTR_ERR(map); ret = sock_map_prog_update(map, prog, NULL, attr->attach_type); fdput(f); return ret; } int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype) { u32 ufd = attr->target_fd; struct bpf_prog *prog; struct bpf_map *map; struct fd f; int ret; if (attr->attach_flags || attr->replace_bpf_fd) return -EINVAL; f = fdget(ufd); map = __bpf_map_get(f); if (IS_ERR(map)) return PTR_ERR(map); prog = bpf_prog_get(attr->attach_bpf_fd); if (IS_ERR(prog)) { ret = PTR_ERR(prog); goto put_map; } if (prog->type != ptype) { ret = -EINVAL; goto put_prog; } ret = sock_map_prog_update(map, NULL, prog, attr->attach_type); put_prog: bpf_prog_put(prog); put_map: fdput(f); return ret; } static void sock_map_sk_acquire(struct sock *sk) __acquires(&sk->sk_lock.slock) { lock_sock(sk); rcu_read_lock(); } static void sock_map_sk_release(struct sock *sk) __releases(&sk->sk_lock.slock) { rcu_read_unlock(); release_sock(sk); } static void sock_map_add_link(struct sk_psock *psock, struct sk_psock_link *link, struct bpf_map *map, void *link_raw) { link->link_raw = link_raw; link->map = map; spin_lock_bh(&psock->link_lock); list_add_tail(&link->list, &psock->link); spin_unlock_bh(&psock->link_lock); } static void 
sock_map_del_link(struct sock *sk, struct sk_psock *psock, void *link_raw) { bool strp_stop = false, verdict_stop = false; struct sk_psock_link *link, *tmp; spin_lock_bh(&psock->link_lock); list_for_each_entry_safe(link, tmp, &psock->link, list) { if (link->link_raw == link_raw) { struct bpf_map *map = link->map; struct sk_psock_progs *progs = sock_map_progs(map); if (psock->saved_data_ready && progs->stream_parser) strp_stop = true; if (psock->saved_data_ready && progs->stream_verdict) verdict_stop = true; if (psock->saved_data_ready && progs->skb_verdict) verdict_stop = true; list_del(&link->list); sk_psock_free_link(link); break; } } spin_unlock_bh(&psock->link_lock); if (strp_stop || verdict_stop) { write_lock_bh(&sk->sk_callback_lock); if (strp_stop) sk_psock_stop_strp(sk, psock); if (verdict_stop) sk_psock_stop_verdict(sk, psock); if (psock->psock_update_sk_prot) psock->psock_update_sk_prot(sk, psock, false); write_unlock_bh(&sk->sk_callback_lock); } } static void sock_map_unref(struct sock *sk, void *link_raw) { struct sk_psock *psock = sk_psock(sk); if (likely(psock)) { sock_map_del_link(sk, psock, link_raw); sk_psock_put(sk, psock); } } static int sock_map_init_proto(struct sock *sk, struct sk_psock *psock) { if (!sk->sk_prot->psock_update_sk_prot) return -EINVAL; psock->psock_update_sk_prot = sk->sk_prot->psock_update_sk_prot; return sk->sk_prot->psock_update_sk_prot(sk, psock, false); } static struct sk_psock *sock_map_psock_get_checked(struct sock *sk) { struct sk_psock *psock; rcu_read_lock(); psock = sk_psock(sk); if (psock) { if (sk->sk_prot->close != sock_map_close) { psock = ERR_PTR(-EBUSY); goto out; } if (!refcount_inc_not_zero(&psock->refcnt)) psock = ERR_PTR(-EBUSY); } out: rcu_read_unlock(); return psock; } static int sock_map_link(struct bpf_map *map, struct sock *sk) { struct sk_psock_progs *progs = sock_map_progs(map); struct bpf_prog *stream_verdict = NULL; struct bpf_prog *stream_parser = NULL; struct bpf_prog *skb_verdict = NULL; struct bpf_prog *msg_parser = NULL; struct sk_psock *psock; int ret; stream_verdict = READ_ONCE(progs->stream_verdict); if (stream_verdict) { stream_verdict = bpf_prog_inc_not_zero(stream_verdict); if (IS_ERR(stream_verdict)) return PTR_ERR(stream_verdict); } stream_parser = READ_ONCE(progs->stream_parser); if (stream_parser) { stream_parser = bpf_prog_inc_not_zero(stream_parser); if (IS_ERR(stream_parser)) { ret = PTR_ERR(stream_parser); goto out_put_stream_verdict; } } msg_parser = READ_ONCE(progs->msg_parser); if (msg_parser) { msg_parser = bpf_prog_inc_not_zero(msg_parser); if (IS_ERR(msg_parser)) { ret = PTR_ERR(msg_parser); goto out_put_stream_parser; } } skb_verdict = READ_ONCE(progs->skb_verdict); if (skb_verdict) { skb_verdict = bpf_prog_inc_not_zero(skb_verdict); if (IS_ERR(skb_verdict)) { ret = PTR_ERR(skb_verdict); goto out_put_msg_parser; } } psock = sock_map_psock_get_checked(sk); if (IS_ERR(psock)) { ret = PTR_ERR(psock); goto out_progs; } if (psock) { if ((msg_parser && READ_ONCE(psock->progs.msg_parser)) || (stream_parser && READ_ONCE(psock->progs.stream_parser)) || (skb_verdict && READ_ONCE(psock->progs.skb_verdict)) || (skb_verdict && READ_ONCE(psock->progs.stream_verdict)) || (stream_verdict && READ_ONCE(psock->progs.skb_verdict)) || (stream_verdict && READ_ONCE(psock->progs.stream_verdict))) { sk_psock_put(sk, psock); ret = -EBUSY; goto out_progs; } } else { psock = sk_psock_init(sk, map->numa_node); if (IS_ERR(psock)) { ret = PTR_ERR(psock); goto out_progs; } } if (msg_parser) 
psock_set_prog(&psock->progs.msg_parser, msg_parser); if (stream_parser) psock_set_prog(&psock->progs.stream_parser, stream_parser); if (stream_verdict) psock_set_prog(&psock->progs.stream_verdict, stream_verdict); if (skb_verdict) psock_set_prog(&psock->progs.skb_verdict, skb_verdict); /* msg_* and stream_* programs references tracked in psock after this * point. Reference dec and cleanup will occur through psock destructor */ ret = sock_map_init_proto(sk, psock); if (ret < 0) { sk_psock_put(sk, psock); goto out; } write_lock_bh(&sk->sk_callback_lock); if (stream_parser && stream_verdict && !psock->saved_data_ready) { ret = sk_psock_init_strp(sk, psock); if (ret) { write_unlock_bh(&sk->sk_callback_lock); sk_psock_put(sk, psock); goto out; } sk_psock_start_strp(sk, psock); } else if (!stream_parser && stream_verdict && !psock->saved_data_ready) { sk_psock_start_verdict(sk,psock); } else if (!stream_verdict && skb_verdict && !psock->saved_data_ready) { sk_psock_start_verdict(sk, psock); } write_unlock_bh(&sk->sk_callback_lock); return 0; out_progs: if (skb_verdict) bpf_prog_put(skb_verdict); out_put_msg_parser: if (msg_parser) bpf_prog_put(msg_parser); out_put_stream_parser: if (stream_parser) bpf_prog_put(stream_parser); out_put_stream_verdict: if (stream_verdict) bpf_prog_put(stream_verdict); out: return ret; } static void sock_map_free(struct bpf_map *map) { struct bpf_stab *stab = container_of(map, struct bpf_stab, map); int i; /* After the sync no updates or deletes will be in-flight so it * is safe to walk map and remove entries without risking a race * in EEXIST update case. */ synchronize_rcu(); for (i = 0; i < stab->map.max_entries; i++) { struct sock **psk = &stab->sks[i]; struct sock *sk; sk = xchg(psk, NULL); if (sk) { sock_hold(sk); lock_sock(sk); rcu_read_lock(); sock_map_unref(sk, psk); rcu_read_unlock(); release_sock(sk); sock_put(sk); } } /* wait for psock readers accessing its map link */ synchronize_rcu(); bpf_map_area_free(stab->sks); kfree(stab); } static void sock_map_release_progs(struct bpf_map *map) { psock_progs_drop(&container_of(map, struct bpf_stab, map)->progs); } static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key) { struct bpf_stab *stab = container_of(map, struct bpf_stab, map); WARN_ON_ONCE(!rcu_read_lock_held()); if (unlikely(key >= map->max_entries)) return NULL; return READ_ONCE(stab->sks[key]); } static void *sock_map_lookup(struct bpf_map *map, void *key) { struct sock *sk; sk = __sock_map_lookup_elem(map, *(u32 *)key); if (!sk) return NULL; if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt)) return NULL; return sk; } static void *sock_map_lookup_sys(struct bpf_map *map, void *key) { struct sock *sk; if (map->value_size != sizeof(u64)) return ERR_PTR(-ENOSPC); sk = __sock_map_lookup_elem(map, *(u32 *)key); if (!sk) return ERR_PTR(-ENOENT); __sock_gen_cookie(sk); return &sk->sk_cookie; } static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test, struct sock **psk) { struct sock *sk; int err = 0; if (irqs_disabled()) return -EOPNOTSUPP; /* locks here are hardirq-unsafe */ raw_spin_lock_bh(&stab->lock); sk = *psk; if (!sk_test || sk_test == sk) sk = xchg(psk, NULL); if (likely(sk)) sock_map_unref(sk, psk); else err = -EINVAL; raw_spin_unlock_bh(&stab->lock); return err; } static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk, void *link_raw) { struct bpf_stab *stab = container_of(map, struct bpf_stab, map); __sock_map_delete(stab, sk, link_raw); } static int 
sock_map_delete_elem(struct bpf_map *map, void *key) { struct bpf_stab *stab = container_of(map, struct bpf_stab, map); u32 i = *(u32 *)key; struct sock **psk; if (unlikely(i >= map->max_entries)) return -EINVAL; psk = &stab->sks[i]; return __sock_map_delete(stab, NULL, psk); } static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next) { struct bpf_stab *stab = container_of(map, struct bpf_stab, map); u32 i = key ? *(u32 *)key : U32_MAX; u32 *key_next = next; if (i == stab->map.max_entries - 1) return -ENOENT; if (i >= stab->map.max_entries) *key_next = 0; else *key_next = i + 1; return 0; } static int sock_map_update_common(struct bpf_map *map, u32 idx, struct sock *sk, u64 flags) { struct bpf_stab *stab = container_of(map, struct bpf_stab, map); struct sk_psock_link *link; struct sk_psock *psock; struct sock *osk; int ret; WARN_ON_ONCE(!rcu_read_lock_held()); if (unlikely(flags > BPF_EXIST)) return -EINVAL; if (unlikely(idx >= map->max_entries)) return -E2BIG; link = sk_psock_init_link(); if (!link) return -ENOMEM; ret = sock_map_link(map, sk); if (ret < 0) goto out_free; psock = sk_psock(sk); WARN_ON_ONCE(!psock); raw_spin_lock_bh(&stab->lock); osk = stab->sks[idx]; if (osk && flags == BPF_NOEXIST) { ret = -EEXIST; goto out_unlock; } else if (!osk && flags == BPF_EXIST) { ret = -ENOENT; goto out_unlock; } sock_map_add_link(psock, link, map, &stab->sks[idx]); stab->sks[idx] = sk; if (osk) sock_map_unref(osk, &stab->sks[idx]); raw_spin_unlock_bh(&stab->lock); return 0; out_unlock: raw_spin_unlock_bh(&stab->lock); if (psock) sk_psock_put(sk, psock); out_free: sk_psock_free_link(link); return ret; } static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops) { return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB || ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB || ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB; } static bool sk_is_tcp(const struct sock *sk) { return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP; } static bool sock_map_redirect_allowed(const struct sock *sk) { if (sk_is_tcp(sk)) return sk->sk_state != TCP_LISTEN; else return sk->sk_state == TCP_ESTABLISHED; } static bool sock_map_sk_is_suitable(const struct sock *sk) { return !!sk->sk_prot->psock_update_sk_prot; } static bool sock_map_sk_state_allowed(const struct sock *sk) { if (sk_is_tcp(sk)) return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_LISTEN); return true; } static int sock_hash_update_common(struct bpf_map *map, void *key, struct sock *sk, u64 flags); int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags) { struct socket *sock; struct sock *sk; int ret; u64 ufd; if (map->value_size == sizeof(u64)) ufd = *(u64 *)value; else ufd = *(u32 *)value; if (ufd > S32_MAX) return -EINVAL; sock = sockfd_lookup(ufd, &ret); if (!sock) return ret; sk = sock->sk; if (!sk) { ret = -EINVAL; goto out; } if (!sock_map_sk_is_suitable(sk)) { ret = -EOPNOTSUPP; goto out; } sock_map_sk_acquire(sk); if (!sock_map_sk_state_allowed(sk)) ret = -EOPNOTSUPP; else if (map->map_type == BPF_MAP_TYPE_SOCKMAP) ret = sock_map_update_common(map, *(u32 *)key, sk, flags); else ret = sock_hash_update_common(map, key, sk, flags); sock_map_sk_release(sk); out: sockfd_put(sock); return ret; } static int sock_map_update_elem(struct bpf_map *map, void *key, void *value, u64 flags) { struct sock *sk = (struct sock *)value; int ret; if (unlikely(!sk || !sk_fullsock(sk))) return -EINVAL; if (!sock_map_sk_is_suitable(sk)) return -EOPNOTSUPP; local_bh_disable(); bh_lock_sock(sk); if 
(!sock_map_sk_state_allowed(sk)) ret = -EOPNOTSUPP; else if (map->map_type == BPF_MAP_TYPE_SOCKMAP) ret = sock_map_update_common(map, *(u32 *)key, sk, flags); else ret = sock_hash_update_common(map, key, sk, flags); bh_unlock_sock(sk); local_bh_enable(); return ret; } BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops, struct bpf_map *, map, void *, key, u64, flags) { WARN_ON_ONCE(!rcu_read_lock_held()); if (likely(sock_map_sk_is_suitable(sops->sk) && sock_map_op_okay(sops))) return sock_map_update_common(map, *(u32 *)key, sops->sk, flags); return -EOPNOTSUPP; } const struct bpf_func_proto bpf_sock_map_update_proto = { .func = bpf_sock_map_update, .gpl_only = false, .pkt_access = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_PTR_TO_MAP_KEY, .arg4_type = ARG_ANYTHING, }; BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb, struct bpf_map *, map, u32, key, u64, flags) { struct sock *sk; if (unlikely(flags & ~(BPF_F_INGRESS))) return SK_DROP; sk = __sock_map_lookup_elem(map, key); if (unlikely(!sk || !sock_map_redirect_allowed(sk))) return SK_DROP; skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS); return SK_PASS; } const struct bpf_func_proto bpf_sk_redirect_map_proto = { .func = bpf_sk_redirect_map, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_ANYTHING, }; BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg, struct bpf_map *, map, u32, key, u64, flags) { struct sock *sk; if (unlikely(flags & ~(BPF_F_INGRESS))) return SK_DROP; sk = __sock_map_lookup_elem(map, key); if (unlikely(!sk || !sock_map_redirect_allowed(sk))) return SK_DROP; if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk)) return SK_DROP; msg->flags = flags; msg->sk_redir = sk; return SK_PASS; } const struct bpf_func_proto bpf_msg_redirect_map_proto = { .func = bpf_msg_redirect_map, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_ANYTHING, }; struct sock_map_seq_info { struct bpf_map *map; struct sock *sk; u32 index; }; struct bpf_iter__sockmap { __bpf_md_ptr(struct bpf_iter_meta *, meta); __bpf_md_ptr(struct bpf_map *, map); __bpf_md_ptr(void *, key); __bpf_md_ptr(struct sock *, sk); }; DEFINE_BPF_ITER_FUNC(sockmap, struct bpf_iter_meta *meta, struct bpf_map *map, void *key, struct sock *sk) static void *sock_map_seq_lookup_elem(struct sock_map_seq_info *info) { if (unlikely(info->index >= info->map->max_entries)) return NULL; info->sk = __sock_map_lookup_elem(info->map, info->index); /* can't return sk directly, since that might be NULL */ return info; } static void *sock_map_seq_start(struct seq_file *seq, loff_t *pos) __acquires(rcu) { struct sock_map_seq_info *info = seq->private; if (*pos == 0) ++*pos; /* pairs with sock_map_seq_stop */ rcu_read_lock(); return sock_map_seq_lookup_elem(info); } static void *sock_map_seq_next(struct seq_file *seq, void *v, loff_t *pos) __must_hold(rcu) { struct sock_map_seq_info *info = seq->private; ++*pos; ++info->index; return sock_map_seq_lookup_elem(info); } static int sock_map_seq_show(struct seq_file *seq, void *v) __must_hold(rcu) { struct sock_map_seq_info *info = seq->private; struct bpf_iter__sockmap ctx = {}; struct bpf_iter_meta meta; struct bpf_prog *prog; meta.seq = seq; prog = bpf_iter_get_info(&meta, !v); if (!prog) return 0; ctx.meta = &meta; ctx.map = info->map; if (v) { ctx.key = &info->index; 
ctx.sk = info->sk; } return bpf_iter_run_prog(prog, &ctx); } static void sock_map_seq_stop(struct seq_file *seq, void *v) __releases(rcu) { if (!v) (void)sock_map_seq_show(seq, NULL); /* pairs with sock_map_seq_start */ rcu_read_unlock(); } static const struct seq_operations sock_map_seq_ops = { .start = sock_map_seq_start, .next = sock_map_seq_next, .stop = sock_map_seq_stop, .show = sock_map_seq_show, }; static int sock_map_init_seq_private(void *priv_data, struct bpf_iter_aux_info *aux) { struct sock_map_seq_info *info = priv_data; bpf_map_inc_with_uref(aux->map); info->map = aux->map; return 0; } static void sock_map_fini_seq_private(void *priv_data) { struct sock_map_seq_info *info = priv_data; bpf_map_put_with_uref(info->map); } static const struct bpf_iter_seq_info sock_map_iter_seq_info = { .seq_ops = &sock_map_seq_ops, .init_seq_private = sock_map_init_seq_private, .fini_seq_private = sock_map_fini_seq_private, .seq_priv_size = sizeof(struct sock_map_seq_info), }; static int sock_map_btf_id; const struct bpf_map_ops sock_map_ops = { .map_meta_equal = bpf_map_meta_equal, .map_alloc = sock_map_alloc, .map_free = sock_map_free, .map_get_next_key = sock_map_get_next_key, .map_lookup_elem_sys_only = sock_map_lookup_sys, .map_update_elem = sock_map_update_elem, .map_delete_elem = sock_map_delete_elem, .map_lookup_elem = sock_map_lookup, .map_release_uref = sock_map_release_progs, .map_check_btf = map_check_no_btf, .map_btf_name = "bpf_stab", .map_btf_id = &sock_map_btf_id, .iter_seq_info = &sock_map_iter_seq_info, }; struct bpf_shtab_elem { struct rcu_head rcu; u32 hash; struct sock *sk; struct hlist_node node; u8 key[]; }; struct bpf_shtab_bucket { struct hlist_head head; raw_spinlock_t lock; }; struct bpf_shtab { struct bpf_map map; struct bpf_shtab_bucket *buckets; u32 buckets_num; u32 elem_size; struct sk_psock_progs progs; atomic_t count; }; static inline u32 sock_hash_bucket_hash(const void *key, u32 len) { return jhash(key, len, 0); } static struct bpf_shtab_bucket *sock_hash_select_bucket(struct bpf_shtab *htab, u32 hash) { return &htab->buckets[hash & (htab->buckets_num - 1)]; } static struct bpf_shtab_elem * sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key, u32 key_size) { struct bpf_shtab_elem *elem; hlist_for_each_entry_rcu(elem, head, node) { if (elem->hash == hash && !memcmp(&elem->key, key, key_size)) return elem; } return NULL; } static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key) { struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map); u32 key_size = map->key_size, hash; struct bpf_shtab_bucket *bucket; struct bpf_shtab_elem *elem; WARN_ON_ONCE(!rcu_read_lock_held()); hash = sock_hash_bucket_hash(key, key_size); bucket = sock_hash_select_bucket(htab, hash); elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size); return elem ? elem->sk : NULL; } static void sock_hash_free_elem(struct bpf_shtab *htab, struct bpf_shtab_elem *elem) { atomic_dec(&htab->count); kfree_rcu(elem, rcu); } static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk, void *link_raw) { struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map); struct bpf_shtab_elem *elem_probe, *elem = link_raw; struct bpf_shtab_bucket *bucket; WARN_ON_ONCE(!rcu_read_lock_held()); bucket = sock_hash_select_bucket(htab, elem->hash); /* elem may be deleted in parallel from the map, but access here * is okay since it's going away only after RCU grace period. * However, we need to check whether it's still present. 
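 * (that is what the re-lookup under the bucket lock below checks)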
*/ raw_spin_lock_bh(&bucket->lock); elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash, elem->key, map->key_size); if (elem_probe && elem_probe == elem) { hlist_del_rcu(&elem->node); sock_map_unref(elem->sk, elem); sock_hash_free_elem(htab, elem); } raw_spin_unlock_bh(&bucket->lock); } static int sock_hash_delete_elem(struct bpf_map *map, void *key) { struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map); u32 hash, key_size = map->key_size; struct bpf_shtab_bucket *bucket; struct bpf_shtab_elem *elem; int ret = -ENOENT; if (irqs_disabled()) return -EOPNOTSUPP; /* locks here are hardirq-unsafe */ hash = sock_hash_bucket_hash(key, key_size); bucket = sock_hash_select_bucket(htab, hash); raw_spin_lock_bh(&bucket->lock); elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size); if (elem) { hlist_del_rcu(&elem->node); sock_map_unref(elem->sk, elem); sock_hash_free_elem(htab, elem); ret = 0; } raw_spin_unlock_bh(&bucket->lock); return ret; } static struct bpf_shtab_elem *sock_hash_alloc_elem(struct bpf_shtab *htab, void *key, u32 key_size, u32 hash, struct sock *sk, struct bpf_shtab_elem *old) { struct bpf_shtab_elem *new; if (atomic_inc_return(&htab->count) > htab->map.max_entries) { if (!old) { atomic_dec(&htab->count); return ERR_PTR(-E2BIG); } } new = bpf_map_kmalloc_node(&htab->map, htab->elem_size, GFP_ATOMIC | __GFP_NOWARN, htab->map.numa_node); if (!new) { atomic_dec(&htab->count); return ERR_PTR(-ENOMEM); } memcpy(new->key, key, key_size); new->sk = sk; new->hash = hash; return new; } static int sock_hash_update_common(struct bpf_map *map, void *key, struct sock *sk, u64 flags) { struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map); u32 key_size = map->key_size, hash; struct bpf_shtab_elem *elem, *elem_new; struct bpf_shtab_bucket *bucket; struct sk_psock_link *link; struct sk_psock *psock; int ret; WARN_ON_ONCE(!rcu_read_lock_held()); if (unlikely(flags > BPF_EXIST)) return -EINVAL; link = sk_psock_init_link(); if (!link) return -ENOMEM; ret = sock_map_link(map, sk); if (ret < 0) goto out_free; psock = sk_psock(sk); WARN_ON_ONCE(!psock); hash = sock_hash_bucket_hash(key, key_size); bucket = sock_hash_select_bucket(htab, hash); raw_spin_lock_bh(&bucket->lock); elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size); if (elem && flags == BPF_NOEXIST) { ret = -EEXIST; goto out_unlock; } else if (!elem && flags == BPF_EXIST) { ret = -ENOENT; goto out_unlock; } elem_new = sock_hash_alloc_elem(htab, key, key_size, hash, sk, elem); if (IS_ERR(elem_new)) { ret = PTR_ERR(elem_new); goto out_unlock; } sock_map_add_link(psock, link, map, elem_new); /* Add new element to the head of the list, so that * concurrent search will find it before old elem. 
*/ hlist_add_head_rcu(&elem_new->node, &bucket->head); if (elem) { hlist_del_rcu(&elem->node); sock_map_unref(elem->sk, elem); sock_hash_free_elem(htab, elem); } raw_spin_unlock_bh(&bucket->lock); return 0; out_unlock: raw_spin_unlock_bh(&bucket->lock); sk_psock_put(sk, psock); out_free: sk_psock_free_link(link); return ret; } static int sock_hash_get_next_key(struct bpf_map *map, void *key, void *key_next) { struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map); struct bpf_shtab_elem *elem, *elem_next; u32 hash, key_size = map->key_size; struct hlist_head *head; int i = 0; if (!key) goto find_first_elem; hash = sock_hash_bucket_hash(key, key_size); head = &sock_hash_select_bucket(htab, hash)->head; elem = sock_hash_lookup_elem_raw(head, hash, key, key_size); if (!elem) goto find_first_elem; elem_next = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&elem->node)), struct bpf_shtab_elem, node); if (elem_next) { memcpy(key_next, elem_next->key, key_size); return 0; } i = hash & (htab->buckets_num - 1); i++; find_first_elem: for (; i < htab->buckets_num; i++) { head = &sock_hash_select_bucket(htab, i)->head; elem_next = hlist_entry_safe(rcu_dereference(hlist_first_rcu(head)), struct bpf_shtab_elem, node); if (elem_next) { memcpy(key_next, elem_next->key, key_size); return 0; } } return -ENOENT; } static struct bpf_map *sock_hash_alloc(union bpf_attr *attr) { struct bpf_shtab *htab; int i, err; if (!capable(CAP_NET_ADMIN)) return ERR_PTR(-EPERM); if (attr->max_entries == 0 || attr->key_size == 0 || (attr->value_size != sizeof(u32) && attr->value_size != sizeof(u64)) || attr->map_flags & ~SOCK_CREATE_FLAG_MASK) return ERR_PTR(-EINVAL); if (attr->key_size > MAX_BPF_STACK) return ERR_PTR(-E2BIG); htab = kzalloc(sizeof(*htab), GFP_USER | __GFP_ACCOUNT); if (!htab) return ERR_PTR(-ENOMEM); bpf_map_init_from_attr(&htab->map, attr); htab->buckets_num = roundup_pow_of_two(htab->map.max_entries); htab->elem_size = sizeof(struct bpf_shtab_elem) + round_up(htab->map.key_size, 8); if (htab->buckets_num == 0 || htab->buckets_num > U32_MAX / sizeof(struct bpf_shtab_bucket)) { err = -EINVAL; goto free_htab; } htab->buckets = bpf_map_area_alloc(htab->buckets_num * sizeof(struct bpf_shtab_bucket), htab->map.numa_node); if (!htab->buckets) { err = -ENOMEM; goto free_htab; } for (i = 0; i < htab->buckets_num; i++) { INIT_HLIST_HEAD(&htab->buckets[i].head); raw_spin_lock_init(&htab->buckets[i].lock); } return &htab->map; free_htab: kfree(htab); return ERR_PTR(err); } static void sock_hash_free(struct bpf_map *map) { struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map); struct bpf_shtab_bucket *bucket; struct hlist_head unlink_list; struct bpf_shtab_elem *elem; struct hlist_node *node; int i; /* After the sync no updates or deletes will be in-flight so it * is safe to walk map and remove entries without risking a race * in EEXIST update case. */ synchronize_rcu(); for (i = 0; i < htab->buckets_num; i++) { bucket = sock_hash_select_bucket(htab, i); /* We are racing with sock_hash_delete_from_link to * enter the spin-lock critical section. Every socket on * the list is still linked to sockhash. Since link * exists, psock exists and holds a ref to socket. That * lets us to grab a socket ref too. 
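 * (that extra reference is dropped again with sock_put() once the psock link has been removed below)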
*/ raw_spin_lock_bh(&bucket->lock); hlist_for_each_entry(elem, &bucket->head, node) sock_hold(elem->sk); hlist_move_list(&bucket->head, &unlink_list); raw_spin_unlock_bh(&bucket->lock); /* Process removed entries out of atomic context to * block for socket lock before deleting the psock's * link to sockhash. */ hlist_for_each_entry_safe(elem, node, &unlink_list, node) { hlist_del(&elem->node); lock_sock(elem->sk); rcu_read_lock(); sock_map_unref(elem->sk, elem); rcu_read_unlock(); release_sock(elem->sk); sock_put(elem->sk); sock_hash_free_elem(htab, elem); } cond_resched(); } /* wait for psock readers accessing its map link */ synchronize_rcu(); bpf_map_area_free(htab->buckets); kfree(htab); } static void *sock_hash_lookup_sys(struct bpf_map *map, void *key) { struct sock *sk; if (map->value_size != sizeof(u64)) return ERR_PTR(-ENOSPC); sk = __sock_hash_lookup_elem(map, key); if (!sk) return ERR_PTR(-ENOENT); __sock_gen_cookie(sk); return &sk->sk_cookie; } static void *sock_hash_lookup(struct bpf_map *map, void *key) { struct sock *sk; sk = __sock_hash_lookup_elem(map, key); if (!sk) return NULL; if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt)) return NULL; return sk; } static void sock_hash_release_progs(struct bpf_map *map) { psock_progs_drop(&container_of(map, struct bpf_shtab, map)->progs); } BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops, struct bpf_map *, map, void *, key, u64, flags) { WARN_ON_ONCE(!rcu_read_lock_held()); if (likely(sock_map_sk_is_suitable(sops->sk) && sock_map_op_okay(sops))) return sock_hash_update_common(map, key, sops->sk, flags); return -EOPNOTSUPP; } const struct bpf_func_proto bpf_sock_hash_update_proto = { .func = bpf_sock_hash_update, .gpl_only = false, .pkt_access = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_PTR_TO_MAP_KEY, .arg4_type = ARG_ANYTHING, }; BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb, struct bpf_map *, map, void *, key, u64, flags) { struct sock *sk; if (unlikely(flags & ~(BPF_F_INGRESS))) return SK_DROP; sk = __sock_hash_lookup_elem(map, key); if (unlikely(!sk || !sock_map_redirect_allowed(sk))) return SK_DROP; skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS); return SK_PASS; } const struct bpf_func_proto bpf_sk_redirect_hash_proto = { .func = bpf_sk_redirect_hash, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_PTR_TO_MAP_KEY, .arg4_type = ARG_ANYTHING, }; BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg, struct bpf_map *, map, void *, key, u64, flags) { struct sock *sk; if (unlikely(flags & ~(BPF_F_INGRESS))) return SK_DROP; sk = __sock_hash_lookup_elem(map, key); if (unlikely(!sk || !sock_map_redirect_allowed(sk))) return SK_DROP; if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk)) return SK_DROP; msg->flags = flags; msg->sk_redir = sk; return SK_PASS; } const struct bpf_func_proto bpf_msg_redirect_hash_proto = { .func = bpf_msg_redirect_hash, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_PTR_TO_MAP_KEY, .arg4_type = ARG_ANYTHING, }; struct sock_hash_seq_info { struct bpf_map *map; struct bpf_shtab *htab; u32 bucket_id; }; static void *sock_hash_seq_find_next(struct sock_hash_seq_info *info, struct bpf_shtab_elem *prev_elem) { const struct bpf_shtab *htab = info->htab; struct bpf_shtab_bucket *bucket; struct bpf_shtab_elem *elem; struct hlist_node *node; /* try to find next 
elem in the same bucket */ if (prev_elem) { node = rcu_dereference(hlist_next_rcu(&prev_elem->node)); elem = hlist_entry_safe(node, struct bpf_shtab_elem, node); if (elem) return elem; /* no more elements, continue in the next bucket */ info->bucket_id++; } for (; info->bucket_id < htab->buckets_num; info->bucket_id++) { bucket = &htab->buckets[info->bucket_id]; node = rcu_dereference(hlist_first_rcu(&bucket->head)); elem = hlist_entry_safe(node, struct bpf_shtab_elem, node); if (elem) return elem; } return NULL; } static void *sock_hash_seq_start(struct seq_file *seq, loff_t *pos) __acquires(rcu) { struct sock_hash_seq_info *info = seq->private; if (*pos == 0) ++*pos; /* pairs with sock_hash_seq_stop */ rcu_read_lock(); return sock_hash_seq_find_next(info, NULL); } static void *sock_hash_seq_next(struct seq_file *seq, void *v, loff_t *pos) __must_hold(rcu) { struct sock_hash_seq_info *info = seq->private; ++*pos; return sock_hash_seq_find_next(info, v); } static int sock_hash_seq_show(struct seq_file *seq, void *v) __must_hold(rcu) { struct sock_hash_seq_info *info = seq->private; struct bpf_iter__sockmap ctx = {}; struct bpf_shtab_elem *elem = v; struct bpf_iter_meta meta; struct bpf_prog *prog; meta.seq = seq; prog = bpf_iter_get_info(&meta, !elem); if (!prog) return 0; ctx.meta = &meta; ctx.map = info->map; if (elem) { ctx.key = elem->key; ctx.sk = elem->sk; } return bpf_iter_run_prog(prog, &ctx); } static void sock_hash_seq_stop(struct seq_file *seq, void *v) __releases(rcu) { if (!v) (void)sock_hash_seq_show(seq, NULL); /* pairs with sock_hash_seq_start */ rcu_read_unlock(); } static const struct seq_operations sock_hash_seq_ops = { .start = sock_hash_seq_start, .next = sock_hash_seq_next, .stop = sock_hash_seq_stop, .show = sock_hash_seq_show, }; static int sock_hash_init_seq_private(void *priv_data, struct bpf_iter_aux_info *aux) { struct sock_hash_seq_info *info = priv_data; bpf_map_inc_with_uref(aux->map); info->map = aux->map; info->htab = container_of(aux->map, struct bpf_shtab, map); return 0; } static void sock_hash_fini_seq_private(void *priv_data) { struct sock_hash_seq_info *info = priv_data; bpf_map_put_with_uref(info->map); } static const struct bpf_iter_seq_info sock_hash_iter_seq_info = { .seq_ops = &sock_hash_seq_ops, .init_seq_private = sock_hash_init_seq_private, .fini_seq_private = sock_hash_fini_seq_private, .seq_priv_size = sizeof(struct sock_hash_seq_info), }; static int sock_hash_map_btf_id; const struct bpf_map_ops sock_hash_ops = { .map_meta_equal = bpf_map_meta_equal, .map_alloc = sock_hash_alloc, .map_free = sock_hash_free, .map_get_next_key = sock_hash_get_next_key, .map_update_elem = sock_map_update_elem, .map_delete_elem = sock_hash_delete_elem, .map_lookup_elem = sock_hash_lookup, .map_lookup_elem_sys_only = sock_hash_lookup_sys, .map_release_uref = sock_hash_release_progs, .map_check_btf = map_check_no_btf, .map_btf_name = "bpf_shtab", .map_btf_id = &sock_hash_map_btf_id, .iter_seq_info = &sock_hash_iter_seq_info, }; static struct sk_psock_progs *sock_map_progs(struct bpf_map *map) { switch (map->map_type) { case BPF_MAP_TYPE_SOCKMAP: return &container_of(map, struct bpf_stab, map)->progs; case BPF_MAP_TYPE_SOCKHASH: return &container_of(map, struct bpf_shtab, map)->progs; default: break; } return NULL; } static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, struct bpf_prog *old, u32 which) { struct sk_psock_progs *progs = sock_map_progs(map); struct bpf_prog **pprog; if (!progs) return -EOPNOTSUPP; switch (which) { case 
BPF_SK_MSG_VERDICT: pprog = &progs->msg_parser; break; #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER) case BPF_SK_SKB_STREAM_PARSER: pprog = &progs->stream_parser; break; #endif case BPF_SK_SKB_STREAM_VERDICT: if (progs->skb_verdict) return -EBUSY; pprog = &progs->stream_verdict; break; case BPF_SK_SKB_VERDICT: if (progs->stream_verdict) return -EBUSY; pprog = &progs->skb_verdict; break; default: return -EOPNOTSUPP; } if (old) return psock_replace_prog(pprog, prog, old); psock_set_prog(pprog, prog); return 0; } static void sock_map_unlink(struct sock *sk, struct sk_psock_link *link) { switch (link->map->map_type) { case BPF_MAP_TYPE_SOCKMAP: return sock_map_delete_from_link(link->map, sk, link->link_raw); case BPF_MAP_TYPE_SOCKHASH: return sock_hash_delete_from_link(link->map, sk, link->link_raw); default: break; } } static void sock_map_remove_links(struct sock *sk, struct sk_psock *psock) { struct sk_psock_link *link; while ((link = sk_psock_link_pop(psock))) { sock_map_unlink(sk, link); sk_psock_free_link(link); } } void sock_map_unhash(struct sock *sk) { void (*saved_unhash)(struct sock *sk); struct sk_psock *psock; rcu_read_lock(); psock = sk_psock(sk); if (unlikely(!psock)) { rcu_read_unlock(); saved_unhash = READ_ONCE(sk->sk_prot)->unhash; } else { saved_unhash = psock->saved_unhash; sock_map_remove_links(sk, psock); rcu_read_unlock(); } if (WARN_ON_ONCE(saved_unhash == sock_map_unhash)) return; if (saved_unhash) saved_unhash(sk); } EXPORT_SYMBOL_GPL(sock_map_unhash); void sock_map_destroy(struct sock *sk) { void (*saved_destroy)(struct sock *sk); struct sk_psock *psock; rcu_read_lock(); psock = sk_psock_get(sk); if (unlikely(!psock)) { rcu_read_unlock(); saved_destroy = READ_ONCE(sk->sk_prot)->destroy; } else { saved_destroy = psock->saved_destroy; sock_map_remove_links(sk, psock); rcu_read_unlock(); sk_psock_stop(psock); sk_psock_put(sk, psock); } if (WARN_ON_ONCE(saved_destroy == sock_map_destroy)) return; if (saved_destroy) saved_destroy(sk); } EXPORT_SYMBOL_GPL(sock_map_destroy); void sock_map_close(struct sock *sk, long timeout) { void (*saved_close)(struct sock *sk, long timeout); struct sk_psock *psock; lock_sock(sk); rcu_read_lock(); psock = sk_psock(sk); if (likely(psock)) { saved_close = psock->saved_close; sock_map_remove_links(sk, psock); psock = sk_psock_get(sk); if (unlikely(!psock)) goto no_psock; rcu_read_unlock(); sk_psock_stop(psock); release_sock(sk); cancel_delayed_work_sync(&psock->work); sk_psock_put(sk, psock); } else { saved_close = READ_ONCE(sk->sk_prot)->close; no_psock: rcu_read_unlock(); release_sock(sk); } /* Make sure we do not recurse. This is a bug. * Leak the socket instead of crashing on a stack overflow. 
*/ if (WARN_ON_ONCE(saved_close == sock_map_close)) return; saved_close(sk, timeout); } EXPORT_SYMBOL_GPL(sock_map_close); static int sock_map_iter_attach_target(struct bpf_prog *prog, union bpf_iter_link_info *linfo, struct bpf_iter_aux_info *aux) { struct bpf_map *map; int err = -EINVAL; if (!linfo->map.map_fd) return -EBADF; map = bpf_map_get_with_uref(linfo->map.map_fd); if (IS_ERR(map)) return PTR_ERR(map); if (map->map_type != BPF_MAP_TYPE_SOCKMAP && map->map_type != BPF_MAP_TYPE_SOCKHASH) goto put_map; if (prog->aux->max_rdonly_access > map->key_size) { err = -EACCES; goto put_map; } aux->map = map; return 0; put_map: bpf_map_put_with_uref(map); return err; } static void sock_map_iter_detach_target(struct bpf_iter_aux_info *aux) { bpf_map_put_with_uref(aux->map); } static struct bpf_iter_reg sock_map_iter_reg = { .target = "sockmap", .attach_target = sock_map_iter_attach_target, .detach_target = sock_map_iter_detach_target, .show_fdinfo = bpf_iter_map_show_fdinfo, .fill_link_info = bpf_iter_map_fill_link_info, .ctx_arg_info_size = 2, .ctx_arg_info = { { offsetof(struct bpf_iter__sockmap, key), PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY }, { offsetof(struct bpf_iter__sockmap, sk), PTR_TO_BTF_ID_OR_NULL }, }, }; static int __init bpf_sockmap_iter_init(void) { sock_map_iter_reg.ctx_arg_info[1].btf_id = btf_sock_ids[BTF_SOCK_TYPE_SOCK]; return bpf_iter_reg_target(&sock_map_iter_reg); } late_initcall(bpf_sockmap_iter_init); |
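/*
 * Illustrative sketch (not part of this file): a minimal BPF program of the
 * kind the sockmap code above expects at the BPF_SK_SKB_STREAM_VERDICT attach
 * point. Map, program and section names are hypothetical; it assumes the
 * usual libbpf conventions and uses the bpf_sk_redirect_map() helper
 * implemented above. Every skb arriving on a socket in the map is redirected
 * to the socket stored at index 0; SK_DROP is returned if that slot is empty.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_SOCKMAP);
        __uint(max_entries, 2);
        __type(key, __u32);
        __type(value, __u64);
} sock_map_example SEC(".maps");

SEC("sk_skb/stream_verdict")
int stream_verdict_prog(struct __sk_buff *skb)
{
        __u32 key = 0;

        /* SK_PASS when a socket is found at @key, SK_DROP otherwise */
        return bpf_sk_redirect_map(skb, &sock_map_example, key, 0);
}

char _license[] SEC("license") = "GPL";

/*
 * Sockets are inserted from user space with bpf_map_update_elem(map_fd, &key,
 * &sock_fd, BPF_ANY), which lands in sock_map_update_elem_sys() above.
 */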
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>

#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/inet6_hashtables.h>
#endif
#include <net/secure_seq.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>

u32 inet_ehashfn(const struct net *net, const __be32 laddr,
		 const __u16 lport, const __be32 faddr,
		 const __be16 fport)
{
	static u32 inet_ehash_secret __read_mostly;

	net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      inet_ehash_secret + net_hash_mix(net));
}
EXPORT_SYMBOL_GPL(inet_ehashfn);

/* This function handles inet_sock, but also timewait and request sockets
 * for IPv4/IPv6.
 */
static u32 sk_ehashfn(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6 &&
	    !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		return inet6_ehashfn(sock_net(sk),
				     &sk->sk_v6_rcv_saddr, sk->sk_num,
				     &sk->sk_v6_daddr, sk->sk_dport);
#endif
	return inet_ehashfn(sock_net(sk),
			    sk->sk_rcv_saddr, sk->sk_num,
			    sk->sk_daddr, sk->sk_dport);
}

/*
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
						 struct net *net,
						 struct inet_bind_hashbucket *head,
						 const unsigned short snum,
						 int l3mdev)
{
	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb) {
		write_pnet(&tb->ib_net, net);
		tb->l3mdev = l3mdev;
		tb->port = snum;
		tb->fastreuse = 0;
		tb->fastreuseport = 0;
		INIT_HLIST_HEAD(&tb->owners);
		hlist_add_head(&tb->node, &head->chain);
	}
	return tb;
}

/*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
{
	if (hlist_empty(&tb->owners)) {
		__hlist_del(&tb->node);
		kmem_cache_free(cachep, tb);
	}
}

void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    const unsigned short snum)
{
	inet_sk(sk)->inet_num = snum;
	sk_add_bind_node(sk, &tb->owners);
	inet_csk(sk)->icsk_bind_hash = tb;
}

/*
 * Get rid of any references to a local port held by the given sock.
*/ static void __inet_put_port(struct sock *sk) { struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->inet_num, hashinfo->bhash_size); struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash]; struct inet_bind_bucket *tb; spin_lock(&head->lock); tb = inet_csk(sk)->icsk_bind_hash; __sk_del_bind_node(sk); inet_csk(sk)->icsk_bind_hash = NULL; inet_sk(sk)->inet_num = 0; inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb); spin_unlock(&head->lock); } void inet_put_port(struct sock *sk) { local_bh_disable(); __inet_put_port(sk); local_bh_enable(); } EXPORT_SYMBOL(inet_put_port); int __inet_inherit_port(const struct sock *sk, struct sock *child) { struct inet_hashinfo *table = sk->sk_prot->h.hashinfo; unsigned short port = inet_sk(child)->inet_num; const int bhash = inet_bhashfn(sock_net(sk), port, table->bhash_size); struct inet_bind_hashbucket *head = &table->bhash[bhash]; struct inet_bind_bucket *tb; int l3mdev; spin_lock(&head->lock); tb = inet_csk(sk)->icsk_bind_hash; if (unlikely(!tb)) { spin_unlock(&head->lock); return -ENOENT; } if (tb->port != port) { l3mdev = inet_sk_bound_l3mdev(sk); /* NOTE: using tproxy and redirecting skbs to a proxy * on a different listener port breaks the assumption * that the listener socket's icsk_bind_hash is the same * as that of the child socket. We have to look up or * create a new bind bucket for the child here. */ inet_bind_bucket_for_each(tb, &head->chain) { if (net_eq(ib_net(tb), sock_net(sk)) && tb->l3mdev == l3mdev && tb->port == port) break; } if (!tb) { tb = inet_bind_bucket_create(table->bind_bucket_cachep, sock_net(sk), head, port, l3mdev); if (!tb) { spin_unlock(&head->lock); return -ENOMEM; } } inet_csk_update_fastreuse(tb, child); } inet_bind_hash(child, tb, port); spin_unlock(&head->lock); return 0; } EXPORT_SYMBOL_GPL(__inet_inherit_port); static struct inet_listen_hashbucket * inet_lhash2_bucket_sk(struct inet_hashinfo *h, struct sock *sk) { u32 hash; #if IS_ENABLED(CONFIG_IPV6) if (sk->sk_family == AF_INET6) hash = ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, inet_sk(sk)->inet_num); else #endif hash = ipv4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, inet_sk(sk)->inet_num); return inet_lhash2_bucket(h, hash); } static inline int compute_score(struct sock *sk, struct net *net, const unsigned short hnum, const __be32 daddr, const int dif, const int sdif) { int score = -1; if (net_eq(sock_net(sk), net) && sk->sk_num == hnum && !ipv6_only_sock(sk)) { if (sk->sk_rcv_saddr != daddr) return -1; if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif)) return -1; score = sk->sk_bound_dev_if ? 2 : 1; if (sk->sk_family == PF_INET) score++; if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id()) score++; } return score; } INDIRECT_CALLABLE_DECLARE(inet_ehashfn_t udp_ehashfn); struct sock *inet_lookup_reuseport(struct net *net, struct sock *sk, struct sk_buff *skb, int doff, __be32 saddr, __be16 sport, __be32 daddr, unsigned short hnum, inet_ehashfn_t *ehashfn) { struct sock *reuse_sk = NULL; u32 phash; if (sk->sk_reuseport) { phash = INDIRECT_CALL_2(ehashfn, udp_ehashfn, inet_ehashfn, net, daddr, hnum, saddr, sport); reuse_sk = reuseport_select_sock(sk, phash, skb, doff); } return reuse_sk; } EXPORT_SYMBOL_GPL(inet_lookup_reuseport); /* * Here are some nice properties to exploit here. The BSD API * does not allow a listening sock to specify the remote port nor the * remote address for the connection. 
So always assume those are both * wildcarded during the search since they can never be otherwise. */ /* called with rcu_read_lock() : No refcount taken on the socket */ static struct sock *inet_lhash2_lookup(struct net *net, struct inet_listen_hashbucket *ilb2, struct sk_buff *skb, int doff, const __be32 saddr, __be16 sport, const __be32 daddr, const unsigned short hnum, const int dif, const int sdif) { struct sock *sk, *result = NULL; struct hlist_nulls_node *node; int score, hiscore = 0; sk_nulls_for_each_rcu(sk, node, &ilb2->nulls_head) { score = compute_score(sk, net, hnum, daddr, dif, sdif); if (score > hiscore) { result = inet_lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum, inet_ehashfn); if (result) return result; result = sk; hiscore = score; } } return result; } static inline struct sock *inet_lookup_run_bpf(struct net *net, struct inet_hashinfo *hashinfo, struct sk_buff *skb, int doff, __be32 saddr, __be16 sport, __be32 daddr, u16 hnum) { struct sock *sk, *reuse_sk; bool no_reuseport; if (hashinfo != &tcp_hashinfo) return NULL; /* only TCP is supported */ no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_TCP, saddr, sport, daddr, hnum, &sk); if (no_reuseport || IS_ERR_OR_NULL(sk)) return sk; reuse_sk = inet_lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum, inet_ehashfn); if (reuse_sk) sk = reuse_sk; return sk; } struct sock *__inet_lookup_listener(struct net *net, struct inet_hashinfo *hashinfo, struct sk_buff *skb, int doff, const __be32 saddr, __be16 sport, const __be32 daddr, const unsigned short hnum, const int dif, const int sdif) { struct inet_listen_hashbucket *ilb2; struct sock *result = NULL; unsigned int hash2; /* Lookup redirect from BPF */ if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { result = inet_lookup_run_bpf(net, hashinfo, skb, doff, saddr, sport, daddr, hnum); if (result) goto done; } hash2 = ipv4_portaddr_hash(net, daddr, hnum); ilb2 = inet_lhash2_bucket(hashinfo, hash2); result = inet_lhash2_lookup(net, ilb2, skb, doff, saddr, sport, daddr, hnum, dif, sdif); if (result) goto done; /* Lookup lhash2 with INADDR_ANY */ hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum); ilb2 = inet_lhash2_bucket(hashinfo, hash2); result = inet_lhash2_lookup(net, ilb2, skb, doff, saddr, sport, htonl(INADDR_ANY), hnum, dif, sdif); done: if (IS_ERR(result)) return NULL; return result; } EXPORT_SYMBOL_GPL(__inet_lookup_listener); /* All sockets share common refcount, but have different destructors */ void sock_gen_put(struct sock *sk) { if (!refcount_dec_and_test(&sk->sk_refcnt)) return; if (sk->sk_state == TCP_TIME_WAIT) inet_twsk_free(inet_twsk(sk)); else if (sk->sk_state == TCP_NEW_SYN_RECV) reqsk_free(inet_reqsk(sk)); else sk_free(sk); } EXPORT_SYMBOL_GPL(sock_gen_put); void sock_edemux(struct sk_buff *skb) { sock_gen_put(skb->sk); } EXPORT_SYMBOL(sock_edemux); struct sock *__inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo, const __be32 saddr, const __be16 sport, const __be32 daddr, const u16 hnum, const int dif, const int sdif) { INET_ADDR_COOKIE(acookie, saddr, daddr); const __portpair ports = INET_COMBINED_PORTS(sport, hnum); struct sock *sk; const struct hlist_nulls_node *node; /* Optimize here for direct hit, only listening connections can * have wildcards anyways. 
*/ unsigned int hash = inet_ehashfn(net, daddr, hnum, saddr, sport); unsigned int slot = hash & hashinfo->ehash_mask; struct inet_ehash_bucket *head = &hashinfo->ehash[slot]; begin: sk_nulls_for_each_rcu(sk, node, &head->chain) { if (sk->sk_hash != hash) continue; if (likely(INET_MATCH(net, sk, acookie, ports, dif, sdif))) { if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt))) goto out; if (unlikely(!INET_MATCH(net, sk, acookie, ports, dif, sdif))) { sock_gen_put(sk); goto begin; } goto found; } } /* * if the nulls value we got at the end of this lookup is * not the expected one, we must restart lookup. * We probably met an item that was moved to another chain. */ if (get_nulls_value(node) != slot) goto begin; out: sk = NULL; found: return sk; } EXPORT_SYMBOL_GPL(__inet_lookup_established); /* called with local bh disabled */ static int __inet_check_established(struct inet_timewait_death_row *death_row, struct sock *sk, __u16 lport, struct inet_timewait_sock **twp) { struct inet_hashinfo *hinfo = death_row->hashinfo; struct inet_sock *inet = inet_sk(sk); __be32 daddr = inet->inet_rcv_saddr; __be32 saddr = inet->inet_daddr; int dif = sk->sk_bound_dev_if; struct net *net = sock_net(sk); int sdif = l3mdev_master_ifindex_by_index(net, dif); INET_ADDR_COOKIE(acookie, saddr, daddr); const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport); unsigned int hash = inet_ehashfn(net, daddr, lport, saddr, inet->inet_dport); struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); spinlock_t *lock = inet_ehash_lockp(hinfo, hash); struct sock *sk2; const struct hlist_nulls_node *node; struct inet_timewait_sock *tw = NULL; spin_lock(lock); sk_nulls_for_each(sk2, node, &head->chain) { if (sk2->sk_hash != hash) continue; if (likely(INET_MATCH(net, sk2, acookie, ports, dif, sdif))) { if (sk2->sk_state == TCP_TIME_WAIT) { tw = inet_twsk(sk2); if (twsk_unique(sk, sk2, twp)) break; } goto not_unique; } } /* Must record num and sport now. Otherwise we will see * in hash table socket with a funny identity. */ inet->inet_num = lport; inet->inet_sport = htons(lport); sk->sk_hash = hash; WARN_ON(!sk_unhashed(sk)); __sk_nulls_add_node_rcu(sk, &head->chain); if (tw) { sk_nulls_del_node_init_rcu((struct sock *)tw); __NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED); } spin_unlock(lock); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); if (twp) { *twp = tw; } else if (tw) { /* Silly. Should hash-dance instead... */ inet_twsk_deschedule_put(tw); } return 0; not_unique: spin_unlock(lock); return -EADDRNOTAVAIL; } static u64 inet_sk_port_offset(const struct sock *sk) { const struct inet_sock *inet = inet_sk(sk); return secure_ipv4_port_ephemeral(inet->inet_rcv_saddr, inet->inet_daddr, inet->inet_dport); } /* Searches for an exsiting socket in the ehash bucket list. * Returns true if found, false otherwise. 
*/ static bool inet_ehash_lookup_by_sk(struct sock *sk, struct hlist_nulls_head *list) { const __portpair ports = INET_COMBINED_PORTS(sk->sk_dport, sk->sk_num); const int sdif = sk->sk_bound_dev_if; const int dif = sk->sk_bound_dev_if; const struct hlist_nulls_node *node; struct net *net = sock_net(sk); struct sock *esk; INET_ADDR_COOKIE(acookie, sk->sk_daddr, sk->sk_rcv_saddr); sk_nulls_for_each_rcu(esk, node, list) { if (esk->sk_hash != sk->sk_hash) continue; if (sk->sk_family == AF_INET) { if (unlikely(INET_MATCH(net, esk, acookie, ports, dif, sdif))) { return true; } } #if IS_ENABLED(CONFIG_IPV6) else if (sk->sk_family == AF_INET6) { if (unlikely(inet6_match(net, esk, &sk->sk_v6_daddr, &sk->sk_v6_rcv_saddr, ports, dif, sdif))) { return true; } } #endif } return false; } /* Insert a socket into ehash, and eventually remove another one * (The another one can be a SYN_RECV or TIMEWAIT) * If an existing socket already exists, socket sk is not inserted, * and sets found_dup_sk parameter to true. */ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk) { struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; struct hlist_nulls_head *list; struct inet_ehash_bucket *head; spinlock_t *lock; bool ret = true; WARN_ON_ONCE(!sk_unhashed(sk)); sk->sk_hash = sk_ehashfn(sk); head = inet_ehash_bucket(hashinfo, sk->sk_hash); list = &head->chain; lock = inet_ehash_lockp(hashinfo, sk->sk_hash); spin_lock(lock); if (osk) { WARN_ON_ONCE(sk->sk_hash != osk->sk_hash); ret = sk_nulls_del_node_init_rcu(osk); } else if (found_dup_sk) { *found_dup_sk = inet_ehash_lookup_by_sk(sk, list); if (*found_dup_sk) ret = false; } if (ret) __sk_nulls_add_node_rcu(sk, list); spin_unlock(lock); return ret; } bool inet_ehash_nolisten(struct sock *sk, struct sock *osk, bool *found_dup_sk) { bool ok = inet_ehash_insert(sk, osk, found_dup_sk); if (ok) { sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); } else { this_cpu_inc(*sk->sk_prot->orphan_count); inet_sk_set_state(sk, TCP_CLOSE); sock_set_flag(sk, SOCK_DEAD); inet_csk_destroy_sock(sk); } return ok; } EXPORT_SYMBOL_GPL(inet_ehash_nolisten); static int inet_reuseport_add_sock(struct sock *sk, struct inet_listen_hashbucket *ilb) { struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash; const struct hlist_nulls_node *node; struct sock *sk2; kuid_t uid = sock_i_uid(sk); sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) { if (sk2 != sk && sk2->sk_family == sk->sk_family && ipv6_only_sock(sk2) == ipv6_only_sock(sk) && sk2->sk_bound_dev_if == sk->sk_bound_dev_if && inet_csk(sk2)->icsk_bind_hash == tb && sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) && inet_rcv_saddr_equal(sk, sk2, false)) return reuseport_add_sock(sk, sk2, inet_rcv_saddr_any(sk)); } return reuseport_alloc(sk, inet_rcv_saddr_any(sk)); } int __inet_hash(struct sock *sk, struct sock *osk) { struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; struct inet_listen_hashbucket *ilb2; int err = 0; if (sk->sk_state != TCP_LISTEN) { local_bh_disable(); inet_ehash_nolisten(sk, osk, NULL); local_bh_enable(); return 0; } WARN_ON(!sk_unhashed(sk)); ilb2 = inet_lhash2_bucket_sk(hashinfo, sk); spin_lock(&ilb2->lock); if (sk->sk_reuseport) { err = inet_reuseport_add_sock(sk, ilb2); if (err) goto unlock; } sock_set_flag(sk, SOCK_RCU_FREE); if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport && sk->sk_family == AF_INET6) __sk_nulls_add_node_tail_rcu(sk, &ilb2->nulls_head); else __sk_nulls_add_node_rcu(sk, &ilb2->nulls_head); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); unlock: 
spin_unlock(&ilb2->lock); return err; } EXPORT_SYMBOL(__inet_hash); int inet_hash(struct sock *sk) { int err = 0; if (sk->sk_state != TCP_CLOSE) err = __inet_hash(sk, NULL); return err; } EXPORT_SYMBOL_GPL(inet_hash); void inet_unhash(struct sock *sk) { struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; if (sk_unhashed(sk)) return; if (sk->sk_state == TCP_LISTEN) { struct inet_listen_hashbucket *ilb2; ilb2 = inet_lhash2_bucket_sk(hashinfo, sk); /* Don't disable bottom halves while acquiring the lock to * avoid circular locking dependency on PREEMPT_RT. */ spin_lock(&ilb2->lock); if (sk_unhashed(sk)) { spin_unlock(&ilb2->lock); return; } if (rcu_access_pointer(sk->sk_reuseport_cb)) reuseport_stop_listen_sock(sk); __sk_nulls_del_node_init_rcu(sk); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); spin_unlock(&ilb2->lock); } else { spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash); spin_lock_bh(lock); if (sk_unhashed(sk)) { spin_unlock_bh(lock); return; } __sk_nulls_del_node_init_rcu(sk); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); spin_unlock_bh(lock); } } EXPORT_SYMBOL_GPL(inet_unhash); /* RFC 6056 3.3.4. Algorithm 4: Double-Hash Port Selection Algorithm * Note that we use 32bit integers (vs RFC 'short integers') * because 2^16 is not a multiple of num_ephemeral and this * property might be used by clever attacker. * * RFC claims using TABLE_LENGTH=10 buckets gives an improvement, though * attacks were since demonstrated, thus we use 65536 by default instead * to really give more isolation and privacy, at the expense of 256kB * of kernel memory. */ #define INET_TABLE_PERTURB_SIZE (1 << CONFIG_INET_TABLE_PERTURB_ORDER) static u32 *table_perturb; int __inet_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk, u64 port_offset, int (*check_established)(struct inet_timewait_death_row *, struct sock *, __u16, struct inet_timewait_sock **)) { struct inet_hashinfo *hinfo = death_row->hashinfo; struct inet_timewait_sock *tw = NULL; struct inet_bind_hashbucket *head; int port = inet_sk(sk)->inet_num; struct net *net = sock_net(sk); struct inet_bind_bucket *tb; u32 remaining, offset; int ret, i, low, high; int l3mdev; u32 index; if (port) { local_bh_disable(); ret = check_established(death_row, sk, port, NULL); local_bh_enable(); return ret; } l3mdev = inet_sk_bound_l3mdev(sk); inet_get_local_port_range(net, &low, &high); high++; /* [32768, 60999] -> [32768, 61000[ */ remaining = high - low; if (likely(remaining > 1)) remaining &= ~1U; get_random_slow_once(table_perturb, INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb)); index = port_offset & (INET_TABLE_PERTURB_SIZE - 1); offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32); offset %= remaining; /* In first pass we try ports of @low parity. * inet_csk_get_port() does the opposite choice. */ offset &= ~1U; other_parity_scan: port = low + offset; for (i = 0; i < remaining; i += 2, port += 2) { if (unlikely(port >= high)) port -= remaining; if (inet_is_local_reserved_port(net, port)) continue; head = &hinfo->bhash[inet_bhashfn(net, port, hinfo->bhash_size)]; spin_lock_bh(&head->lock); /* Does not bother with rcv_saddr checks, because * the established check is already unique enough. 
*/ inet_bind_bucket_for_each(tb, &head->chain) { if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev && tb->port == port) { if (tb->fastreuse >= 0 || tb->fastreuseport >= 0) goto next_port; WARN_ON(hlist_empty(&tb->owners)); if (!check_established(death_row, sk, port, &tw)) goto ok; goto next_port; } } tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, net, head, port, l3mdev); if (!tb) { spin_unlock_bh(&head->lock); return -ENOMEM; } tb->fastreuse = -1; tb->fastreuseport = -1; goto ok; next_port: spin_unlock_bh(&head->lock); cond_resched(); } offset++; if ((offset & 1) && remaining > 1) goto other_parity_scan; return -EADDRNOTAVAIL; ok: /* Here we want to add a little bit of randomness to the next source * port that will be chosen. We use a max() with a random here so that * on low contention the randomness is maximal and on high contention * it may be inexistent. */ i = max_t(int, i, (prandom_u32() & 7) * 2); WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2); /* Head lock still held and bh's disabled */ inet_bind_hash(sk, tb, port); if (sk_unhashed(sk)) { inet_sk(sk)->inet_sport = htons(port); inet_ehash_nolisten(sk, (struct sock *)tw, NULL); } if (tw) inet_twsk_bind_unhash(tw, hinfo); spin_unlock(&head->lock); if (tw) inet_twsk_deschedule_put(tw); local_bh_enable(); return 0; } /* * Bind a port for a connect operation and hash it. */ int inet_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk) { u64 port_offset = 0; if (!inet_sk(sk)->inet_num) port_offset = inet_sk_port_offset(sk); return __inet_hash_connect(death_row, sk, port_offset, __inet_check_established); } EXPORT_SYMBOL_GPL(inet_hash_connect); static void init_hashinfo_lhash2(struct inet_hashinfo *h) { int i; for (i = 0; i <= h->lhash2_mask; i++) { spin_lock_init(&h->lhash2[i].lock); INIT_HLIST_NULLS_HEAD(&h->lhash2[i].nulls_head, i + LISTENING_NULLS_BASE); } } void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name, unsigned long numentries, int scale, unsigned long low_limit, unsigned long high_limit) { h->lhash2 = alloc_large_system_hash(name, sizeof(*h->lhash2), numentries, scale, 0, NULL, &h->lhash2_mask, low_limit, high_limit); init_hashinfo_lhash2(h); /* this one is used for source ports of outgoing connections */ table_perturb = alloc_large_system_hash("Table-perturb", sizeof(*table_perturb), INET_TABLE_PERTURB_SIZE, 0, 0, NULL, NULL, INET_TABLE_PERTURB_SIZE, INET_TABLE_PERTURB_SIZE); } int inet_hashinfo2_init_mod(struct inet_hashinfo *h) { h->lhash2 = kmalloc_array(INET_LHTABLE_SIZE, sizeof(*h->lhash2), GFP_KERNEL); if (!h->lhash2) return -ENOMEM; h->lhash2_mask = INET_LHTABLE_SIZE - 1; /* INET_LHTABLE_SIZE must be a power of 2 */ BUG_ON(INET_LHTABLE_SIZE & h->lhash2_mask); init_hashinfo_lhash2(h); return 0; } EXPORT_SYMBOL_GPL(inet_hashinfo2_init_mod); int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo) { unsigned int locksz = sizeof(spinlock_t); unsigned int i, nblocks = 1; if (locksz != 0) { /* allocate 2 cache lines or at least one spinlock per cpu */ nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U); nblocks = roundup_pow_of_two(nblocks * num_possible_cpus()); /* no more locks than number of hash buckets */ nblocks = min(nblocks, hashinfo->ehash_mask + 1); hashinfo->ehash_locks = kvmalloc_array(nblocks, locksz, GFP_KERNEL); if (!hashinfo->ehash_locks) return -ENOMEM; for (i = 0; i < nblocks; i++) spin_lock_init(&hashinfo->ehash_locks[i]); } hashinfo->ehash_locks_mask = nblocks - 1; return 0; } EXPORT_SYMBOL_GPL(inet_ehash_locks_alloc); |
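/*
 * Illustrative user-space sketch (not kernel code) of the parity-split port
 * walk performed by __inet_hash_connect() above: even offsets are scanned
 * first and odd ones only afterwards, mirroring the comment above that
 * inet_csk_get_port() makes the opposite choice, so autobound and connected
 * sockets tend not to fight over the same ports. port_is_free() is a
 * hypothetical stand-in for the check_established()/bind-bucket logic.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool port_is_free(int port)
{
	return port == 32771;	/* pretend only this port is available */
}

static int pick_port(uint32_t offset, int low, int high)
{
	uint32_t remaining = high - low + 1;
	uint32_t i;
	int port;

	if (remaining > 1)
		remaining &= ~1U;	/* keep the range even, as the kernel does */
	offset %= remaining;
	offset &= ~1U;			/* first pass: even offsets only */

other_parity_scan:
	port = low + offset;
	for (i = 0; i < remaining; i += 2, port += 2) {
		if (port > high)
			port -= remaining;
		if (port_is_free(port))
			return port;
	}
	offset++;
	if ((offset & 1) && remaining > 1)
		goto other_parity_scan;	/* second pass: odd offsets */
	return -1;
}

int main(void)
{
	printf("picked port %d\n", pick_port(12345, 32768, 60999));
	return 0;
}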
// SPDX-License-Identifier: GPL-2.0 /* Copyright (C) B.A.T.M.A.N.
contributors: * * Edo Monticelli, Antonio Quartulli */ #include "tp_meter.h" #include "main.h" #include <linux/atomic.h> #include <linux/build_bug.h> #include <linux/byteorder/generic.h> #include <linux/cache.h> #include <linux/compiler.h> #include <linux/err.h> #include <linux/etherdevice.h> #include <linux/gfp.h> #include <linux/if_ether.h> #include <linux/init.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/kref.h> #include <linux/kthread.h> #include <linux/limits.h> #include <linux/list.h> #include <linux/minmax.h> #include <linux/netdevice.h> #include <linux/param.h> #include <linux/printk.h> #include <linux/random.h> #include <linux/rculist.h> #include <linux/rcupdate.h> #include <linux/sched.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/stddef.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/wait.h> #include <linux/workqueue.h> #include <uapi/linux/batadv_packet.h> #include <uapi/linux/batman_adv.h> #include "hard-interface.h" #include "log.h" #include "netlink.h" #include "originator.h" #include "send.h" /** * BATADV_TP_DEF_TEST_LENGTH - Default test length if not specified by the user * in milliseconds */ #define BATADV_TP_DEF_TEST_LENGTH 10000 /** * BATADV_TP_AWND - Advertised window by the receiver (in bytes) */ #define BATADV_TP_AWND 0x20000000 /** * BATADV_TP_RECV_TIMEOUT - Receiver activity timeout. If the receiver does not * get anything for such amount of milliseconds, the connection is killed */ #define BATADV_TP_RECV_TIMEOUT 1000 /** * BATADV_TP_MAX_RTO - Maximum sender timeout. If the sender RTO gets beyond * such amount of milliseconds, the receiver is considered unreachable and the * connection is killed */ #define BATADV_TP_MAX_RTO 30000 /** * BATADV_TP_FIRST_SEQ - First seqno of each session. The number is rather high * in order to immediately trigger a wrap around (test purposes) */ #define BATADV_TP_FIRST_SEQ ((u32)-1 - 2000) /** * BATADV_TP_PLEN - length of the payload (data after the batadv_unicast header) * to simulate */ #define BATADV_TP_PLEN (BATADV_TP_PACKET_LEN - ETH_HLEN - \ sizeof(struct batadv_unicast_packet)) static u8 batadv_tp_prerandom[4096] __read_mostly; /** * batadv_tp_session_cookie() - generate session cookie based on session ids * @session: TP session identifier * @icmp_uid: icmp pseudo uid of the tp session * * Return: 32 bit tp_meter session cookie */ static u32 batadv_tp_session_cookie(const u8 session[2], u8 icmp_uid) { u32 cookie; cookie = icmp_uid << 16; cookie |= session[0] << 8; cookie |= session[1]; return cookie; } /** * batadv_tp_cwnd() - compute the new cwnd size * @base: base cwnd size value * @increment: the value to add to base to get the new size * @min: minimum cwnd value (usually MSS) * * Return the new cwnd size and ensure it does not exceed the Advertised * Receiver Window size. It is wrapped around safely. 
* For details refer to Section 3.1 of RFC5681 * * Return: new congestion window size in bytes */ static u32 batadv_tp_cwnd(u32 base, u32 increment, u32 min) { u32 new_size = base + increment; /* check for wrap-around */ if (new_size < base) new_size = (u32)ULONG_MAX; new_size = min_t(u32, new_size, BATADV_TP_AWND); return max_t(u32, new_size, min); } /** * batadv_tp_update_cwnd() - update the Congestion Windows * @tp_vars: the private data of the current TP meter session * @mss: maximum segment size of transmission * * 1) if the session is in Slow Start, the CWND has to be increased by 1 * MSS every unique received ACK * 2) if the session is in Congestion Avoidance, the CWND has to be * increased by MSS * MSS / CWND for every unique received ACK */ static void batadv_tp_update_cwnd(struct batadv_tp_vars *tp_vars, u32 mss) { spin_lock_bh(&tp_vars->cwnd_lock); /* slow start... */ if (tp_vars->cwnd <= tp_vars->ss_threshold) { tp_vars->dec_cwnd = 0; tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd, mss, mss); spin_unlock_bh(&tp_vars->cwnd_lock); return; } /* increment CWND at least of 1 (section 3.1 of RFC5681) */ tp_vars->dec_cwnd += max_t(u32, 1U << 3, ((mss * mss) << 6) / (tp_vars->cwnd << 3)); if (tp_vars->dec_cwnd < (mss << 3)) { spin_unlock_bh(&tp_vars->cwnd_lock); return; } tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd, mss, mss); tp_vars->dec_cwnd = 0; spin_unlock_bh(&tp_vars->cwnd_lock); } /** * batadv_tp_update_rto() - calculate new retransmission timeout * @tp_vars: the private data of the current TP meter session * @new_rtt: new roundtrip time in msec */ static void batadv_tp_update_rto(struct batadv_tp_vars *tp_vars, u32 new_rtt) { long m = new_rtt; /* RTT update * Details in Section 2.2 and 2.3 of RFC6298 * * It's tricky to understand. Don't lose hair please. * Inspired by tcp_rtt_estimator() tcp_input.c */ if (tp_vars->srtt != 0) { m -= (tp_vars->srtt >> 3); /* m is now error in rtt est */ tp_vars->srtt += m; /* rtt = 7/8 srtt + 1/8 new */ if (m < 0) m = -m; m -= (tp_vars->rttvar >> 2); tp_vars->rttvar += m; /* mdev ~= 3/4 rttvar + 1/4 new */ } else { /* first measure getting in */ tp_vars->srtt = m << 3; /* take the measured time to be srtt */ tp_vars->rttvar = m << 1; /* new_rtt / 2 */ } /* rto = srtt + 4 * rttvar. 
* rttvar is scaled by 4, therefore doesn't need to be multiplied */ tp_vars->rto = (tp_vars->srtt >> 3) + tp_vars->rttvar; } /** * batadv_tp_batctl_notify() - send client status result to client * @reason: reason for tp meter session stop * @dst: destination of tp_meter session * @bat_priv: the bat priv with all the soft interface information * @start_time: start of transmission in jiffies * @total_sent: bytes acked to the receiver * @cookie: cookie of tp_meter session */ static void batadv_tp_batctl_notify(enum batadv_tp_meter_reason reason, const u8 *dst, struct batadv_priv *bat_priv, unsigned long start_time, u64 total_sent, u32 cookie) { u32 test_time; u8 result; u32 total_bytes; if (!batadv_tp_is_error(reason)) { result = BATADV_TP_REASON_COMPLETE; test_time = jiffies_to_msecs(jiffies - start_time); total_bytes = total_sent; } else { result = reason; test_time = 0; total_bytes = 0; } batadv_netlink_tpmeter_notify(bat_priv, dst, result, test_time, total_bytes, cookie); } /** * batadv_tp_batctl_error_notify() - send client error result to client * @reason: reason for tp meter session stop * @dst: destination of tp_meter session * @bat_priv: the bat priv with all the soft interface information * @cookie: cookie of tp_meter session */ static void batadv_tp_batctl_error_notify(enum batadv_tp_meter_reason reason, const u8 *dst, struct batadv_priv *bat_priv, u32 cookie) { batadv_tp_batctl_notify(reason, dst, bat_priv, 0, 0, cookie); } /** * batadv_tp_list_find() - find a tp_vars object in the global list * @bat_priv: the bat priv with all the soft interface information * @dst: the other endpoint MAC address to look for * * Look for a tp_vars object matching dst as end_point and return it after * having increment the refcounter. Return NULL is not found * * Return: matching tp_vars or NULL when no tp_vars with @dst was found */ static struct batadv_tp_vars *batadv_tp_list_find(struct batadv_priv *bat_priv, const u8 *dst) { struct batadv_tp_vars *pos, *tp_vars = NULL; rcu_read_lock(); hlist_for_each_entry_rcu(pos, &bat_priv->tp_list, list) { if (!batadv_compare_eth(pos->other_end, dst)) continue; /* most of the time this function is invoked during the normal * process..it makes sens to pay more when the session is * finished and to speed the process up during the measurement */ if (unlikely(!kref_get_unless_zero(&pos->refcount))) continue; tp_vars = pos; break; } rcu_read_unlock(); return tp_vars; } /** * batadv_tp_list_find_session() - find tp_vars session object in the global * list * @bat_priv: the bat priv with all the soft interface information * @dst: the other endpoint MAC address to look for * @session: session identifier * * Look for a tp_vars object matching dst as end_point, session as tp meter * session and return it after having increment the refcounter. 
Return NULL * is not found * * Return: matching tp_vars or NULL when no tp_vars was found */ static struct batadv_tp_vars * batadv_tp_list_find_session(struct batadv_priv *bat_priv, const u8 *dst, const u8 *session) { struct batadv_tp_vars *pos, *tp_vars = NULL; rcu_read_lock(); hlist_for_each_entry_rcu(pos, &bat_priv->tp_list, list) { if (!batadv_compare_eth(pos->other_end, dst)) continue; if (memcmp(pos->session, session, sizeof(pos->session)) != 0) continue; /* most of the time this function is invoked during the normal * process..it makes sense to pay more when the session is * finished and to speed the process up during the measurement */ if (unlikely(!kref_get_unless_zero(&pos->refcount))) continue; tp_vars = pos; break; } rcu_read_unlock(); return tp_vars; } /** * batadv_tp_vars_release() - release batadv_tp_vars from lists and queue for * free after rcu grace period * @ref: kref pointer of the batadv_tp_vars */ static void batadv_tp_vars_release(struct kref *ref) { struct batadv_tp_vars *tp_vars; struct batadv_tp_unacked *un, *safe; tp_vars = container_of(ref, struct batadv_tp_vars, refcount); /* lock should not be needed because this object is now out of any * context! */ spin_lock_bh(&tp_vars->unacked_lock); list_for_each_entry_safe(un, safe, &tp_vars->unacked_list, list) { list_del(&un->list); kfree(un); } spin_unlock_bh(&tp_vars->unacked_lock); kfree_rcu(tp_vars, rcu); } /** * batadv_tp_vars_put() - decrement the batadv_tp_vars refcounter and possibly * release it * @tp_vars: the private data of the current TP meter session to be free'd */ static void batadv_tp_vars_put(struct batadv_tp_vars *tp_vars) { if (!tp_vars) return; kref_put(&tp_vars->refcount, batadv_tp_vars_release); } /** * batadv_tp_sender_cleanup() - cleanup sender data and drop and timer * @bat_priv: the bat priv with all the soft interface information * @tp_vars: the private data of the current TP meter session to cleanup */ static void batadv_tp_sender_cleanup(struct batadv_priv *bat_priv, struct batadv_tp_vars *tp_vars) { cancel_delayed_work(&tp_vars->finish_work); spin_lock_bh(&tp_vars->bat_priv->tp_list_lock); hlist_del_rcu(&tp_vars->list); spin_unlock_bh(&tp_vars->bat_priv->tp_list_lock); /* drop list reference */ batadv_tp_vars_put(tp_vars); atomic_dec(&tp_vars->bat_priv->tp_num); /* kill the timer and remove its reference */ del_timer_sync(&tp_vars->timer); /* the worker might have rearmed itself therefore we kill it again. 
Note * that if the worker should run again before invoking the following * del_timer(), it would not re-arm itself once again because the status * is OFF now */ del_timer(&tp_vars->timer); batadv_tp_vars_put(tp_vars); } /** * batadv_tp_sender_end() - print info about ended session and inform client * @bat_priv: the bat priv with all the soft interface information * @tp_vars: the private data of the current TP meter session */ static void batadv_tp_sender_end(struct batadv_priv *bat_priv, struct batadv_tp_vars *tp_vars) { u32 session_cookie; batadv_dbg(BATADV_DBG_TP_METER, bat_priv, "Test towards %pM finished..shutting down (reason=%d)\n", tp_vars->other_end, tp_vars->reason); batadv_dbg(BATADV_DBG_TP_METER, bat_priv, "Last timing stats: SRTT=%ums RTTVAR=%ums RTO=%ums\n", tp_vars->srtt >> 3, tp_vars->rttvar >> 2, tp_vars->rto); batadv_dbg(BATADV_DBG_TP_METER, bat_priv, "Final values: cwnd=%u ss_threshold=%u\n", tp_vars->cwnd, tp_vars->ss_threshold); session_cookie = batadv_tp_session_cookie(tp_vars->session, tp_vars->icmp_uid); batadv_tp_batctl_notify(tp_vars->reason, tp_vars->other_end, bat_priv, tp_vars->start_time, atomic64_read(&tp_vars->tot_sent), session_cookie); } /** * batadv_tp_sender_shutdown() - let sender thread/timer stop gracefully * @tp_vars: the private data of the current TP meter session * @reason: reason for tp meter session stop */ static void batadv_tp_sender_shutdown(struct batadv_tp_vars *tp_vars, enum batadv_tp_meter_reason reason) { if (!atomic_dec_and_test(&tp_vars->sending)) return; tp_vars->reason = reason; } /** * batadv_tp_sender_finish() - stop sender session after test_length was reached * @work: delayed work reference of the related tp_vars */ static void batadv_tp_sender_finish(struct work_struct *work) { struct delayed_work *delayed_work; struct batadv_tp_vars *tp_vars; delayed_work = to_delayed_work(work); tp_vars = container_of(delayed_work, struct batadv_tp_vars, finish_work); batadv_tp_sender_shutdown(tp_vars, BATADV_TP_REASON_COMPLETE); } /** * batadv_tp_reset_sender_timer() - reschedule the sender timer * @tp_vars: the private TP meter data for this session * * Reschedule the timer using tp_vars->rto as delay */ static void batadv_tp_reset_sender_timer(struct batadv_tp_vars *tp_vars) { /* most of the time this function is invoked while normal packet * reception... */ if (unlikely(atomic_read(&tp_vars->sending) == 0)) /* timer ref will be dropped in batadv_tp_sender_cleanup */ return; mod_timer(&tp_vars->timer, jiffies + msecs_to_jiffies(tp_vars->rto)); } /** * batadv_tp_sender_timeout() - timer that fires in case of packet loss * @t: address to timer_list inside tp_vars * * If fired it means that there was packet loss. 
* Switch to Slow Start, set the ss_threshold to half of the current cwnd and * reset the cwnd to 3*MSS */ static void batadv_tp_sender_timeout(struct timer_list *t) { struct batadv_tp_vars *tp_vars = from_timer(tp_vars, t, timer); struct batadv_priv *bat_priv = tp_vars->bat_priv; if (atomic_read(&tp_vars->sending) == 0) return; /* if the user waited long enough...shutdown the test */ if (unlikely(tp_vars->rto >= BATADV_TP_MAX_RTO)) { batadv_tp_sender_shutdown(tp_vars, BATADV_TP_REASON_DST_UNREACHABLE); return; } /* RTO exponential backoff * Details in Section 5.5 of RFC6298 */ tp_vars->rto <<= 1; spin_lock_bh(&tp_vars->cwnd_lock); tp_vars->ss_threshold = tp_vars->cwnd >> 1; if (tp_vars->ss_threshold < BATADV_TP_PLEN * 2) tp_vars->ss_threshold = BATADV_TP_PLEN * 2; batadv_dbg(BATADV_DBG_TP_METER, bat_priv, "Meter: RTO fired during test towards %pM! cwnd=%u new ss_thr=%u, resetting last_sent to %u\n", tp_vars->other_end, tp_vars->cwnd, tp_vars->ss_threshold, atomic_read(&tp_vars->last_acked)); tp_vars->cwnd = BATADV_TP_PLEN * 3; spin_unlock_bh(&tp_vars->cwnd_lock); /* resend the non-ACKed packets.. */ tp_vars->last_sent = atomic_read(&tp_vars->last_acked); wake_up(&tp_vars->more_bytes); batadv_tp_reset_sender_timer(tp_vars); } /** * batadv_tp_fill_prerandom() - Fill buffer with prefetched random bytes * @tp_vars: the private TP meter data for this session * @buf: Buffer to fill with bytes * @nbytes: amount of pseudorandom bytes */ static void batadv_tp_fill_prerandom(struct batadv_tp_vars *tp_vars, u8 *buf, size_t nbytes) { u32 local_offset; size_t bytes_inbuf; size_t to_copy; size_t pos = 0; spin_lock_bh(&tp_vars->prerandom_lock); local_offset = tp_vars->prerandom_offset; tp_vars->prerandom_offset += nbytes; tp_vars->prerandom_offset %= sizeof(batadv_tp_prerandom); spin_unlock_bh(&tp_vars->prerandom_lock); while (nbytes) { local_offset %= sizeof(batadv_tp_prerandom); bytes_inbuf = sizeof(batadv_tp_prerandom) - local_offset; to_copy = min(nbytes, bytes_inbuf); memcpy(&buf[pos], &batadv_tp_prerandom[local_offset], to_copy); pos += to_copy; nbytes -= to_copy; local_offset = 0; } } /** * batadv_tp_send_msg() - send a single message * @tp_vars: the private TP meter data for this session * @src: source mac address * @orig_node: the originator of the destination * @seqno: sequence number of this packet * @len: length of the entire packet * @session: session identifier * @uid: local ICMP "socket" index * @timestamp: timestamp in jiffies which is replied in ack * * Create and send a single TP Meter message. 
* * Return: 0 on success, BATADV_TP_REASON_DST_UNREACHABLE if the destination is * not reachable, BATADV_TP_REASON_MEMORY_ERROR if the packet couldn't be * allocated */ static int batadv_tp_send_msg(struct batadv_tp_vars *tp_vars, const u8 *src, struct batadv_orig_node *orig_node, u32 seqno, size_t len, const u8 *session, int uid, u32 timestamp) { struct batadv_icmp_tp_packet *icmp; struct sk_buff *skb; int r; u8 *data; size_t data_len; skb = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN); if (unlikely(!skb)) return BATADV_TP_REASON_MEMORY_ERROR; skb_reserve(skb, ETH_HLEN); icmp = skb_put(skb, sizeof(*icmp)); /* fill the icmp header */ ether_addr_copy(icmp->dst, orig_node->orig); ether_addr_copy(icmp->orig, src); icmp->version = BATADV_COMPAT_VERSION; icmp->packet_type = BATADV_ICMP; icmp->ttl = BATADV_TTL; icmp->msg_type = BATADV_TP; icmp->uid = uid; icmp->subtype = BATADV_TP_MSG; memcpy(icmp->session, session, sizeof(icmp->session)); icmp->seqno = htonl(seqno); icmp->timestamp = htonl(timestamp); data_len = len - sizeof(*icmp); data = skb_put(skb, data_len); batadv_tp_fill_prerandom(tp_vars, data, data_len); r = batadv_send_skb_to_orig(skb, orig_node, NULL); if (r == NET_XMIT_SUCCESS) return 0; return BATADV_TP_REASON_CANT_SEND; } /** * batadv_tp_recv_ack() - ACK receiving function * @bat_priv: the bat priv with all the soft interface information * @skb: the buffer containing the received packet * * Process a received TP ACK packet */ static void batadv_tp_recv_ack(struct batadv_priv *bat_priv, const struct sk_buff *skb) { struct batadv_hard_iface *primary_if = NULL; struct batadv_orig_node *orig_node = NULL; const struct batadv_icmp_tp_packet *icmp; struct batadv_tp_vars *tp_vars; size_t packet_len, mss; u32 rtt, recv_ack, cwnd; unsigned char *dev_addr; packet_len = BATADV_TP_PLEN; mss = BATADV_TP_PLEN; packet_len += sizeof(struct batadv_unicast_packet); icmp = (struct batadv_icmp_tp_packet *)skb->data; /* find the tp_vars */ tp_vars = batadv_tp_list_find_session(bat_priv, icmp->orig, icmp->session); if (unlikely(!tp_vars)) return; if (unlikely(atomic_read(&tp_vars->sending) == 0)) goto out; /* old ACK? silently drop it.. */ if (batadv_seq_before(ntohl(icmp->seqno), (u32)atomic_read(&tp_vars->last_acked))) goto out; primary_if = batadv_primary_if_get_selected(bat_priv); if (unlikely(!primary_if)) goto out; orig_node = batadv_orig_hash_find(bat_priv, icmp->orig); if (unlikely(!orig_node)) goto out; /* update RTO with the new sampled RTT, if any */ rtt = jiffies_to_msecs(jiffies) - ntohl(icmp->timestamp); if (icmp->timestamp && rtt) batadv_tp_update_rto(tp_vars, rtt); /* ACK for new data... reset the timer */ batadv_tp_reset_sender_timer(tp_vars); recv_ack = ntohl(icmp->seqno); /* check if this ACK is a duplicate */ if (atomic_read(&tp_vars->last_acked) == recv_ack) { atomic_inc(&tp_vars->dup_acks); if (atomic_read(&tp_vars->dup_acks) != 3) goto out; if (recv_ack >= tp_vars->recover) goto out; /* if this is the third duplicate ACK do Fast Retransmit */ batadv_tp_send_msg(tp_vars, primary_if->net_dev->dev_addr, orig_node, recv_ack, packet_len, icmp->session, icmp->uid, jiffies_to_msecs(jiffies)); spin_lock_bh(&tp_vars->cwnd_lock); /* Fast Recovery */ tp_vars->fast_recovery = true; /* Set recover to the last outstanding seqno when Fast Recovery * is entered. 
RFC6582, Section 3.2, step 1 */ tp_vars->recover = tp_vars->last_sent; tp_vars->ss_threshold = tp_vars->cwnd >> 1; batadv_dbg(BATADV_DBG_TP_METER, bat_priv, "Meter: Fast Recovery, (cur cwnd=%u) ss_thr=%u last_sent=%u recv_ack=%u\n", tp_vars->cwnd, tp_vars->ss_threshold, tp_vars->last_sent, recv_ack); tp_vars->cwnd = batadv_tp_cwnd(tp_vars->ss_threshold, 3 * mss, mss); tp_vars->dec_cwnd = 0; tp_vars->last_sent = recv_ack; spin_unlock_bh(&tp_vars->cwnd_lock); } else { /* count the acked data */ atomic64_add(recv_ack - atomic_read(&tp_vars->last_acked), &tp_vars->tot_sent); /* reset the duplicate ACKs counter */ atomic_set(&tp_vars->dup_acks, 0); if (tp_vars->fast_recovery) { /* partial ACK */ if (batadv_seq_before(recv_ack, tp_vars->recover)) { /* this is another hole in the window. React * immediately as specified by NewReno (see * Section 3.2 of RFC6582 for details) */ dev_addr = primary_if->net_dev->dev_addr; batadv_tp_send_msg(tp_vars, dev_addr, orig_node, recv_ack, packet_len, icmp->session, icmp->uid, jiffies_to_msecs(jiffies)); tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd, mss, mss); } else { tp_vars->fast_recovery = false; /* set cwnd to the value of ss_threshold at the * moment that Fast Recovery was entered. * RFC6582, Section 3.2, step 3 */ cwnd = batadv_tp_cwnd(tp_vars->ss_threshold, 0, mss); tp_vars->cwnd = cwnd; } goto move_twnd; } if (recv_ack - atomic_read(&tp_vars->last_acked) >= mss) batadv_tp_update_cwnd(tp_vars, mss); move_twnd: /* move the Transmit Window */ atomic_set(&tp_vars->last_acked, recv_ack); } wake_up(&tp_vars->more_bytes); out: batadv_hardif_put(primary_if); batadv_orig_node_put(orig_node); batadv_tp_vars_put(tp_vars); } /** * batadv_tp_avail() - check if congestion window is not full * @tp_vars: the private data of the current TP meter session * @payload_len: size of the payload of a single message * * Return: true when congestion window is not full, false otherwise */ static bool batadv_tp_avail(struct batadv_tp_vars *tp_vars, size_t payload_len) { u32 win_left, win_limit; win_limit = atomic_read(&tp_vars->last_acked) + tp_vars->cwnd; win_left = win_limit - tp_vars->last_sent; return win_left >= payload_len; } /** * batadv_tp_wait_available() - wait until congestion window becomes free or * timeout is reached * @tp_vars: the private data of the current TP meter session * @plen: size of the payload of a single message * * Return: 0 if the condition evaluated to false after the timeout elapsed, * 1 if the condition evaluated to true after the timeout elapsed, the * remaining jiffies (at least 1) if the condition evaluated to true before * the timeout elapsed, or -ERESTARTSYS if it was interrupted by a signal. 
*/ static int batadv_tp_wait_available(struct batadv_tp_vars *tp_vars, size_t plen) { int ret; ret = wait_event_interruptible_timeout(tp_vars->more_bytes, batadv_tp_avail(tp_vars, plen), HZ / 10); return ret; } /** * batadv_tp_send() - main sending thread of a tp meter session * @arg: address of the related tp_vars * * Return: nothing, this function never returns */ static int batadv_tp_send(void *arg) { struct batadv_tp_vars *tp_vars = arg; struct batadv_priv *bat_priv = tp_vars->bat_priv; struct batadv_hard_iface *primary_if = NULL; struct batadv_orig_node *orig_node = NULL; size_t payload_len, packet_len; int err = 0; if (unlikely(tp_vars->role != BATADV_TP_SENDER)) { err = BATADV_TP_REASON_DST_UNREACHABLE; tp_vars->reason = err; goto out; } orig_node = batadv_orig_hash_find(bat_priv, tp_vars->other_end); if (unlikely(!orig_node)) { err = BATADV_TP_REASON_DST_UNREACHABLE; tp_vars->reason = err; goto out; } primary_if = batadv_primary_if_get_selected(bat_priv); if (unlikely(!primary_if)) { err = BATADV_TP_REASON_DST_UNREACHABLE; tp_vars->reason = err; goto out; } /* assume that all the hard_interfaces have a correctly * configured MTU, so use the soft_iface MTU as MSS. * This might not be true and in that case the fragmentation * should be used. * Now, try to send the packet as it is */ payload_len = BATADV_TP_PLEN; BUILD_BUG_ON(sizeof(struct batadv_icmp_tp_packet) > BATADV_TP_PLEN); batadv_tp_reset_sender_timer(tp_vars); /* queue the worker in charge of terminating the test */ queue_delayed_work(batadv_event_workqueue, &tp_vars->finish_work, msecs_to_jiffies(tp_vars->test_length)); while (atomic_read(&tp_vars->sending) != 0) { if (unlikely(!batadv_tp_avail(tp_vars, payload_len))) { batadv_tp_wait_available(tp_vars, payload_len); continue; } /* to emulate normal unicast traffic, add to the payload len * the size of the unicast header */ packet_len = payload_len + sizeof(struct batadv_unicast_packet); err = batadv_tp_send_msg(tp_vars, primary_if->net_dev->dev_addr, orig_node, tp_vars->last_sent, packet_len, tp_vars->session, tp_vars->icmp_uid, jiffies_to_msecs(jiffies)); /* something went wrong during the preparation/transmission */ if (unlikely(err && err != BATADV_TP_REASON_CANT_SEND)) { batadv_dbg(BATADV_DBG_TP_METER, bat_priv, "Meter: %s() cannot send packets (%d)\n", __func__, err); /* ensure nobody else tries to stop the thread now */ if (atomic_dec_and_test(&tp_vars->sending)) tp_vars->reason = err; break; } /* right-shift the TWND */ if (!err) tp_vars->last_sent += payload_len; cond_resched(); } out: batadv_hardif_put(primary_if); batadv_orig_node_put(orig_node); batadv_tp_sender_end(bat_priv, tp_vars); batadv_tp_sender_cleanup(bat_priv, tp_vars); batadv_tp_vars_put(tp_vars); do_exit(0); } /** * batadv_tp_start_kthread() - start new thread which manages the tp meter * sender * @tp_vars: the private data of the current TP meter session */ static void batadv_tp_start_kthread(struct batadv_tp_vars *tp_vars) { struct task_struct *kthread; struct batadv_priv *bat_priv = tp_vars->bat_priv; u32 session_cookie; kref_get(&tp_vars->refcount); kthread = kthread_create(batadv_tp_send, tp_vars, "kbatadv_tp_meter"); if (IS_ERR(kthread)) { session_cookie = batadv_tp_session_cookie(tp_vars->session, tp_vars->icmp_uid); pr_err("batadv: cannot create tp meter kthread\n"); batadv_tp_batctl_error_notify(BATADV_TP_REASON_MEMORY_ERROR, tp_vars->other_end, bat_priv, session_cookie); /* drop reserved reference for kthread */ batadv_tp_vars_put(tp_vars); /* cleanup of failed tp meter variables */ 
batadv_tp_sender_cleanup(bat_priv, tp_vars); return; } wake_up_process(kthread); } /** * batadv_tp_start() - start a new tp meter session * @bat_priv: the bat priv with all the soft interface information * @dst: the receiver MAC address * @test_length: test length in milliseconds * @cookie: session cookie */ void batadv_tp_start(struct batadv_priv *bat_priv, const u8 *dst, u32 test_length, u32 *cookie) { struct batadv_tp_vars *tp_vars; u8 session_id[2]; u8 icmp_uid; u32 session_cookie; get_random_bytes(session_id, sizeof(session_id)); get_random_bytes(&icmp_uid, 1); session_cookie = batadv_tp_session_cookie(session_id, icmp_uid); *cookie = session_cookie; /* look for an already existing test towards this node */ spin_lock_bh(&bat_priv->tp_list_lock); tp_vars = batadv_tp_list_find(bat_priv, dst); if (tp_vars) { spin_unlock_bh(&bat_priv->tp_list_lock); batadv_tp_vars_put(tp_vars); batadv_dbg(BATADV_DBG_TP_METER, bat_priv, "Meter: test to or from the same node already ongoing, aborting\n"); batadv_tp_batctl_error_notify(BATADV_TP_REASON_ALREADY_ONGOING, dst, bat_priv, session_cookie); return; } if (!atomic_add_unless(&bat_priv->tp_num, 1, BATADV_TP_MAX_NUM)) { spin_unlock_bh(&bat_priv->tp_list_lock); batadv_dbg(BATADV_DBG_TP_METER, bat_priv, "Meter: too many ongoing sessions, aborting (SEND)\n"); batadv_tp_batctl_error_notify(BATADV_TP_REASON_TOO_MANY, dst, bat_priv, session_cookie); return; } tp_vars = kmalloc(sizeof(*tp_vars), GFP_ATOMIC); if (!tp_vars) { spin_unlock_bh(&bat_priv->tp_list_lock); batadv_dbg(BATADV_DBG_TP_METER, bat_priv, "Meter: %s cannot allocate list elements\n", __func__); batadv_tp_batctl_error_notify(BATADV_TP_REASON_MEMORY_ERROR, dst, bat_priv, session_cookie); return; } /* initialize tp_vars */ ether_addr_copy(tp_vars->other_end, dst); kref_init(&tp_vars->refcount); tp_vars->role = BATADV_TP_SENDER; atomic_set(&tp_vars->sending, 1); memcpy(tp_vars->session, session_id, sizeof(session_id)); tp_vars->icmp_uid = icmp_uid; tp_vars->last_sent = BATADV_TP_FIRST_SEQ; atomic_set(&tp_vars->last_acked, BATADV_TP_FIRST_SEQ); tp_vars->fast_recovery = false; tp_vars->recover = BATADV_TP_FIRST_SEQ; /* initialise the CWND to 3*MSS (Section 3.1 in RFC5681). * For batman-adv the MSS is the size of the payload received by the * soft_interface, hence its MTU */ tp_vars->cwnd = BATADV_TP_PLEN * 3; /* at the beginning initialise the SS threshold to the biggest possible * window size, hence the AWND size */ tp_vars->ss_threshold = BATADV_TP_AWND; /* RTO initial value is 3 seconds. * Details in Section 2.1 of RFC6298 */ tp_vars->rto = 1000; tp_vars->srtt = 0; tp_vars->rttvar = 0; atomic64_set(&tp_vars->tot_sent, 0); kref_get(&tp_vars->refcount); timer_setup(&tp_vars->timer, batadv_tp_sender_timeout, 0); tp_vars->bat_priv = bat_priv; tp_vars->start_time = jiffies; init_waitqueue_head(&tp_vars->more_bytes); spin_lock_init(&tp_vars->unacked_lock); INIT_LIST_HEAD(&tp_vars->unacked_list); spin_lock_init(&tp_vars->cwnd_lock); tp_vars->prerandom_offset = 0; spin_lock_init(&tp_vars->prerandom_lock); kref_get(&tp_vars->refcount); hlist_add_head_rcu(&tp_vars->list, &bat_priv->tp_list); spin_unlock_bh(&bat_priv->tp_list_lock); tp_vars->test_length = test_length; if (!tp_vars->test_length) tp_vars->test_length = BATADV_TP_DEF_TEST_LENGTH; batadv_dbg(BATADV_DBG_TP_METER, bat_priv, "Meter: starting throughput meter towards %pM (length=%ums)\n", dst, test_length); /* init work item for finished tp tests */ INIT_DELAYED_WORK(&tp_vars->finish_work, batadv_tp_sender_finish); /* start tp kthread. 
This way the write() call issued from userspace can * happily return and avoid to block */ batadv_tp_start_kthread(tp_vars); /* don't return reference to new tp_vars */ batadv_tp_vars_put(tp_vars); } /** * batadv_tp_stop() - stop currently running tp meter session * @bat_priv: the bat priv with all the soft interface information * @dst: the receiver MAC address * @return_value: reason for tp meter session stop */ void batadv_tp_stop(struct batadv_priv *bat_priv, const u8 *dst, u8 return_value) { struct batadv_orig_node *orig_node; struct batadv_tp_vars *tp_vars; batadv_dbg(BATADV_DBG_TP_METER, bat_priv, "Meter: stopping test towards %pM\n", dst); orig_node = batadv_orig_hash_find(bat_priv, dst); if (!orig_node) return; tp_vars = batadv_tp_list_find(bat_priv, orig_node->orig); if (!tp_vars) { batadv_dbg(BATADV_DBG_TP_METER, bat_priv, "Meter: trying to interrupt an already over connection\n"); goto out; } batadv_tp_sender_shutdown(tp_vars, return_value); batadv_tp_vars_put(tp_vars); out: batadv_orig_node_put(orig_node); } /** * batadv_tp_reset_receiver_timer() - reset the receiver shutdown timer * @tp_vars: the private data of the current TP meter session * * start the receiver shutdown timer or reset it if already started */ static void batadv_tp_reset_receiver_timer(struct batadv_tp_vars *tp_vars) { mod_timer(&tp_vars->timer, jiffies + msecs_to_jiffies(BATADV_TP_RECV_TIMEOUT)); } /** * batadv_tp_receiver_shutdown() - stop a tp meter receiver when timeout is * reached without received ack * @t: address to timer_list inside tp_vars */ static void batadv_tp_receiver_shutdown(struct timer_list *t) { struct batadv_tp_vars *tp_vars = from_timer(tp_vars, t, timer); struct batadv_tp_unacked *un, *safe; struct batadv_priv *bat_priv; bat_priv = tp_vars->bat_priv; /* if there is recent activity rearm the timer */ if (!batadv_has_timed_out(tp_vars->last_recv_time, BATADV_TP_RECV_TIMEOUT)) { /* reset the receiver shutdown timer */ batadv_tp_reset_receiver_timer(tp_vars); return; } batadv_dbg(BATADV_DBG_TP_METER, bat_priv, "Shutting down for inactivity (more than %dms) from %pM\n", BATADV_TP_RECV_TIMEOUT, tp_vars->other_end); spin_lock_bh(&tp_vars->bat_priv->tp_list_lock); hlist_del_rcu(&tp_vars->list); spin_unlock_bh(&tp_vars->bat_priv->tp_list_lock); /* drop list reference */ batadv_tp_vars_put(tp_vars); atomic_dec(&bat_priv->tp_num); spin_lock_bh(&tp_vars->unacked_lock); list_for_each_entry_safe(un, safe, &tp_vars->unacked_list, list) { list_del(&un->list); kfree(un); } spin_unlock_bh(&tp_vars->unacked_lock); /* drop reference of timer */ batadv_tp_vars_put(tp_vars); } /** * batadv_tp_send_ack() - send an ACK packet * @bat_priv: the bat priv with all the soft interface information * @dst: the mac address of the destination originator * @seq: the sequence number to ACK * @timestamp: the timestamp to echo back in the ACK * @session: session identifier * @socket_index: local ICMP socket identifier * * Return: 0 on success, a positive integer representing the reason of the * failure otherwise */ static int batadv_tp_send_ack(struct batadv_priv *bat_priv, const u8 *dst, u32 seq, __be32 timestamp, const u8 *session, int socket_index) { struct batadv_hard_iface *primary_if = NULL; struct batadv_orig_node *orig_node; struct batadv_icmp_tp_packet *icmp; struct sk_buff *skb; int r, ret; orig_node = batadv_orig_hash_find(bat_priv, dst); if (unlikely(!orig_node)) { ret = BATADV_TP_REASON_DST_UNREACHABLE; goto out; } primary_if = batadv_primary_if_get_selected(bat_priv); if (unlikely(!primary_if)) { ret = 
BATADV_TP_REASON_DST_UNREACHABLE; goto out; } skb = netdev_alloc_skb_ip_align(NULL, sizeof(*icmp) + ETH_HLEN); if (unlikely(!skb)) { ret = BATADV_TP_REASON_MEMORY_ERROR; goto out; } skb_reserve(skb, ETH_HLEN); icmp = skb_put(skb, sizeof(*icmp)); icmp->packet_type = BATADV_ICMP; icmp->version = BATADV_COMPAT_VERSION; icmp->ttl = BATADV_TTL; icmp->msg_type = BATADV_TP; ether_addr_copy(icmp->dst, orig_node->orig); ether_addr_copy(icmp->orig, primary_if->net_dev->dev_addr); icmp->uid = socket_index; icmp->subtype = BATADV_TP_ACK; memcpy(icmp->session, session, sizeof(icmp->session)); icmp->seqno = htonl(seq); icmp->timestamp = timestamp; /* send the ack */ r = batadv_send_skb_to_orig(skb, orig_node, NULL); if (unlikely(r < 0) || r == NET_XMIT_DROP) { ret = BATADV_TP_REASON_DST_UNREACHABLE; goto out; } ret = 0; out: batadv_orig_node_put(orig_node); batadv_hardif_put(primary_if); return ret; } /** * batadv_tp_handle_out_of_order() - store an out of order packet * @tp_vars: the private data of the current TP meter session * @skb: the buffer containing the received packet * * Store the out of order packet in the unacked list for late processing. This * packets are kept in this list so that they can be ACKed at once as soon as * all the previous packets have been received * * Return: true if the packed has been successfully processed, false otherwise */ static bool batadv_tp_handle_out_of_order(struct batadv_tp_vars *tp_vars, const struct sk_buff *skb) { const struct batadv_icmp_tp_packet *icmp; struct batadv_tp_unacked *un, *new; u32 payload_len; bool added = false; new = kmalloc(sizeof(*new), GFP_ATOMIC); if (unlikely(!new)) return false; icmp = (struct batadv_icmp_tp_packet *)skb->data; new->seqno = ntohl(icmp->seqno); payload_len = skb->len - sizeof(struct batadv_unicast_packet); new->len = payload_len; spin_lock_bh(&tp_vars->unacked_lock); /* if the list is empty immediately attach this new object */ if (list_empty(&tp_vars->unacked_list)) { list_add(&new->list, &tp_vars->unacked_list); goto out; } /* otherwise loop over the list and either drop the packet because this * is a duplicate or store it at the right position. * * The iteration is done in the reverse way because it is likely that * the last received packet (the one being processed now) has a bigger * seqno than all the others already stored. */ list_for_each_entry_reverse(un, &tp_vars->unacked_list, list) { /* check for duplicates */ if (new->seqno == un->seqno) { if (new->len > un->len) un->len = new->len; kfree(new); added = true; break; } /* look for the right position */ if (batadv_seq_before(new->seqno, un->seqno)) continue; /* as soon as an entry having a bigger seqno is found, the new * one is attached _after_ it. 
In this way the list is kept in * ascending order */ list_add_tail(&new->list, &un->list); added = true; break; } /* received packet with smallest seqno out of order; add it to front */ if (!added) list_add(&new->list, &tp_vars->unacked_list); out: spin_unlock_bh(&tp_vars->unacked_lock); return true; } /** * batadv_tp_ack_unordered() - update number received bytes in current stream * without gaps * @tp_vars: the private data of the current TP meter session */ static void batadv_tp_ack_unordered(struct batadv_tp_vars *tp_vars) { struct batadv_tp_unacked *un, *safe; u32 to_ack; /* go through the unacked packet list and possibly ACK them as * well */ spin_lock_bh(&tp_vars->unacked_lock); list_for_each_entry_safe(un, safe, &tp_vars->unacked_list, list) { /* the list is ordered, therefore it is possible to stop as soon * there is a gap between the last acked seqno and the seqno of * the packet under inspection */ if (batadv_seq_before(tp_vars->last_recv, un->seqno)) break; to_ack = un->seqno + un->len - tp_vars->last_recv; if (batadv_seq_before(tp_vars->last_recv, un->seqno + un->len)) tp_vars->last_recv += to_ack; list_del(&un->list); kfree(un); } spin_unlock_bh(&tp_vars->unacked_lock); } /** * batadv_tp_init_recv() - return matching or create new receiver tp_vars * @bat_priv: the bat priv with all the soft interface information * @icmp: received icmp tp msg * * Return: corresponding tp_vars or NULL on errors */ static struct batadv_tp_vars * batadv_tp_init_recv(struct batadv_priv *bat_priv, const struct batadv_icmp_tp_packet *icmp) { struct batadv_tp_vars *tp_vars; spin_lock_bh(&bat_priv->tp_list_lock); tp_vars = batadv_tp_list_find_session(bat_priv, icmp->orig, icmp->session); if (tp_vars) goto out_unlock; if (!atomic_add_unless(&bat_priv->tp_num, 1, BATADV_TP_MAX_NUM)) { batadv_dbg(BATADV_DBG_TP_METER, bat_priv, "Meter: too many ongoing sessions, aborting (RECV)\n"); goto out_unlock; } tp_vars = kmalloc(sizeof(*tp_vars), GFP_ATOMIC); if (!tp_vars) goto out_unlock; ether_addr_copy(tp_vars->other_end, icmp->orig); tp_vars->role = BATADV_TP_RECEIVER; memcpy(tp_vars->session, icmp->session, sizeof(tp_vars->session)); tp_vars->last_recv = BATADV_TP_FIRST_SEQ; tp_vars->bat_priv = bat_priv; kref_init(&tp_vars->refcount); spin_lock_init(&tp_vars->unacked_lock); INIT_LIST_HEAD(&tp_vars->unacked_list); kref_get(&tp_vars->refcount); hlist_add_head_rcu(&tp_vars->list, &bat_priv->tp_list); kref_get(&tp_vars->refcount); timer_setup(&tp_vars->timer, batadv_tp_receiver_shutdown, 0); batadv_tp_reset_receiver_timer(tp_vars); out_unlock: spin_unlock_bh(&bat_priv->tp_list_lock); return tp_vars; } /** * batadv_tp_recv_msg() - process a single data message * @bat_priv: the bat priv with all the soft interface information * @skb: the buffer containing the received packet * * Process a received TP MSG packet */ static void batadv_tp_recv_msg(struct batadv_priv *bat_priv, const struct sk_buff *skb) { const struct batadv_icmp_tp_packet *icmp; struct batadv_tp_vars *tp_vars; size_t packet_size; u32 seqno; icmp = (struct batadv_icmp_tp_packet *)skb->data; seqno = ntohl(icmp->seqno); /* check if this is the first seqno. This means that if the * first packet is lost, the tp meter does not work anymore! 
*/ if (seqno == BATADV_TP_FIRST_SEQ) { tp_vars = batadv_tp_init_recv(bat_priv, icmp); if (!tp_vars) { batadv_dbg(BATADV_DBG_TP_METER, bat_priv, "Meter: seqno != BATADV_TP_FIRST_SEQ cannot initiate connection\n"); goto out; } } else { tp_vars = batadv_tp_list_find_session(bat_priv, icmp->orig, icmp->session); if (!tp_vars) { batadv_dbg(BATADV_DBG_TP_METER, bat_priv, "Unexpected packet from %pM!\n", icmp->orig); goto out; } } if (unlikely(tp_vars->role != BATADV_TP_RECEIVER)) { batadv_dbg(BATADV_DBG_TP_METER, bat_priv, "Meter: dropping packet: not expected (role=%u)\n", tp_vars->role); goto out; } tp_vars->last_recv_time = jiffies; /* if the packet is a duplicate, it may be the case that an ACK has been * lost. Resend the ACK */ if (batadv_seq_before(seqno, tp_vars->last_recv)) goto send_ack; /* if the packet is out of order enqueue it */ if (ntohl(icmp->seqno) != tp_vars->last_recv) { /* exit immediately (and do not send any ACK) if the packet has * not been enqueued correctly */ if (!batadv_tp_handle_out_of_order(tp_vars, skb)) goto out; /* send a duplicate ACK */ goto send_ack; } /* if everything was fine count the ACKed bytes */ packet_size = skb->len - sizeof(struct batadv_unicast_packet); tp_vars->last_recv += packet_size; /* check if this ordered message filled a gap.... */ batadv_tp_ack_unordered(tp_vars); send_ack: /* send the ACK. If the received packet was out of order, the ACK that * is going to be sent is a duplicate (the sender will count them and * possibly enter Fast Retransmit as soon as it has reached 3) */ batadv_tp_send_ack(bat_priv, icmp->orig, tp_vars->last_recv, icmp->timestamp, icmp->session, icmp->uid); out: batadv_tp_vars_put(tp_vars); } /** * batadv_tp_meter_recv() - main TP Meter receiving function * @bat_priv: the bat priv with all the soft interface information * @skb: the buffer containing the received packet */ void batadv_tp_meter_recv(struct batadv_priv *bat_priv, struct sk_buff *skb) { struct batadv_icmp_tp_packet *icmp; icmp = (struct batadv_icmp_tp_packet *)skb->data; switch (icmp->subtype) { case BATADV_TP_MSG: batadv_tp_recv_msg(bat_priv, skb); break; case BATADV_TP_ACK: batadv_tp_recv_ack(bat_priv, skb); break; default: batadv_dbg(BATADV_DBG_TP_METER, bat_priv, "Received unknown TP Metric packet type %u\n", icmp->subtype); } consume_skb(skb); } /** * batadv_tp_meter_init() - initialize global tp_meter structures */ void __init batadv_tp_meter_init(void) { get_random_bytes(batadv_tp_prerandom, sizeof(batadv_tp_prerandom)); } |
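Two of the algorithms in the tp_meter code above lend themselves to small standalone demonstrations. First, the congestion-avoidance branch of batadv_tp_update_cwnd() grows the window through a fixed-point accumulator (dec_cwnd, kept in 1/8-byte units) instead of adding fractional bytes to cwnd. The sketch below is illustrative only: the MSS, starting window and slow-start threshold are invented example numbers, and cwnd_grow() is a local copy of the batadv_tp_cwnd() clamping logic.

#include <stdio.h>

#define AWND 0x20000000u	/* advertised receiver window, as above */

/* local copy of the batadv_tp_cwnd() clamping logic */
static unsigned int cwnd_grow(unsigned int base, unsigned int inc, unsigned int min)
{
	unsigned int new_size = base + inc;

	if (new_size < base)		/* wrap-around */
		new_size = 0xffffffffu;
	if (new_size > AWND)
		new_size = AWND;
	return new_size < min ? min : new_size;
}

int main(void)
{
	unsigned int mss = 1464;		/* assumed per-packet payload size */
	unsigned int cwnd = 2 * mss;		/* assumed starting window */
	unsigned int ss_threshold = 6 * mss;	/* assumed slow-start threshold */
	unsigned int dec_cwnd = 0;		/* growth accumulator, scaled by 8 */

	for (int ack = 1; ack <= 60; ack++) {
		if (cwnd <= ss_threshold) {
			/* slow start: one MSS per unique ACK */
			cwnd = cwnd_grow(cwnd, mss, mss);
			printf("ACK %2d (slow start): cwnd=%u\n", ack, cwnd);
			continue;
		}
		/* congestion avoidance: ~mss*mss/cwnd per ACK, in 1/8 units */
		unsigned int step = ((mss * mss) << 6) / (cwnd << 3);

		if (step < (1u << 3))
			step = 1u << 3;
		dec_cwnd += step;
		if (dec_cwnd < (mss << 3))
			continue;
		cwnd = cwnd_grow(cwnd, mss, mss);
		dec_cwnd = 0;
		printf("ACK %2d (cong. avoid): cwnd=%u\n", ack, cwnd);
	}
	return 0;
}

Running it shows the window climbing by one MSS per ACK until it passes the threshold, then by one MSS only every cwnd/mss ACKs or so, which is the intended Reno-style behaviour.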
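Similarly, the scaled RTT estimator in batadv_tp_update_rto() above keeps srtt multiplied by 8 and rttvar multiplied by 4, so the timeout falls out of a shift and an add. Here is a small userspace sketch of the same arithmetic; the RTT samples and the 1-second starting RTO are made-up inputs for illustration.

#include <stdio.h>

struct rto_state {
	long srtt;		/* smoothed RTT, scaled by 8 */
	long rttvar;		/* RTT variance, scaled by 4 */
	unsigned int rto;	/* retransmission timeout in ms */
};

static void update_rto(struct rto_state *s, long new_rtt)
{
	long m = new_rtt;

	if (s->srtt != 0) {
		m -= (s->srtt >> 3);	/* error against the current estimate */
		s->srtt += m;		/* srtt = 7/8 srtt + 1/8 sample */
		if (m < 0)
			m = -m;
		m -= (s->rttvar >> 2);
		s->rttvar += m;		/* rttvar = 3/4 rttvar + 1/4 |error| */
	} else {
		s->srtt = m << 3;	/* first sample seeds srtt */
		s->rttvar = m << 1;	/* and rttvar = sample / 2 */
	}
	/* rto = srtt + 4 * rttvar; rttvar is already scaled by 4 */
	s->rto = (s->srtt >> 3) + s->rttvar;
}

int main(void)
{
	struct rto_state s = { 0, 0, 1000 };		/* assumed 1 s initial RTO */
	long samples[] = { 40, 55, 38, 120, 42 };	/* invented RTTs in ms */

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		update_rto(&s, samples[i]);
		printf("rtt=%ldms -> srtt=%ldms rttvar(x4)=%ldms rto=%ums\n",
		       samples[i], s.srtt >> 3, s.rttvar, s.rto);
	}
	return 0;
}

A single large outlier (the 120 ms sample) inflates rttvar, and therefore the RTO, noticeably more than it moves srtt, which is the usual property of this estimator.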
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_X86_DESC_H #define _ASM_X86_DESC_H #include <asm/desc_defs.h> #include <asm/ldt.h> #include <asm/mmu.h> #include <asm/fixmap.h> #include <asm/irq_vectors.h> #include <asm/cpu_entry_area.h> #include <linux/debug_locks.h> #include <linux/smp.h> #include <linux/percpu.h> static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info) { desc->limit0 = info->limit & 0x0ffff; desc->base0 = (info->base_addr & 0x0000ffff); desc->base1 = (info->base_addr & 0x00ff0000) >> 16; desc->type = (info->read_exec_only ^ 1) << 1; desc->type |= info->contents << 2; /* Set the ACCESS bit so it can be mapped RO */ desc->type |= 1; desc->s = 1; desc->dpl = 0x3; desc->p = info->seg_not_present ^ 1; desc->limit1 = (info->limit & 0xf0000) >> 16; desc->avl = info->useable; desc->d = info->seg_32bit; desc->g = info->limit_in_pages; desc->base2 = (info->base_addr & 0xff000000) >> 24; /* * Don't allow setting of the lm bit. It would confuse * user_64bit_mode and would get overridden by sysret anyway.
*/ desc->l = 0; } struct gdt_page { struct desc_struct gdt[GDT_ENTRIES]; } __attribute__((aligned(PAGE_SIZE))); DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page); /* Provide the original GDT */ static inline struct desc_struct *get_cpu_gdt_rw(unsigned int cpu) { return per_cpu(gdt_page, cpu).gdt; } /* Provide the current original GDT */ static inline struct desc_struct *get_current_gdt_rw(void) { return this_cpu_ptr(&gdt_page)->gdt; } /* Provide the fixmap address of the remapped GDT */ static inline struct desc_struct *get_cpu_gdt_ro(int cpu) { return (struct desc_struct *)&get_cpu_entry_area(cpu)->gdt; } /* Provide the current read-only GDT */ static inline struct desc_struct *get_current_gdt_ro(void) { return get_cpu_gdt_ro(smp_processor_id()); } /* Provide the physical address of the GDT page. */ static inline phys_addr_t get_cpu_gdt_paddr(unsigned int cpu) { return per_cpu_ptr_to_phys(get_cpu_gdt_rw(cpu)); } static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func, unsigned dpl, unsigned ist, unsigned seg) { gate->offset_low = (u16) func; gate->bits.p = 1; gate->bits.dpl = dpl; gate->bits.zero = 0; gate->bits.type = type; gate->offset_middle = (u16) (func >> 16); #ifdef CONFIG_X86_64 gate->segment = __KERNEL_CS; gate->bits.ist = ist; gate->reserved = 0; gate->offset_high = (u32) (func >> 32); #else gate->segment = seg; gate->bits.ist = 0; #endif } static inline int desc_empty(const void *ptr) { const u32 *desc = ptr; return !(desc[0] | desc[1]); } #ifdef CONFIG_PARAVIRT_XXL #include <asm/paravirt.h> #else #define load_TR_desc() native_load_tr_desc() #define load_gdt(dtr) native_load_gdt(dtr) #define load_idt(dtr) native_load_idt(dtr) #define load_tr(tr) asm volatile("ltr %0"::"m" (tr)) #define load_ldt(ldt) asm volatile("lldt %0"::"m" (ldt)) #define store_gdt(dtr) native_store_gdt(dtr) #define store_tr(tr) (tr = native_store_tr()) #define load_TLS(t, cpu) native_load_tls(t, cpu) #define set_ldt native_set_ldt #define write_ldt_entry(dt, entry, desc) native_write_ldt_entry(dt, entry, desc) #define write_gdt_entry(dt, entry, desc, type) native_write_gdt_entry(dt, entry, desc, type) #define write_idt_entry(dt, entry, g) native_write_idt_entry(dt, entry, g) static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries) { } static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries) { } #endif /* CONFIG_PARAVIRT_XXL */ #define store_ldt(ldt) asm("sldt %0" : "=m"(ldt)) static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate) { memcpy(&idt[entry], gate, sizeof(*gate)); } static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc) { memcpy(&ldt[entry], desc, 8); } static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int type) { unsigned int size; switch (type) { case DESC_TSS: size = sizeof(tss_desc); break; case DESC_LDT: size = sizeof(ldt_desc); break; default: size = sizeof(*gdt); break; } memcpy(&gdt[entry], desc, size); } static inline void set_tssldt_descriptor(void *d, unsigned long addr, unsigned type, unsigned size) { struct ldttss_desc *desc = d; memset(desc, 0, sizeof(*desc)); desc->limit0 = (u16) size; desc->base0 = (u16) addr; desc->base1 = (addr >> 16) & 0xFF; desc->type = type; desc->p = 1; desc->limit1 = (size >> 16) & 0xF; desc->base2 = (addr >> 24) & 0xFF; #ifdef CONFIG_X86_64 desc->base3 = (u32) (addr >> 32); #endif } static inline void __set_tss_desc(unsigned cpu, unsigned int entry, struct 
x86_hw_tss *addr) { struct desc_struct *d = get_cpu_gdt_rw(cpu); tss_desc tss; set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS, __KERNEL_TSS_LIMIT); write_gdt_entry(d, entry, &tss, DESC_TSS); } #define set_tss_desc(cpu, addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr) static inline void native_set_ldt(const void *addr, unsigned int entries) { if (likely(entries == 0)) asm volatile("lldt %w0"::"q" (0)); else { unsigned cpu = smp_processor_id(); ldt_desc ldt; set_tssldt_descriptor(&ldt, (unsigned long)addr, DESC_LDT, entries * LDT_ENTRY_SIZE - 1); write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_LDT, &ldt, DESC_LDT); asm volatile("lldt %w0"::"q" (GDT_ENTRY_LDT*8)); } } static inline void native_load_gdt(const struct desc_ptr *dtr) { asm volatile("lgdt %0"::"m" (*dtr)); } static __always_inline void native_load_idt(const struct desc_ptr *dtr) { asm volatile("lidt %0"::"m" (*dtr)); } static inline void native_store_gdt(struct desc_ptr *dtr) { asm volatile("sgdt %0":"=m" (*dtr)); } static inline void store_idt(struct desc_ptr *dtr) { asm volatile("sidt %0":"=m" (*dtr)); } static inline void native_gdt_invalidate(void) { const struct desc_ptr invalid_gdt = { .address = 0, .size = 0 }; native_load_gdt(&invalid_gdt); } static inline void native_idt_invalidate(void) { const struct desc_ptr invalid_idt = { .address = 0, .size = 0 }; native_load_idt(&invalid_idt); } /* * The LTR instruction marks the TSS GDT entry as busy. On 64-bit, the GDT is * a read-only remapping. To prevent a page fault, the GDT is switched to the * original writeable version when needed. */ #ifdef CONFIG_X86_64 static inline void native_load_tr_desc(void) { struct desc_ptr gdt; int cpu = raw_smp_processor_id(); bool restore = 0; struct desc_struct *fixmap_gdt; native_store_gdt(&gdt); fixmap_gdt = get_cpu_gdt_ro(cpu); /* * If the current GDT is the read-only fixmap, swap to the original * writeable version. Swap back at the end. */ if (gdt.address == (unsigned long)fixmap_gdt) { load_direct_gdt(cpu); restore = 1; } asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8)); if (restore) load_fixmap_gdt(cpu); } #else static inline void native_load_tr_desc(void) { asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8)); } #endif static inline unsigned long native_store_tr(void) { unsigned long tr; asm volatile("str %0":"=r" (tr)); return tr; } static inline void native_load_tls(struct thread_struct *t, unsigned int cpu) { struct desc_struct *gdt = get_cpu_gdt_rw(cpu); unsigned int i; for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++) gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]; } DECLARE_PER_CPU(bool, __tss_limit_invalid); static inline void force_reload_TR(void) { struct desc_struct *d = get_current_gdt_rw(); tss_desc tss; memcpy(&tss, &d[GDT_ENTRY_TSS], sizeof(tss_desc)); /* * LTR requires an available TSS, and the TSS is currently * busy. Make it be available so that LTR will work. */ tss.type = DESC_TSS; write_gdt_entry(d, GDT_ENTRY_TSS, &tss, DESC_TSS); load_TR_desc(); this_cpu_write(__tss_limit_invalid, false); } /* * Call this if you need the TSS limit to be correct, which should be the case * if and only if you have TIF_IO_BITMAP set or you're switching to a task * with TIF_IO_BITMAP set. */ static inline void refresh_tss_limit(void) { DEBUG_LOCKS_WARN_ON(preemptible()); if (unlikely(this_cpu_read(__tss_limit_invalid))) force_reload_TR(); } /* * If you do something evil that corrupts the cached TSS limit (I'm looking * at you, VMX exits), call this function. 
* * The optimization here is that the TSS limit only matters for Linux if the * IO bitmap is in use. If the TSS limit gets forced to its minimum value, * everything works except that IO bitmap will be ignored and all CPL 3 IO * instructions will #GP, which is exactly what we want for normal tasks. */ static inline void invalidate_tss_limit(void) { DEBUG_LOCKS_WARN_ON(preemptible()); if (unlikely(test_thread_flag(TIF_IO_BITMAP))) force_reload_TR(); else this_cpu_write(__tss_limit_invalid, true); } /* This intentionally ignores lm, since 32-bit apps don't have that field. */ #define LDT_empty(info) \ ((info)->base_addr == 0 && \ (info)->limit == 0 && \ (info)->contents == 0 && \ (info)->read_exec_only == 1 && \ (info)->seg_32bit == 0 && \ (info)->limit_in_pages == 0 && \ (info)->seg_not_present == 1 && \ (info)->useable == 0) /* Lots of programs expect an all-zero user_desc to mean "no segment at all". */ static inline bool LDT_zero(const struct user_desc *info) { return (info->base_addr == 0 && info->limit == 0 && info->contents == 0 && info->read_exec_only == 0 && info->seg_32bit == 0 && info->limit_in_pages == 0 && info->seg_not_present == 0 && info->useable == 0); } static inline void clear_LDT(void) { set_ldt(NULL, 0); } static inline unsigned long get_desc_base(const struct desc_struct *desc) { return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24)); } static inline void set_desc_base(struct desc_struct *desc, unsigned long base) { desc->base0 = base & 0xffff; desc->base1 = (base >> 16) & 0xff; desc->base2 = (base >> 24) & 0xff; } static inline unsigned long get_desc_limit(const struct desc_struct *desc) { return desc->limit0 | (desc->limit1 << 16); } static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit) { desc->limit0 = limit & 0xffff; desc->limit1 = (limit >> 16) & 0xf; } void alloc_intr_gate(unsigned int n, const void *addr); static inline void init_idt_data(struct idt_data *data, unsigned int n, const void *addr) { BUG_ON(n > 0xFF); memset(data, 0, sizeof(*data)); data->vector = n; data->addr = addr; data->segment = __KERNEL_CS; data->bits.type = GATE_INTERRUPT; data->bits.p = 1; } static inline void idt_init_desc(gate_desc *gate, const struct idt_data *d) { unsigned long addr = (unsigned long) d->addr; gate->offset_low = (u16) addr; gate->segment = (u16) d->segment; gate->bits = d->bits; gate->offset_middle = (u16) (addr >> 16); #ifdef CONFIG_X86_64 gate->offset_high = (u32) (addr >> 32); gate->reserved = 0; #endif } extern unsigned long system_vectors[]; extern void load_current_idt(void); extern void idt_setup_early_handler(void); extern void idt_setup_early_traps(void); extern void idt_setup_traps(void); extern void idt_setup_apic_and_irq_gates(void); extern bool idt_is_f00f_address(unsigned long address); #ifdef CONFIG_X86_64 extern void idt_setup_early_pf(void); #else static inline void idt_setup_early_pf(void) { } #endif extern void idt_invalidate(void); #endif /* _ASM_X86_DESC_H */ |
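As a small aside on the descriptor accessors above: get/set_desc_base() and get/set_desc_limit() scatter a 32-bit base and a 20-bit limit across several fields of the descriptor. The standalone sketch below shows the same packing and unpacking; struct demo_desc is a simplified stand-in with made-up field grouping, not the kernel's struct desc_struct, and the access byte and flag nibble are left as opaque placeholders.

#include <stdio.h>
#include <stdint.h>

/* simplified stand-in for struct desc_struct: only base/limit handling */
struct demo_desc {
	uint16_t limit0;
	uint16_t base0;
	uint8_t  base1;
	uint8_t  access;	/* type/s/dpl/p, unused in this demo */
	uint8_t  limit1_flags;	/* low nibble = limit1, high nibble = avl/d/g/l */
	uint8_t  base2;
};

static void demo_set_base(struct demo_desc *d, uint32_t base)
{
	d->base0 = base & 0xffff;
	d->base1 = (base >> 16) & 0xff;
	d->base2 = (base >> 24) & 0xff;
}

static uint32_t demo_get_base(const struct demo_desc *d)
{
	return d->base0 | ((uint32_t)d->base1 << 16) | ((uint32_t)d->base2 << 24);
}

static void demo_set_limit(struct demo_desc *d, uint32_t limit)
{
	d->limit0 = limit & 0xffff;
	d->limit1_flags = (d->limit1_flags & 0xf0) | ((limit >> 16) & 0xf);
}

static uint32_t demo_get_limit(const struct demo_desc *d)
{
	return d->limit0 | ((uint32_t)(d->limit1_flags & 0xf) << 16);
}

int main(void)
{
	struct demo_desc d = { 0 };

	demo_set_base(&d, 0x12345678);
	demo_set_limit(&d, 0xabcde);
	printf("base=0x%x limit=0x%x\n",
	       (unsigned int)demo_get_base(&d), (unsigned int)demo_get_limit(&d));
	return 0;
}

Round-tripping the values back out confirms nothing is lost as long as the base fits in 32 bits and the limit in 20 bits, which is all the descriptor format can express.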
// SPDX-License-Identifier: GPL-2.0 /****************************************************************************** * usb_ops_linux.c * * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved. * Linux device driver for RTL8192SU * * Modifications for inclusion into the Linux staging tree are * Copyright(c) 2010 Larry Finger. All rights reserved.
* * Contact information: * WLAN FAE <wlanfae@realtek.com> * Larry Finger <Larry.Finger@lwfinger.net> * ******************************************************************************/ #define _HCI_OPS_OS_C_ #include <linux/usb.h> #include "osdep_service.h" #include "drv_types.h" #include "osdep_intf.h" #include "usb_ops.h" #define RTL871X_VENQT_READ 0xc0 #define RTL871X_VENQT_WRITE 0x40 struct zero_bulkout_context { void *pbuf; void *purb; void *pirp; void *padapter; }; uint r8712_usb_init_intf_priv(struct intf_priv *pintfpriv) { pintfpriv->piorw_urb = usb_alloc_urb(0, GFP_ATOMIC); if (!pintfpriv->piorw_urb) return _FAIL; init_completion(&pintfpriv->io_retevt_comp); return _SUCCESS; } void r8712_usb_unload_intf_priv(struct intf_priv *pintfpriv) { if (pintfpriv->piorw_urb) { usb_kill_urb(pintfpriv->piorw_urb); usb_free_urb(pintfpriv->piorw_urb); } } static unsigned int ffaddr2pipehdl(struct dvobj_priv *pdvobj, u32 addr) { unsigned int pipe = 0; struct usb_device *pusbd = pdvobj->pusbdev; if (pdvobj->nr_endpoint == 11) { switch (addr) { case RTL8712_DMA_BKQ: pipe = usb_sndbulkpipe(pusbd, 0x07); break; case RTL8712_DMA_BEQ: pipe = usb_sndbulkpipe(pusbd, 0x06); break; case RTL8712_DMA_VIQ: pipe = usb_sndbulkpipe(pusbd, 0x05); break; case RTL8712_DMA_VOQ: pipe = usb_sndbulkpipe(pusbd, 0x04); break; case RTL8712_DMA_BCNQ: pipe = usb_sndbulkpipe(pusbd, 0x0a); break; case RTL8712_DMA_BMCQ: /* HI Queue */ pipe = usb_sndbulkpipe(pusbd, 0x0b); break; case RTL8712_DMA_MGTQ: pipe = usb_sndbulkpipe(pusbd, 0x0c); break; case RTL8712_DMA_RX0FF: pipe = usb_rcvbulkpipe(pusbd, 0x03); /* in */ break; case RTL8712_DMA_C2HCMD: pipe = usb_rcvbulkpipe(pusbd, 0x09); /* in */ break; case RTL8712_DMA_H2CCMD: pipe = usb_sndbulkpipe(pusbd, 0x0d); break; } } else if (pdvobj->nr_endpoint == 6) { switch (addr) { case RTL8712_DMA_BKQ: pipe = usb_sndbulkpipe(pusbd, 0x07); break; case RTL8712_DMA_BEQ: pipe = usb_sndbulkpipe(pusbd, 0x06); break; case RTL8712_DMA_VIQ: pipe = usb_sndbulkpipe(pusbd, 0x05); break; case RTL8712_DMA_VOQ: pipe = usb_sndbulkpipe(pusbd, 0x04); break; case RTL8712_DMA_RX0FF: case RTL8712_DMA_C2HCMD: pipe = usb_rcvbulkpipe(pusbd, 0x03); /* in */ break; case RTL8712_DMA_H2CCMD: case RTL8712_DMA_BCNQ: case RTL8712_DMA_BMCQ: case RTL8712_DMA_MGTQ: pipe = usb_sndbulkpipe(pusbd, 0x0d); break; } } else if (pdvobj->nr_endpoint == 4) { switch (addr) { case RTL8712_DMA_BEQ: pipe = usb_sndbulkpipe(pusbd, 0x06); break; case RTL8712_DMA_VOQ: pipe = usb_sndbulkpipe(pusbd, 0x04); break; case RTL8712_DMA_RX0FF: case RTL8712_DMA_C2HCMD: pipe = usb_rcvbulkpipe(pusbd, 0x03); /* in */ break; case RTL8712_DMA_H2CCMD: case RTL8712_DMA_BCNQ: case RTL8712_DMA_BMCQ: case RTL8712_DMA_MGTQ: pipe = usb_sndbulkpipe(pusbd, 0x0d); break; } } else { pipe = 0; } return pipe; } static void usb_write_mem_complete(struct urb *purb) { struct io_queue *pio_q = (struct io_queue *)purb->context; struct intf_hdl *pintf = &(pio_q->intf); struct intf_priv *pintfpriv = pintf->pintfpriv; struct _adapter *padapter = (struct _adapter *)pintf->adapter; if (purb->status != 0) { if (purb->status == (-ESHUTDOWN)) padapter->driver_stopped = true; else padapter->surprise_removed = true; } complete(&pintfpriv->io_retevt_comp); } void r8712_usb_write_mem(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem) { unsigned int pipe; struct _adapter *padapter = (struct _adapter *)pintfhdl->adapter; struct intf_priv *pintfpriv = pintfhdl->pintfpriv; struct io_queue *pio_queue = padapter->pio_queue; struct dvobj_priv *pdvobj = (struct dvobj_priv 
*)pintfpriv->intf_dev; struct usb_device *pusbd = pdvobj->pusbdev; struct urb *piorw_urb = pintfpriv->piorw_urb; if ((padapter->driver_stopped) || (padapter->surprise_removed) || (padapter->pwrctrlpriv.pnp_bstop_trx)) return; /* translate DMA FIFO addr to pipehandle */ pipe = ffaddr2pipehdl(pdvobj, addr); if (pipe == 0) return; usb_fill_bulk_urb(piorw_urb, pusbd, pipe, wmem, cnt, usb_write_mem_complete, pio_queue); usb_submit_urb(piorw_urb, GFP_ATOMIC); wait_for_completion_interruptible(&pintfpriv->io_retevt_comp); } static void r8712_usb_read_port_complete(struct urb *purb) { uint isevt; __le32 *pbuf; struct recv_buf *precvbuf = (struct recv_buf *)purb->context; struct _adapter *padapter = (struct _adapter *)precvbuf->adapter; struct recv_priv *precvpriv = &padapter->recvpriv; if (padapter->surprise_removed || padapter->driver_stopped) return; if (purb->status == 0) { /* SUCCESS */ if ((purb->actual_length > (MAX_RECVBUF_SZ)) || (purb->actual_length < RXDESC_SIZE)) { r8712_read_port(padapter, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf); } else { _pkt *pskb = precvbuf->pskb; precvbuf->transfer_len = purb->actual_length; pbuf = (__le32 *)precvbuf->pbuf; isevt = le32_to_cpu(*(pbuf + 1)) & 0x1ff; if ((isevt & 0x1ff) == 0x1ff) { r8712_rxcmd_event_hdl(padapter, pbuf); skb_queue_tail(&precvpriv->rx_skb_queue, pskb); r8712_read_port(padapter, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf); } else { skb_put(pskb, purb->actual_length); skb_queue_tail(&precvpriv->rx_skb_queue, pskb); tasklet_hi_schedule(&precvpriv->recv_tasklet); r8712_read_port(padapter, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf); } } } else { switch (purb->status) { case -EINVAL: case -EPIPE: case -ENODEV: case -ESHUTDOWN: padapter->driver_stopped = true; break; case -ENOENT: if (!padapter->suspended) { padapter->driver_stopped = true; break; } fallthrough; case -EPROTO: r8712_read_port(padapter, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf); break; case -EINPROGRESS: netdev_err(padapter->pnetdev, "ERROR: URB IS IN PROGRESS!\n"); break; default: break; } } } u32 r8712_usb_read_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *rmem) { unsigned int pipe; int err; u32 tmpaddr = 0; int alignment = 0; u32 ret = _SUCCESS; struct urb *purb = NULL; struct recv_buf *precvbuf = (struct recv_buf *)rmem; struct intf_priv *pintfpriv = pintfhdl->pintfpriv; struct dvobj_priv *pdvobj = (struct dvobj_priv *)pintfpriv->intf_dev; struct _adapter *adapter = pdvobj->padapter; struct recv_priv *precvpriv = &adapter->recvpriv; struct usb_device *pusbd = pdvobj->pusbdev; if (adapter->driver_stopped || adapter->surprise_removed || adapter->pwrctrlpriv.pnp_bstop_trx || !precvbuf) return _FAIL; r8712_init_recvbuf(adapter, precvbuf); /* Try to use skb from the free queue */ precvbuf->pskb = skb_dequeue(&precvpriv->free_recv_skb_queue); if (!precvbuf->pskb) { precvbuf->pskb = netdev_alloc_skb(adapter->pnetdev, MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ); if (!precvbuf->pskb) return _FAIL; tmpaddr = (addr_t)precvbuf->pskb->data; alignment = tmpaddr & (RECVBUFF_ALIGN_SZ - 1); skb_reserve(precvbuf->pskb, (RECVBUFF_ALIGN_SZ - alignment)); precvbuf->phead = precvbuf->pskb->head; precvbuf->pdata = precvbuf->pskb->data; precvbuf->ptail = skb_tail_pointer(precvbuf->pskb); precvbuf->pend = skb_end_pointer(precvbuf->pskb); precvbuf->pbuf = precvbuf->pskb->data; } else { /* skb is reused */ precvbuf->phead = precvbuf->pskb->head; precvbuf->pdata = precvbuf->pskb->data; precvbuf->ptail = skb_tail_pointer(precvbuf->pskb); precvbuf->pend = 
skb_end_pointer(precvbuf->pskb); precvbuf->pbuf = precvbuf->pskb->data; } purb = precvbuf->purb; /* translate DMA FIFO addr to pipehandle */ pipe = ffaddr2pipehdl(pdvobj, addr); usb_fill_bulk_urb(purb, pusbd, pipe, precvbuf->pbuf, MAX_RECVBUF_SZ, r8712_usb_read_port_complete, precvbuf); err = usb_submit_urb(purb, GFP_ATOMIC); if ((err) && (err != (-EPERM))) ret = _FAIL; return ret; } void r8712_usb_read_port_cancel(struct _adapter *padapter) { int i; struct recv_buf *precvbuf; precvbuf = (struct recv_buf *)padapter->recvpriv.precv_buf; for (i = 0; i < NR_RECVBUFF; i++) { if (precvbuf->purb) usb_kill_urb(precvbuf->purb); precvbuf++; } } void r8712_xmit_bh(struct tasklet_struct *t) { int ret = false; struct _adapter *padapter = from_tasklet(padapter, t, xmitpriv.xmit_tasklet); struct xmit_priv *pxmitpriv = &padapter->xmitpriv; if (padapter->driver_stopped || padapter->surprise_removed) { netdev_err(padapter->pnetdev, "xmit_bh => driver_stopped or surprise_removed\n"); return; } ret = r8712_xmitframe_complete(padapter, pxmitpriv, NULL); if (!ret) return; tasklet_hi_schedule(&pxmitpriv->xmit_tasklet); } static void usb_write_port_complete(struct urb *purb) { int i; struct xmit_frame *pxmitframe = (struct xmit_frame *)purb->context; struct xmit_buf *pxmitbuf = pxmitframe->pxmitbuf; struct _adapter *padapter = pxmitframe->padapter; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct pkt_attrib *pattrib = &pxmitframe->attrib; switch (pattrib->priority) { case 1: case 2: pxmitpriv->bkq_cnt--; break; case 4: case 5: pxmitpriv->viq_cnt--; break; case 6: case 7: pxmitpriv->voq_cnt--; break; case 0: case 3: default: pxmitpriv->beq_cnt--; break; } pxmitpriv->txirp_cnt--; for (i = 0; i < 8; i++) { if (purb == pxmitframe->pxmit_urb[i]) { pxmitframe->bpending[i] = false; break; } } if (padapter->surprise_removed) return; switch (purb->status) { case 0: break; default: netdev_warn(padapter->pnetdev, "r8712u: pipe error: (%d)\n", purb->status); break; } /* not to consider tx fragment */ r8712_free_xmitframe_ex(pxmitpriv, pxmitframe); r8712_free_xmitbuf(pxmitpriv, pxmitbuf); tasklet_hi_schedule(&pxmitpriv->xmit_tasklet); } u32 r8712_usb_write_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem) { unsigned long irqL; int i, status; unsigned int pipe; u32 ret, bwritezero; struct urb *purb = NULL; struct _adapter *padapter = (struct _adapter *)pintfhdl->adapter; struct dvobj_priv *pdvobj = &padapter->dvobjpriv; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; struct xmit_frame *pxmitframe = (struct xmit_frame *)wmem; struct usb_device *pusbd = pdvobj->pusbdev; struct pkt_attrib *pattrib = &pxmitframe->attrib; if ((padapter->driver_stopped) || (padapter->surprise_removed) || (padapter->pwrctrlpriv.pnp_bstop_trx)) return _FAIL; for (i = 0; i < 8; i++) { if (!pxmitframe->bpending[i]) { spin_lock_irqsave(&pxmitpriv->lock, irqL); pxmitpriv->txirp_cnt++; pxmitframe->bpending[i] = true; switch (pattrib->priority) { case 1: case 2: pxmitpriv->bkq_cnt++; break; case 4: case 5: pxmitpriv->viq_cnt++; break; case 6: case 7: pxmitpriv->voq_cnt++; break; case 0: case 3: default: pxmitpriv->beq_cnt++; break; } spin_unlock_irqrestore(&pxmitpriv->lock, irqL); pxmitframe->sz[i] = (u16)cnt; purb = pxmitframe->pxmit_urb[i]; break; } } bwritezero = false; if (pdvobj->ishighspeed) { if (cnt > 0 && cnt % 512 == 0) bwritezero = true; } else { if (cnt > 0 && cnt % 64 == 0) bwritezero = true; } /* translate DMA FIFO addr to pipehandle */ pipe = ffaddr2pipehdl(pdvobj, addr); if (pxmitpriv->free_xmitbuf_cnt % NR_XMITBUFF 
== 0) purb->transfer_flags &= (~URB_NO_INTERRUPT); else purb->transfer_flags |= URB_NO_INTERRUPT; if (bwritezero) cnt += 8; usb_fill_bulk_urb(purb, pusbd, pipe, pxmitframe->mem_addr, cnt, usb_write_port_complete, pxmitframe); /* context is xmit_frame */ status = usb_submit_urb(purb, GFP_ATOMIC); if (!status) ret = _SUCCESS; else ret = _FAIL; return ret; } void r8712_usb_write_port_cancel(struct _adapter *padapter) { int i, j; struct xmit_buf *pxmitbuf = (struct xmit_buf *) padapter->xmitpriv.pxmitbuf; for (i = 0; i < NR_XMITBUFF; i++) { for (j = 0; j < 8; j++) { if (pxmitbuf->pxmit_urb[j]) usb_kill_urb(pxmitbuf->pxmit_urb[j]); } pxmitbuf++; } } int r8712_usbctrl_vendorreq(struct intf_priv *pintfpriv, u8 request, u16 value, u16 index, void *pdata, u16 len, u8 requesttype) { unsigned int pipe; int status; u8 reqtype; struct dvobj_priv *pdvobjpriv = (struct dvobj_priv *) pintfpriv->intf_dev; struct usb_device *udev = pdvobjpriv->pusbdev; /* For mstar platform, mstar suggests the address for USB IO * should be 16 bytes alignment. Trying to fix it here. */ u8 *palloc_buf, *pIo_buf; palloc_buf = kmalloc((u32)len + 16, GFP_ATOMIC); if (!palloc_buf) return -ENOMEM; pIo_buf = palloc_buf + 16 - ((addr_t)(palloc_buf) & 0x0f); if (requesttype == 0x01) { pipe = usb_rcvctrlpipe(udev, 0); /* read_in */ reqtype = RTL871X_VENQT_READ; } else { pipe = usb_sndctrlpipe(udev, 0); /* write_out */ reqtype = RTL871X_VENQT_WRITE; memcpy(pIo_buf, pdata, len); } status = usb_control_msg(udev, pipe, request, reqtype, value, index, pIo_buf, len, 500); if (status > 0) { /* Success this control transfer. */ if (requesttype == 0x01) { /* For Control read transfer, we have to copy the read * data from pIo_buf to pdata. */ memcpy(pdata, pIo_buf, status); } } kfree(palloc_buf); return status; } |
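/*
 * Illustrative sketch (not driver code): the 16-byte alignment trick used in
 * r8712_usbctrl_vendorreq() above.  A buffer of len + 16 bytes is allocated
 * and the I/O pointer is advanced to the next 16-byte boundary, so the
 * transfer buffer is aligned no matter what the allocator returned.  Written
 * as plain user-space C with malloc() standing in for kmalloc(); the names
 * below are only for demonstration.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

int main(void)
{
	size_t len = 100;
	unsigned char *palloc_buf, *pio_buf;

	palloc_buf = malloc(len + 16);
	if (!palloc_buf)
		return 1;

	/* advance by 1..16 bytes so that the low 4 address bits become zero */
	pio_buf = palloc_buf + 16 - ((uintptr_t)palloc_buf & 0x0f);

	printf("raw %p -> aligned %p\n", (void *)palloc_buf, (void *)pio_buf);
	free(palloc_buf);
	return 0;
}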
// SPDX-License-Identifier: GPL-2.0-only /* * net/sched/sch_choke.c CHOKE scheduler * * Copyright (c) 2011 Stephen Hemminger <shemminger@vyatta.com> * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com> */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <linux/vmalloc.h> #include <net/pkt_sched.h> #include <net/pkt_cls.h> #include <net/inet_ecn.h> #include <net/red.h> #include <net/flow_dissector.h> /* CHOKe stateless AQM for fair bandwidth allocation ================================================= CHOKe (CHOose and Keep for responsive flows, CHOose and Kill for unresponsive flows) is a variant of RED that penalizes misbehaving flows but maintains no flow state. The difference from RED is an additional step during the enqueuing process. If average queue size is over the low threshold (qmin), a packet is chosen at random from the queue. If both the new and chosen packet are from the same flow, both are dropped. Unlike RED, CHOKe is not really a "classful" qdisc because it needs to access packets in queue randomly. It has a minimal class interface to allow overriding the builtin flow classifier with filters. Source: R. Pan, B. Prabhakar, and K. Psounis, "CHOKe, A Stateless Active Queue Management Scheme for Approximating Fair Bandwidth Allocation", IEEE INFOCOM, 2000. A. Tang, J. Wang, S.
Low, "Understanding CHOKe: Throughput and Spatial Characteristics", IEEE/ACM Transactions on Networking, 2004 */ /* Upper bound on size of sk_buff table (packets) */ #define CHOKE_MAX_QUEUE (128*1024 - 1) struct choke_sched_data { /* Parameters */ u32 limit; unsigned char flags; struct red_parms parms; /* Variables */ struct red_vars vars; struct { u32 prob_drop; /* Early probability drops */ u32 prob_mark; /* Early probability marks */ u32 forced_drop; /* Forced drops, qavg > max_thresh */ u32 forced_mark; /* Forced marks, qavg > max_thresh */ u32 pdrop; /* Drops due to queue limits */ u32 other; /* Drops due to drop() calls */ u32 matched; /* Drops to flow match */ } stats; unsigned int head; unsigned int tail; unsigned int tab_mask; /* size - 1 */ struct sk_buff **tab; }; /* number of elements in queue including holes */ static unsigned int choke_len(const struct choke_sched_data *q) { return (q->tail - q->head) & q->tab_mask; } /* Is ECN parameter configured */ static int use_ecn(const struct choke_sched_data *q) { return q->flags & TC_RED_ECN; } /* Should packets over max just be dropped (versus marked) */ static int use_harddrop(const struct choke_sched_data *q) { return q->flags & TC_RED_HARDDROP; } /* Move head pointer forward to skip over holes */ static void choke_zap_head_holes(struct choke_sched_data *q) { do { q->head = (q->head + 1) & q->tab_mask; if (q->head == q->tail) break; } while (q->tab[q->head] == NULL); } /* Move tail pointer backwards to reuse holes */ static void choke_zap_tail_holes(struct choke_sched_data *q) { do { q->tail = (q->tail - 1) & q->tab_mask; if (q->head == q->tail) break; } while (q->tab[q->tail] == NULL); } /* Drop packet from queue array by creating a "hole" */ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx, struct sk_buff **to_free) { struct choke_sched_data *q = qdisc_priv(sch); struct sk_buff *skb = q->tab[idx]; q->tab[idx] = NULL; if (idx == q->head) choke_zap_head_holes(q); if (idx == q->tail) choke_zap_tail_holes(q); --sch->q.qlen; qdisc_qstats_backlog_dec(sch, skb); qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb)); qdisc_drop(skb, sch, to_free); } struct choke_skb_cb { u8 keys_valid; struct flow_keys_digest keys; }; static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb) { qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb)); return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data; } /* * Compare flow of two packets * Returns true only if source and destination address and port match. 
* false for special cases */ static bool choke_match_flow(struct sk_buff *skb1, struct sk_buff *skb2) { struct flow_keys temp; if (skb1->protocol != skb2->protocol) return false; if (!choke_skb_cb(skb1)->keys_valid) { choke_skb_cb(skb1)->keys_valid = 1; skb_flow_dissect_flow_keys(skb1, &temp, 0); make_flow_keys_digest(&choke_skb_cb(skb1)->keys, &temp); } if (!choke_skb_cb(skb2)->keys_valid) { choke_skb_cb(skb2)->keys_valid = 1; skb_flow_dissect_flow_keys(skb2, &temp, 0); make_flow_keys_digest(&choke_skb_cb(skb2)->keys, &temp); } return !memcmp(&choke_skb_cb(skb1)->keys, &choke_skb_cb(skb2)->keys, sizeof(choke_skb_cb(skb1)->keys)); } /* * Select a packet at random from queue * HACK: since queue can have holes from previous deletion; retry several * times to find a random skb but then just give up and return the head * Will return NULL if queue is empty (q->head == q->tail) */ static struct sk_buff *choke_peek_random(const struct choke_sched_data *q, unsigned int *pidx) { struct sk_buff *skb; int retrys = 3; do { *pidx = (q->head + prandom_u32_max(choke_len(q))) & q->tab_mask; skb = q->tab[*pidx]; if (skb) return skb; } while (--retrys > 0); return q->tab[*pidx = q->head]; } /* * Compare new packet with random packet in queue * returns true if matched and sets *pidx */ static bool choke_match_random(const struct choke_sched_data *q, struct sk_buff *nskb, unsigned int *pidx) { struct sk_buff *oskb; if (q->head == q->tail) return false; oskb = choke_peek_random(q, pidx); return choke_match_flow(oskb, nskb); } static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct choke_sched_data *q = qdisc_priv(sch); const struct red_parms *p = &q->parms; choke_skb_cb(skb)->keys_valid = 0; /* Compute average queue usage (see RED) */ q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen); if (red_is_idling(&q->vars)) red_end_of_idle_period(&q->vars); /* Is queue small? 
*/ if (q->vars.qavg <= p->qth_min) q->vars.qcount = -1; else { unsigned int idx; /* Draw a packet at random from queue and compare flow */ if (choke_match_random(q, skb, &idx)) { q->stats.matched++; choke_drop_by_idx(sch, idx, to_free); goto congestion_drop; } /* Queue is large, always mark/drop */ if (q->vars.qavg > p->qth_max) { q->vars.qcount = -1; qdisc_qstats_overlimit(sch); if (use_harddrop(q) || !use_ecn(q) || !INET_ECN_set_ce(skb)) { q->stats.forced_drop++; goto congestion_drop; } q->stats.forced_mark++; } else if (++q->vars.qcount) { if (red_mark_probability(p, &q->vars, q->vars.qavg)) { q->vars.qcount = 0; q->vars.qR = red_random(p); qdisc_qstats_overlimit(sch); if (!use_ecn(q) || !INET_ECN_set_ce(skb)) { q->stats.prob_drop++; goto congestion_drop; } q->stats.prob_mark++; } } else q->vars.qR = red_random(p); } /* Admit new packet */ if (sch->q.qlen < q->limit) { q->tab[q->tail] = skb; q->tail = (q->tail + 1) & q->tab_mask; ++sch->q.qlen; qdisc_qstats_backlog_inc(sch, skb); return NET_XMIT_SUCCESS; } q->stats.pdrop++; return qdisc_drop(skb, sch, to_free); congestion_drop: qdisc_drop(skb, sch, to_free); return NET_XMIT_CN; } static struct sk_buff *choke_dequeue(struct Qdisc *sch) { struct choke_sched_data *q = qdisc_priv(sch); struct sk_buff *skb; if (q->head == q->tail) { if (!red_is_idling(&q->vars)) red_start_of_idle_period(&q->vars); return NULL; } skb = q->tab[q->head]; q->tab[q->head] = NULL; choke_zap_head_holes(q); --sch->q.qlen; qdisc_qstats_backlog_dec(sch, skb); qdisc_bstats_update(sch, skb); return skb; } static void choke_reset(struct Qdisc *sch) { struct choke_sched_data *q = qdisc_priv(sch); while (q->head != q->tail) { struct sk_buff *skb = q->tab[q->head]; q->head = (q->head + 1) & q->tab_mask; if (!skb) continue; rtnl_qdisc_drop(skb, sch); } if (q->tab) memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *)); q->head = q->tail = 0; red_restart(&q->vars); } static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = { [TCA_CHOKE_PARMS] = { .len = sizeof(struct tc_red_qopt) }, [TCA_CHOKE_STAB] = { .len = RED_STAB_SIZE }, [TCA_CHOKE_MAX_P] = { .type = NLA_U32 }, }; static void choke_free(void *addr) { kvfree(addr); } static int choke_change(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { struct choke_sched_data *q = qdisc_priv(sch); struct nlattr *tb[TCA_CHOKE_MAX + 1]; const struct tc_red_qopt *ctl; int err; struct sk_buff **old = NULL; unsigned int mask; u32 max_P; u8 *stab; if (opt == NULL) return -EINVAL; err = nla_parse_nested_deprecated(tb, TCA_CHOKE_MAX, opt, choke_policy, NULL); if (err < 0) return err; if (tb[TCA_CHOKE_PARMS] == NULL || tb[TCA_CHOKE_STAB] == NULL) return -EINVAL; max_P = tb[TCA_CHOKE_MAX_P] ? 
nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0; ctl = nla_data(tb[TCA_CHOKE_PARMS]); stab = nla_data(tb[TCA_CHOKE_STAB]); if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab)) return -EINVAL; if (ctl->limit > CHOKE_MAX_QUEUE) return -EINVAL; mask = roundup_pow_of_two(ctl->limit + 1) - 1; if (mask != q->tab_mask) { struct sk_buff **ntab; ntab = kvcalloc(mask + 1, sizeof(struct sk_buff *), GFP_KERNEL); if (!ntab) return -ENOMEM; sch_tree_lock(sch); old = q->tab; if (old) { unsigned int oqlen = sch->q.qlen, tail = 0; unsigned dropped = 0; while (q->head != q->tail) { struct sk_buff *skb = q->tab[q->head]; q->head = (q->head + 1) & q->tab_mask; if (!skb) continue; if (tail < mask) { ntab[tail++] = skb; continue; } dropped += qdisc_pkt_len(skb); qdisc_qstats_backlog_dec(sch, skb); --sch->q.qlen; rtnl_qdisc_drop(skb, sch); } qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped); q->head = 0; q->tail = tail; } q->tab_mask = mask; q->tab = ntab; } else sch_tree_lock(sch); q->flags = ctl->flags; q->limit = ctl->limit; red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog, ctl->Scell_log, stab, max_P); red_set_vars(&q->vars); if (q->head == q->tail) red_end_of_idle_period(&q->vars); sch_tree_unlock(sch); choke_free(old); return 0; } static int choke_init(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { return choke_change(sch, opt, extack); } static int choke_dump(struct Qdisc *sch, struct sk_buff *skb) { struct choke_sched_data *q = qdisc_priv(sch); struct nlattr *opts = NULL; struct tc_red_qopt opt = { .limit = q->limit, .flags = q->flags, .qth_min = q->parms.qth_min >> q->parms.Wlog, .qth_max = q->parms.qth_max >> q->parms.Wlog, .Wlog = q->parms.Wlog, .Plog = q->parms.Plog, .Scell_log = q->parms.Scell_log, }; opts = nla_nest_start_noflag(skb, TCA_OPTIONS); if (opts == NULL) goto nla_put_failure; if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) || nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P)) goto nla_put_failure; return nla_nest_end(skb, opts); nla_put_failure: nla_nest_cancel(skb, opts); return -EMSGSIZE; } static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d) { struct choke_sched_data *q = qdisc_priv(sch); struct tc_choke_xstats st = { .early = q->stats.prob_drop + q->stats.forced_drop, .marked = q->stats.prob_mark + q->stats.forced_mark, .pdrop = q->stats.pdrop, .other = q->stats.other, .matched = q->stats.matched, }; return gnet_stats_copy_app(d, &st, sizeof(st)); } static void choke_destroy(struct Qdisc *sch) { struct choke_sched_data *q = qdisc_priv(sch); choke_free(q->tab); } static struct sk_buff *choke_peek_head(struct Qdisc *sch) { struct choke_sched_data *q = qdisc_priv(sch); return (q->head != q->tail) ? q->tab[q->head] : NULL; } static struct Qdisc_ops choke_qdisc_ops __read_mostly = { .id = "choke", .priv_size = sizeof(struct choke_sched_data), .enqueue = choke_enqueue, .dequeue = choke_dequeue, .peek = choke_peek_head, .init = choke_init, .destroy = choke_destroy, .reset = choke_reset, .change = choke_change, .dump = choke_dump, .dump_stats = choke_dump_stats, .owner = THIS_MODULE, }; static int __init choke_module_init(void) { return register_qdisc(&choke_qdisc_ops); } static void __exit choke_module_exit(void) { unregister_qdisc(&choke_qdisc_ops); } module_init(choke_module_init) module_exit(choke_module_exit) MODULE_LICENSE("GPL"); |
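/*
 * Illustrative sketch (not qdisc code): the power-of-two ring buffer index
 * arithmetic used throughout sch_choke above.  With tab_mask = size - 1,
 * advancing head/tail and computing the occupancy in choke_len() both reduce
 * to a bitwise AND, which stays correct across wrap-around.  The table size
 * of 8 and the variable names are just for demonstration.
 */
#include <stdio.h>

int main(void)
{
	unsigned int tab_mask = 8 - 1;	/* table size must be a power of two */
	unsigned int head = 6, tail = 6;
	int i;

	/* enqueue three entries; tail wraps from 7 back to 0 and then 1 */
	for (i = 0; i < 3; i++)
		tail = (tail + 1) & tab_mask;

	/* occupancy as in choke_len(): (tail - head) & tab_mask == 3 */
	printf("len = %u\n", (tail - head) & tab_mask);
	return 0;
}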
// SPDX-License-Identifier: GPL-2.0 /* * platform.c - platform 'pseudo' bus for legacy devices * * Copyright (c) 2002-3 Patrick Mochel * Copyright (c) 2002-3 Open Source Development Labs * * Please see Documentation/driver-api/driver-model/platform.rst for more * information.
*/ #include <linux/string.h> #include <linux/platform_device.h> #include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/dma-mapping.h> #include <linux/memblock.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/pm_runtime.h> #include <linux/pm_domain.h> #include <linux/idr.h> #include <linux/acpi.h> #include <linux/clk/clk-conf.h> #include <linux/limits.h> #include <linux/property.h> #include <linux/kmemleak.h> #include <linux/types.h> #include "base.h" #include "power/power.h" /* For automatically allocated device IDs */ static DEFINE_IDA(platform_devid_ida); struct device platform_bus = { .init_name = "platform", }; EXPORT_SYMBOL_GPL(platform_bus); /** * platform_get_resource - get a resource for a device * @dev: platform device * @type: resource type * @num: resource index * * Return: a pointer to the resource or NULL on failure. */ struct resource *platform_get_resource(struct platform_device *dev, unsigned int type, unsigned int num) { u32 i; for (i = 0; i < dev->num_resources; i++) { struct resource *r = &dev->resource[i]; if (type == resource_type(r) && num-- == 0) return r; } return NULL; } EXPORT_SYMBOL_GPL(platform_get_resource); struct resource *platform_get_mem_or_io(struct platform_device *dev, unsigned int num) { u32 i; for (i = 0; i < dev->num_resources; i++) { struct resource *r = &dev->resource[i]; if ((resource_type(r) & (IORESOURCE_MEM|IORESOURCE_IO)) && num-- == 0) return r; } return NULL; } EXPORT_SYMBOL_GPL(platform_get_mem_or_io); #ifdef CONFIG_HAS_IOMEM /** * devm_platform_get_and_ioremap_resource - call devm_ioremap_resource() for a * platform device and get resource * * @pdev: platform device to use both for memory resource lookup as well as * resource management * @index: resource index * @res: optional output parameter to store a pointer to the obtained resource. * * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code * on failure. */ void __iomem * devm_platform_get_and_ioremap_resource(struct platform_device *pdev, unsigned int index, struct resource **res) { struct resource *r; r = platform_get_resource(pdev, IORESOURCE_MEM, index); if (res) *res = r; return devm_ioremap_resource(&pdev->dev, r); } EXPORT_SYMBOL_GPL(devm_platform_get_and_ioremap_resource); /** * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform * device * * @pdev: platform device to use both for memory resource lookup as well as * resource management * @index: resource index * * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code * on failure. */ void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev, unsigned int index) { return devm_platform_get_and_ioremap_resource(pdev, index, NULL); } EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource); /** * devm_platform_ioremap_resource_byname - call devm_ioremap_resource for * a platform device, retrieve the * resource by name * * @pdev: platform device to use both for memory resource lookup as well as * resource management * @name: name of the resource * * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code * on failure. 
*/ void __iomem * devm_platform_ioremap_resource_byname(struct platform_device *pdev, const char *name) { struct resource *res; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); return devm_ioremap_resource(&pdev->dev, res); } EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname); #endif /* CONFIG_HAS_IOMEM */ /** * platform_get_irq_optional - get an optional IRQ for a device * @dev: platform device * @num: IRQ number index * * Gets an IRQ for a platform device. Device drivers should check the return * value for errors so as to not pass a negative integer value to the * request_irq() APIs. This is the same as platform_get_irq(), except that it * does not print an error message if an IRQ can not be obtained. * * For example:: * * int irq = platform_get_irq_optional(pdev, 0); * if (irq < 0) * return irq; * * Return: non-zero IRQ number on success, negative error number on failure. */ int platform_get_irq_optional(struct platform_device *dev, unsigned int num) { int ret; #ifdef CONFIG_SPARC /* sparc does not have irqs represented as IORESOURCE_IRQ resources */ if (!dev || num >= dev->archdata.num_irqs) goto out_not_found; ret = dev->archdata.irqs[num]; goto out; #else struct resource *r; if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) { ret = of_irq_get(dev->dev.of_node, num); if (ret > 0 || ret == -EPROBE_DEFER) goto out; } r = platform_get_resource(dev, IORESOURCE_IRQ, num); if (has_acpi_companion(&dev->dev)) { if (r && r->flags & IORESOURCE_DISABLED) { ret = acpi_irq_get(ACPI_HANDLE(&dev->dev), num, r); if (ret) goto out; } } /* * The resources may pass trigger flags to the irqs that need * to be set up. It so happens that the trigger flags for * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER* * settings. */ if (r && r->flags & IORESOURCE_BITS) { struct irq_data *irqd; irqd = irq_get_irq_data(r->start); if (!irqd) goto out_not_found; irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS); } if (r) { ret = r->start; goto out; } /* * For the index 0 interrupt, allow falling back to GpioInt * resources. While a device could have both Interrupt and GpioInt * resources, making this fallback ambiguous, in many common cases * the device will only expose one IRQ, and this fallback * allows a common code path across either kind of resource. */ if (num == 0 && has_acpi_companion(&dev->dev)) { ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num); /* Our callers expect -ENXIO for missing IRQs. */ if (ret >= 0 || ret == -EPROBE_DEFER) goto out; } #endif out_not_found: ret = -ENXIO; out: WARN(ret == 0, "0 is an invalid IRQ number\n"); return ret; } EXPORT_SYMBOL_GPL(platform_get_irq_optional); /** * platform_get_irq - get an IRQ for a device * @dev: platform device * @num: IRQ number index * * Gets an IRQ for a platform device and prints an error message if finding the * IRQ fails. Device drivers should check the return value for errors so as to * not pass a negative integer value to the request_irq() APIs. * * For example:: * * int irq = platform_get_irq(pdev, 0); * if (irq < 0) * return irq; * * Return: non-zero IRQ number on success, negative error number on failure. 
*/ int platform_get_irq(struct platform_device *dev, unsigned int num) { int ret; ret = platform_get_irq_optional(dev, num); if (ret < 0 && ret != -EPROBE_DEFER) dev_err(&dev->dev, "IRQ index %u not found\n", num); return ret; } EXPORT_SYMBOL_GPL(platform_get_irq); /** * platform_irq_count - Count the number of IRQs a platform device uses * @dev: platform device * * Return: Number of IRQs a platform device uses or EPROBE_DEFER */ int platform_irq_count(struct platform_device *dev) { int ret, nr = 0; while ((ret = platform_get_irq_optional(dev, nr)) >= 0) nr++; if (ret == -EPROBE_DEFER) return ret; return nr; } EXPORT_SYMBOL_GPL(platform_irq_count); struct irq_affinity_devres { unsigned int count; unsigned int irq[]; }; static void platform_disable_acpi_irq(struct platform_device *pdev, int index) { struct resource *r; r = platform_get_resource(pdev, IORESOURCE_IRQ, index); if (r) irqresource_disabled(r, 0); } static void devm_platform_get_irqs_affinity_release(struct device *dev, void *res) { struct irq_affinity_devres *ptr = res; int i; for (i = 0; i < ptr->count; i++) { irq_dispose_mapping(ptr->irq[i]); if (has_acpi_companion(dev)) platform_disable_acpi_irq(to_platform_device(dev), i); } } /** * devm_platform_get_irqs_affinity - devm method to get a set of IRQs for a * device using an interrupt affinity descriptor * @dev: platform device pointer * @affd: affinity descriptor * @minvec: minimum count of interrupt vectors * @maxvec: maximum count of interrupt vectors * @irqs: pointer holder for IRQ numbers * * Gets a set of IRQs for a platform device, and updates IRQ afffinty according * to the passed affinity descriptor * * Return: Number of vectors on success, negative error number on failure. */ int devm_platform_get_irqs_affinity(struct platform_device *dev, struct irq_affinity *affd, unsigned int minvec, unsigned int maxvec, int **irqs) { struct irq_affinity_devres *ptr; struct irq_affinity_desc *desc; size_t size; int i, ret, nvec; if (!affd) return -EPERM; if (maxvec < minvec) return -ERANGE; nvec = platform_irq_count(dev); if (nvec < 0) return nvec; if (nvec < minvec) return -ENOSPC; nvec = irq_calc_affinity_vectors(minvec, nvec, affd); if (nvec < minvec) return -ENOSPC; if (nvec > maxvec) nvec = maxvec; size = sizeof(*ptr) + sizeof(unsigned int) * nvec; ptr = devres_alloc(devm_platform_get_irqs_affinity_release, size, GFP_KERNEL); if (!ptr) return -ENOMEM; ptr->count = nvec; for (i = 0; i < nvec; i++) { int irq = platform_get_irq(dev, i); if (irq < 0) { ret = irq; goto err_free_devres; } ptr->irq[i] = irq; } desc = irq_create_affinity_masks(nvec, affd); if (!desc) { ret = -ENOMEM; goto err_free_devres; } for (i = 0; i < nvec; i++) { ret = irq_update_affinity_desc(ptr->irq[i], &desc[i]); if (ret) { dev_err(&dev->dev, "failed to update irq%d affinity descriptor (%d)\n", ptr->irq[i], ret); goto err_free_desc; } } devres_add(&dev->dev, ptr); kfree(desc); *irqs = ptr->irq; return nvec; err_free_desc: kfree(desc); err_free_devres: devres_free(ptr); return ret; } EXPORT_SYMBOL_GPL(devm_platform_get_irqs_affinity); /** * platform_get_resource_byname - get a resource for a device by name * @dev: platform device * @type: resource type * @name: resource name */ struct resource *platform_get_resource_byname(struct platform_device *dev, unsigned int type, const char *name) { u32 i; for (i = 0; i < dev->num_resources; i++) { struct resource *r = &dev->resource[i]; if (unlikely(!r->name)) continue; if (type == resource_type(r) && !strcmp(r->name, name)) return r; } return NULL; } 
EXPORT_SYMBOL_GPL(platform_get_resource_byname); static int __platform_get_irq_byname(struct platform_device *dev, const char *name) { struct resource *r; int ret; if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) { ret = of_irq_get_byname(dev->dev.of_node, name); if (ret > 0 || ret == -EPROBE_DEFER) return ret; } r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name); if (r) { WARN(r->start == 0, "0 is an invalid IRQ number\n"); return r->start; } return -ENXIO; } /** * platform_get_irq_byname - get an IRQ for a device by name * @dev: platform device * @name: IRQ name * * Get an IRQ like platform_get_irq(), but then by name rather then by index. * * Return: non-zero IRQ number on success, negative error number on failure. */ int platform_get_irq_byname(struct platform_device *dev, const char *name) { int ret; ret = __platform_get_irq_byname(dev, name); if (ret < 0 && ret != -EPROBE_DEFER) dev_err(&dev->dev, "IRQ %s not found\n", name); return ret; } EXPORT_SYMBOL_GPL(platform_get_irq_byname); /** * platform_get_irq_byname_optional - get an optional IRQ for a device by name * @dev: platform device * @name: IRQ name * * Get an optional IRQ by name like platform_get_irq_byname(). Except that it * does not print an error message if an IRQ can not be obtained. * * Return: non-zero IRQ number on success, negative error number on failure. */ int platform_get_irq_byname_optional(struct platform_device *dev, const char *name) { return __platform_get_irq_byname(dev, name); } EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional); /** * platform_add_devices - add a numbers of platform devices * @devs: array of platform devices to add * @num: number of platform devices in array */ int platform_add_devices(struct platform_device **devs, int num) { int i, ret = 0; for (i = 0; i < num; i++) { ret = platform_device_register(devs[i]); if (ret) { while (--i >= 0) platform_device_unregister(devs[i]); break; } } return ret; } EXPORT_SYMBOL_GPL(platform_add_devices); struct platform_object { struct platform_device pdev; char name[]; }; /* * Set up default DMA mask for platform devices if the they weren't * previously set by the architecture / DT. */ static void setup_pdev_dma_masks(struct platform_device *pdev) { pdev->dev.dma_parms = &pdev->dma_parms; if (!pdev->dev.coherent_dma_mask) pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); if (!pdev->dev.dma_mask) { pdev->platform_dma_mask = DMA_BIT_MASK(32); pdev->dev.dma_mask = &pdev->platform_dma_mask; } }; /** * platform_device_put - destroy a platform device * @pdev: platform device to free * * Free all memory associated with a platform device. This function must * _only_ be externally called in error cases. All other usage is a bug. */ void platform_device_put(struct platform_device *pdev) { if (!IS_ERR_OR_NULL(pdev)) put_device(&pdev->dev); } EXPORT_SYMBOL_GPL(platform_device_put); static void platform_device_release(struct device *dev) { struct platform_object *pa = container_of(dev, struct platform_object, pdev.dev); of_node_put(pa->pdev.dev.of_node); kfree(pa->pdev.dev.platform_data); kfree(pa->pdev.mfd_cell); kfree(pa->pdev.resource); kfree(pa->pdev.driver_override); kfree(pa); } /** * platform_device_alloc - create a platform device * @name: base name of the device we're adding * @id: instance id * * Create a platform device object which can have other objects attached * to it, and which will have attached objects freed when it is released. 
*/ struct platform_device *platform_device_alloc(const char *name, int id) { struct platform_object *pa; pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL); if (pa) { strcpy(pa->name, name); pa->pdev.name = pa->name; pa->pdev.id = id; device_initialize(&pa->pdev.dev); pa->pdev.dev.release = platform_device_release; setup_pdev_dma_masks(&pa->pdev); } return pa ? &pa->pdev : NULL; } EXPORT_SYMBOL_GPL(platform_device_alloc); /** * platform_device_add_resources - add resources to a platform device * @pdev: platform device allocated by platform_device_alloc to add resources to * @res: set of resources that needs to be allocated for the device * @num: number of resources * * Add a copy of the resources to the platform device. The memory * associated with the resources will be freed when the platform device is * released. */ int platform_device_add_resources(struct platform_device *pdev, const struct resource *res, unsigned int num) { struct resource *r = NULL; if (res) { r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL); if (!r) return -ENOMEM; } kfree(pdev->resource); pdev->resource = r; pdev->num_resources = num; return 0; } EXPORT_SYMBOL_GPL(platform_device_add_resources); /** * platform_device_add_data - add platform-specific data to a platform device * @pdev: platform device allocated by platform_device_alloc to add resources to * @data: platform specific data for this platform device * @size: size of platform specific data * * Add a copy of platform specific data to the platform device's * platform_data pointer. The memory associated with the platform data * will be freed when the platform device is released. */ int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size) { void *d = NULL; if (data) { d = kmemdup(data, size, GFP_KERNEL); if (!d) return -ENOMEM; } kfree(pdev->dev.platform_data); pdev->dev.platform_data = d; return 0; } EXPORT_SYMBOL_GPL(platform_device_add_data); /** * platform_device_add - add a platform device to device hierarchy * @pdev: platform device we're adding * * This is part 2 of platform_device_register(), though may be called * separately _iff_ pdev was allocated by platform_device_alloc(). */ int platform_device_add(struct platform_device *pdev) { u32 i; int ret; if (!pdev) return -EINVAL; if (!pdev->dev.parent) pdev->dev.parent = &platform_bus; pdev->dev.bus = &platform_bus_type; switch (pdev->id) { default: dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id); break; case PLATFORM_DEVID_NONE: dev_set_name(&pdev->dev, "%s", pdev->name); break; case PLATFORM_DEVID_AUTO: /* * Automatically allocated device ID. We mark it as such so * that we remember it must be freed, and we append a suffix * to avoid namespace collision with explicit IDs. */ ret = ida_alloc(&platform_devid_ida, GFP_KERNEL); if (ret < 0) goto err_out; pdev->id = ret; pdev->id_auto = true; dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id); break; } for (i = 0; i < pdev->num_resources; i++) { struct resource *p, *r = &pdev->resource[i]; if (r->name == NULL) r->name = dev_name(&pdev->dev); p = r->parent; if (!p) { if (resource_type(r) == IORESOURCE_MEM) p = &iomem_resource; else if (resource_type(r) == IORESOURCE_IO) p = &ioport_resource; } if (p) { ret = insert_resource(p, r); if (ret) { dev_err(&pdev->dev, "failed to claim resource %d: %pR\n", i, r); goto failed; } } } pr_debug("Registering platform device '%s'. 
Parent at %s\n", dev_name(&pdev->dev), dev_name(pdev->dev.parent)); ret = device_add(&pdev->dev); if (ret == 0) return ret; failed: if (pdev->id_auto) { ida_free(&platform_devid_ida, pdev->id); pdev->id = PLATFORM_DEVID_AUTO; } while (i--) { struct resource *r = &pdev->resource[i]; if (r->parent) release_resource(r); } err_out: return ret; } EXPORT_SYMBOL_GPL(platform_device_add); /** * platform_device_del - remove a platform-level device * @pdev: platform device we're removing * * Note that this function will also release all memory- and port-based * resources owned by the device (@dev->resource). This function must * _only_ be externally called in error cases. All other usage is a bug. */ void platform_device_del(struct platform_device *pdev) { u32 i; if (!IS_ERR_OR_NULL(pdev)) { device_del(&pdev->dev); if (pdev->id_auto) { ida_free(&platform_devid_ida, pdev->id); pdev->id = PLATFORM_DEVID_AUTO; } for (i = 0; i < pdev->num_resources; i++) { struct resource *r = &pdev->resource[i]; if (r->parent) release_resource(r); } } } EXPORT_SYMBOL_GPL(platform_device_del); /** * platform_device_register - add a platform-level device * @pdev: platform device we're adding */ int platform_device_register(struct platform_device *pdev) { device_initialize(&pdev->dev); setup_pdev_dma_masks(pdev); return platform_device_add(pdev); } EXPORT_SYMBOL_GPL(platform_device_register); /** * platform_device_unregister - unregister a platform-level device * @pdev: platform device we're unregistering * * Unregistration is done in 2 steps. First we release all resources * and remove it from the subsystem, then we drop reference count by * calling platform_device_put(). */ void platform_device_unregister(struct platform_device *pdev) { platform_device_del(pdev); platform_device_put(pdev); } EXPORT_SYMBOL_GPL(platform_device_unregister); /** * platform_device_register_full - add a platform-level device with * resources and platform-specific data * * @pdevinfo: data used to create device * * Returns &struct platform_device pointer on success, or ERR_PTR() on error. 
*/ struct platform_device *platform_device_register_full( const struct platform_device_info *pdevinfo) { int ret; struct platform_device *pdev; pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id); if (!pdev) return ERR_PTR(-ENOMEM); pdev->dev.parent = pdevinfo->parent; pdev->dev.fwnode = pdevinfo->fwnode; pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode)); pdev->dev.of_node_reused = pdevinfo->of_node_reused; if (pdevinfo->dma_mask) { pdev->platform_dma_mask = pdevinfo->dma_mask; pdev->dev.dma_mask = &pdev->platform_dma_mask; pdev->dev.coherent_dma_mask = pdevinfo->dma_mask; } ret = platform_device_add_resources(pdev, pdevinfo->res, pdevinfo->num_res); if (ret) goto err; ret = platform_device_add_data(pdev, pdevinfo->data, pdevinfo->size_data); if (ret) goto err; if (pdevinfo->properties) { ret = device_create_managed_software_node(&pdev->dev, pdevinfo->properties, NULL); if (ret) goto err; } ret = platform_device_add(pdev); if (ret) { err: ACPI_COMPANION_SET(&pdev->dev, NULL); platform_device_put(pdev); return ERR_PTR(ret); } return pdev; } EXPORT_SYMBOL_GPL(platform_device_register_full); /** * __platform_driver_register - register a driver for platform-level devices * @drv: platform driver structure * @owner: owning module/driver */ int __platform_driver_register(struct platform_driver *drv, struct module *owner) { drv->driver.owner = owner; drv->driver.bus = &platform_bus_type; return driver_register(&drv->driver); } EXPORT_SYMBOL_GPL(__platform_driver_register); /** * platform_driver_unregister - unregister a driver for platform-level devices * @drv: platform driver structure */ void platform_driver_unregister(struct platform_driver *drv) { driver_unregister(&drv->driver); } EXPORT_SYMBOL_GPL(platform_driver_unregister); static int platform_probe_fail(struct platform_device *pdev) { return -ENXIO; } /** * __platform_driver_probe - register driver for non-hotpluggable device * @drv: platform driver structure * @probe: the driver probe routine, probably from an __init section * @module: module which will be the owner of the driver * * Use this instead of platform_driver_register() when you know the device * is not hotpluggable and has already been registered, and you want to * remove its run-once probe() infrastructure from memory after the driver * has bound to the device. * * One typical use for this would be with drivers for controllers integrated * into system-on-chip processors, where the controller devices have been * configured as part of board setup. * * Note that this is incompatible with deferred probing. * * Returns zero if the driver registered and bound to a device, else returns * a negative error code and with the driver not registered. */ int __init_or_module __platform_driver_probe(struct platform_driver *drv, int (*probe)(struct platform_device *), struct module *module) { int retval, code; if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) { pr_err("%s: drivers registered with %s can not be probed asynchronously\n", drv->driver.name, __func__); return -EINVAL; } /* * We have to run our probes synchronously because we check if * we find any devices to bind to and exit with error if there * are any. */ drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS; /* * Prevent driver from requesting probe deferral to avoid further * futile probe attempts. 
*/ drv->prevent_deferred_probe = true; /* make sure driver won't have bind/unbind attributes */ drv->driver.suppress_bind_attrs = true; /* temporary section violation during probe() */ drv->probe = probe; retval = code = __platform_driver_register(drv, module); if (retval) return retval; /* * Fixup that section violation, being paranoid about code scanning * the list of drivers in order to probe new devices. Check to see * if the probe was successful, and make sure any forced probes of * new devices fail. */ spin_lock(&drv->driver.bus->p->klist_drivers.k_lock); drv->probe = platform_probe_fail; if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list)) retval = -ENODEV; spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock); if (code != retval) platform_driver_unregister(drv); return retval; } EXPORT_SYMBOL_GPL(__platform_driver_probe); /** * __platform_create_bundle - register driver and create corresponding device * @driver: platform driver structure * @probe: the driver probe routine, probably from an __init section * @res: set of resources that needs to be allocated for the device * @n_res: number of resources * @data: platform specific data for this platform device * @size: size of platform specific data * @module: module which will be the owner of the driver * * Use this in legacy-style modules that probe hardware directly and * register a single platform device and corresponding platform driver. * * Returns &struct platform_device pointer on success, or ERR_PTR() on error. */ struct platform_device * __init_or_module __platform_create_bundle( struct platform_driver *driver, int (*probe)(struct platform_device *), struct resource *res, unsigned int n_res, const void *data, size_t size, struct module *module) { struct platform_device *pdev; int error; pdev = platform_device_alloc(driver->driver.name, -1); if (!pdev) { error = -ENOMEM; goto err_out; } error = platform_device_add_resources(pdev, res, n_res); if (error) goto err_pdev_put; error = platform_device_add_data(pdev, data, size); if (error) goto err_pdev_put; error = platform_device_add(pdev); if (error) goto err_pdev_put; error = __platform_driver_probe(driver, probe, module); if (error) goto err_pdev_del; return pdev; err_pdev_del: platform_device_del(pdev); err_pdev_put: platform_device_put(pdev); err_out: return ERR_PTR(error); } EXPORT_SYMBOL_GPL(__platform_create_bundle); /** * __platform_register_drivers - register an array of platform drivers * @drivers: an array of drivers to register * @count: the number of drivers to register * @owner: module owning the drivers * * Registers platform drivers specified by an array. On failure to register a * driver, all previously registered drivers will be unregistered. Callers of * this API should use platform_unregister_drivers() to unregister drivers in * the reverse order. * * Returns: 0 on success or a negative error code on failure. 
*/ int __platform_register_drivers(struct platform_driver * const *drivers, unsigned int count, struct module *owner) { unsigned int i; int err; for (i = 0; i < count; i++) { pr_debug("registering platform driver %ps\n", drivers[i]); err = __platform_driver_register(drivers[i], owner); if (err < 0) { pr_err("failed to register platform driver %ps: %d\n", drivers[i], err); goto error; } } return 0; error: while (i--) { pr_debug("unregistering platform driver %ps\n", drivers[i]); platform_driver_unregister(drivers[i]); } return err; } EXPORT_SYMBOL_GPL(__platform_register_drivers); /** * platform_unregister_drivers - unregister an array of platform drivers * @drivers: an array of drivers to unregister * @count: the number of drivers to unregister * * Unregisters platform drivers specified by an array. This is typically used * to complement an earlier call to platform_register_drivers(). Drivers are * unregistered in the reverse order in which they were registered. */ void platform_unregister_drivers(struct platform_driver * const *drivers, unsigned int count) { while (count--) { pr_debug("unregistering platform driver %ps\n", drivers[count]); platform_driver_unregister(drivers[count]); } } EXPORT_SYMBOL_GPL(platform_unregister_drivers); static const struct platform_device_id *platform_match_id( const struct platform_device_id *id, struct platform_device *pdev) { while (id->name[0]) { if (strcmp(pdev->name, id->name) == 0) { pdev->id_entry = id; return id; } id++; } return NULL; } #ifdef CONFIG_PM_SLEEP static int platform_legacy_suspend(struct device *dev, pm_message_t mesg) { struct platform_driver *pdrv = to_platform_driver(dev->driver); struct platform_device *pdev = to_platform_device(dev); int ret = 0; if (dev->driver && pdrv->suspend) ret = pdrv->suspend(pdev, mesg); return ret; } static int platform_legacy_resume(struct device *dev) { struct platform_driver *pdrv = to_platform_driver(dev->driver); struct platform_device *pdev = to_platform_device(dev); int ret = 0; if (dev->driver && pdrv->resume) ret = pdrv->resume(pdev); return ret; } #endif /* CONFIG_PM_SLEEP */ #ifdef CONFIG_SUSPEND int platform_pm_suspend(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->suspend) ret = drv->pm->suspend(dev); } else { ret = platform_legacy_suspend(dev, PMSG_SUSPEND); } return ret; } int platform_pm_resume(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->resume) ret = drv->pm->resume(dev); } else { ret = platform_legacy_resume(dev); } return ret; } #endif /* CONFIG_SUSPEND */ #ifdef CONFIG_HIBERNATE_CALLBACKS int platform_pm_freeze(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->freeze) ret = drv->pm->freeze(dev); } else { ret = platform_legacy_suspend(dev, PMSG_FREEZE); } return ret; } int platform_pm_thaw(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->thaw) ret = drv->pm->thaw(dev); } else { ret = platform_legacy_resume(dev); } return ret; } int platform_pm_poweroff(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->poweroff) ret = drv->pm->poweroff(dev); } else { ret = platform_legacy_suspend(dev, PMSG_HIBERNATE); } return ret; } int platform_pm_restore(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; 
if (!drv) return 0; if (drv->pm) { if (drv->pm->restore) ret = drv->pm->restore(dev); } else { ret = platform_legacy_resume(dev); } return ret; } #endif /* CONFIG_HIBERNATE_CALLBACKS */ /* modalias support enables more hands-off userspace setup: * (a) environment variable lets new-style hotplug events work once system is * fully running: "modprobe $MODALIAS" * (b) sysfs attribute lets new-style coldplug recover from hotplug events * mishandled before system is fully running: "modprobe $(cat modalias)" */ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *pdev = to_platform_device(dev); int len; len = of_device_modalias(dev, buf, PAGE_SIZE); if (len != -ENODEV) return len; len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1); if (len != -ENODEV) return len; return sysfs_emit(buf, "platform:%s\n", pdev->name); } static DEVICE_ATTR_RO(modalias); static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n", dev_to_node(dev)); } static DEVICE_ATTR_RO(numa_node); static ssize_t driver_override_show(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *pdev = to_platform_device(dev); ssize_t len; device_lock(dev); len = sysfs_emit(buf, "%s\n", pdev->driver_override); device_unlock(dev); return len; } static ssize_t driver_override_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct platform_device *pdev = to_platform_device(dev); int ret; ret = driver_set_override(dev, &pdev->driver_override, buf, count); if (ret) return ret; return count; } static DEVICE_ATTR_RW(driver_override); static struct attribute *platform_dev_attrs[] = { &dev_attr_modalias.attr, &dev_attr_numa_node.attr, &dev_attr_driver_override.attr, NULL, }; static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a, int n) { struct device *dev = container_of(kobj, typeof(*dev), kobj); if (a == &dev_attr_numa_node.attr && dev_to_node(dev) == NUMA_NO_NODE) return 0; return a->mode; } static const struct attribute_group platform_dev_group = { .attrs = platform_dev_attrs, .is_visible = platform_dev_attrs_visible, }; __ATTRIBUTE_GROUPS(platform_dev); /** * platform_match - bind platform device to platform driver. * @dev: device. * @drv: driver. * * Platform device IDs are assumed to be encoded like this: * "<name><instance>", where <name> is a short description of the type of * device, like "pci" or "floppy", and <instance> is the enumerated * instance of the device, like '0' or '42'. Driver IDs are simply * "<name>". So, extract the <name> from the platform_device structure, * and compare it against the name of the driver. Return whether they match * or not. 
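 *
 * As an editorial illustration (the "foo" names are hypothetical), a driver
 * that should bind to several related device names can list them in an id
 * table instead of relying on drv->name alone::
 *
 *	static const struct platform_device_id foo_ids[] = {
 *		{ "foo-v1", 0 },
 *		{ "foo-v2", 1 },
 *		{ }
 *	};
 *
 * with .id_table = foo_ids in the corresponding struct platform_driver, so
 * that platform_match_id() above can consult it.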
*/ static int platform_match(struct device *dev, struct device_driver *drv) { struct platform_device *pdev = to_platform_device(dev); struct platform_driver *pdrv = to_platform_driver(drv); /* When driver_override is set, only bind to the matching driver */ if (pdev->driver_override) return !strcmp(pdev->driver_override, drv->name); /* Attempt an OF style match first */ if (of_driver_match_device(dev, drv)) return 1; /* Then try ACPI style match */ if (acpi_driver_match_device(dev, drv)) return 1; /* Then try to match against the id table */ if (pdrv->id_table) return platform_match_id(pdrv->id_table, pdev) != NULL; /* fall-back to driver name match */ return (strcmp(pdev->name, drv->name) == 0); } static int platform_uevent(struct device *dev, struct kobj_uevent_env *env) { struct platform_device *pdev = to_platform_device(dev); int rc; /* Some devices have extra OF data and an OF-style MODALIAS */ rc = of_device_uevent_modalias(dev, env); if (rc != -ENODEV) return rc; rc = acpi_device_uevent_modalias(dev, env); if (rc != -ENODEV) return rc; add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX, pdev->name); return 0; } static int platform_probe(struct device *_dev) { struct platform_driver *drv = to_platform_driver(_dev->driver); struct platform_device *dev = to_platform_device(_dev); int ret; /* * A driver registered using platform_driver_probe() cannot be bound * again later because the probe function usually lives in __init code * and so is gone. For these drivers .probe is set to * platform_probe_fail in __platform_driver_probe(). Don't even prepare * clocks and PM domains for these to match the traditional behaviour. */ if (unlikely(drv->probe == platform_probe_fail)) return -ENXIO; ret = of_clk_set_defaults(_dev->of_node, false); if (ret < 0) return ret; ret = dev_pm_domain_attach(_dev, true); if (ret) goto out; if (drv->probe) { ret = drv->probe(dev); if (ret) dev_pm_domain_detach(_dev, true); } out: if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) { dev_warn(_dev, "probe deferral not supported\n"); ret = -ENXIO; } return ret; } static void platform_remove(struct device *_dev) { struct platform_driver *drv = to_platform_driver(_dev->driver); struct platform_device *dev = to_platform_device(_dev); if (drv->remove_new) { drv->remove_new(dev); } else if (drv->remove) { int ret = drv->remove(dev); if (ret) dev_warn(_dev, "remove callback returned a non-zero value. 
This will be ignored.\n"); } dev_pm_domain_detach(_dev, true); } static void platform_shutdown(struct device *_dev) { struct platform_device *dev = to_platform_device(_dev); struct platform_driver *drv; if (!_dev->driver) return; drv = to_platform_driver(_dev->driver); if (drv->shutdown) drv->shutdown(dev); } int platform_dma_configure(struct device *dev) { enum dev_dma_attr attr; int ret = 0; if (dev->of_node) { ret = of_dma_configure(dev, dev->of_node, true); } else if (has_acpi_companion(dev)) { attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode)); ret = acpi_dma_configure(dev, attr); } return ret; } static const struct dev_pm_ops platform_dev_pm_ops = { .runtime_suspend = pm_generic_runtime_suspend, .runtime_resume = pm_generic_runtime_resume, USE_PLATFORM_PM_SLEEP_OPS }; struct bus_type platform_bus_type = { .name = "platform", .dev_groups = platform_dev_groups, .match = platform_match, .uevent = platform_uevent, .probe = platform_probe, .remove = platform_remove, .shutdown = platform_shutdown, .dma_configure = platform_dma_configure, .pm = &platform_dev_pm_ops, }; EXPORT_SYMBOL_GPL(platform_bus_type); static inline int __platform_match(struct device *dev, const void *drv) { return platform_match(dev, (struct device_driver *)drv); } /** * platform_find_device_by_driver - Find a platform device with a given * driver. * @start: The device to start the search from. * @drv: The device driver to look for. */ struct device *platform_find_device_by_driver(struct device *start, const struct device_driver *drv) { return bus_find_device(&platform_bus_type, start, drv, __platform_match); } EXPORT_SYMBOL_GPL(platform_find_device_by_driver); void __weak __init early_platform_cleanup(void) { } int __init platform_bus_init(void) { int error; early_platform_cleanup(); error = device_register(&platform_bus); if (error) { put_device(&platform_bus); return error; } error = bus_register(&platform_bus_type); if (error) device_unregister(&platform_bus); of_platform_register_reconfig_notifier(); return error; } |
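/*
 * Editorial usage sketch, not part of the original file: a minimal driver
 * built on the registration APIs above. All "foo" identifiers are
 * hypothetical and error handling is reduced to the essentials.
 */
static int foo_probe(struct platform_device *pdev)
{
	void __iomem *regs;

	/* Map the first MEM resource declared for this device */
	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	dev_info(&pdev->dev, "foo device bound\n");
	return 0;
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.driver	= {
		.name = "foo",
	},
};
module_platform_driver(foo_driver);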
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/pnode.c
 *
 * (C) Copyright IBM Corporation 2005.
* Author : Ram Pai (linuxram@us.ibm.com) */ #include <linux/mnt_namespace.h> #include <linux/mount.h> #include <linux/fs.h> #include <linux/nsproxy.h> #include <uapi/linux/mount.h> #include "internal.h" #include "pnode.h" /* return the next shared peer mount of @p */ static inline struct mount *next_peer(struct mount *p) { return list_entry(p->mnt_share.next, struct mount, mnt_share); } static inline struct mount *first_slave(struct mount *p) { return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave); } static inline struct mount *last_slave(struct mount *p) { return list_entry(p->mnt_slave_list.prev, struct mount, mnt_slave); } static inline struct mount *next_slave(struct mount *p) { return list_entry(p->mnt_slave.next, struct mount, mnt_slave); } static struct mount *get_peer_under_root(struct mount *mnt, struct mnt_namespace *ns, const struct path *root) { struct mount *m = mnt; do { /* Check the namespace first for optimization */ if (m->mnt_ns == ns && is_path_reachable(m, m->mnt.mnt_root, root)) return m; m = next_peer(m); } while (m != mnt); return NULL; } /* * Get ID of closest dominating peer group having a representative * under the given root. * * Caller must hold namespace_sem */ int get_dominating_id(struct mount *mnt, const struct path *root) { struct mount *m; for (m = mnt->mnt_master; m != NULL; m = m->mnt_master) { struct mount *d = get_peer_under_root(m, mnt->mnt_ns, root); if (d) return d->mnt_group_id; } return 0; } static int do_make_slave(struct mount *mnt) { struct mount *master, *slave_mnt; if (list_empty(&mnt->mnt_share)) { if (IS_MNT_SHARED(mnt)) { mnt_release_group_id(mnt); CLEAR_MNT_SHARED(mnt); } master = mnt->mnt_master; if (!master) { struct list_head *p = &mnt->mnt_slave_list; while (!list_empty(p)) { slave_mnt = list_first_entry(p, struct mount, mnt_slave); list_del_init(&slave_mnt->mnt_slave); slave_mnt->mnt_master = NULL; } return 0; } } else { struct mount *m; /* * slave 'mnt' to a peer mount that has the * same root dentry. If none is available then * slave it to anything that is available. */ for (m = master = next_peer(mnt); m != mnt; m = next_peer(m)) { if (m->mnt.mnt_root == mnt->mnt.mnt_root) { master = m; break; } } list_del_init(&mnt->mnt_share); mnt->mnt_group_id = 0; CLEAR_MNT_SHARED(mnt); } list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave) slave_mnt->mnt_master = master; list_move(&mnt->mnt_slave, &master->mnt_slave_list); list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev); INIT_LIST_HEAD(&mnt->mnt_slave_list); mnt->mnt_master = master; return 0; } /* * vfsmount lock must be held for write */ void change_mnt_propagation(struct mount *mnt, int type) { if (type == MS_SHARED) { set_mnt_shared(mnt); return; } do_make_slave(mnt); if (type != MS_SLAVE) { list_del_init(&mnt->mnt_slave); mnt->mnt_master = NULL; if (type == MS_UNBINDABLE) mnt->mnt.mnt_flags |= MNT_UNBINDABLE; else mnt->mnt.mnt_flags &= ~MNT_UNBINDABLE; } } /* * get the next mount in the propagation tree. * @m: the mount seen last * @origin: the original mount from where the tree walk initiated * * Note that peer groups form contiguous segments of slave lists. * We rely on that in get_source() to be able to find out if * vfsmount found while iterating with propagation_next() is * a peer of one we'd found earlier. */ static struct mount *propagation_next(struct mount *m, struct mount *origin) { /* are there any slaves of this mount? 
*/ if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list)) return first_slave(m); while (1) { struct mount *master = m->mnt_master; if (master == origin->mnt_master) { struct mount *next = next_peer(m); return (next == origin) ? NULL : next; } else if (m->mnt_slave.next != &master->mnt_slave_list) return next_slave(m); /* back at master */ m = master; } } static struct mount *skip_propagation_subtree(struct mount *m, struct mount *origin) { /* * Advance m such that propagation_next will not return * the slaves of m. */ if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list)) m = last_slave(m); return m; } static struct mount *next_group(struct mount *m, struct mount *origin) { while (1) { while (1) { struct mount *next; if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list)) return first_slave(m); next = next_peer(m); if (m->mnt_group_id == origin->mnt_group_id) { if (next == origin) return NULL; } else if (m->mnt_slave.next != &next->mnt_slave) break; m = next; } /* m is the last peer */ while (1) { struct mount *master = m->mnt_master; if (m->mnt_slave.next != &master->mnt_slave_list) return next_slave(m); m = next_peer(master); if (master->mnt_group_id == origin->mnt_group_id) break; if (master->mnt_slave.next == &m->mnt_slave) break; m = master; } if (m == origin) return NULL; } } /* all accesses are serialized by namespace_sem */ static struct mount *last_dest, *first_source, *last_source, *dest_master; static struct mountpoint *mp; static struct hlist_head *list; static inline bool peers(struct mount *m1, struct mount *m2) { return m1->mnt_group_id == m2->mnt_group_id && m1->mnt_group_id; } static int propagate_one(struct mount *m) { struct mount *child; int type; /* skip ones added by this propagate_mnt() */ if (IS_MNT_NEW(m)) return 0; /* skip if mountpoint isn't covered by it */ if (!is_subdir(mp->m_dentry, m->mnt.mnt_root)) return 0; if (peers(m, last_dest)) { type = CL_MAKE_SHARED; } else { struct mount *n, *p; bool done; for (n = m; ; n = p) { p = n->mnt_master; if (p == dest_master || IS_MNT_MARKED(p)) break; } do { struct mount *parent = last_source->mnt_parent; if (peers(last_source, first_source)) break; done = parent->mnt_master == p; if (done && peers(n, parent)) break; last_source = last_source->mnt_master; } while (!done); type = CL_SLAVE; /* beginning of peer group among the slaves? */ if (IS_MNT_SHARED(m)) type |= CL_MAKE_SHARED; } child = copy_tree(last_source, last_source->mnt.mnt_root, type); if (IS_ERR(child)) return PTR_ERR(child); read_seqlock_excl(&mount_lock); mnt_set_mountpoint(m, mp, child); if (m->mnt_master != dest_master) SET_MNT_MARK(m->mnt_master); read_sequnlock_excl(&mount_lock); last_dest = m; last_source = child; hlist_add_head(&child->mnt_hash, list); return count_mounts(m->mnt_ns, child); } /* * mount 'source_mnt' under the destination 'dest_mnt' at * dentry 'dest_dentry'. And propagate that mount to * all the peer and slave mounts of 'dest_mnt'. * Link all the new mounts into a propagation tree headed at * source_mnt. Also link all the new mounts using ->mnt_list * headed at source_mnt's ->mnt_list * * @dest_mnt: destination mount. * @dest_dentry: destination dentry. * @source_mnt: source mount. * @tree_list : list of heads of trees to be attached. 
*/ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp, struct mount *source_mnt, struct hlist_head *tree_list) { struct mount *m, *n; int ret = 0; /* * we don't want to bother passing tons of arguments to * propagate_one(); everything is serialized by namespace_sem, * so globals will do just fine. */ last_dest = dest_mnt; first_source = source_mnt; last_source = source_mnt; mp = dest_mp; list = tree_list; dest_master = dest_mnt->mnt_master; /* all peers of dest_mnt, except dest_mnt itself */ for (n = next_peer(dest_mnt); n != dest_mnt; n = next_peer(n)) { ret = propagate_one(n); if (ret) goto out; } /* all slave groups */ for (m = next_group(dest_mnt, dest_mnt); m; m = next_group(m, dest_mnt)) { /* everything in that slave group */ n = m; do { ret = propagate_one(n); if (ret) goto out; n = next_peer(n); } while (n != m); } out: read_seqlock_excl(&mount_lock); hlist_for_each_entry(n, tree_list, mnt_hash) { m = n->mnt_parent; if (m->mnt_master != dest_mnt->mnt_master) CLEAR_MNT_MARK(m->mnt_master); } read_sequnlock_excl(&mount_lock); return ret; } static struct mount *find_topper(struct mount *mnt) { /* If there is exactly one mount covering mnt completely return it. */ struct mount *child; if (!list_is_singular(&mnt->mnt_mounts)) return NULL; child = list_first_entry(&mnt->mnt_mounts, struct mount, mnt_child); if (child->mnt_mountpoint != mnt->mnt.mnt_root) return NULL; return child; } /* * return true if the refcount is greater than count */ static inline int do_refcount_check(struct mount *mnt, int count) { return mnt_get_count(mnt) > count; } /* * check if the mount 'mnt' can be unmounted successfully. * @mnt: the mount to be checked for unmount * NOTE: unmounting 'mnt' would naturally propagate to all * other mounts its parent propagates to. * Check if any of these mounts that **do not have submounts** * have more references than 'refcnt'. If so return busy. * * vfsmount lock must be held for write */ int propagate_mount_busy(struct mount *mnt, int refcnt) { struct mount *m, *child, *topper; struct mount *parent = mnt->mnt_parent; if (mnt == parent) return do_refcount_check(mnt, refcnt); /* * quickly check if the current mount can be unmounted. * If not, we don't have to go checking for all other * mounts */ if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt)) return 1; for (m = propagation_next(parent, parent); m; m = propagation_next(m, parent)) { int count = 1; child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint); if (!child) continue; /* Is there exactly one mount on the child that covers * it completely whose reference should be ignored? */ topper = find_topper(child); if (topper) count += 1; else if (!list_empty(&child->mnt_mounts)) continue; if (do_refcount_check(child, count)) return 1; } return 0; } /* * Clear MNT_LOCKED when it can be shown to be safe. * * mount_lock lock must be held for write */ void propagate_mount_unlock(struct mount *mnt) { struct mount *parent = mnt->mnt_parent; struct mount *m, *child; BUG_ON(parent == mnt); for (m = propagation_next(parent, parent); m; m = propagation_next(m, parent)) { child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint); if (child) child->mnt.mnt_flags &= ~MNT_LOCKED; } } static void umount_one(struct mount *mnt, struct list_head *to_umount) { CLEAR_MNT_MARK(mnt); mnt->mnt.mnt_flags |= MNT_UMOUNT; list_del_init(&mnt->mnt_child); list_del_init(&mnt->mnt_umounting); list_move_tail(&mnt->mnt_list, to_umount); } /* * NOTE: unmounting 'mnt' naturally propagates to all other mounts its * parent propagates to. 
*/ static bool __propagate_umount(struct mount *mnt, struct list_head *to_umount, struct list_head *to_restore) { bool progress = false; struct mount *child; /* * The state of the parent won't change if this mount is * already unmounted or marked as without children. */ if (mnt->mnt.mnt_flags & (MNT_UMOUNT | MNT_MARKED)) goto out; /* Verify topper is the only grandchild that has not been * speculatively unmounted. */ list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { if (child->mnt_mountpoint == mnt->mnt.mnt_root) continue; if (!list_empty(&child->mnt_umounting) && IS_MNT_MARKED(child)) continue; /* Found a mounted child */ goto children; } /* Mark mounts that can be unmounted if not locked */ SET_MNT_MARK(mnt); progress = true; /* If a mount is without children and not locked umount it. */ if (!IS_MNT_LOCKED(mnt)) { umount_one(mnt, to_umount); } else { children: list_move_tail(&mnt->mnt_umounting, to_restore); } out: return progress; } static void umount_list(struct list_head *to_umount, struct list_head *to_restore) { struct mount *mnt, *child, *tmp; list_for_each_entry(mnt, to_umount, mnt_list) { list_for_each_entry_safe(child, tmp, &mnt->mnt_mounts, mnt_child) { /* topper? */ if (child->mnt_mountpoint == mnt->mnt.mnt_root) list_move_tail(&child->mnt_umounting, to_restore); else umount_one(child, to_umount); } } } static void restore_mounts(struct list_head *to_restore) { /* Restore mounts to a clean working state */ while (!list_empty(to_restore)) { struct mount *mnt, *parent; struct mountpoint *mp; mnt = list_first_entry(to_restore, struct mount, mnt_umounting); CLEAR_MNT_MARK(mnt); list_del_init(&mnt->mnt_umounting); /* Should this mount be reparented? */ mp = mnt->mnt_mp; parent = mnt->mnt_parent; while (parent->mnt.mnt_flags & MNT_UMOUNT) { mp = parent->mnt_mp; parent = parent->mnt_parent; } if (parent != mnt->mnt_parent) mnt_change_mountpoint(parent, mp, mnt); } } static void cleanup_umount_visitations(struct list_head *visited) { while (!list_empty(visited)) { struct mount *mnt = list_first_entry(visited, struct mount, mnt_umounting); list_del_init(&mnt->mnt_umounting); } } /* * collect all mounts that receive propagation from the mount in @list, * and return these additional mounts in the same list. * @list: the list of mounts to be unmounted. * * vfsmount lock must be held for write */ int propagate_umount(struct list_head *list) { struct mount *mnt; LIST_HEAD(to_restore); LIST_HEAD(to_umount); LIST_HEAD(visited); /* Find candidates for unmounting */ list_for_each_entry_reverse(mnt, list, mnt_list) { struct mount *parent = mnt->mnt_parent; struct mount *m; /* * If this mount has already been visited it is known that it's * entire peer group and all of their slaves in the propagation * tree for the mountpoint has already been visited and there is * no need to visit them again. */ if (!list_empty(&mnt->mnt_umounting)) continue; list_add_tail(&mnt->mnt_umounting, &visited); for (m = propagation_next(parent, parent); m; m = propagation_next(m, parent)) { struct mount *child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint); if (!child) continue; if (!list_empty(&child->mnt_umounting)) { /* * If the child has already been visited it is * know that it's entire peer group and all of * their slaves in the propgation tree for the * mountpoint has already been visited and there * is no need to visit this subtree again. 
				 */
				m = skip_propagation_subtree(m, parent);
				continue;
			} else if (child->mnt.mnt_flags & MNT_UMOUNT) {
				/*
				 * We have come across a partially unmounted
				 * mount in the list that has not been visited
				 * yet. Remember it has been visited and
				 * continue about our merry way.
				 */
				list_add_tail(&child->mnt_umounting, &visited);
				continue;
			}

			/* Check the child and parents while progress is made */
			while (__propagate_umount(child,
						  &to_umount, &to_restore)) {
				/* Is the parent a umount candidate? */
				child = child->mnt_parent;
				if (list_empty(&child->mnt_umounting))
					break;
			}
		}
	}

	umount_list(&to_umount, &to_restore);
	restore_mounts(&to_restore);
	cleanup_umount_visitations(&visited);

	list_splice_tail(&to_umount, list);

	return 0;
}
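/*
 * Editorial illustration, not part of the original file: the propagation
 * types handled by change_mnt_propagation() above are selected from
 * userspace via mount(2). A minimal sketch, assuming /mnt is an existing
 * mount point:
 */
#include <sys/mount.h>
#include <stdio.h>

int demo_mnt_propagation(void)
{
	/* Make /mnt a shared mount so that mounts below it propagate */
	if (mount(NULL, "/mnt", NULL, MS_SHARED, NULL) < 0) {
		perror("MS_SHARED");
		return -1;
	}

	/* Downgrade it to a slave: it now only receives propagation */
	if (mount(NULL, "/mnt", NULL, MS_SLAVE, NULL) < 0) {
		perror("MS_SLAVE");
		return -1;
	}

	return 0;
}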
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree is used instead. At least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <linux/export.h>
#include <linux/interval_tree_generic.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>

#include <drm/drm_mm.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the linux core if it suits them; the upside of
 * drm_mm is that it's in the DRM core, which means that it's easier to extend
 * for some of the crazier special purpose needs of gpus.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any memory allocations of its own,
 * so if drivers choose not to embed nodes they need to still allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks.
This is * useful for taking over initial mode setting configurations from the firmware, * where an object needs to be created which exactly matches the firmware's * scanout target. As long as the range is still free it can be inserted anytime * after the allocator is initialized, which helps with avoiding looped * dependencies in the driver load sequence. * * drm_mm maintains a stack of most recently freed holes, which of all * simplistic datastructures seems to be a fairly decent approach to clustering * allocations and avoiding too much fragmentation. This means free space * searches are O(num_holes). Given that all the fancy features drm_mm supports * something better would be fairly complex and since gfx thrashing is a fairly * steep cliff not a real concern. Removing a node again is O(1). * * drm_mm supports a few features: Alignment and range restrictions can be * supplied. Furthermore every &drm_mm_node has a color value (which is just an * opaque unsigned long) which in conjunction with a driver callback can be used * to implement sophisticated placement restrictions. The i915 DRM driver uses * this to implement guard pages between incompatible caching domains in the * graphics TT. * * Two behaviors are supported for searching and allocating: bottom-up and * top-down. The default is bottom-up. Top-down allocation can be used if the * memory area has different restrictions, or just to reduce fragmentation. * * Finally iteration helpers to walk all nodes and all holes are provided as are * some basic allocator dumpers for debugging. * * Note that this range allocator is not thread-safe, drivers need to protect * modifications with their own locking. The idea behind this is that for a full * memory manager additional data needs to be protected anyway, hence internal * locking would be fully redundant. 
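 *
 * A condensed usage sketch (editorial example; locking is omitted, the 8 MiB
 * range and 4 KiB size are arbitrary)::
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	drm_mm_init(&mm, 0, 8 * 1024 * 1024);
 *	err = drm_mm_insert_node_in_range(&mm, &node, 4096, 0, 0,
 *					  0, 8 * 1024 * 1024,
 *					  DRM_MM_INSERT_BEST);
 *	if (!err) {
 *		... use node.start and node.size ...
 *		drm_mm_remove_node(&node);
 *	}
 *	drm_mm_takedown(&mm);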
*/ #ifdef CONFIG_DRM_DEBUG_MM #include <linux/stackdepot.h> #define STACKDEPTH 32 #define BUFSZ 4096 static noinline void save_stack(struct drm_mm_node *node) { unsigned long entries[STACKDEPTH]; unsigned int n; n = stack_trace_save(entries, ARRAY_SIZE(entries), 1); /* May be called under spinlock, so avoid sleeping */ node->stack = stack_depot_save(entries, n, GFP_NOWAIT); } static void show_leaks(struct drm_mm *mm) { struct drm_mm_node *node; unsigned long *entries; unsigned int nr_entries; char *buf; buf = kmalloc(BUFSZ, GFP_KERNEL); if (!buf) return; list_for_each_entry(node, drm_mm_nodes(mm), node_list) { if (!node->stack) { DRM_ERROR("node [%08llx + %08llx]: unknown owner\n", node->start, node->size); continue; } nr_entries = stack_depot_fetch(node->stack, &entries); stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0); DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s", node->start, node->size, buf); } kfree(buf); } #undef STACKDEPTH #undef BUFSZ #else static void save_stack(struct drm_mm_node *node) { } static void show_leaks(struct drm_mm *mm) { } #endif #define START(node) ((node)->start) #define LAST(node) ((node)->start + (node)->size - 1) INTERVAL_TREE_DEFINE(struct drm_mm_node, rb, u64, __subtree_last, START, LAST, static inline __maybe_unused, drm_mm_interval_tree) struct drm_mm_node * __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last) { return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree, start, last) ?: (struct drm_mm_node *)&mm->head_node; } EXPORT_SYMBOL(__drm_mm_interval_first); static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node, struct drm_mm_node *node) { struct drm_mm *mm = hole_node->mm; struct rb_node **link, *rb; struct drm_mm_node *parent; bool leftmost; node->__subtree_last = LAST(node); if (drm_mm_node_allocated(hole_node)) { rb = &hole_node->rb; while (rb) { parent = rb_entry(rb, struct drm_mm_node, rb); if (parent->__subtree_last >= node->__subtree_last) break; parent->__subtree_last = node->__subtree_last; rb = rb_parent(rb); } rb = &hole_node->rb; link = &hole_node->rb.rb_right; leftmost = false; } else { rb = NULL; link = &mm->interval_tree.rb_root.rb_node; leftmost = true; } while (*link) { rb = *link; parent = rb_entry(rb, struct drm_mm_node, rb); if (parent->__subtree_last < node->__subtree_last) parent->__subtree_last = node->__subtree_last; if (node->start < parent->start) { link = &parent->rb.rb_left; } else { link = &parent->rb.rb_right; leftmost = false; } } rb_link_node(&node->rb, rb, link); rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost, &drm_mm_interval_tree_augment); } #define HOLE_SIZE(NODE) ((NODE)->hole_size) #define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE)) static u64 rb_to_hole_size(struct rb_node *rb) { return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size; } static void insert_hole_size(struct rb_root_cached *root, struct drm_mm_node *node) { struct rb_node **link = &root->rb_root.rb_node, *rb = NULL; u64 x = node->hole_size; bool first = true; while (*link) { rb = *link; if (x > rb_to_hole_size(rb)) { link = &rb->rb_left; } else { link = &rb->rb_right; first = false; } } rb_link_node(&node->rb_hole_size, rb, link); rb_insert_color_cached(&node->rb_hole_size, root, first); } RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks, struct drm_mm_node, rb_hole_addr, u64, subtree_max_hole, HOLE_SIZE) static void insert_hole_addr(struct rb_root *root, struct drm_mm_node *node) { struct rb_node **link = &root->rb_node, *rb_parent = 
NULL; u64 start = HOLE_ADDR(node), subtree_max_hole = node->subtree_max_hole; struct drm_mm_node *parent; while (*link) { rb_parent = *link; parent = rb_entry(rb_parent, struct drm_mm_node, rb_hole_addr); if (parent->subtree_max_hole < subtree_max_hole) parent->subtree_max_hole = subtree_max_hole; if (start < HOLE_ADDR(parent)) link = &parent->rb_hole_addr.rb_left; else link = &parent->rb_hole_addr.rb_right; } rb_link_node(&node->rb_hole_addr, rb_parent, link); rb_insert_augmented(&node->rb_hole_addr, root, &augment_callbacks); } static void add_hole(struct drm_mm_node *node) { struct drm_mm *mm = node->mm; node->hole_size = __drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node); node->subtree_max_hole = node->hole_size; DRM_MM_BUG_ON(!drm_mm_hole_follows(node)); insert_hole_size(&mm->holes_size, node); insert_hole_addr(&mm->holes_addr, node); list_add(&node->hole_stack, &mm->hole_stack); } static void rm_hole(struct drm_mm_node *node) { DRM_MM_BUG_ON(!drm_mm_hole_follows(node)); list_del(&node->hole_stack); rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size); rb_erase_augmented(&node->rb_hole_addr, &node->mm->holes_addr, &augment_callbacks); node->hole_size = 0; node->subtree_max_hole = 0; DRM_MM_BUG_ON(drm_mm_hole_follows(node)); } static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb) { return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size); } static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb) { return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr); } static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size) { struct rb_node *rb = mm->holes_size.rb_root.rb_node; struct drm_mm_node *best = NULL; do { struct drm_mm_node *node = rb_entry(rb, struct drm_mm_node, rb_hole_size); if (size <= node->hole_size) { best = node; rb = rb->rb_right; } else { rb = rb->rb_left; } } while (rb); return best; } static bool usable_hole_addr(struct rb_node *rb, u64 size) { return rb && rb_hole_addr_to_node(rb)->subtree_max_hole >= size; } static struct drm_mm_node *find_hole_addr(struct drm_mm *mm, u64 addr, u64 size) { struct rb_node *rb = mm->holes_addr.rb_node; struct drm_mm_node *node = NULL; while (rb) { u64 hole_start; if (!usable_hole_addr(rb, size)) break; node = rb_hole_addr_to_node(rb); hole_start = __drm_mm_hole_node_start(node); if (addr < hole_start) rb = node->rb_hole_addr.rb_left; else if (addr > hole_start + node->hole_size) rb = node->rb_hole_addr.rb_right; else break; } return node; } static struct drm_mm_node * first_hole(struct drm_mm *mm, u64 start, u64 end, u64 size, enum drm_mm_insert_mode mode) { switch (mode) { default: case DRM_MM_INSERT_BEST: return best_hole(mm, size); case DRM_MM_INSERT_LOW: return find_hole_addr(mm, start, size); case DRM_MM_INSERT_HIGH: return find_hole_addr(mm, end, size); case DRM_MM_INSERT_EVICT: return list_first_entry_or_null(&mm->hole_stack, struct drm_mm_node, hole_stack); } } /** * DECLARE_NEXT_HOLE_ADDR - macro to declare next hole functions * @name: name of function to declare * @first: first rb member to traverse (either rb_left or rb_right). * @last: last rb member to traverse (either rb_right or rb_left). * * This macro declares a function to return the next hole of the addr rb tree. * While traversing the tree we take the searched size into account and only * visit branches with potential big enough holes. 
*/ #define DECLARE_NEXT_HOLE_ADDR(name, first, last) \ static struct drm_mm_node *name(struct drm_mm_node *entry, u64 size) \ { \ struct rb_node *parent, *node = &entry->rb_hole_addr; \ \ if (!entry || RB_EMPTY_NODE(node)) \ return NULL; \ \ if (usable_hole_addr(node->first, size)) { \ node = node->first; \ while (usable_hole_addr(node->last, size)) \ node = node->last; \ return rb_hole_addr_to_node(node); \ } \ \ while ((parent = rb_parent(node)) && node == parent->first) \ node = parent; \ \ return rb_hole_addr_to_node(parent); \ } DECLARE_NEXT_HOLE_ADDR(next_hole_high_addr, rb_left, rb_right) DECLARE_NEXT_HOLE_ADDR(next_hole_low_addr, rb_right, rb_left) static struct drm_mm_node * next_hole(struct drm_mm *mm, struct drm_mm_node *node, u64 size, enum drm_mm_insert_mode mode) { switch (mode) { default: case DRM_MM_INSERT_BEST: return rb_hole_size_to_node(rb_prev(&node->rb_hole_size)); case DRM_MM_INSERT_LOW: return next_hole_low_addr(node, size); case DRM_MM_INSERT_HIGH: return next_hole_high_addr(node, size); case DRM_MM_INSERT_EVICT: node = list_next_entry(node, hole_stack); return &node->hole_stack == &mm->hole_stack ? NULL : node; } } /** * drm_mm_reserve_node - insert an pre-initialized node * @mm: drm_mm allocator to insert @node into * @node: drm_mm_node to insert * * This functions inserts an already set-up &drm_mm_node into the allocator, * meaning that start, size and color must be set by the caller. All other * fields must be cleared to 0. This is useful to initialize the allocator with * preallocated objects which must be set-up before the range allocator can be * set-up, e.g. when taking over a firmware framebuffer. * * Returns: * 0 on success, -ENOSPC if there's no hole where @node is. */ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) { struct drm_mm_node *hole; u64 hole_start, hole_end; u64 adj_start, adj_end; u64 end; end = node->start + node->size; if (unlikely(end <= node->start)) return -ENOSPC; /* Find the relevant hole to add our node to */ hole = find_hole_addr(mm, node->start, 0); if (!hole) return -ENOSPC; adj_start = hole_start = __drm_mm_hole_node_start(hole); adj_end = hole_end = hole_start + hole->hole_size; if (mm->color_adjust) mm->color_adjust(hole, node->color, &adj_start, &adj_end); if (adj_start > node->start || adj_end < end) return -ENOSPC; node->mm = mm; __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags); list_add(&node->node_list, &hole->node_list); drm_mm_interval_tree_add_node(hole, node); node->hole_size = 0; rm_hole(hole); if (node->start > hole_start) add_hole(hole); if (end < hole_end) add_hole(node); save_stack(node); return 0; } EXPORT_SYMBOL(drm_mm_reserve_node); static u64 rb_to_hole_size_or_zero(struct rb_node *rb) { return rb ? rb_to_hole_size(rb) : 0; } /** * drm_mm_insert_node_in_range - ranged search for space and insert @node * @mm: drm_mm to allocate from * @node: preallocate node to insert * @size: size of the allocation * @alignment: alignment of the allocation * @color: opaque tag value to use for this node * @range_start: start of the allowed range for this node * @range_end: end of the allowed range for this node * @mode: fine-tune the allocation search and placement * * The preallocated @node must be cleared to 0. * * Returns: * 0 on success, -ENOSPC if there's no suitable hole. 
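 *
 * For instance (editorial sketch, values arbitrary), a 64 KiB node aligned to
 * 64 KiB and placed top-down within the first 16 MiB::
 *
 *	err = drm_mm_insert_node_in_range(mm, node, SZ_64K, SZ_64K, 0,
 *					  0, SZ_16M, DRM_MM_INSERT_HIGH);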
*/ int drm_mm_insert_node_in_range(struct drm_mm * const mm, struct drm_mm_node * const node, u64 size, u64 alignment, unsigned long color, u64 range_start, u64 range_end, enum drm_mm_insert_mode mode) { struct drm_mm_node *hole; u64 remainder_mask; bool once; DRM_MM_BUG_ON(range_start > range_end); if (unlikely(size == 0 || range_end - range_start < size)) return -ENOSPC; if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size) return -ENOSPC; if (alignment <= 1) alignment = 0; once = mode & DRM_MM_INSERT_ONCE; mode &= ~DRM_MM_INSERT_ONCE; remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0; for (hole = first_hole(mm, range_start, range_end, size, mode); hole; hole = once ? NULL : next_hole(mm, hole, size, mode)) { u64 hole_start = __drm_mm_hole_node_start(hole); u64 hole_end = hole_start + hole->hole_size; u64 adj_start, adj_end; u64 col_start, col_end; if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end) break; if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start) break; col_start = hole_start; col_end = hole_end; if (mm->color_adjust) mm->color_adjust(hole, color, &col_start, &col_end); adj_start = max(col_start, range_start); adj_end = min(col_end, range_end); if (adj_end <= adj_start || adj_end - adj_start < size) continue; if (mode == DRM_MM_INSERT_HIGH) adj_start = adj_end - size; if (alignment) { u64 rem; if (likely(remainder_mask)) rem = adj_start & remainder_mask; else div64_u64_rem(adj_start, alignment, &rem); if (rem) { adj_start -= rem; if (mode != DRM_MM_INSERT_HIGH) adj_start += alignment; if (adj_start < max(col_start, range_start) || min(col_end, range_end) - adj_start < size) continue; if (adj_end <= adj_start || adj_end - adj_start < size) continue; } } node->mm = mm; node->size = size; node->start = adj_start; node->color = color; node->hole_size = 0; __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags); list_add(&node->node_list, &hole->node_list); drm_mm_interval_tree_add_node(hole, node); rm_hole(hole); if (adj_start > hole_start) add_hole(hole); if (adj_start + size < hole_end) add_hole(node); save_stack(node); return 0; } return -ENOSPC; } EXPORT_SYMBOL(drm_mm_insert_node_in_range); static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node) { return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags); } /** * drm_mm_remove_node - Remove a memory node from the allocator. * @node: drm_mm_node to remove * * This just removes a node from its drm_mm allocator. The node does not need to * be cleared again before it can be re-inserted into this or any other drm_mm * allocator. It is a bug to call this function on a unallocated node. */ void drm_mm_remove_node(struct drm_mm_node *node) { struct drm_mm *mm = node->mm; struct drm_mm_node *prev_node; DRM_MM_BUG_ON(!drm_mm_node_allocated(node)); DRM_MM_BUG_ON(drm_mm_node_scanned_block(node)); prev_node = list_prev_entry(node, node_list); if (drm_mm_hole_follows(node)) rm_hole(node); drm_mm_interval_tree_remove(node, &mm->interval_tree); list_del(&node->node_list); if (drm_mm_hole_follows(prev_node)) rm_hole(prev_node); add_hole(prev_node); clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags); } EXPORT_SYMBOL(drm_mm_remove_node); /** * drm_mm_replace_node - move an allocation from @old to @new * @old: drm_mm_node to remove from the allocator * @new: drm_mm_node which should inherit @old's allocation * * This is useful for when drivers embed the drm_mm_node structure and hence * can't move allocations by reassigning pointers. 
It's a combination of remove * and insert with the guarantee that the allocation start will match. */ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new) { struct drm_mm *mm = old->mm; DRM_MM_BUG_ON(!drm_mm_node_allocated(old)); *new = *old; __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags); list_replace(&old->node_list, &new->node_list); rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree); if (drm_mm_hole_follows(old)) { list_replace(&old->hole_stack, &new->hole_stack); rb_replace_node_cached(&old->rb_hole_size, &new->rb_hole_size, &mm->holes_size); rb_replace_node(&old->rb_hole_addr, &new->rb_hole_addr, &mm->holes_addr); } clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags); } EXPORT_SYMBOL(drm_mm_replace_node); /** * DOC: lru scan roster * * Very often GPUs need to have continuous allocations for a given object. When * evicting objects to make space for a new one it is therefore not most * efficient when we simply start to select all objects from the tail of an LRU * until there's a suitable hole: Especially for big objects or nodes that * otherwise have special allocation constraints there's a good chance we evict * lots of (smaller) objects unnecessarily. * * The DRM range allocator supports this use-case through the scanning * interfaces. First a scan operation needs to be initialized with * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds * objects to the roster, probably by walking an LRU list, but this can be * freely implemented. Eviction candidates are added using * drm_mm_scan_add_block() until a suitable hole is found or there are no * further evictable objects. Eviction roster metadata is tracked in &struct * drm_mm_scan. * * The driver must walk through all objects again in exactly the reverse * order to restore the allocator state. Note that while the allocator is used * in the scan mode no other operation is allowed. * * Finally the driver evicts all objects selected (drm_mm_scan_remove_block() * reported true) in the scan, and any overlapping nodes after color adjustment * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and * since freeing a node is also O(1) the overall complexity is * O(scanned_objects). So like the free stack which needs to be walked before a * scan operation even begins this is linear in the number of objects. It * doesn't seem to hurt too badly. */ /** * drm_mm_scan_init_with_range - initialize range-restricted lru scanning * @scan: scan state * @mm: drm_mm to scan * @size: size of the allocation * @alignment: alignment of the allocation * @color: opaque tag value to use for the allocation * @start: start of the allowed range for the allocation * @end: end of the allowed range for the allocation * @mode: fine-tune the allocation search and placement * * This simply sets up the scanning routines with the parameters for the desired * hole. * * Warning: * As long as the scan list is non-empty, no other operations than * adding/removing nodes to/from the scan list are allowed. */ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan, struct drm_mm *mm, u64 size, u64 alignment, unsigned long color, u64 start, u64 end, enum drm_mm_insert_mode mode) { DRM_MM_BUG_ON(start >= end); DRM_MM_BUG_ON(!size || size > end - start); DRM_MM_BUG_ON(mm->scan_active); scan->mm = mm; if (alignment <= 1) alignment = 0; scan->color = color; scan->alignment = alignment; scan->remainder_mask = is_power_of_2(alignment) ? 
alignment - 1 : 0; scan->size = size; scan->mode = mode; DRM_MM_BUG_ON(end <= start); scan->range_start = start; scan->range_end = end; scan->hit_start = U64_MAX; scan->hit_end = 0; } EXPORT_SYMBOL(drm_mm_scan_init_with_range); /** * drm_mm_scan_add_block - add a node to the scan list * @scan: the active drm_mm scanner * @node: drm_mm_node to add * * Add a node to the scan list that might be freed to make space for the desired * hole. * * Returns: * True if a hole has been found, false otherwise. */ bool drm_mm_scan_add_block(struct drm_mm_scan *scan, struct drm_mm_node *node) { struct drm_mm *mm = scan->mm; struct drm_mm_node *hole; u64 hole_start, hole_end; u64 col_start, col_end; u64 adj_start, adj_end; DRM_MM_BUG_ON(node->mm != mm); DRM_MM_BUG_ON(!drm_mm_node_allocated(node)); DRM_MM_BUG_ON(drm_mm_node_scanned_block(node)); __set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags); mm->scan_active++; /* Remove this block from the node_list so that we enlarge the hole * (distance between the end of our previous node and the start of * or next), without poisoning the link so that we can restore it * later in drm_mm_scan_remove_block(). */ hole = list_prev_entry(node, node_list); DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node); __list_del_entry(&node->node_list); hole_start = __drm_mm_hole_node_start(hole); hole_end = __drm_mm_hole_node_end(hole); col_start = hole_start; col_end = hole_end; if (mm->color_adjust) mm->color_adjust(hole, scan->color, &col_start, &col_end); adj_start = max(col_start, scan->range_start); adj_end = min(col_end, scan->range_end); if (adj_end <= adj_start || adj_end - adj_start < scan->size) return false; if (scan->mode == DRM_MM_INSERT_HIGH) adj_start = adj_end - scan->size; if (scan->alignment) { u64 rem; if (likely(scan->remainder_mask)) rem = adj_start & scan->remainder_mask; else div64_u64_rem(adj_start, scan->alignment, &rem); if (rem) { adj_start -= rem; if (scan->mode != DRM_MM_INSERT_HIGH) adj_start += scan->alignment; if (adj_start < max(col_start, scan->range_start) || min(col_end, scan->range_end) - adj_start < scan->size) return false; if (adj_end <= adj_start || adj_end - adj_start < scan->size) return false; } } scan->hit_start = adj_start; scan->hit_end = adj_start + scan->size; DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end); DRM_MM_BUG_ON(scan->hit_start < hole_start); DRM_MM_BUG_ON(scan->hit_end > hole_end); return true; } EXPORT_SYMBOL(drm_mm_scan_add_block); /** * drm_mm_scan_remove_block - remove a node from the scan list * @scan: the active drm_mm scanner * @node: drm_mm_node to remove * * Nodes **must** be removed in exactly the reverse order from the scan list as * they have been added (e.g. using list_add() as they are added and then * list_for_each() over that eviction list to remove), otherwise the internal * state of the memory manager will be corrupted. * * When the scan list is empty, the selected memory nodes can be freed. An * immediately following drm_mm_insert_node_in_range_generic() or one of the * simpler versions of that function with !DRM_MM_SEARCH_BEST will then return * the just freed block (because it's at the top of the free_stack list). * * Returns: * True if this block should be evicted, false otherwise. Will always * return false when no hole has been found. 
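 *
 * A sketch of the expected calling sequence, after drm_mm_scan_init_with_range()
 * has set up @scan (editorial example; obj, lru, evict_list and evict() are
 * the caller's own, hypothetical bookkeeping)::
 *
 *	list_for_each_entry(obj, lru, lru_link) {
 *		list_add(&obj->evict_link, &evict_list);
 *		if (drm_mm_scan_add_block(&scan, &obj->node))
 *			break;
 *	}
 *
 *	// evict_list is now in reverse order of addition; every block that
 *	// was added must be passed back, whether it gets evicted or not.
 *	list_for_each_entry_safe(obj, next, &evict_list, evict_link) {
 *		if (drm_mm_scan_remove_block(&scan, &obj->node))
 *			evict(obj);
 *	}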
*/ bool drm_mm_scan_remove_block(struct drm_mm_scan *scan, struct drm_mm_node *node) { struct drm_mm_node *prev_node; DRM_MM_BUG_ON(node->mm != scan->mm); DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node)); __clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags); DRM_MM_BUG_ON(!node->mm->scan_active); node->mm->scan_active--; /* During drm_mm_scan_add_block() we decoupled this node leaving * its pointers intact. Now that the caller is walking back along * the eviction list we can restore this block into its rightful * place on the full node_list. To confirm that the caller is walking * backwards correctly we check that prev_node->next == node->next, * i.e. both believe the same node should be on the other side of the * hole. */ prev_node = list_prev_entry(node, node_list); DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) != list_next_entry(node, node_list)); list_add(&node->node_list, &prev_node->node_list); return (node->start + node->size > scan->hit_start && node->start < scan->hit_end); } EXPORT_SYMBOL(drm_mm_scan_remove_block); /** * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole * @scan: drm_mm scan with target hole * * After completing an eviction scan and removing the selected nodes, we may * need to remove a few more nodes from either side of the target hole if * mm.color_adjust is being used. * * Returns: * A node to evict, or NULL if there are no overlapping nodes. */ struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan) { struct drm_mm *mm = scan->mm; struct drm_mm_node *hole; u64 hole_start, hole_end; DRM_MM_BUG_ON(list_empty(&mm->hole_stack)); if (!mm->color_adjust) return NULL; /* * The hole found during scanning should ideally be the first element * in the hole_stack list, but due to side-effects in the driver it * may not be. */ list_for_each_entry(hole, &mm->hole_stack, hole_stack) { hole_start = __drm_mm_hole_node_start(hole); hole_end = hole_start + hole->hole_size; if (hole_start <= scan->hit_start && hole_end >= scan->hit_end) break; } /* We should only be called after we found the hole previously */ DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack); if (unlikely(&hole->hole_stack == &mm->hole_stack)) return NULL; DRM_MM_BUG_ON(hole_start > scan->hit_start); DRM_MM_BUG_ON(hole_end < scan->hit_end); mm->color_adjust(hole, scan->color, &hole_start, &hole_end); if (hole_start > scan->hit_start) return hole; if (hole_end < scan->hit_end) return list_next_entry(hole, node_list); return NULL; } EXPORT_SYMBOL(drm_mm_scan_color_evict); /** * drm_mm_init - initialize a drm-mm allocator * @mm: the drm_mm structure to initialize * @start: start of the range managed by @mm * @size: end of the range managed by @mm * * Note that @mm must be cleared to 0 before calling this function. */ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size) { DRM_MM_BUG_ON(start + size <= start); mm->color_adjust = NULL; INIT_LIST_HEAD(&mm->hole_stack); mm->interval_tree = RB_ROOT_CACHED; mm->holes_size = RB_ROOT_CACHED; mm->holes_addr = RB_ROOT; /* Clever trick to avoid a special case in the free hole tracking. */ INIT_LIST_HEAD(&mm->head_node.node_list); mm->head_node.flags = 0; mm->head_node.mm = mm; mm->head_node.start = start + size; mm->head_node.size = -size; add_hole(&mm->head_node); mm->scan_active = 0; } EXPORT_SYMBOL(drm_mm_init); /** * drm_mm_takedown - clean up a drm_mm allocator * @mm: drm_mm allocator to clean up * * Note that it is a bug to call this function on an allocator which is not * clean. 
*/ void drm_mm_takedown(struct drm_mm *mm) { if (WARN(!drm_mm_clean(mm), "Memory manager not clean during takedown.\n")) show_leaks(mm); } EXPORT_SYMBOL(drm_mm_takedown); static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry) { u64 start, size; size = entry->hole_size; if (size) { start = drm_mm_hole_node_start(entry); drm_printf(p, "%#018llx-%#018llx: %llu: free\n", start, start + size, size); } return size; } /** * drm_mm_print - print allocator state * @mm: drm_mm allocator to print * @p: DRM printer to use */ void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p) { const struct drm_mm_node *entry; u64 total_used = 0, total_free = 0, total = 0; total_free += drm_mm_dump_hole(p, &mm->head_node); drm_mm_for_each_node(entry, mm) { drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start, entry->start + entry->size, entry->size); total_used += entry->size; total_free += drm_mm_dump_hole(p, entry); } total = total_free + total_used; drm_printf(p, "total: %llu, used %llu free %llu\n", total, total_used, total_free); } EXPORT_SYMBOL(drm_mm_print); |
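/*
 * Illustrative sketch (not part of drm_mm.c): one way a driver could drive
 * the "lru scan roster" documented above to carve out a hole of a given size.
 * The struct example_node, its lru_link list and example_evict_for_hole() are
 * hypothetical driver-side names; drm_mm_remove_node() is assumed from
 * <drm/drm_mm.h> (it is not shown in this excerpt).  The flow follows the DOC
 * comment: add candidates until a hole is found, then walk them back in
 * exactly the reverse order, evicting only the blocks the scanner kept plus
 * any colour spill-over reported by drm_mm_scan_color_evict().
 */
#include <linux/list.h>
#include <drm/drm_mm.h>

struct example_node {
	struct drm_mm_node node;
	struct list_head lru_link;	/* position on the driver's LRU */
};

static int example_evict_for_hole(struct drm_mm *mm, struct list_head *lru,
				  u64 size, u64 alignment)
{
	struct drm_mm_scan scan;
	struct drm_mm_node *spill;
	struct example_node *obj, *next;
	LIST_HEAD(eviction_list);
	bool found = false;

	drm_mm_scan_init_with_range(&scan, mm, size, alignment, 0,
				    0, U64_MAX, DRM_MM_INSERT_BEST);

	/* 1) Feed LRU objects into the roster until a suitable hole appears. */
	list_for_each_entry_safe(obj, next, lru, lru_link) {
		/* Remember the scan order on a private list for the walk back. */
		list_move(&obj->lru_link, &eviction_list);
		if (drm_mm_scan_add_block(&scan, &obj->node)) {
			found = true;
			break;
		}
	}

	/*
	 * 2) Every scanned block must be removed again in reverse order of
	 *    addition; list_move() above prepends, so walking forward here is
	 *    that reverse order.  Blocks not needed for the hole go back onto
	 *    the LRU.
	 */
	list_for_each_entry_safe(obj, next, &eviction_list, lru_link) {
		if (!drm_mm_scan_remove_block(&scan, &obj->node))
			list_move_tail(&obj->lru_link, lru);
	}

	if (!found)
		return -ENOSPC;

	/* 3) Evict what the scanner kept on the eviction list... */
	list_for_each_entry_safe(obj, next, &eviction_list, lru_link) {
		list_del_init(&obj->lru_link);
		drm_mm_remove_node(&obj->node);
		/* a real driver would also unbind/free the object here */
	}

	/* 4) ...plus any neighbours needed after colour adjustment. */
	while ((spill = drm_mm_scan_color_evict(&scan))) {
		obj = container_of(spill, struct example_node, node);
		list_del_init(&obj->lru_link);
		drm_mm_remove_node(&obj->node);
	}

	return 0;
}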
2 2 3 3 3 5 1 5 5 5 5 1 1 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 | // SPDX-License-Identifier: GPL-2.0-only /* Common methods for dibusb-based-receivers. * * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) * * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information */ #include "dibusb.h" /* Max transfer size done by I2C transfer functions */ #define MAX_XFER_SIZE 64 static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info (|-able))." DVB_USB_DEBUG_STATUS); MODULE_LICENSE("GPL"); #define deb_info(args...) dprintk(debug,0x01,args) /* common stuff used by the different dibusb modules */ int dibusb_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) { if (adap->priv != NULL) { struct dibusb_state *st = adap->priv; if (st->ops.fifo_ctrl != NULL) if (st->ops.fifo_ctrl(adap->fe_adap[0].fe, onoff)) { err("error while controlling the fifo of the demod."); return -ENODEV; } } return 0; } EXPORT_SYMBOL(dibusb_streaming_ctrl); int dibusb_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, int onoff) { if (adap->priv != NULL) { struct dibusb_state *st = adap->priv; if (st->ops.pid_ctrl != NULL) st->ops.pid_ctrl(adap->fe_adap[0].fe, index, pid, onoff); } return 0; } EXPORT_SYMBOL(dibusb_pid_filter); int dibusb_pid_filter_ctrl(struct dvb_usb_adapter *adap, int onoff) { if (adap->priv != NULL) { struct dibusb_state *st = adap->priv; if (st->ops.pid_parse != NULL) if (st->ops.pid_parse(adap->fe_adap[0].fe, onoff) < 0) err("could not handle pid_parser"); } return 0; } EXPORT_SYMBOL(dibusb_pid_filter_ctrl); int dibusb_power_ctrl(struct dvb_usb_device *d, int onoff) { u8 *b; int ret; b = kmalloc(3, GFP_KERNEL); if (!b) return -ENOMEM; b[0] = DIBUSB_REQ_SET_IOCTL; b[1] = DIBUSB_IOCTL_CMD_POWER_MODE; b[2] = onoff ? 
DIBUSB_IOCTL_POWER_WAKEUP : DIBUSB_IOCTL_POWER_SLEEP; ret = dvb_usb_generic_write(d, b, 3); kfree(b); msleep(10); return ret; } EXPORT_SYMBOL(dibusb_power_ctrl); int dibusb2_0_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) { int ret; u8 *b; b = kmalloc(3, GFP_KERNEL); if (!b) return -ENOMEM; if ((ret = dibusb_streaming_ctrl(adap,onoff)) < 0) goto ret; if (onoff) { b[0] = DIBUSB_REQ_SET_STREAMING_MODE; b[1] = 0x00; ret = dvb_usb_generic_write(adap->dev, b, 2); if (ret < 0) goto ret; } b[0] = DIBUSB_REQ_SET_IOCTL; b[1] = onoff ? DIBUSB_IOCTL_CMD_ENABLE_STREAM : DIBUSB_IOCTL_CMD_DISABLE_STREAM; ret = dvb_usb_generic_write(adap->dev, b, 3); ret: kfree(b); return ret; } EXPORT_SYMBOL(dibusb2_0_streaming_ctrl); int dibusb2_0_power_ctrl(struct dvb_usb_device *d, int onoff) { u8 *b; int ret; if (!onoff) return 0; b = kmalloc(3, GFP_KERNEL); if (!b) return -ENOMEM; b[0] = DIBUSB_REQ_SET_IOCTL; b[1] = DIBUSB_IOCTL_CMD_POWER_MODE; b[2] = DIBUSB_IOCTL_POWER_WAKEUP; ret = dvb_usb_generic_write(d, b, 3); kfree(b); return ret; } EXPORT_SYMBOL(dibusb2_0_power_ctrl); static int dibusb_i2c_msg(struct dvb_usb_device *d, u8 addr, u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen) { u8 *sndbuf; int ret, wo, len; /* write only ? */ wo = (rbuf == NULL || rlen == 0); len = 2 + wlen + (wo ? 0 : 2); sndbuf = kmalloc(MAX_XFER_SIZE, GFP_KERNEL); if (!sndbuf) return -ENOMEM; if (4 + wlen > MAX_XFER_SIZE) { warn("i2c wr: len=%d is too big!\n", wlen); ret = -EOPNOTSUPP; goto ret; } sndbuf[0] = wo ? DIBUSB_REQ_I2C_WRITE : DIBUSB_REQ_I2C_READ; sndbuf[1] = (addr << 1) | (wo ? 0 : 1); memcpy(&sndbuf[2], wbuf, wlen); if (!wo) { sndbuf[wlen + 2] = (rlen >> 8) & 0xff; sndbuf[wlen + 3] = rlen & 0xff; } ret = dvb_usb_generic_rw(d, sndbuf, len, rbuf, rlen, 0); ret: kfree(sndbuf); return ret; } /* * I2C master xfer function */ static int dibusb_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int i; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; for (i = 0; i < num; i++) { /* write/read request */ if (i+1 < num && (msg[i].flags & I2C_M_RD) == 0 && (msg[i+1].flags & I2C_M_RD)) { if (dibusb_i2c_msg(d, msg[i].addr, msg[i].buf,msg[i].len, msg[i+1].buf,msg[i+1].len) < 0) break; i++; } else if ((msg[i].flags & I2C_M_RD) == 0) { if (dibusb_i2c_msg(d, msg[i].addr, msg[i].buf,msg[i].len,NULL,0) < 0) break; } else if (msg[i].addr != 0x50) { /* 0x50 is the address of the eeprom - we need to protect it * from dibusb's bad i2c implementation: reads without * writing the offset before are forbidden */ if (dibusb_i2c_msg(d, msg[i].addr, NULL, 0, msg[i].buf, msg[i].len) < 0) break; } } mutex_unlock(&d->i2c_mutex); return i; } static u32 dibusb_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } struct i2c_algorithm dibusb_i2c_algo = { .master_xfer = dibusb_i2c_xfer, .functionality = dibusb_i2c_func, }; EXPORT_SYMBOL(dibusb_i2c_algo); int dibusb_read_eeprom_byte(struct dvb_usb_device *d, u8 offs, u8 *val) { u8 *buf; int rc; buf = kzalloc(2, GFP_KERNEL); if (!buf) return -ENOMEM; buf[0] = offs; rc = dibusb_i2c_msg(d, 0x50, &buf[0], 1, &buf[1], 1); *val = buf[1]; kfree(buf); return rc; } EXPORT_SYMBOL(dibusb_read_eeprom_byte); /* * common remote control stuff */ struct rc_map_table rc_map_dibusb_table[] = { /* Key codes for the little Artec T1/Twinhan/HAMA/ remote. 
*/ { 0x0016, KEY_POWER }, { 0x0010, KEY_MUTE }, { 0x0003, KEY_1 }, { 0x0001, KEY_2 }, { 0x0006, KEY_3 }, { 0x0009, KEY_4 }, { 0x001d, KEY_5 }, { 0x001f, KEY_6 }, { 0x000d, KEY_7 }, { 0x0019, KEY_8 }, { 0x001b, KEY_9 }, { 0x0015, KEY_0 }, { 0x0005, KEY_CHANNELUP }, { 0x0002, KEY_CHANNELDOWN }, { 0x001e, KEY_VOLUMEUP }, { 0x000a, KEY_VOLUMEDOWN }, { 0x0011, KEY_RECORD }, { 0x0017, KEY_FAVORITES }, /* Heart symbol - Channel list. */ { 0x0014, KEY_PLAY }, { 0x001a, KEY_STOP }, { 0x0040, KEY_REWIND }, { 0x0012, KEY_FASTFORWARD }, { 0x000e, KEY_PREVIOUS }, /* Recall - Previous channel. */ { 0x004c, KEY_PAUSE }, { 0x004d, KEY_SCREEN }, /* Full screen mode. */ { 0x0054, KEY_AUDIO }, /* MTS - Switch to secondary audio. */ /* additional keys TwinHan VisionPlus, the Artec seemingly not have */ { 0x000c, KEY_CANCEL }, /* Cancel */ { 0x001c, KEY_EPG }, /* EPG */ { 0x0000, KEY_TAB }, /* Tab */ { 0x0048, KEY_INFO }, /* Preview */ { 0x0004, KEY_LIST }, /* RecordList */ { 0x000f, KEY_TEXT }, /* Teletext */ /* Key codes for the KWorld/ADSTech/JetWay remote. */ { 0x8612, KEY_POWER }, { 0x860f, KEY_SELECT }, /* source */ { 0x860c, KEY_UNKNOWN }, /* scan */ { 0x860b, KEY_EPG }, { 0x8610, KEY_MUTE }, { 0x8601, KEY_1 }, { 0x8602, KEY_2 }, { 0x8603, KEY_3 }, { 0x8604, KEY_4 }, { 0x8605, KEY_5 }, { 0x8606, KEY_6 }, { 0x8607, KEY_7 }, { 0x8608, KEY_8 }, { 0x8609, KEY_9 }, { 0x860a, KEY_0 }, { 0x8618, KEY_ZOOM }, { 0x861c, KEY_UNKNOWN }, /* preview */ { 0x8613, KEY_UNKNOWN }, /* snap */ { 0x8600, KEY_UNDO }, { 0x861d, KEY_RECORD }, { 0x860d, KEY_STOP }, { 0x860e, KEY_PAUSE }, { 0x8616, KEY_PLAY }, { 0x8611, KEY_BACK }, { 0x8619, KEY_FORWARD }, { 0x8614, KEY_UNKNOWN }, /* pip */ { 0x8615, KEY_ESC }, { 0x861a, KEY_UP }, { 0x861e, KEY_DOWN }, { 0x861f, KEY_LEFT }, { 0x861b, KEY_RIGHT }, /* Key codes for the DiBcom MOD3000 remote. */ { 0x8000, KEY_MUTE }, { 0x8001, KEY_TEXT }, { 0x8002, KEY_HOME }, { 0x8003, KEY_POWER }, { 0x8004, KEY_RED }, { 0x8005, KEY_GREEN }, { 0x8006, KEY_YELLOW }, { 0x8007, KEY_BLUE }, { 0x8008, KEY_DVD }, { 0x8009, KEY_AUDIO }, { 0x800a, KEY_IMAGES }, /* Pictures */ { 0x800b, KEY_VIDEO }, { 0x800c, KEY_BACK }, { 0x800d, KEY_UP }, { 0x800e, KEY_RADIO }, { 0x800f, KEY_EPG }, { 0x8010, KEY_LEFT }, { 0x8011, KEY_OK }, { 0x8012, KEY_RIGHT }, { 0x8013, KEY_UNKNOWN }, /* SAP */ { 0x8014, KEY_TV }, { 0x8015, KEY_DOWN }, { 0x8016, KEY_MENU }, /* DVD Menu */ { 0x8017, KEY_LAST }, { 0x8018, KEY_RECORD }, { 0x8019, KEY_STOP }, { 0x801a, KEY_PAUSE }, { 0x801b, KEY_PLAY }, { 0x801c, KEY_PREVIOUS }, { 0x801d, KEY_REWIND }, { 0x801e, KEY_FASTFORWARD }, { 0x801f, KEY_NEXT}, { 0x8040, KEY_1 }, { 0x8041, KEY_2 }, { 0x8042, KEY_3 }, { 0x8043, KEY_CHANNELUP }, { 0x8044, KEY_4 }, { 0x8045, KEY_5 }, { 0x8046, KEY_6 }, { 0x8047, KEY_CHANNELDOWN }, { 0x8048, KEY_7 }, { 0x8049, KEY_8 }, { 0x804a, KEY_9 }, { 0x804b, KEY_VOLUMEUP }, { 0x804c, KEY_CLEAR }, { 0x804d, KEY_0 }, { 0x804e, KEY_ENTER }, { 0x804f, KEY_VOLUMEDOWN }, }; EXPORT_SYMBOL(rc_map_dibusb_table); int dibusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state) { u8 *buf; int ret; buf = kmalloc(5, GFP_KERNEL); if (!buf) return -ENOMEM; buf[0] = DIBUSB_REQ_POLL_REMOTE; ret = dvb_usb_generic_rw(d, buf, 1, buf, 5, 0); if (ret < 0) goto ret; dvb_usb_nec_rc_key_to_event(d, buf, event, state); if (buf[0] != 0) deb_info("key: %*ph\n", 5, buf); ret: kfree(buf); return ret; } EXPORT_SYMBOL(dibusb_rc_query); |
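/*
 * Illustrative sketch (not part of this file): a board driver could use the
 * exported dibusb_read_eeprom_byte() helper to pull a small block out of the
 * onboard EEPROM, e.g. a MAC address.  example_read_eeprom_block() and its
 * offset/length are placeholders, not values taken from any particular board;
 * "dibusb.h" is assumed to be included as at the top of this file.  Each byte
 * needs its own write-then-read cycle because, as noted in dibusb_i2c_xfer(),
 * plain reads against the EEPROM at 0x50 are not supported by the firmware.
 */
static int example_read_eeprom_block(struct dvb_usb_device *d, u8 offs,
				     u8 *dst, size_t len)
{
	size_t i;
	int ret;

	for (i = 0; i < len; i++) {
		ret = dibusb_read_eeprom_byte(d, offs + i, &dst[i]);
		if (ret < 0)
			return ret;	/* underlying USB transfer failed */
	}

	return 0;
}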
27 1 1 1 18 8 18 5 4 1 1 1 4 1 4 4 3 1 4 3 3 20 17 5 1 1 1 1 1 2 1 1 2 1 2 1 1 4 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 | // SPDX-License-Identifier: GPL-2.0-only /* * linux/kernel/power/user.c * * This file provides the user space interface for software suspend/resume. * * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> */ #include <linux/suspend.h> #include <linux/reboot.h> #include <linux/string.h> #include <linux/device.h> #include <linux/miscdevice.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/pm.h> #include <linux/fs.h> #include <linux/compat.h> #include <linux/console.h> #include <linux/cpu.h> #include <linux/freezer.h> #include <linux/uaccess.h> #include "power.h" static bool need_wait; static struct snapshot_data { struct snapshot_handle handle; int swap; int mode; bool frozen; bool ready; bool platform_support; bool free_bitmaps; dev_t dev; } snapshot_state; int is_hibernate_resume_dev(dev_t dev) { return hibernation_available() && snapshot_state.dev == dev; } static int snapshot_open(struct inode *inode, struct file *filp) { struct snapshot_data *data; int error; if (!hibernation_available()) return -EPERM; lock_system_sleep(); if (!hibernate_acquire()) { error = -EBUSY; goto Unlock; } if ((filp->f_flags & O_ACCMODE) == O_RDWR) { hibernate_release(); error = -ENOSYS; goto Unlock; } nonseekable_open(inode, filp); data = &snapshot_state; filp->private_data = data; memset(&data->handle, 0, sizeof(struct snapshot_handle)); if ((filp->f_flags & O_ACCMODE) == O_RDONLY) { /* Hibernating. The image device should be accessible. */ data->swap = swap_type_of(swsusp_resume_device, 0); data->mode = O_RDONLY; data->free_bitmaps = false; error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION); } else { /* * Resuming. 
We may need to wait for the image device to * appear. */ need_wait = true; data->swap = -1; data->mode = O_WRONLY; error = pm_notifier_call_chain_robust(PM_RESTORE_PREPARE, PM_POST_RESTORE); if (!error) { error = create_basic_memory_bitmaps(); data->free_bitmaps = !error; } } if (error) hibernate_release(); data->frozen = false; data->ready = false; data->platform_support = false; data->dev = 0; Unlock: unlock_system_sleep(); return error; } static int snapshot_release(struct inode *inode, struct file *filp) { struct snapshot_data *data; lock_system_sleep(); swsusp_free(); data = filp->private_data; data->dev = 0; free_all_swap_pages(data->swap); if (data->frozen) { pm_restore_gfp_mask(); free_basic_memory_bitmaps(); thaw_processes(); } else if (data->free_bitmaps) { free_basic_memory_bitmaps(); } pm_notifier_call_chain(data->mode == O_RDONLY ? PM_POST_HIBERNATION : PM_POST_RESTORE); hibernate_release(); unlock_system_sleep(); return 0; } static ssize_t snapshot_read(struct file *filp, char __user *buf, size_t count, loff_t *offp) { struct snapshot_data *data; ssize_t res; loff_t pg_offp = *offp & ~PAGE_MASK; lock_system_sleep(); data = filp->private_data; if (!data->ready) { res = -ENODATA; goto Unlock; } if (!pg_offp) { /* on page boundary? */ res = snapshot_read_next(&data->handle); if (res <= 0) goto Unlock; } else { res = PAGE_SIZE - pg_offp; } res = simple_read_from_buffer(buf, count, &pg_offp, data_of(data->handle), res); if (res > 0) *offp += res; Unlock: unlock_system_sleep(); return res; } static ssize_t snapshot_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp) { struct snapshot_data *data; ssize_t res; loff_t pg_offp = *offp & ~PAGE_MASK; if (need_wait) { wait_for_device_probe(); need_wait = false; } lock_system_sleep(); data = filp->private_data; if (!pg_offp) { res = snapshot_write_next(&data->handle); if (res <= 0) goto unlock; } else { res = PAGE_SIZE - pg_offp; } if (!data_of(data->handle)) { res = -EINVAL; goto unlock; } res = simple_write_to_buffer(data_of(data->handle), res, &pg_offp, buf, count); if (res > 0) *offp += res; unlock: unlock_system_sleep(); return res; } struct compat_resume_swap_area { compat_loff_t offset; u32 dev; } __packed; static int snapshot_set_swap_area(struct snapshot_data *data, void __user *argp) { sector_t offset; dev_t swdev; if (swsusp_swap_in_use()) return -EPERM; if (in_compat_syscall()) { struct compat_resume_swap_area swap_area; if (copy_from_user(&swap_area, argp, sizeof(swap_area))) return -EFAULT; swdev = new_decode_dev(swap_area.dev); offset = swap_area.offset; } else { struct resume_swap_area swap_area; if (copy_from_user(&swap_area, argp, sizeof(swap_area))) return -EFAULT; swdev = new_decode_dev(swap_area.dev); offset = swap_area.offset; } /* * User space encodes device types as two-byte values, * so we need to recode them */ data->swap = swap_type_of(swdev, offset); if (data->swap < 0) return swdev ? 
-ENODEV : -EINVAL; data->dev = swdev; return 0; } static long snapshot_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int error = 0; struct snapshot_data *data; loff_t size; sector_t offset; if (need_wait) { wait_for_device_probe(); need_wait = false; } if (_IOC_TYPE(cmd) != SNAPSHOT_IOC_MAGIC) return -ENOTTY; if (_IOC_NR(cmd) > SNAPSHOT_IOC_MAXNR) return -ENOTTY; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!mutex_trylock(&system_transition_mutex)) return -EBUSY; lock_device_hotplug(); data = filp->private_data; switch (cmd) { case SNAPSHOT_FREEZE: if (data->frozen) break; ksys_sync_helper(); error = freeze_processes(); if (error) break; error = create_basic_memory_bitmaps(); if (error) thaw_processes(); else data->frozen = true; break; case SNAPSHOT_UNFREEZE: if (!data->frozen || data->ready) break; pm_restore_gfp_mask(); free_basic_memory_bitmaps(); data->free_bitmaps = false; thaw_processes(); data->frozen = false; break; case SNAPSHOT_CREATE_IMAGE: if (data->mode != O_RDONLY || !data->frozen || data->ready) { error = -EPERM; break; } pm_restore_gfp_mask(); error = hibernation_snapshot(data->platform_support); if (!error) { error = put_user(in_suspend, (int __user *)arg); data->ready = !freezer_test_done && !error; freezer_test_done = false; } break; case SNAPSHOT_ATOMIC_RESTORE: snapshot_write_finalize(&data->handle); if (data->mode != O_WRONLY || !data->frozen || !snapshot_image_loaded(&data->handle)) { error = -EPERM; break; } error = hibernation_restore(data->platform_support); break; case SNAPSHOT_FREE: swsusp_free(); memset(&data->handle, 0, sizeof(struct snapshot_handle)); data->ready = false; /* * It is necessary to thaw kernel threads here, because * SNAPSHOT_CREATE_IMAGE may be invoked directly after * SNAPSHOT_FREE. In that case, if kernel threads were not * thawed, the preallocation of memory carried out by * hibernation_snapshot() might run into problems (i.e. it * might fail or even deadlock). 
*/ thaw_kernel_threads(); break; case SNAPSHOT_PREF_IMAGE_SIZE: image_size = arg; break; case SNAPSHOT_GET_IMAGE_SIZE: if (!data->ready) { error = -ENODATA; break; } size = snapshot_get_image_size(); size <<= PAGE_SHIFT; error = put_user(size, (loff_t __user *)arg); break; case SNAPSHOT_AVAIL_SWAP_SIZE: size = count_swap_pages(data->swap, 1); size <<= PAGE_SHIFT; error = put_user(size, (loff_t __user *)arg); break; case SNAPSHOT_ALLOC_SWAP_PAGE: if (data->swap < 0 || data->swap >= MAX_SWAPFILES) { error = -ENODEV; break; } offset = alloc_swapdev_block(data->swap); if (offset) { offset <<= PAGE_SHIFT; error = put_user(offset, (loff_t __user *)arg); } else { error = -ENOSPC; } break; case SNAPSHOT_FREE_SWAP_PAGES: if (data->swap < 0 || data->swap >= MAX_SWAPFILES) { error = -ENODEV; break; } free_all_swap_pages(data->swap); break; case SNAPSHOT_S2RAM: if (!data->frozen) { error = -EPERM; break; } /* * Tasks are frozen and the notifiers have been called with * PM_HIBERNATION_PREPARE */ error = suspend_devices_and_enter(PM_SUSPEND_MEM); data->ready = false; break; case SNAPSHOT_PLATFORM_SUPPORT: data->platform_support = !!arg; break; case SNAPSHOT_POWER_OFF: if (data->platform_support) error = hibernation_platform_enter(); break; case SNAPSHOT_SET_SWAP_AREA: error = snapshot_set_swap_area(data, (void __user *)arg); break; default: error = -ENOTTY; } unlock_device_hotplug(); mutex_unlock(&system_transition_mutex); return error; } #ifdef CONFIG_COMPAT static long snapshot_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { BUILD_BUG_ON(sizeof(loff_t) != sizeof(compat_loff_t)); switch (cmd) { case SNAPSHOT_GET_IMAGE_SIZE: case SNAPSHOT_AVAIL_SWAP_SIZE: case SNAPSHOT_ALLOC_SWAP_PAGE: case SNAPSHOT_CREATE_IMAGE: case SNAPSHOT_SET_SWAP_AREA: return snapshot_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); default: return snapshot_ioctl(file, cmd, arg); } } #endif /* CONFIG_COMPAT */ static const struct file_operations snapshot_fops = { .open = snapshot_open, .release = snapshot_release, .read = snapshot_read, .write = snapshot_write, .llseek = no_llseek, .unlocked_ioctl = snapshot_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = snapshot_compat_ioctl, #endif }; static struct miscdevice snapshot_device = { .minor = SNAPSHOT_MINOR, .name = "snapshot", .fops = &snapshot_fops, }; static int __init snapshot_device_init(void) { return misc_register(&snapshot_device); }; device_initcall(snapshot_device_init); |
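/*
 * Illustrative user-space sketch (not part of this file): the minimal ioctl
 * sequence a hibernation helper would issue against /dev/snapshot to create
 * an image, following the handlers above (see also
 * Documentation/power/userland-swsusp.rst).  Error handling is abbreviated,
 * the 4096-byte page size is an assumption, and a real tool (such as s2disk)
 * would also configure the swap area via SNAPSHOT_SET_SWAP_AREA and power the
 * machine off after saving the image.
 */
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/suspend_ioctls.h>

static int example_create_image(const char *dest_path)
{
	char page[4096];
	int in_suspend = 0;
	int dev, dest;
	ssize_t n;

	dev = open("/dev/snapshot", O_RDONLY);	/* O_RDONLY: "hibernating" side */
	if (dev < 0)
		return -1;

	if (ioctl(dev, SNAPSHOT_FREEZE, 0))	/* freeze tasks, set up bitmaps */
		goto out_close;

	if (ioctl(dev, SNAPSHOT_CREATE_IMAGE, &in_suspend))
		goto out_unfreeze;

	if (in_suspend) {
		/* Image created: copy it out page by page. */
		dest = open(dest_path, O_WRONLY | O_CREAT | O_TRUNC, 0600);
		if (dest >= 0) {
			while ((n = read(dev, page, sizeof(page))) > 0)
				if (write(dest, page, n) != n)
					break;
			close(dest);
		}
		ioctl(dev, SNAPSHOT_FREE, 0);	/* release the in-memory image */
	}
	/* in_suspend == 0 means we were just restored from a saved image. */

out_unfreeze:
	ioctl(dev, SNAPSHOT_UNFREEZE, 0);
out_close:
	close(dev);
	return in_suspend;
}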
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2004 IBM Corporation * Copyright (C) 2014 Intel Corporation * * Authors: * Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com> * Leendert van Doorn <leendert@watson.ibm.com> * Dave Safford <safford@watson.ibm.com> * Reiner Sailer <sailer@watson.ibm.com> * Kylene Hall <kjhall@us.ibm.com> * * Maintained by: <tpmdd-devel@lists.sourceforge.net> * * TPM chip management routines.
*/ #include <linux/poll.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/spinlock.h> #include <linux/freezer.h> #include <linux/major.h> #include <linux/tpm_eventlog.h> #include <linux/hw_random.h> #include "tpm.h" DEFINE_IDR(dev_nums_idr); static DEFINE_MUTEX(idr_lock); struct class *tpm_class; struct class *tpmrm_class; dev_t tpm_devt; static int tpm_request_locality(struct tpm_chip *chip) { int rc; if (!chip->ops->request_locality) return 0; rc = chip->ops->request_locality(chip, 0); if (rc < 0) return rc; chip->locality = rc; return 0; } static void tpm_relinquish_locality(struct tpm_chip *chip) { int rc; if (!chip->ops->relinquish_locality) return; rc = chip->ops->relinquish_locality(chip, chip->locality); if (rc) dev_err(&chip->dev, "%s: : error %d\n", __func__, rc); chip->locality = -1; } static int tpm_cmd_ready(struct tpm_chip *chip) { if (!chip->ops->cmd_ready) return 0; return chip->ops->cmd_ready(chip); } static int tpm_go_idle(struct tpm_chip *chip) { if (!chip->ops->go_idle) return 0; return chip->ops->go_idle(chip); } static void tpm_clk_enable(struct tpm_chip *chip) { if (chip->ops->clk_enable) chip->ops->clk_enable(chip, true); } static void tpm_clk_disable(struct tpm_chip *chip) { if (chip->ops->clk_enable) chip->ops->clk_enable(chip, false); } /** * tpm_chip_start() - power on the TPM * @chip: a TPM chip to use * * Return: * * The response length - OK * * -errno - A system error */ int tpm_chip_start(struct tpm_chip *chip) { int ret; tpm_clk_enable(chip); if (chip->locality == -1) { ret = tpm_request_locality(chip); if (ret) { tpm_clk_disable(chip); return ret; } } ret = tpm_cmd_ready(chip); if (ret) { tpm_relinquish_locality(chip); tpm_clk_disable(chip); return ret; } return 0; } EXPORT_SYMBOL_GPL(tpm_chip_start); /** * tpm_chip_stop() - power off the TPM * @chip: a TPM chip to use * * Return: * * The response length - OK * * -errno - A system error */ void tpm_chip_stop(struct tpm_chip *chip) { tpm_go_idle(chip); tpm_relinquish_locality(chip); tpm_clk_disable(chip); } EXPORT_SYMBOL_GPL(tpm_chip_stop); /** * tpm_try_get_ops() - Get a ref to the tpm_chip * @chip: Chip to ref * * The caller must already have some kind of locking to ensure that chip is * valid. This function will lock the chip so that the ops member can be * accessed safely. The locking prevents tpm_chip_unregister from * completing, so it should not be held for long periods. * * Returns -ERRNO if the chip could not be got. */ int tpm_try_get_ops(struct tpm_chip *chip) { int rc = -EIO; get_device(&chip->dev); down_read(&chip->ops_sem); if (!chip->ops) goto out_ops; mutex_lock(&chip->tpm_mutex); rc = tpm_chip_start(chip); if (rc) goto out_lock; return 0; out_lock: mutex_unlock(&chip->tpm_mutex); out_ops: up_read(&chip->ops_sem); put_device(&chip->dev); return rc; } EXPORT_SYMBOL_GPL(tpm_try_get_ops); /** * tpm_put_ops() - Release a ref to the tpm_chip * @chip: Chip to put * * This is the opposite pair to tpm_try_get_ops(). After this returns chip may * be kfree'd. 
*/ void tpm_put_ops(struct tpm_chip *chip) { tpm_chip_stop(chip); mutex_unlock(&chip->tpm_mutex); up_read(&chip->ops_sem); put_device(&chip->dev); } EXPORT_SYMBOL_GPL(tpm_put_ops); /** * tpm_default_chip() - find a TPM chip and get a reference to it */ struct tpm_chip *tpm_default_chip(void) { struct tpm_chip *chip, *res = NULL; int chip_num = 0; int chip_prev; mutex_lock(&idr_lock); do { chip_prev = chip_num; chip = idr_get_next(&dev_nums_idr, &chip_num); if (chip) { get_device(&chip->dev); res = chip; break; } } while (chip_prev != chip_num); mutex_unlock(&idr_lock); return res; } EXPORT_SYMBOL_GPL(tpm_default_chip); /** * tpm_find_get_ops() - find and reserve a TPM chip * @chip: a &struct tpm_chip instance, %NULL for the default chip * * Finds a TPM chip and reserves its class device and operations. The chip must * be released with tpm_put_ops() after use. * This function is for internal use only. It supports existing TPM callers * by accepting NULL, but those callers should be converted to pass in a chip * directly. * * Return: * A reserved &struct tpm_chip instance. * %NULL if a chip is not found. * %NULL if the chip is not available. */ struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip) { int rc; if (chip) { if (!tpm_try_get_ops(chip)) return chip; return NULL; } chip = tpm_default_chip(); if (!chip) return NULL; rc = tpm_try_get_ops(chip); /* release additional reference we got from tpm_default_chip() */ put_device(&chip->dev); if (rc) return NULL; return chip; } /** * tpm_dev_release() - free chip memory and the device number * @dev: the character device for the TPM chip * * This is used as the release function for the character device. */ static void tpm_dev_release(struct device *dev) { struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev); mutex_lock(&idr_lock); idr_remove(&dev_nums_idr, chip->dev_num); mutex_unlock(&idr_lock); kfree(chip->log.bios_event_log); kfree(chip->work_space.context_buf); kfree(chip->work_space.session_buf); kfree(chip->allocated_banks); kfree(chip); } /** * tpm_class_shutdown() - prepare the TPM device for loss of power. * @dev: device to which the chip is associated. * * Issues a TPM2_Shutdown command prior to loss of power, as required by the * TPM 2.0 spec. Then, calls bus- and device- specific shutdown code. * * Return: always 0 (i.e. success) */ static int tpm_class_shutdown(struct device *dev) { struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev); down_write(&chip->ops_sem); if (chip->flags & TPM_CHIP_FLAG_TPM2) { if (!tpm_chip_start(chip)) { tpm2_shutdown(chip, TPM2_SU_CLEAR); tpm_chip_stop(chip); } } chip->ops = NULL; up_write(&chip->ops_sem); return 0; } /** * tpm_chip_alloc() - allocate a new struct tpm_chip instance * @pdev: device to which the chip is associated * At this point pdev mst be initialized, but does not have to * be registered * @ops: struct tpm_class_ops instance * * Allocates a new struct tpm_chip instance and assigns a free * device number for it. Must be paired with put_device(&chip->dev). 
*/ struct tpm_chip *tpm_chip_alloc(struct device *pdev, const struct tpm_class_ops *ops) { struct tpm_chip *chip; int rc; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (chip == NULL) return ERR_PTR(-ENOMEM); mutex_init(&chip->tpm_mutex); init_rwsem(&chip->ops_sem); chip->ops = ops; mutex_lock(&idr_lock); rc = idr_alloc(&dev_nums_idr, NULL, 0, TPM_NUM_DEVICES, GFP_KERNEL); mutex_unlock(&idr_lock); if (rc < 0) { dev_err(pdev, "No available tpm device numbers\n"); kfree(chip); return ERR_PTR(rc); } chip->dev_num = rc; device_initialize(&chip->dev); chip->dev.class = tpm_class; chip->dev.class->shutdown_pre = tpm_class_shutdown; chip->dev.release = tpm_dev_release; chip->dev.parent = pdev; chip->dev.groups = chip->groups; if (chip->dev_num == 0) chip->dev.devt = MKDEV(MISC_MAJOR, TPM_MINOR); else chip->dev.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num); rc = dev_set_name(&chip->dev, "tpm%d", chip->dev_num); if (rc) goto out; if (!pdev) chip->flags |= TPM_CHIP_FLAG_VIRTUAL; cdev_init(&chip->cdev, &tpm_fops); chip->cdev.owner = THIS_MODULE; rc = tpm2_init_space(&chip->work_space, TPM2_SPACE_BUFFER_SIZE); if (rc) { rc = -ENOMEM; goto out; } chip->locality = -1; return chip; out: put_device(&chip->dev); return ERR_PTR(rc); } EXPORT_SYMBOL_GPL(tpm_chip_alloc); /** * tpmm_chip_alloc() - allocate a new struct tpm_chip instance * @pdev: parent device to which the chip is associated * @ops: struct tpm_class_ops instance * * Same as tpm_chip_alloc except devm is used to do the put_device */ struct tpm_chip *tpmm_chip_alloc(struct device *pdev, const struct tpm_class_ops *ops) { struct tpm_chip *chip; int rc; chip = tpm_chip_alloc(pdev, ops); if (IS_ERR(chip)) return chip; rc = devm_add_action_or_reset(pdev, (void (*)(void *)) put_device, &chip->dev); if (rc) return ERR_PTR(rc); dev_set_drvdata(pdev, chip); return chip; } EXPORT_SYMBOL_GPL(tpmm_chip_alloc); static int tpm_add_char_device(struct tpm_chip *chip) { int rc; rc = cdev_device_add(&chip->cdev, &chip->dev); if (rc) { dev_err(&chip->dev, "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n", dev_name(&chip->dev), MAJOR(chip->dev.devt), MINOR(chip->dev.devt), rc); return rc; } if (chip->flags & TPM_CHIP_FLAG_TPM2) { rc = tpm_devs_add(chip); if (rc) goto err_del_cdev; } /* Make the chip available. */ mutex_lock(&idr_lock); idr_replace(&dev_nums_idr, chip, chip->dev_num); mutex_unlock(&idr_lock); return 0; err_del_cdev: cdev_device_del(&chip->cdev, &chip->dev); return rc; } static void tpm_del_char_device(struct tpm_chip *chip) { cdev_device_del(&chip->cdev, &chip->dev); /* Make the chip unavailable. */ mutex_lock(&idr_lock); idr_replace(&dev_nums_idr, NULL, chip->dev_num); mutex_unlock(&idr_lock); /* Make the driver uncallable. */ down_write(&chip->ops_sem); /* * Check if chip->ops is still valid: In case that the controller * drivers shutdown handler unregisters the controller in its * shutdown handler we are called twice and chip->ops to NULL. 
*/ if (chip->ops) { if (chip->flags & TPM_CHIP_FLAG_TPM2) { if (!tpm_chip_start(chip)) { tpm2_shutdown(chip, TPM2_SU_CLEAR); tpm_chip_stop(chip); } } chip->ops = NULL; } up_write(&chip->ops_sem); } static void tpm_del_legacy_sysfs(struct tpm_chip *chip) { struct attribute **i; if (chip->flags & (TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_VIRTUAL)) return; sysfs_remove_link(&chip->dev.parent->kobj, "ppi"); for (i = chip->groups[0]->attrs; *i != NULL; ++i) sysfs_remove_link(&chip->dev.parent->kobj, (*i)->name); } /* For compatibility with legacy sysfs paths we provide symlinks from the * parent dev directory to selected names within the tpm chip directory. Old * kernel versions created these files directly under the parent. */ static int tpm_add_legacy_sysfs(struct tpm_chip *chip) { struct attribute **i; int rc; if (chip->flags & (TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_VIRTUAL)) return 0; rc = compat_only_sysfs_link_entry_to_kobj( &chip->dev.parent->kobj, &chip->dev.kobj, "ppi", NULL); if (rc && rc != -ENOENT) return rc; /* All the names from tpm-sysfs */ for (i = chip->groups[0]->attrs; *i != NULL; ++i) { rc = compat_only_sysfs_link_entry_to_kobj( &chip->dev.parent->kobj, &chip->dev.kobj, (*i)->name, NULL); if (rc) { tpm_del_legacy_sysfs(chip); return rc; } } return 0; } static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng); return tpm_get_random(chip, data, max); } static int tpm_add_hwrng(struct tpm_chip *chip) { if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM)) return 0; snprintf(chip->hwrng_name, sizeof(chip->hwrng_name), "tpm-rng-%d", chip->dev_num); chip->hwrng.name = chip->hwrng_name; chip->hwrng.read = tpm_hwrng_read; return hwrng_register(&chip->hwrng); } static int tpm_get_pcr_allocation(struct tpm_chip *chip) { int rc; rc = (chip->flags & TPM_CHIP_FLAG_TPM2) ? tpm2_get_pcr_allocation(chip) : tpm1_get_pcr_allocation(chip); if (rc > 0) return -ENODEV; return rc; } /* * tpm_chip_register() - create a character device for the TPM chip * @chip: TPM chip to use. * * Creates a character device for the TPM chip and adds sysfs attributes for * the device. As the last step this function adds the chip to the list of TPM * chips available for in-kernel use. * * This function should be only called after the chip initialization is * complete. */ int tpm_chip_register(struct tpm_chip *chip) { int rc; rc = tpm_chip_start(chip); if (rc) return rc; rc = tpm_auto_startup(chip); if (rc) { tpm_chip_stop(chip); return rc; } rc = tpm_get_pcr_allocation(chip); tpm_chip_stop(chip); if (rc) return rc; tpm_sysfs_add_device(chip); tpm_bios_log_setup(chip); tpm_add_ppi(chip); rc = tpm_add_hwrng(chip); if (rc) goto out_ppi; rc = tpm_add_char_device(chip); if (rc) goto out_hwrng; rc = tpm_add_legacy_sysfs(chip); if (rc) { tpm_chip_unregister(chip); return rc; } return 0; out_hwrng: if (IS_ENABLED(CONFIG_HW_RANDOM_TPM)) hwrng_unregister(&chip->hwrng); out_ppi: tpm_bios_log_teardown(chip); return rc; } EXPORT_SYMBOL_GPL(tpm_chip_register); /* * tpm_chip_unregister() - release the TPM driver * @chip: TPM chip to use. * * Takes the chip first away from the list of available TPM chips and then * cleans up all the resources reserved by tpm_chip_register(). * * Once this function returns the driver call backs in 'op's will not be * running and will no longer start. * * NOTE: This function should be only called before deinitializing chip * resources. 
*/ void tpm_chip_unregister(struct tpm_chip *chip) { tpm_del_legacy_sysfs(chip); if (IS_ENABLED(CONFIG_HW_RANDOM_TPM)) hwrng_unregister(&chip->hwrng); tpm_bios_log_teardown(chip); if (chip->flags & TPM_CHIP_FLAG_TPM2) tpm_devs_remove(chip); tpm_del_char_device(chip); } EXPORT_SYMBOL_GPL(tpm_chip_unregister); |
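/*
 * Illustrative sketch (not part of this file): how an in-kernel caller pairs
 * the reference and locking helpers above.  tpm_default_chip() hands back a
 * device reference, tpm_try_get_ops() additionally takes its own reference,
 * locks the chip and powers it up, and every successful tpm_try_get_ops()
 * must be balanced by tpm_put_ops() -- the same pattern tpm_find_get_ops()
 * uses internally.  example_use_default_tpm() is a hypothetical caller; the
 * declarations are assumed to come from <linux/tpm.h>.
 */
#include <linux/tpm.h>

static int example_use_default_tpm(void)
{
	struct tpm_chip *chip;
	int rc;

	chip = tpm_default_chip();	/* takes a reference on chip->dev */
	if (!chip)
		return -ENODEV;

	rc = tpm_try_get_ops(chip);	/* lock ops, start the chip */
	if (rc)
		goto out_put;

	/* ... issue TPM commands via chip->ops here ... */

	tpm_put_ops(chip);		/* stop the chip, unlock, drop its ref */
out_put:
	put_device(&chip->dev);		/* drop tpm_default_chip()'s reference */
	return rc;
}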
34 16 35 13 28 27 25 29 7 40 37 3 38 2 34 12 33 13 3 3 2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 | /* SPDX-License-Identifier: GPL-2.0-only */ /* * v4l2-rect.h - v4l2_rect helper functions * * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved. */ #ifndef _V4L2_RECT_H_ #define _V4L2_RECT_H_ #include <linux/videodev2.h> /** * v4l2_rect_set_size_to() - copy the width/height values. * @r: rect whose width and height fields will be set * @size: rect containing the width and height fields you need. */ static inline void v4l2_rect_set_size_to(struct v4l2_rect *r, const struct v4l2_rect *size) { r->width = size->width; r->height = size->height; } /** * v4l2_rect_set_min_size() - width and height of r should be >= min_size. * @r: rect whose width and height will be modified * @min_size: rect containing the minimal width and height */ static inline void v4l2_rect_set_min_size(struct v4l2_rect *r, const struct v4l2_rect *min_size) { if (r->width < min_size->width) r->width = min_size->width; if (r->height < min_size->height) r->height = min_size->height; } /** * v4l2_rect_set_max_size() - width and height of r should be <= max_size * @r: rect whose width and height will be modified * @max_size: rect containing the maximum width and height */ static inline void v4l2_rect_set_max_size(struct v4l2_rect *r, const struct v4l2_rect *max_size) { if (r->width > max_size->width) r->width = max_size->width; if (r->height > max_size->height) r->height = max_size->height; } /** * v4l2_rect_map_inside()- r should be inside boundary. * @r: rect that will be modified * @boundary: rect containing the boundary for @r */ static inline void v4l2_rect_map_inside(struct v4l2_rect *r, const struct v4l2_rect *boundary) { v4l2_rect_set_max_size(r, boundary); if (r->left < boundary->left) r->left = boundary->left; if (r->top < boundary->top) r->top = boundary->top; if (r->left + r->width > boundary->left + boundary->width) r->left = boundary->left + boundary->width - r->width; if (r->top + r->height > boundary->top + boundary->height) r->top = boundary->top + boundary->height - r->height; } /** * v4l2_rect_same_size() - return true if r1 has the same size as r2 * @r1: rectangle. * @r2: rectangle. * * Return true if both rectangles have the same size. */ static inline bool v4l2_rect_same_size(const struct v4l2_rect *r1, const struct v4l2_rect *r2) { return r1->width == r2->width && r1->height == r2->height; } /** * v4l2_rect_same_position() - return true if r1 has the same position as r2 * @r1: rectangle. * @r2: rectangle. * * Return true if both rectangles have the same position */ static inline bool v4l2_rect_same_position(const struct v4l2_rect *r1, const struct v4l2_rect *r2) { return r1->top == r2->top && r1->left == r2->left; } /** * v4l2_rect_equal() - return true if r1 equals r2 * @r1: rectangle. * @r2: rectangle. 
* * Return true if both rectangles have the same size and position. */ static inline bool v4l2_rect_equal(const struct v4l2_rect *r1, const struct v4l2_rect *r2) { return v4l2_rect_same_size(r1, r2) && v4l2_rect_same_position(r1, r2); } /** * v4l2_rect_intersect() - calculate the intersection of two rects. * @r: intersection of @r1 and @r2. * @r1: rectangle. * @r2: rectangle. */ static inline void v4l2_rect_intersect(struct v4l2_rect *r, const struct v4l2_rect *r1, const struct v4l2_rect *r2) { int right, bottom; r->top = max(r1->top, r2->top); r->left = max(r1->left, r2->left); bottom = min(r1->top + r1->height, r2->top + r2->height); right = min(r1->left + r1->width, r2->left + r2->width); r->height = max(0, bottom - r->top); r->width = max(0, right - r->left); } /** * v4l2_rect_scale() - scale rect r by to/from * @r: rect to be scaled. * @from: from rectangle. * @to: to rectangle. * * This scales rectangle @r horizontally by @to->width / @from->width and * vertically by @to->height / @from->height. * * Typically @r is a rectangle inside @from and you want the rectangle as * it would appear after scaling @from to @to. So the resulting @r will * be the scaled rectangle inside @to. */ static inline void v4l2_rect_scale(struct v4l2_rect *r, const struct v4l2_rect *from, const struct v4l2_rect *to) { if (from->width == 0 || from->height == 0) { r->left = r->top = r->width = r->height = 0; return; } r->left = (((r->left - from->left) * to->width) / from->width) & ~1; r->width = ((r->width * to->width) / from->width) & ~1; r->top = ((r->top - from->top) * to->height) / from->height; r->height = (r->height * to->height) / from->height; } /** * v4l2_rect_overlap() - do r1 and r2 overlap? * @r1: rectangle. * @r2: rectangle. * * Returns true if @r1 and @r2 overlap. */ static inline bool v4l2_rect_overlap(const struct v4l2_rect *r1, const struct v4l2_rect *r2) { /* * IF the left side of r1 is to the right of the right side of r2 OR * the left side of r2 is to the right of the right side of r1 THEN * they do not overlap. */ if (r1->left >= r2->left + r2->width || r2->left >= r1->left + r1->width) return false; /* * IF the top side of r1 is below the bottom of r2 OR * the top side of r2 is below the bottom of r1 THEN * they do not overlap. */ if (r1->top >= r2->top + r2->height || r2->top >= r1->top + r1->height) return false; return true; } /** * v4l2_rect_enclosed() - is r1 enclosed in r2? * @r1: rectangle. * @r2: rectangle. * * Returns true if @r1 is enclosed in @r2. */ static inline bool v4l2_rect_enclosed(struct v4l2_rect *r1, struct v4l2_rect *r2) { if (r1->left < r2->left || r1->top < r2->top) return false; if (r1->left + r1->width > r2->left + r2->width) return false; if (r1->top + r1->height > r2->top + r2->height) return false; return true; } #endif |
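/*
 * Illustrative sketch (not part of v4l2-rect.h): a typical use of the helpers
 * above when validating a crop rectangle, e.g. in a VIDIOC_S_SELECTION
 * handler.  example_clamp_crop() and the 1920x1080 bounds / 32x32 minimum are
 * made-up values, not taken from any driver.
 */
#include <media/v4l2-rect.h>

static void example_clamp_crop(struct v4l2_rect *crop)
{
	const struct v4l2_rect bounds = {
		.left = 0, .top = 0, .width = 1920, .height = 1080,
	};
	const struct v4l2_rect min_size = {
		.width = 32, .height = 32,
	};

	/* Enforce the size limits first, then pull the rectangle into bounds. */
	v4l2_rect_set_min_size(crop, &min_size);
	v4l2_rect_set_max_size(crop, &bounds);
	v4l2_rect_map_inside(crop, &bounds);
}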
3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 3151 3152 3153 3154 3155 3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166 3167 3168 3169 3170 3171 3172 3173 3174 3175 3176 3177 3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211 3212 3213 3214 3215 3216 3217 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 3394 3395 3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433 3434 3435 3436 3437 3438 3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540 3541 3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594 3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 | // 
SPDX-License-Identifier: GPL-2.0 /* * n_gsm.c GSM 0710 tty multiplexor * Copyright (c) 2009/10 Intel Corporation * * * THIS IS A DEVELOPMENT SNAPSHOT IT IS NOT A FINAL RELEASE * * * TO DO: * Mostly done: ioctls for setting modes/timing * Partly done: hooks so you can pull off frames to non tty devs * Restart DLCI 0 when it closes ? * Improve the tx engine * Resolve tx side locking by adding a queue_head and routing * all control traffic via it * General tidy/document * Review the locking/move to refcounts more (mux now moved to an * alloc/free model ready) * Use newest tty open/close port helpers and install hooks * What to do about power functions ? * Termios setting and negotiation * Do we need a 'which mux are you' ioctl to correlate mux and tty sets * */ #include <linux/types.h> #include <linux/major.h> #include <linux/errno.h> #include <linux/signal.h> #include <linux/fcntl.h> #include <linux/sched/signal.h> #include <linux/interrupt.h> #include <linux/tty.h> #include <linux/ctype.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/bitops.h> #include <linux/file.h> #include <linux/uaccess.h> #include <linux/module.h> #include <linux/timer.h> #include <linux/tty_flip.h> #include <linux/tty_driver.h> #include <linux/serial.h> #include <linux/kfifo.h> #include <linux/skbuff.h> #include <net/arp.h> #include <linux/ip.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/gsmmux.h> #include "tty.h" static int debug; module_param(debug, int, 0600); /* Defaults: these are from the specification */ #define T1 10 /* 100mS */ #define T2 34 /* 333mS */ #define N2 3 /* Retry 3 times */ /* Use long timers for testing at low speed with debug on */ #ifdef DEBUG_TIMING #define T1 100 #define T2 200 #endif /* * Semi-arbitrary buffer size limits. 0710 is normally run with 32-64 byte * limits so this is plenty */ #define MAX_MRU 1500 #define MAX_MTU 1500 /* SOF, ADDR, CTRL, LEN1, LEN2, ..., FCS, EOF */ #define PROT_OVERHEAD 7 #define GSM_NET_TX_TIMEOUT (HZ*10) /* * struct gsm_mux_net - network interface * * Created when net interface is initialized. */ struct gsm_mux_net { struct kref ref; struct gsm_dlci *dlci; }; /* * Each block of data we have queued to go out is in the form of * a gsm_msg which holds everything we need in a link layer independent * format */ struct gsm_msg { struct list_head list; u8 addr; /* DLCI address + flags */ u8 ctrl; /* Control byte + flags */ unsigned int len; /* Length of data block (can be zero) */ unsigned char *data; /* Points into buffer but not at the start */ unsigned char buffer[]; }; enum gsm_dlci_state { DLCI_CLOSED, DLCI_OPENING, /* Sending SABM not seen UA */ DLCI_OPEN, /* SABM/UA complete */ DLCI_CLOSING, /* Sending DISC not seen UA/DM */ }; enum gsm_dlci_mode { DLCI_MODE_ABM, /* Normal Asynchronous Balanced Mode */ DLCI_MODE_ADM, /* Asynchronous Disconnected Mode */ }; /* * Each active data link has a gsm_dlci structure associated which ties * the link layer to an optional tty (if the tty side is open). To avoid * complexity right now these are only ever freed up when the mux is * shut down. * * At the moment we don't free DLCI objects until the mux is torn down * this avoid object life time issues but might be worth review later. 
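 *
 * Note: the adaption field below selects how user data is wrapped before
 * transmission. Adaption 1 sends the raw byte stream, adaption 2 prefixes
 * each frame with a single modem-status octet, and adaptions 3/4 carry
 * framed packets queued as skbuffs (see gsm_dlci_data_output() and
 * gsm_dlci_data_output_framed() below).
 *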
*/ struct gsm_dlci { struct gsm_mux *gsm; int addr; enum gsm_dlci_state state; struct mutex mutex; /* Link layer */ enum gsm_dlci_mode mode; spinlock_t lock; /* Protects the internal state */ struct timer_list t1; /* Retransmit timer for SABM and UA */ int retries; /* Uplink tty if active */ struct tty_port port; /* The tty bound to this DLCI if there is one */ #define TX_SIZE 4096 /* Must be power of 2. */ struct kfifo fifo; /* Queue fifo for the DLCI */ int adaption; /* Adaption layer in use */ int prev_adaption; u32 modem_rx; /* Our incoming virtual modem lines */ u32 modem_tx; /* Our outgoing modem lines */ bool dead; /* Refuse re-open */ /* Flow control */ bool throttled; /* Private copy of throttle state */ bool constipated; /* Throttle status for outgoing */ /* Packetised I/O */ struct sk_buff *skb; /* Frame being sent */ struct sk_buff_head skb_list; /* Queued frames */ /* Data handling callback */ void (*data)(struct gsm_dlci *dlci, const u8 *data, int len); void (*prev_data)(struct gsm_dlci *dlci, const u8 *data, int len); struct net_device *net; /* network interface, if created */ }; /* DLCI 0, 62/63 are special or reserved see gsmtty_open */ #define NUM_DLCI 64 /* * DLCI 0 is used to pass control blocks out of band of the data * flow (and with a higher link priority). One command can be outstanding * at a time and we use this structure to manage them. They are created * and destroyed by the user context, and updated by the receive paths * and timers */ struct gsm_control { u8 cmd; /* Command we are issuing */ u8 *data; /* Data for the command in case we retransmit */ int len; /* Length of block for retransmission */ int done; /* Done flag */ int error; /* Error if any */ }; enum gsm_mux_state { GSM_SEARCH, GSM0_ADDRESS, GSM0_CONTROL, GSM0_LEN0, GSM0_LEN1, GSM0_DATA, GSM0_FCS, GSM0_SSOF, GSM1_START, GSM1_ADDRESS, GSM1_CONTROL, GSM1_DATA, GSM1_OVERRUN, }; /* * Each GSM mux we have is represented by this structure. If we are * operating as an ldisc then we use this structure as our ldisc * state. We need to sort out lifetimes and locking with respect * to the gsm mux array. For now we don't free DLCI objects that * have been instantiated until the mux itself is terminated. * * To consider further: tty open versus mux shutdown. 
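 *
 * Note on queue accounting: tx_bytes counts data that has been queued on
 * tx_list but not yet written to the underlying tty. The sweep logic stops
 * queueing new frames once tx_bytes exceeds TX_THRESH_HI, and the kick
 * timer only re-runs the sweep once it has fallen below TX_THRESH_LO,
 * giving a simple hysteresis on queue depth.
 *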
*/ struct gsm_mux { struct tty_struct *tty; /* The tty our ldisc is bound to */ spinlock_t lock; struct mutex mutex; unsigned int num; struct kref ref; /* Events on the GSM channel */ wait_queue_head_t event; /* Bits for GSM mode decoding */ /* Framing Layer */ unsigned char *buf; enum gsm_mux_state state; unsigned int len; unsigned int address; unsigned int count; bool escape; int encoding; u8 control; u8 fcs; u8 *txframe; /* TX framing buffer */ /* Method for the receiver side */ void (*receive)(struct gsm_mux *gsm, u8 ch); /* Link Layer */ unsigned int mru; unsigned int mtu; int initiator; /* Did we initiate connection */ bool dead; /* Has the mux been shut down */ struct gsm_dlci *dlci[NUM_DLCI]; int old_c_iflag; /* termios c_iflag value before attach */ bool constipated; /* Asked by remote to shut up */ bool has_devices; /* Devices were registered */ spinlock_t tx_lock; unsigned int tx_bytes; /* TX data outstanding */ #define TX_THRESH_HI 8192 #define TX_THRESH_LO 2048 struct list_head tx_list; /* Pending data packets */ /* Control messages */ struct timer_list kick_timer; /* Kick TX queuing on timeout */ struct timer_list t2_timer; /* Retransmit timer for commands */ int cretries; /* Command retry counter */ struct gsm_control *pending_cmd;/* Our current pending command */ spinlock_t control_lock; /* Protects the pending command */ /* Configuration */ int adaption; /* 1 or 2 supported */ u8 ftype; /* UI or UIH */ int t1, t2; /* Timers in 1/100th of a sec */ int n2; /* Retry count */ /* Statistics (not currently exposed) */ unsigned long bad_fcs; unsigned long malformed; unsigned long io_error; unsigned long bad_size; unsigned long unsupported; }; /* * Mux objects - needed so that we can translate a tty index into the * relevant mux and DLCI. */ #define MAX_MUX 4 /* 256 minors */ static struct gsm_mux *gsm_mux[MAX_MUX]; /* GSM muxes */ static DEFINE_SPINLOCK(gsm_mux_lock); static struct tty_driver *gsm_tty_driver; /* Save dlci open address */ static int addr_open[256] = { 0 }; /* Save dlci open count */ static int addr_cnt; /* * This section of the driver logic implements the GSM encodings * both the basic and the 'advanced'. Reliable transport is not * supported. 
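 *
 * As a rough sketch, a basic option (encoding 0) frame on the wire is
 *
 *	GSM0_SOF | address EA | control | length EA(s) | data | FCS | GSM0_SOF
 *
 * while the advanced option (encoding 1) has no length field and instead
 * byte-stuffs the payload between GSM1_SOF flags, escaping GSM1_SOF,
 * GSM1_ESCAPE and XON/XOFF with GSM1_ESCAPE followed by the byte XORed
 * with GSM1_ESCAPE_BITS.
 *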
*/ #define CR 0x02 #define EA 0x01 #define PF 0x10 /* I is special: the rest are ..*/ #define RR 0x01 #define UI 0x03 #define RNR 0x05 #define REJ 0x09 #define DM 0x0F #define SABM 0x2F #define DISC 0x43 #define UA 0x63 #define UIH 0xEF /* Channel commands */ #define CMD_NSC 0x09 #define CMD_TEST 0x11 #define CMD_PSC 0x21 #define CMD_RLS 0x29 #define CMD_FCOFF 0x31 #define CMD_PN 0x41 #define CMD_RPN 0x49 #define CMD_FCON 0x51 #define CMD_CLD 0x61 #define CMD_SNC 0x69 #define CMD_MSC 0x71 /* Virtual modem bits */ #define MDM_FC 0x01 #define MDM_RTC 0x02 #define MDM_RTR 0x04 #define MDM_IC 0x20 #define MDM_DV 0x40 #define GSM0_SOF 0xF9 #define GSM1_SOF 0x7E #define GSM1_ESCAPE 0x7D #define GSM1_ESCAPE_BITS 0x20 #define XON 0x11 #define XOFF 0x13 #define ISO_IEC_646_MASK 0x7F static const struct tty_port_operations gsm_port_ops; /* * CRC table for GSM 0710 */ static const u8 gsm_fcs8[256] = { 0x00, 0x91, 0xE3, 0x72, 0x07, 0x96, 0xE4, 0x75, 0x0E, 0x9F, 0xED, 0x7C, 0x09, 0x98, 0xEA, 0x7B, 0x1C, 0x8D, 0xFF, 0x6E, 0x1B, 0x8A, 0xF8, 0x69, 0x12, 0x83, 0xF1, 0x60, 0x15, 0x84, 0xF6, 0x67, 0x38, 0xA9, 0xDB, 0x4A, 0x3F, 0xAE, 0xDC, 0x4D, 0x36, 0xA7, 0xD5, 0x44, 0x31, 0xA0, 0xD2, 0x43, 0x24, 0xB5, 0xC7, 0x56, 0x23, 0xB2, 0xC0, 0x51, 0x2A, 0xBB, 0xC9, 0x58, 0x2D, 0xBC, 0xCE, 0x5F, 0x70, 0xE1, 0x93, 0x02, 0x77, 0xE6, 0x94, 0x05, 0x7E, 0xEF, 0x9D, 0x0C, 0x79, 0xE8, 0x9A, 0x0B, 0x6C, 0xFD, 0x8F, 0x1E, 0x6B, 0xFA, 0x88, 0x19, 0x62, 0xF3, 0x81, 0x10, 0x65, 0xF4, 0x86, 0x17, 0x48, 0xD9, 0xAB, 0x3A, 0x4F, 0xDE, 0xAC, 0x3D, 0x46, 0xD7, 0xA5, 0x34, 0x41, 0xD0, 0xA2, 0x33, 0x54, 0xC5, 0xB7, 0x26, 0x53, 0xC2, 0xB0, 0x21, 0x5A, 0xCB, 0xB9, 0x28, 0x5D, 0xCC, 0xBE, 0x2F, 0xE0, 0x71, 0x03, 0x92, 0xE7, 0x76, 0x04, 0x95, 0xEE, 0x7F, 0x0D, 0x9C, 0xE9, 0x78, 0x0A, 0x9B, 0xFC, 0x6D, 0x1F, 0x8E, 0xFB, 0x6A, 0x18, 0x89, 0xF2, 0x63, 0x11, 0x80, 0xF5, 0x64, 0x16, 0x87, 0xD8, 0x49, 0x3B, 0xAA, 0xDF, 0x4E, 0x3C, 0xAD, 0xD6, 0x47, 0x35, 0xA4, 0xD1, 0x40, 0x32, 0xA3, 0xC4, 0x55, 0x27, 0xB6, 0xC3, 0x52, 0x20, 0xB1, 0xCA, 0x5B, 0x29, 0xB8, 0xCD, 0x5C, 0x2E, 0xBF, 0x90, 0x01, 0x73, 0xE2, 0x97, 0x06, 0x74, 0xE5, 0x9E, 0x0F, 0x7D, 0xEC, 0x99, 0x08, 0x7A, 0xEB, 0x8C, 0x1D, 0x6F, 0xFE, 0x8B, 0x1A, 0x68, 0xF9, 0x82, 0x13, 0x61, 0xF0, 0x85, 0x14, 0x66, 0xF7, 0xA8, 0x39, 0x4B, 0xDA, 0xAF, 0x3E, 0x4C, 0xDD, 0xA6, 0x37, 0x45, 0xD4, 0xA1, 0x30, 0x42, 0xD3, 0xB4, 0x25, 0x57, 0xC6, 0xB3, 0x22, 0x50, 0xC1, 0xBA, 0x2B, 0x59, 0xC8, 0xBD, 0x2C, 0x5E, 0xCF }; #define INIT_FCS 0xFF #define GOOD_FCS 0xCF static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len); static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk); /** * gsm_fcs_add - update FCS * @fcs: Current FCS * @c: Next data * * Update the FCS to include c. Uses the algorithm in the specification * notes. */ static inline u8 gsm_fcs_add(u8 fcs, u8 c) { return gsm_fcs8[fcs ^ c]; } /** * gsm_fcs_add_block - update FCS for a block * @fcs: Current FCS * @c: buffer of data * @len: length of buffer * * Update the FCS to include c. Uses the algorithm in the specification * notes. */ static inline u8 gsm_fcs_add_block(u8 fcs, u8 *c, int len) { while (len--) fcs = gsm_fcs8[fcs ^ *c++]; return fcs; } /** * gsm_read_ea - read a byte into an EA * @val: variable holding value * @c: byte going into the EA * * Processes one byte of an EA. 
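 *
 * (Worked example: the address octet 0x07 that gsm_send() builds when
 * commanding DLCI 1 as initiator has its EA bit set, so a single call
 * here yields *val = 0x07 >> 1 = 0x03 and returns 1; gsm_queue() later
 * splits that into C/R = 1 and DLCI address 1.)
 *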
Updates the passed variable * and returns 1 if the EA is now completely read */ static int gsm_read_ea(unsigned int *val, u8 c) { /* Add the next 7 bits into the value */ *val <<= 7; *val |= c >> 1; /* Was this the last byte of the EA 1 = yes*/ return c & EA; } /** * gsm_read_ea_val - read a value until EA * @val: variable holding value * @data: buffer of data * @dlen: length of data * * Processes an EA value. Updates the passed variable and * returns the processed data length. */ static unsigned int gsm_read_ea_val(unsigned int *val, const u8 *data, int dlen) { unsigned int len = 0; for (; dlen > 0; dlen--) { len++; if (gsm_read_ea(val, *data++)) break; } return len; } /** * gsm_encode_modem - encode modem data bits * @dlci: DLCI to encode from * * Returns the correct GSM encoded modem status bits (6 bit field) for * the current status of the DLCI and attached tty object */ static u8 gsm_encode_modem(const struct gsm_dlci *dlci) { u8 modembits = 0; /* FC is true flow control not modem bits */ if (dlci->throttled) modembits |= MDM_FC; if (dlci->modem_tx & TIOCM_DTR) modembits |= MDM_RTC; if (dlci->modem_tx & TIOCM_RTS) modembits |= MDM_RTR; if (dlci->modem_tx & TIOCM_RI) modembits |= MDM_IC; if (dlci->modem_tx & TIOCM_CD || dlci->gsm->initiator) modembits |= MDM_DV; return modembits; } static void gsm_hex_dump_bytes(const char *fname, const u8 *data, unsigned long len) { char *prefix; if (!fname) { print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, data, len, true); return; } prefix = kasprintf(GFP_ATOMIC, "%s: ", fname); if (!prefix) return; print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 16, 1, data, len, true); kfree(prefix); } /** * gsm_register_devices - register all tty devices for a given mux index * * @driver: the tty driver that describes the tty devices * @index: the mux number is used to calculate the minor numbers of the * ttys for this mux and may differ from the position in the * mux array. */ static int gsm_register_devices(struct tty_driver *driver, unsigned int index) { struct device *dev; int i; unsigned int base; if (!driver || index >= MAX_MUX) return -EINVAL; base = index * NUM_DLCI; /* first minor for this index */ for (i = 1; i < NUM_DLCI; i++) { /* Don't register device 0 - this is the control channel * and not a usable tty interface */ dev = tty_register_device(gsm_tty_driver, base + i, NULL); if (IS_ERR(dev)) { if (debug & 8) pr_info("%s failed to register device minor %u", __func__, base + i); for (i--; i >= 1; i--) tty_unregister_device(gsm_tty_driver, base + i); return PTR_ERR(dev); } } return 0; } /** * gsm_unregister_devices - unregister all tty devices for a given mux index * * @driver: the tty driver that describes the tty devices * @index: the mux number is used to calculate the minor numbers of the * ttys for this mux and may differ from the position in the * mux array. 
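 *
 * For example, with NUM_DLCI of 64, mux index 0 covers tty minors 1-63
 * and mux index 1 covers minors 65-127; the first minor of each block
 * corresponds to DLCI 0, the control channel, and is never registered or
 * unregistered.
 *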
*/ static void gsm_unregister_devices(struct tty_driver *driver, unsigned int index) { int i; unsigned int base; if (!driver || index >= MAX_MUX) return; base = index * NUM_DLCI; /* first minor for this index */ for (i = 1; i < NUM_DLCI; i++) { /* Don't unregister device 0 - this is the control * channel and not a usable tty interface */ tty_unregister_device(gsm_tty_driver, base + i); } } /** * gsm_print_packet - display a frame for debug * @hdr: header to print before decode * @addr: address EA from the frame * @cr: C/R bit seen as initiator * @control: control including PF bit * @data: following data bytes * @dlen: length of data * * Displays a packet in human readable format for debugging purposes. The * style is based on amateur radio LAP-B dump display. */ static void gsm_print_packet(const char *hdr, int addr, int cr, u8 control, const u8 *data, int dlen) { if (!(debug & 1)) return; pr_info("%s %d) %c: ", hdr, addr, "RC"[cr]); switch (control & ~PF) { case SABM: pr_cont("SABM"); break; case UA: pr_cont("UA"); break; case DISC: pr_cont("DISC"); break; case DM: pr_cont("DM"); break; case UI: pr_cont("UI"); break; case UIH: pr_cont("UIH"); break; default: if (!(control & 0x01)) { pr_cont("I N(S)%d N(R)%d", (control & 0x0E) >> 1, (control & 0xE0) >> 5); } else switch (control & 0x0F) { case RR: pr_cont("RR(%d)", (control & 0xE0) >> 5); break; case RNR: pr_cont("RNR(%d)", (control & 0xE0) >> 5); break; case REJ: pr_cont("REJ(%d)", (control & 0xE0) >> 5); break; default: pr_cont("[%02X]", control); } } if (control & PF) pr_cont("(P)"); else pr_cont("(F)"); gsm_hex_dump_bytes(NULL, data, dlen); } /* * Link level transmission side */ /** * gsm_stuff_frame - bytestuff a packet * @input: input buffer * @output: output buffer * @len: length of input * * Expand a buffer by bytestuffing it. The worst case size change * is doubling and the caller is responsible for handing out * suitable sized buffers. */ static int gsm_stuff_frame(const u8 *input, u8 *output, int len) { int olen = 0; while (len--) { if (*input == GSM1_SOF || *input == GSM1_ESCAPE || (*input & ISO_IEC_646_MASK) == XON || (*input & ISO_IEC_646_MASK) == XOFF) { *output++ = GSM1_ESCAPE; *output++ = *input++ ^ GSM1_ESCAPE_BITS; olen++; } else *output++ = *input++; olen++; } return olen; } /** * gsm_send - send a control frame * @gsm: our GSM mux * @addr: address for control frame * @cr: command/response bit seen as initiator * @control: control byte including PF bit * * Format up and transmit a control frame. These do not go via the * queueing logic as they should be transmitted ahead of data when * they are needed. * * FIXME: Lock versus data TX path */ static void gsm_send(struct gsm_mux *gsm, int addr, int cr, int control) { int len; u8 cbuf[10]; u8 ibuf[3]; int ocr; /* toggle C/R coding if not initiator */ ocr = cr ^ (gsm->initiator ? 
0 : 1); switch (gsm->encoding) { case 0: cbuf[0] = GSM0_SOF; cbuf[1] = (addr << 2) | (ocr << 1) | EA; cbuf[2] = control; cbuf[3] = EA; /* Length of data = 0 */ cbuf[4] = 0xFF - gsm_fcs_add_block(INIT_FCS, cbuf + 1, 3); cbuf[5] = GSM0_SOF; len = 6; break; case 1: case 2: /* Control frame + packing (but not frame stuffing) in mode 1 */ ibuf[0] = (addr << 2) | (ocr << 1) | EA; ibuf[1] = control; ibuf[2] = 0xFF - gsm_fcs_add_block(INIT_FCS, ibuf, 2); /* Stuffing may double the size worst case */ len = gsm_stuff_frame(ibuf, cbuf + 1, 3); /* Now add the SOF markers */ cbuf[0] = GSM1_SOF; cbuf[len + 1] = GSM1_SOF; /* FIXME: we can omit the lead one in many cases */ len += 2; break; default: WARN_ON(1); return; } gsmld_output(gsm, cbuf, len); gsm_print_packet("-->", addr, cr, control, NULL, 0); } /** * gsm_response - send a control response * @gsm: our GSM mux * @addr: address for control frame * @control: control byte including PF bit * * Format up and transmit a link level response frame. */ static inline void gsm_response(struct gsm_mux *gsm, int addr, int control) { gsm_send(gsm, addr, 0, control); } /** * gsm_command - send a control command * @gsm: our GSM mux * @addr: address for control frame * @control: control byte including PF bit * * Format up and transmit a link level command frame. */ static inline void gsm_command(struct gsm_mux *gsm, int addr, int control) { gsm_send(gsm, addr, 1, control); } /* Data transmission */ #define HDR_LEN 6 /* ADDR CTRL [LEN.2] DATA FCS */ /** * gsm_data_alloc - allocate data frame * @gsm: GSM mux * @addr: DLCI address * @len: length excluding header and FCS * @ctrl: control byte * * Allocate a new data buffer for sending frames with data. Space is left * at the front for header bytes but that is treated as an implementation * detail and not for the high level code to use */ static struct gsm_msg *gsm_data_alloc(struct gsm_mux *gsm, u8 addr, int len, u8 ctrl) { struct gsm_msg *m = kmalloc(sizeof(struct gsm_msg) + len + HDR_LEN, GFP_ATOMIC); if (m == NULL) return NULL; m->data = m->buffer + HDR_LEN - 1; /* Allow for FCS */ m->len = len; m->addr = addr; m->ctrl = ctrl; INIT_LIST_HEAD(&m->list); return m; } /** * gsm_is_flow_ctrl_msg - checks if flow control message * @msg: message to check * * Returns true if the given message is a flow control command of the * control channel. False is returned in any other case. */ static bool gsm_is_flow_ctrl_msg(struct gsm_msg *msg) { unsigned int cmd; if (msg->addr > 0) return false; switch (msg->ctrl & ~PF) { case UI: case UIH: cmd = 0; if (gsm_read_ea_val(&cmd, msg->data + 2, msg->len - 2) < 1) break; switch (cmd & ~PF) { case CMD_FCOFF: case CMD_FCON: return true; } break; } return false; } /** * gsm_data_kick - poke the queue * @gsm: GSM Mux * @dlci: DLCI sending the data * * The tty device has called us to indicate that room has appeared in * the transmit queue. 
Ram more data into the pipe if we have any * If we have been flow-stopped by a CMD_FCOFF, then we can only * send messages on DLCI0 until CMD_FCON * * FIXME: lock against link layer control transmissions */ static void gsm_data_kick(struct gsm_mux *gsm, struct gsm_dlci *dlci) { struct gsm_msg *msg, *nmsg; int len; list_for_each_entry_safe(msg, nmsg, &gsm->tx_list, list) { if (gsm->constipated && !gsm_is_flow_ctrl_msg(msg)) continue; if (gsm->encoding != 0) { gsm->txframe[0] = GSM1_SOF; len = gsm_stuff_frame(msg->data, gsm->txframe + 1, msg->len); gsm->txframe[len + 1] = GSM1_SOF; len += 2; } else { gsm->txframe[0] = GSM0_SOF; memcpy(gsm->txframe + 1 , msg->data, msg->len); gsm->txframe[msg->len + 1] = GSM0_SOF; len = msg->len + 2; } if (debug & 4) gsm_hex_dump_bytes(__func__, gsm->txframe, len); if (gsmld_output(gsm, gsm->txframe, len) <= 0) break; /* FIXME: Can eliminate one SOF in many more cases */ gsm->tx_bytes -= msg->len; list_del(&msg->list); kfree(msg); if (dlci) { tty_port_tty_wakeup(&dlci->port); } else { int i = 0; for (i = 0; i < NUM_DLCI; i++) if (gsm->dlci[i]) tty_port_tty_wakeup(&gsm->dlci[i]->port); } } } /** * __gsm_data_queue - queue a UI or UIH frame * @dlci: DLCI sending the data * @msg: message queued * * Add data to the transmit queue and try and get stuff moving * out of the mux tty if not already doing so. The Caller must hold * the gsm tx lock. */ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg) { struct gsm_mux *gsm = dlci->gsm; u8 *dp = msg->data; u8 *fcs = dp + msg->len; /* Fill in the header */ if (gsm->encoding == 0) { if (msg->len < 128) *--dp = (msg->len << 1) | EA; else { *--dp = (msg->len >> 7); /* bits 7 - 15 */ *--dp = (msg->len & 127) << 1; /* bits 0 - 6 */ } } *--dp = msg->ctrl; if (gsm->initiator) *--dp = (msg->addr << 2) | 2 | EA; else *--dp = (msg->addr << 2) | EA; *fcs = gsm_fcs_add_block(INIT_FCS, dp , msg->data - dp); /* Ugly protocol layering violation */ if (msg->ctrl == UI || msg->ctrl == (UI|PF)) *fcs = gsm_fcs_add_block(*fcs, msg->data, msg->len); *fcs = 0xFF - *fcs; gsm_print_packet("Q> ", msg->addr, gsm->initiator, msg->ctrl, msg->data, msg->len); /* Move the header back and adjust the length, also allow for the FCS now tacked on the end */ msg->len += (msg->data - dp) + 1; msg->data = dp; /* Add to the actual output queue */ list_add_tail(&msg->list, &gsm->tx_list); gsm->tx_bytes += msg->len; gsm_data_kick(gsm, dlci); mod_timer(&gsm->kick_timer, jiffies + 10 * gsm->t1 * HZ / 100); } /** * gsm_data_queue - queue a UI or UIH frame * @dlci: DLCI sending the data * @msg: message queued * * Add data to the transmit queue and try and get stuff moving * out of the mux tty if not already doing so. Take the * the gsm tx lock and dlci lock. */ static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg) { unsigned long flags; spin_lock_irqsave(&dlci->gsm->tx_lock, flags); __gsm_data_queue(dlci, msg); spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags); } /** * gsm_dlci_data_output - try and push data out of a DLCI * @gsm: mux * @dlci: the DLCI to pull data from * * Pull data from a DLCI and send it into the transmit queue if there * is data. Keep to the MRU of the mux. This path handles the usual tty * interface which is a byte stream with optional modem data. * * Caller must hold the tx_lock of the mux. */ static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci) { struct gsm_msg *msg; u8 *dp; int h, len, size; /* for modem bits without break data */ h = ((dlci->adaption == 1) ? 
0 : 1); len = kfifo_len(&dlci->fifo); if (len == 0) return 0; /* MTU/MRU count only the data bits but watch adaption mode */ if ((len + h) > gsm->mtu) len = gsm->mtu - h; size = len + h; msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype); if (!msg) return -ENOMEM; dp = msg->data; switch (dlci->adaption) { case 1: /* Unstructured */ break; case 2: /* Unstructured with modem bits. * Always one byte as we never send inline break data */ *dp++ = (gsm_encode_modem(dlci) << 1) | EA; break; default: pr_err("%s: unsupported adaption %d\n", __func__, dlci->adaption); break; } WARN_ON(len != kfifo_out_locked(&dlci->fifo, dp, len, &dlci->lock)); /* Notify upper layer about available send space. */ tty_port_tty_wakeup(&dlci->port); __gsm_data_queue(dlci, msg); /* Bytes of data we used up */ return size; } /** * gsm_dlci_data_output_framed - try and push data out of a DLCI * @gsm: mux * @dlci: the DLCI to pull data from * * Pull data from a DLCI and send it into the transmit queue if there * is data. Keep to the MRU of the mux. This path handles framed data * queued as skbuffs to the DLCI. * * Caller must hold the tx_lock of the mux. */ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm, struct gsm_dlci *dlci) { struct gsm_msg *msg; u8 *dp; int len, size; int last = 0, first = 0; int overhead = 0; /* One byte per frame is used for B/F flags */ if (dlci->adaption == 4) overhead = 1; /* dlci->skb is locked by tx_lock */ if (dlci->skb == NULL) { dlci->skb = skb_dequeue_tail(&dlci->skb_list); if (dlci->skb == NULL) return 0; first = 1; } len = dlci->skb->len + overhead; /* MTU/MRU count only the data bits */ if (len > gsm->mtu) { if (dlci->adaption == 3) { /* Over long frame, bin it */ dev_kfree_skb_any(dlci->skb); dlci->skb = NULL; return 0; } len = gsm->mtu; } else last = 1; size = len + overhead; msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype); if (msg == NULL) { skb_queue_tail(&dlci->skb_list, dlci->skb); dlci->skb = NULL; return -ENOMEM; } dp = msg->data; if (dlci->adaption == 4) { /* Interruptible framed (Packetised Data) */ /* Flag byte to carry the start/end info */ *dp++ = last << 7 | first << 6 | 1; /* EA */ len--; } memcpy(dp, dlci->skb->data, len); skb_pull(dlci->skb, len); __gsm_data_queue(dlci, msg); if (last) { dev_kfree_skb_any(dlci->skb); dlci->skb = NULL; } return size; } /** * gsm_dlci_modem_output - try and push modem status out of a DLCI * @gsm: mux * @dlci: the DLCI to pull modem status from * @brk: break signal * * Push an empty frame in to the transmit queue to update the modem status * bits and to transmit an optional break. * * Caller must hold the tx_lock of the mux. */ static int gsm_dlci_modem_output(struct gsm_mux *gsm, struct gsm_dlci *dlci, u8 brk) { u8 *dp = NULL; struct gsm_msg *msg; int size = 0; /* for modem bits without break data */ switch (dlci->adaption) { case 1: /* Unstructured */ break; case 2: /* Unstructured with modem bits. */ size++; if (brk > 0) size++; break; default: pr_err("%s: unsupported adaption %d\n", __func__, dlci->adaption); return -EINVAL; } msg = gsm_data_alloc(gsm, dlci->addr, size, gsm->ftype); if (!msg) { pr_err("%s: gsm_data_alloc error", __func__); return -ENOMEM; } dp = msg->data; switch (dlci->adaption) { case 1: /* Unstructured */ break; case 2: /* Unstructured with modem bits. 
*/ if (brk == 0) { *dp++ = (gsm_encode_modem(dlci) << 1) | EA; } else { *dp++ = gsm_encode_modem(dlci) << 1; *dp++ = (brk << 4) | 2 | EA; /* Length, Break, EA */ } break; default: /* Handled above */ break; } __gsm_data_queue(dlci, msg); return size; } /** * gsm_dlci_data_sweep - look for data to send * @gsm: the GSM mux * * Sweep the GSM mux channels in priority order looking for ones with * data to send. We could do with optimising this scan a bit. We aim * to fill the queue totally or up to TX_THRESH_HI bytes. Once we hit * TX_THRESH_LO we get called again * * FIXME: We should round robin between groups and in theory you can * renegotiate DLCI priorities with optional stuff. Needs optimising. */ static int gsm_dlci_data_sweep(struct gsm_mux *gsm) { int len, ret = 0; /* Priority ordering: We should do priority with RR of the groups */ int i = 1; while (i < NUM_DLCI) { struct gsm_dlci *dlci; if (gsm->tx_bytes > TX_THRESH_HI) break; dlci = gsm->dlci[i]; if (dlci == NULL || dlci->constipated) { i++; continue; } if (dlci->adaption < 3 && !dlci->net) len = gsm_dlci_data_output(gsm, dlci); else len = gsm_dlci_data_output_framed(gsm, dlci); if (len < 0) break; /* DLCI empty - try the next */ if (len == 0) i++; else ret++; } return ret; } /** * gsm_dlci_data_kick - transmit if possible * @dlci: DLCI to kick * * Transmit data from this DLCI if the queue is empty. We can't rely on * a tty wakeup except when we filled the pipe so we need to fire off * new data ourselves in other cases. */ static void gsm_dlci_data_kick(struct gsm_dlci *dlci) { unsigned long flags; int sweep; if (dlci->constipated) return; spin_lock_irqsave(&dlci->gsm->tx_lock, flags); /* If we have nothing running then we need to fire up */ sweep = (dlci->gsm->tx_bytes < TX_THRESH_LO); if (dlci->gsm->tx_bytes == 0) { if (dlci->net) gsm_dlci_data_output_framed(dlci->gsm, dlci); else gsm_dlci_data_output(dlci->gsm, dlci); } if (sweep) gsm_dlci_data_sweep(dlci->gsm); spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags); } /* * Control message processing */ /** * gsm_control_reply - send a response frame to a control * @gsm: gsm channel * @cmd: the command to use * @data: data to follow encoded info * @dlen: length of data * * Encode up and queue a UI/UIH frame containing our response. */ static void gsm_control_reply(struct gsm_mux *gsm, int cmd, const u8 *data, int dlen) { struct gsm_msg *msg; msg = gsm_data_alloc(gsm, 0, dlen + 2, gsm->ftype); if (msg == NULL) return; msg->data[0] = (cmd & 0xFE) << 1 | EA; /* Clear C/R */ msg->data[1] = (dlen << 1) | EA; memcpy(msg->data + 2, data, dlen); gsm_data_queue(gsm->dlci[0], msg); } /** * gsm_process_modem - process received modem status * @tty: virtual tty bound to the DLCI * @dlci: DLCI to affect * @modem: modem bits (full EA) * @slen: number of signal octets * * Used when a modem control message or line state inline in adaption * layer 2 is processed. Sort out the local modem state and throttles */ static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci, u32 modem, int slen) { int mlines = 0; u8 brk = 0; int fc; /* The modem status command can either contain one octet (V.24 signals) * or two octets (V.24 signals + break signals). This is specified in * section 5.4.6.3.7 of the 07.10 mux spec. 
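 *
 * Note: gsm_read_ea() accumulates octets by shifting the previous value
 * left by seven bits, so for a two octet status the first (V.24 signal)
 * octet ends up in bits 7-13 of modem and the second (break) octet in
 * bits 0-6, which is why the code below takes "modem >> 7" for the
 * signals and the low bits for the break field.
 *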
*/ if (slen == 1) modem = modem & 0x7f; else { brk = modem & 0x7f; modem = (modem >> 7) & 0x7f; } /* Flow control/ready to communicate */ fc = (modem & MDM_FC) || !(modem & MDM_RTR); if (fc && !dlci->constipated) { /* Need to throttle our output on this device */ dlci->constipated = true; } else if (!fc && dlci->constipated) { dlci->constipated = false; gsm_dlci_data_kick(dlci); } /* Map modem bits */ if (modem & MDM_RTC) mlines |= TIOCM_DSR | TIOCM_DTR; if (modem & MDM_RTR) mlines |= TIOCM_RTS | TIOCM_CTS; if (modem & MDM_IC) mlines |= TIOCM_RI; if (modem & MDM_DV) mlines |= TIOCM_CD; /* Carrier drop -> hangup */ if (tty) { if ((mlines & TIOCM_CD) == 0 && (dlci->modem_rx & TIOCM_CD)) if (!C_CLOCAL(tty)) tty_hangup(tty); } if (brk & 0x01) tty_insert_flip_char(&dlci->port, 0, TTY_BREAK); dlci->modem_rx = mlines; } /** * gsm_control_modem - modem status received * @gsm: GSM channel * @data: data following command * @clen: command length * * We have received a modem status control message. This is used by * the GSM mux protocol to pass virtual modem line status and optionally * to indicate break signals. Unpack it, convert to Linux representation * and if need be stuff a break message down the tty. */ static void gsm_control_modem(struct gsm_mux *gsm, const u8 *data, int clen) { unsigned int addr = 0; unsigned int modem = 0; struct gsm_dlci *dlci; int len = clen; int slen; const u8 *dp = data; struct tty_struct *tty; while (gsm_read_ea(&addr, *dp++) == 0) { len--; if (len == 0) return; } /* Must be at least one byte following the EA */ len--; if (len <= 0) return; addr >>= 1; /* Closed port, or invalid ? */ if (addr == 0 || addr >= NUM_DLCI || gsm->dlci[addr] == NULL) return; dlci = gsm->dlci[addr]; slen = len; while (gsm_read_ea(&modem, *dp++) == 0) { len--; if (len == 0) return; } len--; tty = tty_port_tty_get(&dlci->port); gsm_process_modem(tty, dlci, modem, slen - len); if (tty) { tty_wakeup(tty); tty_kref_put(tty); } gsm_control_reply(gsm, CMD_MSC, data, clen); } /** * gsm_control_rls - remote line status * @gsm: GSM channel * @data: data bytes * @clen: data length * * The modem sends us a two byte message on the control channel whenever * it wishes to send us an error state from the virtual link. Stuff * this into the uplink tty if present */ static void gsm_control_rls(struct gsm_mux *gsm, const u8 *data, int clen) { struct tty_port *port; unsigned int addr = 0; u8 bits; int len = clen; const u8 *dp = data; while (gsm_read_ea(&addr, *dp++) == 0) { len--; if (len == 0) return; } /* Must be at least one byte following ea */ len--; if (len <= 0) return; addr >>= 1; /* Closed port, or invalid ? */ if (addr == 0 || addr >= NUM_DLCI || gsm->dlci[addr] == NULL) return; /* No error ? */ bits = *dp; if ((bits & 1) == 0) return; port = &gsm->dlci[addr]->port; if (bits & 2) tty_insert_flip_char(port, 0, TTY_OVERRUN); if (bits & 4) tty_insert_flip_char(port, 0, TTY_PARITY); if (bits & 8) tty_insert_flip_char(port, 0, TTY_FRAME); tty_flip_buffer_push(port); gsm_control_reply(gsm, CMD_RLS, data, clen); } static void gsm_dlci_begin_close(struct gsm_dlci *dlci); static void gsm_dlci_close(struct gsm_dlci *dlci); /** * gsm_control_message - DLCI 0 control processing * @gsm: our GSM mux * @command: the command EA * @data: data beyond the command/length EAs * @clen: length * * Input processor for control messages from the other end of the link. 
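 *
 * (After EA decoding the low bit of the command value is the C/R flag,
 * which is why the CMD_* constants are odd and why gsm_control_reply()
 * masks it off with "cmd & 0xFE" when building a response.)
 *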
* Processes the incoming request and queues a response frame or an * NSC response if not supported */ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command, const u8 *data, int clen) { u8 buf[1]; unsigned long flags; struct gsm_dlci *dlci; int i; int address; switch (command) { case CMD_CLD: { if (addr_cnt > 0) { for (i = 0; i < addr_cnt; i++) { address = addr_open[i]; dlci = gsm->dlci[address]; gsm_dlci_close(dlci); addr_open[i] = 0; } } /* Modem wishes to close down */ dlci = gsm->dlci[0]; if (dlci) { dlci->dead = true; gsm->dead = true; gsm_dlci_close(dlci); addr_cnt = 0; gsm_response(gsm, 0, UA|PF); } } break; case CMD_TEST: /* Modem wishes to test, reply with the data */ gsm_control_reply(gsm, CMD_TEST, data, clen); break; case CMD_FCON: /* Modem can accept data again */ gsm->constipated = false; gsm_control_reply(gsm, CMD_FCON, NULL, 0); /* Kick the link in case it is idling */ spin_lock_irqsave(&gsm->tx_lock, flags); gsm_data_kick(gsm, NULL); spin_unlock_irqrestore(&gsm->tx_lock, flags); break; case CMD_FCOFF: /* Modem wants us to STFU */ gsm->constipated = true; gsm_control_reply(gsm, CMD_FCOFF, NULL, 0); break; case CMD_MSC: /* Out of band modem line change indicator for a DLCI */ gsm_control_modem(gsm, data, clen); break; case CMD_RLS: /* Out of band error reception for a DLCI */ gsm_control_rls(gsm, data, clen); break; case CMD_PSC: /* Modem wishes to enter power saving state */ gsm_control_reply(gsm, CMD_PSC, NULL, 0); break; /* Optional unsupported commands */ case CMD_PN: /* Parameter negotiation */ case CMD_RPN: /* Remote port negotiation */ case CMD_SNC: /* Service negotiation command */ default: /* Reply to bad commands with an NSC */ buf[0] = command; gsm_control_reply(gsm, CMD_NSC, buf, 1); break; } } /** * gsm_control_response - process a response to our control * @gsm: our GSM mux * @command: the command (response) EA * @data: data beyond the command/length EA * @clen: length * * Process a response to an outstanding command. We only allow a single * control message in flight so this is fairly easy. All the clean up * is done by the caller, we just update the fields, flag it as done * and return */ static void gsm_control_response(struct gsm_mux *gsm, unsigned int command, const u8 *data, int clen) { struct gsm_control *ctrl; unsigned long flags; spin_lock_irqsave(&gsm->control_lock, flags); ctrl = gsm->pending_cmd; /* Does the reply match our command */ command |= 1; if (ctrl != NULL && (command == ctrl->cmd || command == CMD_NSC)) { /* Our command was replied to, kill the retry timer */ del_timer(&gsm->t2_timer); gsm->pending_cmd = NULL; /* Rejected by the other end */ if (command == CMD_NSC) ctrl->error = -EOPNOTSUPP; ctrl->done = 1; wake_up(&gsm->event); } spin_unlock_irqrestore(&gsm->control_lock, flags); } /** * gsm_control_transmit - send control packet * @gsm: gsm mux * @ctrl: frame to send * * Send out a pending control command (called under control lock) */ static void gsm_control_transmit(struct gsm_mux *gsm, struct gsm_control *ctrl) { struct gsm_msg *msg = gsm_data_alloc(gsm, 0, ctrl->len + 2, gsm->ftype); if (msg == NULL) return; msg->data[0] = (ctrl->cmd << 1) | CR | EA; /* command */ msg->data[1] = (ctrl->len << 1) | EA; memcpy(msg->data + 2, ctrl->data, ctrl->len); gsm_data_queue(gsm->dlci[0], msg); } /** * gsm_control_retransmit - retransmit a control frame * @t: timer contained in our gsm object * * Called off the T2 timer expiry in order to retransmit control frames * that have been lost in the system somewhere. 
The control_lock protects * us from colliding with another sender or a receive completion event. * In that situation the timer may still occur in a small window but * gsm->pending_cmd will be NULL and we just let the timer expire. */ static void gsm_control_retransmit(struct timer_list *t) { struct gsm_mux *gsm = from_timer(gsm, t, t2_timer); struct gsm_control *ctrl; unsigned long flags; spin_lock_irqsave(&gsm->control_lock, flags); ctrl = gsm->pending_cmd; if (ctrl) { if (gsm->cretries == 0 || !gsm->dlci[0] || gsm->dlci[0]->dead) { gsm->pending_cmd = NULL; ctrl->error = -ETIMEDOUT; ctrl->done = 1; spin_unlock_irqrestore(&gsm->control_lock, flags); wake_up(&gsm->event); return; } gsm->cretries--; gsm_control_transmit(gsm, ctrl); mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100); } spin_unlock_irqrestore(&gsm->control_lock, flags); } /** * gsm_control_send - send a control frame on DLCI 0 * @gsm: the GSM channel * @command: command to send including CR bit * @data: bytes of data (must be kmalloced) * @clen: length of the block to send * * Queue and dispatch a control command. Only one command can be * active at a time. In theory more can be outstanding but the matching * gets really complicated so for now stick to one outstanding. */ static struct gsm_control *gsm_control_send(struct gsm_mux *gsm, unsigned int command, u8 *data, int clen) { struct gsm_control *ctrl = kzalloc(sizeof(struct gsm_control), GFP_ATOMIC); unsigned long flags; if (ctrl == NULL) return NULL; retry: wait_event(gsm->event, gsm->pending_cmd == NULL); spin_lock_irqsave(&gsm->control_lock, flags); if (gsm->pending_cmd != NULL) { spin_unlock_irqrestore(&gsm->control_lock, flags); goto retry; } ctrl->cmd = command; ctrl->data = data; ctrl->len = clen; gsm->pending_cmd = ctrl; /* If DLCI0 is in ADM mode skip retries, it won't respond */ if (gsm->dlci[0]->mode == DLCI_MODE_ADM) gsm->cretries = 0; else gsm->cretries = gsm->n2; mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100); gsm_control_transmit(gsm, ctrl); spin_unlock_irqrestore(&gsm->control_lock, flags); return ctrl; } /** * gsm_control_wait - wait for a control to finish * @gsm: GSM mux * @control: control we are waiting on * * Waits for the control to complete or time out. Frees any used * resources and returns 0 for success, or an error if the remote * rejected or ignored the request. */ static int gsm_control_wait(struct gsm_mux *gsm, struct gsm_control *control) { int err; wait_event(gsm->event, control->done == 1); err = control->error; kfree(control); return err; } /* * DLCI level handling: Needs krefs */ /* * State transitions and timers */ /** * gsm_dlci_close - a DLCI has closed * @dlci: DLCI that closed * * Perform processing when moving a DLCI into closed state. If there * is an attached tty this is hung up */ static void gsm_dlci_close(struct gsm_dlci *dlci) { unsigned long flags; del_timer(&dlci->t1); if (debug & 8) pr_debug("DLCI %d goes closed.\n", dlci->addr); dlci->state = DLCI_CLOSED; /* Prevent us from sending data before the link is up again */ dlci->constipated = true; if (dlci->addr != 0) { tty_port_tty_hangup(&dlci->port, false); spin_lock_irqsave(&dlci->lock, flags); kfifo_reset(&dlci->fifo); spin_unlock_irqrestore(&dlci->lock, flags); /* Ensure that gsmtty_open() can return. 
*/ tty_port_set_initialized(&dlci->port, 0); wake_up_interruptible(&dlci->port.open_wait); } else dlci->gsm->dead = true; wake_up(&dlci->gsm->event); /* A DLCI 0 close is a MUX termination so we need to kick that back to userspace somehow */ } /** * gsm_dlci_open - a DLCI has opened * @dlci: DLCI that opened * * Perform processing when moving a DLCI into open state. */ static void gsm_dlci_open(struct gsm_dlci *dlci) { /* Note that SABM UA .. SABM UA first UA lost can mean that we go open -> open */ del_timer(&dlci->t1); /* This will let a tty open continue */ dlci->state = DLCI_OPEN; dlci->constipated = false; if (debug & 8) pr_debug("DLCI %d goes open.\n", dlci->addr); /* Send current modem state */ if (dlci->addr) gsm_modem_update(dlci, 0); wake_up(&dlci->gsm->event); } /** * gsm_dlci_t1 - T1 timer expiry * @t: timer contained in the DLCI that opened * * The T1 timer handles retransmits of control frames (essentially of * SABM and DISC). We resend the command until the retry count runs out * in which case an opening port goes back to closed and a closing port * is simply put into closed state (any further frames from the other * end will get a DM response) * * Some control dlci can stay in ADM mode with other dlci working just * fine. In that case we can just keep the control dlci open after the * DLCI_OPENING retries time out. */ static void gsm_dlci_t1(struct timer_list *t) { struct gsm_dlci *dlci = from_timer(dlci, t, t1); struct gsm_mux *gsm = dlci->gsm; switch (dlci->state) { case DLCI_OPENING: if (dlci->retries) { dlci->retries--; gsm_command(dlci->gsm, dlci->addr, SABM|PF); mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); } else if (!dlci->addr && gsm->control == (DM | PF)) { if (debug & 8) pr_info("DLCI %d opening in ADM mode.\n", dlci->addr); dlci->mode = DLCI_MODE_ADM; gsm_dlci_open(dlci); } else { gsm_dlci_begin_close(dlci); /* prevent half open link */ } break; case DLCI_CLOSING: if (dlci->retries) { dlci->retries--; gsm_command(dlci->gsm, dlci->addr, DISC|PF); mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); } else gsm_dlci_close(dlci); break; default: pr_debug("%s: unhandled state: %d\n", __func__, dlci->state); break; } } /** * gsm_dlci_begin_open - start channel open procedure * @dlci: DLCI to open * * Commence opening a DLCI from the Linux side. We issue SABM messages * to the modem which should then reply with a UA or ADM, at which point * we will move into open state. Opening is done asynchronously with retry * running off timers and the responses. */ static void gsm_dlci_begin_open(struct gsm_dlci *dlci) { struct gsm_mux *gsm = dlci->gsm; if (dlci->state == DLCI_OPEN || dlci->state == DLCI_OPENING) return; dlci->retries = gsm->n2; dlci->state = DLCI_OPENING; gsm_command(dlci->gsm, dlci->addr, SABM|PF); mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); } /** * gsm_dlci_set_opening - change state to opening * @dlci: DLCI to open * * Change internal state to wait for DLCI open from initiator side. * We set off timers and responses upon reception of an SABM. */ static void gsm_dlci_set_opening(struct gsm_dlci *dlci) { switch (dlci->state) { case DLCI_CLOSED: case DLCI_CLOSING: dlci->state = DLCI_OPENING; break; default: break; } } /** * gsm_dlci_begin_close - start channel open procedure * @dlci: DLCI to open * * Commence closing a DLCI from the Linux side. We issue DISC messages * to the modem which should then reply with a UA, at which point we * will move into closed state. Closing is done asynchronously with retry * off timers. 
We may also receive a DM reply from the other end which * indicates the channel was already closed. */ static void gsm_dlci_begin_close(struct gsm_dlci *dlci) { struct gsm_mux *gsm = dlci->gsm; if (dlci->state == DLCI_CLOSED || dlci->state == DLCI_CLOSING) return; dlci->retries = gsm->n2; dlci->state = DLCI_CLOSING; gsm_command(dlci->gsm, dlci->addr, DISC|PF); mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); } /** * gsm_dlci_data - data arrived * @dlci: channel * @data: block of bytes received * @clen: length of received block * * A UI or UIH frame has arrived which contains data for a channel * other than the control channel. If the relevant virtual tty is * open we shovel the bits down it, if not we drop them. */ static void gsm_dlci_data(struct gsm_dlci *dlci, const u8 *data, int clen) { /* krefs .. */ struct tty_port *port = &dlci->port; struct tty_struct *tty; unsigned int modem = 0; int len = clen; int slen = 0; if (debug & 16) pr_debug("%d bytes for tty\n", len); switch (dlci->adaption) { /* Unsupported types */ case 4: /* Packetised interruptible data */ break; case 3: /* Packetised uininterruptible voice/data */ break; case 2: /* Asynchronous serial with line state in each frame */ while (gsm_read_ea(&modem, *data++) == 0) { len--; slen++; if (len == 0) return; } len--; slen++; tty = tty_port_tty_get(port); if (tty) { gsm_process_modem(tty, dlci, modem, slen); tty_wakeup(tty); tty_kref_put(tty); } fallthrough; case 1: /* Line state will go via DLCI 0 controls only */ default: tty_insert_flip_string(port, data, len); tty_flip_buffer_push(port); } } /** * gsm_dlci_command - data arrived on control channel * @dlci: channel * @data: block of bytes received * @len: length of received block * * A UI or UIH frame has arrived which contains data for DLCI 0 the * control channel. This should contain a command EA followed by * control data bytes. The command EA contains a command/response bit * and we divide up the work accordingly. */ static void gsm_dlci_command(struct gsm_dlci *dlci, const u8 *data, int len) { /* See what command is involved */ unsigned int command = 0; while (len-- > 0) { if (gsm_read_ea(&command, *data++) == 1) { int clen = *data++; len--; /* FIXME: this is properly an EA */ clen >>= 1; /* Malformed command ? */ if (clen > len) return; if (command & 1) gsm_control_message(dlci->gsm, command, data, clen); else gsm_control_response(dlci->gsm, command, data, clen); return; } } } /** * gsm_kick_timer - transmit if possible * @t: timer contained in our gsm object * * Transmit data from DLCIs if the queue is empty. We can't rely on * a tty wakeup except when we filled the pipe so we need to fire off * new data ourselves in other cases. */ static void gsm_kick_timer(struct timer_list *t) { struct gsm_mux *gsm = from_timer(gsm, t, kick_timer); unsigned long flags; int sent = 0; spin_lock_irqsave(&gsm->tx_lock, flags); /* If we have nothing running then we need to fire up */ if (gsm->tx_bytes < TX_THRESH_LO) sent = gsm_dlci_data_sweep(gsm); spin_unlock_irqrestore(&gsm->tx_lock, flags); if (sent && debug & 4) pr_info("%s TX queue stalled\n", __func__); } /* * Allocate/Free DLCI channels */ /** * gsm_dlci_alloc - allocate a DLCI * @gsm: GSM mux * @addr: address of the DLCI * * Allocate and install a new DLCI object into the GSM mux. 
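 *
 * Note: allocation can also happen from the receive path (gsm_queue()
 * below allocates a DLCI when a SABM arrives for an address it has not
 * seen), which is presumably why GFP_ATOMIC is used for the object
 * itself.
 *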
* * FIXME: review locking races */ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr) { struct gsm_dlci *dlci = kzalloc(sizeof(struct gsm_dlci), GFP_ATOMIC); if (dlci == NULL) return NULL; spin_lock_init(&dlci->lock); mutex_init(&dlci->mutex); if (kfifo_alloc(&dlci->fifo, TX_SIZE, GFP_KERNEL) < 0) { kfree(dlci); return NULL; } skb_queue_head_init(&dlci->skb_list); timer_setup(&dlci->t1, gsm_dlci_t1, 0); tty_port_init(&dlci->port); dlci->port.ops = &gsm_port_ops; dlci->gsm = gsm; dlci->addr = addr; dlci->adaption = gsm->adaption; dlci->state = DLCI_CLOSED; if (addr) { dlci->data = gsm_dlci_data; /* Prevent us from sending data before the link is up */ dlci->constipated = true; } else { dlci->data = gsm_dlci_command; } gsm->dlci[addr] = dlci; return dlci; } /** * gsm_dlci_free - free DLCI * @port: tty port for DLCI to free * * Free up a DLCI. * * Can sleep. */ static void gsm_dlci_free(struct tty_port *port) { struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port); del_timer_sync(&dlci->t1); dlci->gsm->dlci[dlci->addr] = NULL; kfifo_free(&dlci->fifo); while ((dlci->skb = skb_dequeue(&dlci->skb_list))) dev_kfree_skb(dlci->skb); kfree(dlci); } static inline void dlci_get(struct gsm_dlci *dlci) { tty_port_get(&dlci->port); } static inline void dlci_put(struct gsm_dlci *dlci) { tty_port_put(&dlci->port); } static void gsm_destroy_network(struct gsm_dlci *dlci); /** * gsm_dlci_release - release DLCI * @dlci: DLCI to destroy * * Release a DLCI. Actual free is deferred until either * mux is closed or tty is closed - whichever is last. * * Can sleep. */ static void gsm_dlci_release(struct gsm_dlci *dlci) { struct tty_struct *tty = tty_port_tty_get(&dlci->port); if (tty) { mutex_lock(&dlci->mutex); gsm_destroy_network(dlci); mutex_unlock(&dlci->mutex); /* We cannot use tty_hangup() because in tty_kref_put() the tty * driver assumes that the hangup queue is free and reuses it to * queue release_one_tty() -> NULL pointer panic in * process_one_work(). */ tty_vhangup(tty); tty_port_tty_set(&dlci->port, NULL); tty_kref_put(tty); } dlci->state = DLCI_CLOSED; dlci_put(dlci); } /* * LAPBish link layer logic */ /** * gsm_queue - a GSM frame is ready to process * @gsm: pointer to our gsm mux * * At this point in time a frame has arrived and been demangled from * the line encoding. All the differences between the encodings have * been handled below us and the frame is unpacked into the structures. * The fcs holds the header FCS but any data FCS must be added here. */ static void gsm_queue(struct gsm_mux *gsm) { struct gsm_dlci *dlci; u8 cr; int address; int i, j, k, address_tmp; if (gsm->fcs != GOOD_FCS) { gsm->bad_fcs++; if (debug & 4) pr_debug("BAD FCS %02x\n", gsm->fcs); return; } address = gsm->address >> 1; if (address >= NUM_DLCI) goto invalid; cr = gsm->address & 1; /* C/R bit */ cr ^= gsm->initiator ? 
0 : 1; /* Flip so 1 always means command */ gsm_print_packet("<--", address, cr, gsm->control, gsm->buf, gsm->len); dlci = gsm->dlci[address]; switch (gsm->control) { case SABM|PF: if (cr == 0) goto invalid; if (dlci == NULL) dlci = gsm_dlci_alloc(gsm, address); if (dlci == NULL) return; if (dlci->dead) gsm_response(gsm, address, DM|PF); else { gsm_response(gsm, address, UA|PF); gsm_dlci_open(dlci); /* Save dlci open address */ if (address) { addr_open[addr_cnt] = address; addr_cnt++; } } break; case DISC|PF: if (cr == 0) goto invalid; if (dlci == NULL || dlci->state == DLCI_CLOSED) { gsm_response(gsm, address, DM|PF); return; } /* Real close complete */ if (!address) { if (addr_cnt > 0) { for (i = 0; i < addr_cnt; i++) { address = addr_open[i]; dlci = gsm->dlci[address]; gsm_dlci_close(dlci); addr_open[i] = 0; } } dlci = gsm->dlci[0]; gsm_dlci_close(dlci); addr_cnt = 0; gsm_response(gsm, 0, UA|PF); } else { gsm_response(gsm, address, UA|PF); gsm_dlci_close(dlci); /* clear dlci address */ for (j = 0; j < addr_cnt; j++) { address_tmp = addr_open[j]; if (address_tmp == address) { for (k = j; k < addr_cnt; k++) addr_open[k] = addr_open[k+1]; addr_cnt--; break; } } } break; case UA|PF: if (cr == 0 || dlci == NULL) break; switch (dlci->state) { case DLCI_CLOSING: gsm_dlci_close(dlci); break; case DLCI_OPENING: gsm_dlci_open(dlci); break; default: pr_debug("%s: unhandled state: %d\n", __func__, dlci->state); break; } break; case DM: /* DM can be valid unsolicited */ case DM|PF: if (cr) goto invalid; if (dlci == NULL) return; gsm_dlci_close(dlci); break; case UI: case UI|PF: case UIH: case UIH|PF: #if 0 if (cr) goto invalid; #endif if (dlci == NULL || dlci->state != DLCI_OPEN) { gsm_response(gsm, address, DM|PF); return; } dlci->data(dlci, gsm->buf, gsm->len); break; default: goto invalid; } return; invalid: gsm->malformed++; return; } /** * gsm0_receive_state_check_and_fix - check and correct receive state * @gsm: gsm data for this ldisc instance * * Ensures that the current receive state is valid for basic option mode. 
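 *
 * Rough state sketch for the basic option receiver (see gsm0_receive()
 * below for the authoritative version):
 *
 *	GSM_SEARCH -> GSM0_ADDRESS -> GSM0_CONTROL -> GSM0_LEN0
 *	  [-> GSM0_LEN1] -> GSM0_DATA -> GSM0_FCS -> GSM0_SSOF -> GSM_SEARCH
 *
 * with gsm_queue() called from GSM0_SSOF when the closing flag byte is
 * seen.
 *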
*/ static void gsm0_receive_state_check_and_fix(struct gsm_mux *gsm) { switch (gsm->state) { case GSM_SEARCH: case GSM0_ADDRESS: case GSM0_CONTROL: case GSM0_LEN0: case GSM0_LEN1: case GSM0_DATA: case GSM0_FCS: case GSM0_SSOF: break; default: gsm->state = GSM_SEARCH; break; } } /** * gsm0_receive - perform processing for non-transparency * @gsm: gsm data for this ldisc instance * @c: character * * Receive bytes in gsm mode 0 */ static void gsm0_receive(struct gsm_mux *gsm, unsigned char c) { unsigned int len; gsm0_receive_state_check_and_fix(gsm); switch (gsm->state) { case GSM_SEARCH: /* SOF marker */ if (c == GSM0_SOF) { gsm->state = GSM0_ADDRESS; gsm->address = 0; gsm->len = 0; gsm->fcs = INIT_FCS; } break; case GSM0_ADDRESS: /* Address EA */ gsm->fcs = gsm_fcs_add(gsm->fcs, c); if (gsm_read_ea(&gsm->address, c)) gsm->state = GSM0_CONTROL; break; case GSM0_CONTROL: /* Control Byte */ gsm->fcs = gsm_fcs_add(gsm->fcs, c); gsm->control = c; gsm->state = GSM0_LEN0; break; case GSM0_LEN0: /* Length EA */ gsm->fcs = gsm_fcs_add(gsm->fcs, c); if (gsm_read_ea(&gsm->len, c)) { if (gsm->len > gsm->mru) { gsm->bad_size++; gsm->state = GSM_SEARCH; break; } gsm->count = 0; if (!gsm->len) gsm->state = GSM0_FCS; else gsm->state = GSM0_DATA; break; } gsm->state = GSM0_LEN1; break; case GSM0_LEN1: gsm->fcs = gsm_fcs_add(gsm->fcs, c); len = c; gsm->len |= len << 7; if (gsm->len > gsm->mru) { gsm->bad_size++; gsm->state = GSM_SEARCH; break; } gsm->count = 0; if (!gsm->len) gsm->state = GSM0_FCS; else gsm->state = GSM0_DATA; break; case GSM0_DATA: /* Data */ gsm->buf[gsm->count++] = c; if (gsm->count >= MAX_MRU) { gsm->bad_size++; gsm->state = GSM_SEARCH; } else if (gsm->count >= gsm->len) { /* Calculate final FCS for UI frames over all data */ if ((gsm->control & ~PF) != UIH) { gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, gsm->count); } gsm->state = GSM0_FCS; } break; case GSM0_FCS: /* FCS follows the packet */ gsm->fcs = gsm_fcs_add(gsm->fcs, c); gsm->state = GSM0_SSOF; break; case GSM0_SSOF: gsm->state = GSM_SEARCH; if (c == GSM0_SOF) gsm_queue(gsm); else gsm->bad_size++; break; default: pr_debug("%s: unhandled state: %d\n", __func__, gsm->state); break; } } /** * gsm1_receive_state_check_and_fix - check and correct receive state * @gsm: gsm data for this ldisc instance * * Ensures that the current receive state is valid for advanced option mode. 
*/ static void gsm1_receive_state_check_and_fix(struct gsm_mux *gsm) { switch (gsm->state) { case GSM_SEARCH: case GSM1_START: case GSM1_ADDRESS: case GSM1_CONTROL: case GSM1_DATA: case GSM1_OVERRUN: break; default: gsm->state = GSM_SEARCH; break; } } /** * gsm1_receive - perform processing for non-transparency * @gsm: gsm data for this ldisc instance * @c: character * * Receive bytes in mode 1 (Advanced option) */ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c) { gsm1_receive_state_check_and_fix(gsm); /* handle XON/XOFF */ if ((c & ISO_IEC_646_MASK) == XON) { gsm->constipated = true; return; } else if ((c & ISO_IEC_646_MASK) == XOFF) { gsm->constipated = false; /* Kick the link in case it is idling */ gsm_data_kick(gsm, NULL); return; } if (c == GSM1_SOF) { /* EOF is only valid in frame if we have got to the data state */ if (gsm->state == GSM1_DATA) { if (gsm->count < 1) { /* Missing FSC */ gsm->malformed++; gsm->state = GSM1_START; return; } /* Remove the FCS from data */ gsm->count--; if ((gsm->control & ~PF) != UIH) { /* Calculate final FCS for UI frames over all * data but FCS */ gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, gsm->count); } /* Add the FCS itself to test against GOOD_FCS */ gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->buf[gsm->count]); gsm->len = gsm->count; gsm_queue(gsm); gsm->state = GSM1_START; return; } /* Any partial frame was a runt so go back to start */ if (gsm->state != GSM1_START) { if (gsm->state != GSM_SEARCH) gsm->malformed++; gsm->state = GSM1_START; } /* A SOF in GSM_START means we are still reading idling or framing bytes */ return; } if (c == GSM1_ESCAPE) { gsm->escape = true; return; } /* Only an unescaped SOF gets us out of GSM search */ if (gsm->state == GSM_SEARCH) return; if (gsm->escape) { c ^= GSM1_ESCAPE_BITS; gsm->escape = false; } switch (gsm->state) { case GSM1_START: /* First byte after SOF */ gsm->address = 0; gsm->state = GSM1_ADDRESS; gsm->fcs = INIT_FCS; fallthrough; case GSM1_ADDRESS: /* Address continuation */ gsm->fcs = gsm_fcs_add(gsm->fcs, c); if (gsm_read_ea(&gsm->address, c)) gsm->state = GSM1_CONTROL; break; case GSM1_CONTROL: /* Control Byte */ gsm->fcs = gsm_fcs_add(gsm->fcs, c); gsm->control = c; gsm->count = 0; gsm->state = GSM1_DATA; break; case GSM1_DATA: /* Data */ if (gsm->count > gsm->mru || gsm->count > MAX_MRU) { /* Allow one for the FCS */ gsm->state = GSM1_OVERRUN; gsm->bad_size++; } else gsm->buf[gsm->count++] = c; break; case GSM1_OVERRUN: /* Over-long - eg a dropped SOF */ break; default: pr_debug("%s: unhandled state: %d\n", __func__, gsm->state); break; } } /** * gsm_error - handle tty error * @gsm: ldisc data * @data: byte received (may be invalid) * @flag: error received * * Handle an error in the receipt of data for a frame. Currently we just * go back to hunting for a SOF. * * FIXME: better diagnostics ? */ static void gsm_error(struct gsm_mux *gsm, unsigned char data, unsigned char flag) { gsm->state = GSM_SEARCH; gsm->io_error++; } /** * gsm_cleanup_mux - generic GSM protocol cleanup * @gsm: our mux * @disc: disconnect link? * * Clean up the bits of the mux which are the same for all framing * protocols. Remove the mux from the mux table, stop all the timers * and then shut down each device hanging up the channels as we go. 
*/ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc) { int i; struct gsm_dlci *dlci; struct gsm_msg *txq, *ntxq; gsm->dead = true; mutex_lock(&gsm->mutex); dlci = gsm->dlci[0]; if (dlci) { if (disc && dlci->state != DLCI_CLOSED) { gsm_dlci_begin_close(dlci); wait_event(gsm->event, dlci->state == DLCI_CLOSED); } dlci->dead = true; } /* Finish outstanding timers, making sure they are done */ del_timer_sync(&gsm->kick_timer); del_timer_sync(&gsm->t2_timer); /* Free up any link layer users and finally the control channel */ if (gsm->has_devices) { gsm_unregister_devices(gsm_tty_driver, gsm->num); gsm->has_devices = false; } for (i = NUM_DLCI - 1; i >= 0; i--) if (gsm->dlci[i]) gsm_dlci_release(gsm->dlci[i]); mutex_unlock(&gsm->mutex); /* Now wipe the queues */ tty_ldisc_flush(gsm->tty); list_for_each_entry_safe(txq, ntxq, &gsm->tx_list, list) kfree(txq); INIT_LIST_HEAD(&gsm->tx_list); } /** * gsm_activate_mux - generic GSM setup * @gsm: our mux * * Set up the bits of the mux which are the same for all framing * protocols. Add the mux to the mux table so it can be opened and * finally kick off connecting to DLCI 0 on the modem. */ static int gsm_activate_mux(struct gsm_mux *gsm) { struct gsm_dlci *dlci; int ret; dlci = gsm_dlci_alloc(gsm, 0); if (dlci == NULL) return -ENOMEM; timer_setup(&gsm->kick_timer, gsm_kick_timer, 0); timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0); init_waitqueue_head(&gsm->event); spin_lock_init(&gsm->control_lock); spin_lock_init(&gsm->tx_lock); if (gsm->encoding == 0) gsm->receive = gsm0_receive; else gsm->receive = gsm1_receive; ret = gsm_register_devices(gsm_tty_driver, gsm->num); if (ret) return ret; gsm->has_devices = true; gsm->dead = false; /* Tty opens are now permissible */ return 0; } /** * gsm_free_mux - free up a mux * @gsm: mux to free * * Dispose of allocated resources for a dead mux */ static void gsm_free_mux(struct gsm_mux *gsm) { int i; for (i = 0; i < MAX_MUX; i++) { if (gsm == gsm_mux[i]) { gsm_mux[i] = NULL; break; } } mutex_destroy(&gsm->mutex); kfree(gsm->txframe); kfree(gsm->buf); kfree(gsm); } /** * gsm_free_muxr - free up a mux * @ref: kreference to the mux to free * * Dispose of allocated resources for a dead mux */ static void gsm_free_muxr(struct kref *ref) { struct gsm_mux *gsm = container_of(ref, struct gsm_mux, ref); gsm_free_mux(gsm); } static inline void mux_get(struct gsm_mux *gsm) { unsigned long flags; spin_lock_irqsave(&gsm_mux_lock, flags); kref_get(&gsm->ref); spin_unlock_irqrestore(&gsm_mux_lock, flags); } static inline void mux_put(struct gsm_mux *gsm) { unsigned long flags; spin_lock_irqsave(&gsm_mux_lock, flags); kref_put(&gsm->ref, gsm_free_muxr); spin_unlock_irqrestore(&gsm_mux_lock, flags); } static inline unsigned int mux_num_to_base(struct gsm_mux *gsm) { return gsm->num * NUM_DLCI; } static inline unsigned int mux_line_to_num(unsigned int line) { return line / NUM_DLCI; } /** * gsm_alloc_mux - allocate a mux * * Creates a new mux ready for activation. 
*/ static struct gsm_mux *gsm_alloc_mux(void) { int i; struct gsm_mux *gsm = kzalloc(sizeof(struct gsm_mux), GFP_KERNEL); if (gsm == NULL) return NULL; gsm->buf = kmalloc(MAX_MRU + 1, GFP_KERNEL); if (gsm->buf == NULL) { kfree(gsm); return NULL; } gsm->txframe = kmalloc(2 * (MAX_MTU + PROT_OVERHEAD - 1), GFP_KERNEL); if (gsm->txframe == NULL) { kfree(gsm->buf); kfree(gsm); return NULL; } spin_lock_init(&gsm->lock); mutex_init(&gsm->mutex); kref_init(&gsm->ref); INIT_LIST_HEAD(&gsm->tx_list); gsm->t1 = T1; gsm->t2 = T2; gsm->n2 = N2; gsm->ftype = UIH; gsm->adaption = 1; gsm->encoding = 1; gsm->mru = 64; /* Default to encoding 1 so these should be 64 */ gsm->mtu = 64; gsm->dead = true; /* Avoid early tty opens */ /* Store the instance to the mux array or abort if no space is * available. */ spin_lock(&gsm_mux_lock); for (i = 0; i < MAX_MUX; i++) { if (!gsm_mux[i]) { gsm_mux[i] = gsm; gsm->num = i; break; } } spin_unlock(&gsm_mux_lock); if (i == MAX_MUX) { mutex_destroy(&gsm->mutex); kfree(gsm->txframe); kfree(gsm->buf); kfree(gsm); return NULL; } return gsm; } static void gsm_copy_config_values(struct gsm_mux *gsm, struct gsm_config *c) { memset(c, 0, sizeof(*c)); c->adaption = gsm->adaption; c->encapsulation = gsm->encoding; c->initiator = gsm->initiator; c->t1 = gsm->t1; c->t2 = gsm->t2; c->t3 = 0; /* Not supported */ c->n2 = gsm->n2; if (gsm->ftype == UIH) c->i = 1; else c->i = 2; pr_debug("Ftype %d i %d\n", gsm->ftype, c->i); c->mru = gsm->mru; c->mtu = gsm->mtu; c->k = 0; } static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c) { int ret = 0; int need_close = 0; int need_restart = 0; /* Stuff we don't support yet - UI or I frame transport, windowing */ if ((c->adaption != 1 && c->adaption != 2) || c->k) return -EOPNOTSUPP; /* Check the MRU/MTU range looks sane */ if (c->mru > MAX_MRU || c->mtu > MAX_MTU || c->mru < 8 || c->mtu < 8) return -EINVAL; if (c->n2 > 255) return -EINVAL; if (c->encapsulation > 1) /* Basic, advanced, no I */ return -EINVAL; if (c->initiator > 1) return -EINVAL; if (c->i == 0 || c->i > 2) /* UIH and UI only */ return -EINVAL; /* * See what is needed for reconfiguration */ /* Timing fields */ if (c->t1 != 0 && c->t1 != gsm->t1) need_restart = 1; if (c->t2 != 0 && c->t2 != gsm->t2) need_restart = 1; if (c->encapsulation != gsm->encoding) need_restart = 1; if (c->adaption != gsm->adaption) need_restart = 1; /* Requires care */ if (c->initiator != gsm->initiator) need_close = 1; if (c->mru != gsm->mru) need_restart = 1; if (c->mtu != gsm->mtu) need_restart = 1; /* * Close down what is needed, restart and initiate the new * configuration. On the first time there is no DLCI[0] * and closing or cleaning up is not necessary. */ if (need_close || need_restart) gsm_cleanup_mux(gsm, true); gsm->initiator = c->initiator; gsm->mru = c->mru; gsm->mtu = c->mtu; gsm->encoding = c->encapsulation; gsm->adaption = c->adaption; gsm->n2 = c->n2; if (c->i == 1) gsm->ftype = UIH; else if (c->i == 2) gsm->ftype = UI; if (c->t1) gsm->t1 = c->t1; if (c->t2) gsm->t2 = c->t2; /* * FIXME: We need to separate activation/deactivation from adding * and removing from the mux array */ if (gsm->dead) { ret = gsm_activate_mux(gsm); if (ret) return ret; if (gsm->initiator) gsm_dlci_begin_open(gsm->dlci[0]); } return 0; } /** * gsmld_output - write to link * @gsm: our mux * @data: bytes to output * @len: size * * Write a block of data from the GSM mux to the data channel. This * will eventually be serialized from above but at the moment isn't. 
*/ static int gsmld_output(struct gsm_mux *gsm, u8 *data, int len) { if (tty_write_room(gsm->tty) < len) { set_bit(TTY_DO_WRITE_WAKEUP, &gsm->tty->flags); return -ENOSPC; } if (debug & 4) gsm_hex_dump_bytes(__func__, data, len); return gsm->tty->ops->write(gsm->tty, data, len); } /** * gsmld_attach_gsm - mode set up * @tty: our tty structure * @gsm: our mux * * Set up the MUX for basic mode and commence connecting to the * modem. Currently called from the line discipline set up but * will need moving to an ioctl path. */ static void gsmld_attach_gsm(struct tty_struct *tty, struct gsm_mux *gsm) { gsm->tty = tty_kref_get(tty); /* Turn off tty XON/XOFF handling to handle it explicitly. */ gsm->old_c_iflag = tty->termios.c_iflag; tty->termios.c_iflag &= (IXON | IXOFF); } /** * gsmld_detach_gsm - stop doing 0710 mux * @tty: tty attached to the mux * @gsm: mux * * Shutdown and then clean up the resources used by the line discipline */ static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm) { WARN_ON(tty != gsm->tty); /* Restore tty XON/XOFF handling. */ gsm->tty->termios.c_iflag = gsm->old_c_iflag; tty_kref_put(gsm->tty); gsm->tty = NULL; } static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp, const char *fp, int count) { struct gsm_mux *gsm = tty->disc_data; char flags = TTY_NORMAL; if (debug & 4) gsm_hex_dump_bytes(__func__, cp, count); for (; count; count--, cp++) { if (fp) flags = *fp++; switch (flags) { case TTY_NORMAL: if (gsm->receive) gsm->receive(gsm, *cp); break; case TTY_OVERRUN: case TTY_BREAK: case TTY_PARITY: case TTY_FRAME: gsm_error(gsm, *cp, flags); break; default: WARN_ONCE(1, "%s: unknown flag %d\n", tty_name(tty), flags); break; } } /* FASYNC if needed ? */ /* If clogged call tty_throttle(tty); */ } /** * gsmld_flush_buffer - clean input queue * @tty: terminal device * * Flush the input buffer. Called when the line discipline is * being closed, when the tty layer wants the buffer flushed (eg * at hangup). */ static void gsmld_flush_buffer(struct tty_struct *tty) { } /** * gsmld_close - close the ldisc for this tty * @tty: device * * Called from the terminal layer when this line discipline is * being shut down, either because of a close or becsuse of a * discipline change. The function will not be called while other * ldisc methods are in progress. */ static void gsmld_close(struct tty_struct *tty) { struct gsm_mux *gsm = tty->disc_data; /* The ldisc locks and closes the port before calling our close. This * means we have no way to do a proper disconnect. We will not bother * to do one. */ gsm_cleanup_mux(gsm, false); gsmld_detach_gsm(tty, gsm); gsmld_flush_buffer(tty); /* Do other clean up here */ mux_put(gsm); } /** * gsmld_open - open an ldisc * @tty: terminal to open * * Called when this line discipline is being attached to the * terminal device. Can sleep. Called serialized so that no * other events will occur in parallel. No further open will occur * until a close. 
*/ static int gsmld_open(struct tty_struct *tty) { struct gsm_mux *gsm; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (tty->ops->write == NULL) return -EINVAL; /* Attach our ldisc data */ gsm = gsm_alloc_mux(); if (gsm == NULL) return -ENOMEM; tty->disc_data = gsm; tty->receive_room = 65536; /* Attach the initial passive connection */ gsm->encoding = 1; gsmld_attach_gsm(tty, gsm); timer_setup(&gsm->kick_timer, gsm_kick_timer, 0); timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0); return 0; } /** * gsmld_write_wakeup - asynchronous I/O notifier * @tty: tty device * * Required for the ptys, serial driver etc. since processes * that attach themselves to the master and rely on ASYNC * IO must be woken up */ static void gsmld_write_wakeup(struct tty_struct *tty) { struct gsm_mux *gsm = tty->disc_data; unsigned long flags; /* Queue poll */ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); spin_lock_irqsave(&gsm->tx_lock, flags); gsm_data_kick(gsm, NULL); if (gsm->tx_bytes < TX_THRESH_LO) { gsm_dlci_data_sweep(gsm); } spin_unlock_irqrestore(&gsm->tx_lock, flags); } /** * gsmld_read - read function for tty * @tty: tty device * @file: file object * @buf: userspace buffer pointer * @nr: size of I/O * @cookie: unused * @offset: unused * * Perform reads for the line discipline. We are guaranteed that the * line discipline will not be closed under us but we may get multiple * parallel readers and must handle this ourselves. We may also get * a hangup. Always called in user context, may sleep. * * This code must be sure never to sleep through a hangup. */ static ssize_t gsmld_read(struct tty_struct *tty, struct file *file, unsigned char *buf, size_t nr, void **cookie, unsigned long offset) { return -EOPNOTSUPP; } /** * gsmld_write - write function for tty * @tty: tty device * @file: file object * @buf: userspace buffer pointer * @nr: size of I/O * * Called when the owner of the device wants to send a frame * itself (or some other control data). The data is transferred * as-is and must be properly framed and checksummed as appropriate * by userspace. Frames are either sent whole or not at all as this * avoids pain user side. */ static ssize_t gsmld_write(struct tty_struct *tty, struct file *file, const unsigned char *buf, size_t nr) { struct gsm_mux *gsm = tty->disc_data; unsigned long flags; int space; int ret; if (!gsm) return -ENODEV; ret = -ENOBUFS; spin_lock_irqsave(&gsm->tx_lock, flags); space = tty_write_room(tty); if (space >= nr) ret = tty->ops->write(tty, buf, nr); else set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); spin_unlock_irqrestore(&gsm->tx_lock, flags); return ret; } /** * gsmld_poll - poll method for N_GSM0710 * @tty: terminal device * @file: file accessing it * @wait: poll table * * Called when the line discipline is asked to poll() for data or * for special events. This code is not serialized with respect to * other events save open/close. * * This code must be sure never to sleep through a hangup. 
* Called without the kernel lock held - fine */ static __poll_t gsmld_poll(struct tty_struct *tty, struct file *file, poll_table *wait) { __poll_t mask = 0; struct gsm_mux *gsm = tty->disc_data; poll_wait(file, &tty->read_wait, wait); poll_wait(file, &tty->write_wait, wait); if (gsm->dead) mask |= EPOLLHUP; if (tty_hung_up_p(file)) mask |= EPOLLHUP; if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) mask |= EPOLLHUP; if (!tty_is_writelocked(tty) && tty_write_room(tty) > 0) mask |= EPOLLOUT | EPOLLWRNORM; return mask; } static int gsmld_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg) { struct gsm_config c; struct gsm_mux *gsm = tty->disc_data; unsigned int base; switch (cmd) { case GSMIOC_GETCONF: gsm_copy_config_values(gsm, &c); if (copy_to_user((void __user *)arg, &c, sizeof(c))) return -EFAULT; return 0; case GSMIOC_SETCONF: if (copy_from_user(&c, (void __user *)arg, sizeof(c))) return -EFAULT; return gsm_config(gsm, &c); case GSMIOC_GETFIRST: base = mux_num_to_base(gsm); return put_user(base + 1, (__u32 __user *)arg); default: return n_tty_ioctl_helper(tty, file, cmd, arg); } } /* * Network interface * */ static int gsm_mux_net_open(struct net_device *net) { pr_debug("%s called\n", __func__); netif_start_queue(net); return 0; } static int gsm_mux_net_close(struct net_device *net) { netif_stop_queue(net); return 0; } static void dlci_net_free(struct gsm_dlci *dlci) { if (!dlci->net) { WARN_ON(1); return; } dlci->adaption = dlci->prev_adaption; dlci->data = dlci->prev_data; free_netdev(dlci->net); dlci->net = NULL; } static void net_free(struct kref *ref) { struct gsm_mux_net *mux_net; struct gsm_dlci *dlci; mux_net = container_of(ref, struct gsm_mux_net, ref); dlci = mux_net->dlci; if (dlci->net) { unregister_netdev(dlci->net); dlci_net_free(dlci); } } static inline void muxnet_get(struct gsm_mux_net *mux_net) { kref_get(&mux_net->ref); } static inline void muxnet_put(struct gsm_mux_net *mux_net) { kref_put(&mux_net->ref, net_free); } static netdev_tx_t gsm_mux_net_start_xmit(struct sk_buff *skb, struct net_device *net) { struct gsm_mux_net *mux_net = netdev_priv(net); struct gsm_dlci *dlci = mux_net->dlci; muxnet_get(mux_net); skb_queue_head(&dlci->skb_list, skb); net->stats.tx_packets++; net->stats.tx_bytes += skb->len; gsm_dlci_data_kick(dlci); /* And tell the kernel when the last transmit started. */ netif_trans_update(net); muxnet_put(mux_net); return NETDEV_TX_OK; } /* called when a packet did not ack after watchdogtimeout */ static void gsm_mux_net_tx_timeout(struct net_device *net, unsigned int txqueue) { /* Tell syslog we are hosed. */ dev_dbg(&net->dev, "Tx timed out.\n"); /* Update statistics */ net->stats.tx_errors++; } static void gsm_mux_rx_netchar(struct gsm_dlci *dlci, const unsigned char *in_buf, int size) { struct net_device *net = dlci->net; struct sk_buff *skb; struct gsm_mux_net *mux_net = netdev_priv(net); muxnet_get(mux_net); /* Allocate an sk_buff */ skb = dev_alloc_skb(size + NET_IP_ALIGN); if (!skb) { /* We got no receive buffer. 
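		 * Drop the frame: count it as rx_dropped and release the
		 * reference taken above.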
*/ net->stats.rx_dropped++; muxnet_put(mux_net); return; } skb_reserve(skb, NET_IP_ALIGN); skb_put_data(skb, in_buf, size); skb->dev = net; skb->protocol = htons(ETH_P_IP); /* Ship it off to the kernel */ netif_rx(skb); /* update out statistics */ net->stats.rx_packets++; net->stats.rx_bytes += size; muxnet_put(mux_net); return; } static void gsm_mux_net_init(struct net_device *net) { static const struct net_device_ops gsm_netdev_ops = { .ndo_open = gsm_mux_net_open, .ndo_stop = gsm_mux_net_close, .ndo_start_xmit = gsm_mux_net_start_xmit, .ndo_tx_timeout = gsm_mux_net_tx_timeout, }; net->netdev_ops = &gsm_netdev_ops; /* fill in the other fields */ net->watchdog_timeo = GSM_NET_TX_TIMEOUT; net->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; net->type = ARPHRD_NONE; net->tx_queue_len = 10; } /* caller holds the dlci mutex */ static void gsm_destroy_network(struct gsm_dlci *dlci) { struct gsm_mux_net *mux_net; pr_debug("destroy network interface\n"); if (!dlci->net) return; mux_net = netdev_priv(dlci->net); muxnet_put(mux_net); } /* caller holds the dlci mutex */ static int gsm_create_network(struct gsm_dlci *dlci, struct gsm_netconfig *nc) { char *netname; int retval = 0; struct net_device *net; struct gsm_mux_net *mux_net; if (!capable(CAP_NET_ADMIN)) return -EPERM; /* Already in a non tty mode */ if (dlci->adaption > 2) return -EBUSY; if (nc->protocol != htons(ETH_P_IP)) return -EPROTONOSUPPORT; if (nc->adaption != 3 && nc->adaption != 4) return -EPROTONOSUPPORT; pr_debug("create network interface\n"); netname = "gsm%d"; if (nc->if_name[0] != '\0') netname = nc->if_name; net = alloc_netdev(sizeof(struct gsm_mux_net), netname, NET_NAME_UNKNOWN, gsm_mux_net_init); if (!net) { pr_err("alloc_netdev failed\n"); return -ENOMEM; } net->mtu = dlci->gsm->mtu; net->min_mtu = 8; net->max_mtu = dlci->gsm->mtu; mux_net = netdev_priv(net); mux_net->dlci = dlci; kref_init(&mux_net->ref); strncpy(nc->if_name, net->name, IFNAMSIZ); /* return net name */ /* reconfigure dlci for network */ dlci->prev_adaption = dlci->adaption; dlci->prev_data = dlci->data; dlci->adaption = nc->adaption; dlci->data = gsm_mux_rx_netchar; dlci->net = net; pr_debug("register netdev\n"); retval = register_netdev(net); if (retval) { pr_err("network register fail %d\n", retval); dlci_net_free(dlci); return retval; } return net->ifindex; /* return network index */ } /* Line discipline for real tty */ static struct tty_ldisc_ops tty_ldisc_packet = { .owner = THIS_MODULE, .num = N_GSM0710, .name = "n_gsm", .open = gsmld_open, .close = gsmld_close, .flush_buffer = gsmld_flush_buffer, .read = gsmld_read, .write = gsmld_write, .ioctl = gsmld_ioctl, .poll = gsmld_poll, .receive_buf = gsmld_receive_buf, .write_wakeup = gsmld_write_wakeup }; /* * Virtual tty side */ /** * gsm_modem_upd_via_data - send modem bits via convergence layer * @dlci: channel * @brk: break signal * * Send an empty frame to signal mobile state changes and to transmit the * break signal for adaption 2. 
*/ static void gsm_modem_upd_via_data(struct gsm_dlci *dlci, u8 brk) { struct gsm_mux *gsm = dlci->gsm; unsigned long flags; if (dlci->state != DLCI_OPEN || dlci->adaption != 2) return; spin_lock_irqsave(&gsm->tx_lock, flags); gsm_dlci_modem_output(gsm, dlci, brk); spin_unlock_irqrestore(&gsm->tx_lock, flags); } /** * gsm_modem_upd_via_msc - send modem bits via control frame * @dlci: channel * @brk: break signal */ static int gsm_modem_upd_via_msc(struct gsm_dlci *dlci, u8 brk) { u8 modembits[3]; struct gsm_control *ctrl; int len = 2; if (dlci->gsm->encoding != 0) return 0; modembits[0] = (dlci->addr << 2) | 2 | EA; /* DLCI, Valid, EA */ if (!brk) { modembits[1] = (gsm_encode_modem(dlci) << 1) | EA; } else { modembits[1] = gsm_encode_modem(dlci) << 1; modembits[2] = (brk << 4) | 2 | EA; /* Length, Break, EA */ len++; } ctrl = gsm_control_send(dlci->gsm, CMD_MSC, modembits, len); if (ctrl == NULL) return -ENOMEM; return gsm_control_wait(dlci->gsm, ctrl); } /** * gsm_modem_update - send modem status line state * @dlci: channel * @brk: break signal */ static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk) { if (dlci->gsm->dead) return -EL2HLT; if (dlci->adaption == 2) { /* Send convergence layer type 2 empty data frame. */ gsm_modem_upd_via_data(dlci, brk); return 0; } else if (dlci->gsm->encoding == 0) { /* Send as MSC control message. */ return gsm_modem_upd_via_msc(dlci, brk); } /* Modem status lines are not supported. */ return -EPROTONOSUPPORT; } static int gsm_carrier_raised(struct tty_port *port) { struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port); struct gsm_mux *gsm = dlci->gsm; /* Not yet open so no carrier info */ if (dlci->state != DLCI_OPEN) return 0; if (debug & 2) return 1; /* * Basic mode with control channel in ADM mode may not respond * to CMD_MSC at all and modem_rx is empty. */ if (gsm->encoding == 0 && gsm->dlci[0]->mode == DLCI_MODE_ADM && !dlci->modem_rx) return 1; return dlci->modem_rx & TIOCM_CD; } static void gsm_dtr_rts(struct tty_port *port, int onoff) { struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port); unsigned int modem_tx = dlci->modem_tx; if (onoff) modem_tx |= TIOCM_DTR | TIOCM_RTS; else modem_tx &= ~(TIOCM_DTR | TIOCM_RTS); if (modem_tx != dlci->modem_tx) { dlci->modem_tx = modem_tx; gsm_modem_update(dlci, 0); } } static const struct tty_port_operations gsm_port_ops = { .carrier_raised = gsm_carrier_raised, .dtr_rts = gsm_dtr_rts, .destruct = gsm_dlci_free, }; static int gsmtty_install(struct tty_driver *driver, struct tty_struct *tty) { struct gsm_mux *gsm; struct gsm_dlci *dlci; unsigned int line = tty->index; unsigned int mux = mux_line_to_num(line); bool alloc = false; int ret; line = line & 0x3F; if (mux >= MAX_MUX) return -ENXIO; /* FIXME: we need to lock gsm_mux for lifetimes of ttys eventually */ if (gsm_mux[mux] == NULL) return -EUNATCH; if (line == 0 || line > 61) /* 62/63 reserved */ return -ECHRNG; gsm = gsm_mux[mux]; if (gsm->dead) return -EL2HLT; /* If DLCI 0 is not yet fully open return an error. 
This is ok from a locking perspective as we don't have to worry about this if DLCI0 is lost */ mutex_lock(&gsm->mutex); if (gsm->dlci[0] && gsm->dlci[0]->state != DLCI_OPEN) { mutex_unlock(&gsm->mutex); return -EL2NSYNC; } dlci = gsm->dlci[line]; if (dlci == NULL) { alloc = true; dlci = gsm_dlci_alloc(gsm, line); } if (dlci == NULL) { mutex_unlock(&gsm->mutex); return -ENOMEM; } ret = tty_port_install(&dlci->port, driver, tty); if (ret) { if (alloc) dlci_put(dlci); mutex_unlock(&gsm->mutex); return ret; } dlci_get(dlci); dlci_get(gsm->dlci[0]); mux_get(gsm); tty->driver_data = dlci; mutex_unlock(&gsm->mutex); return 0; } static int gsmtty_open(struct tty_struct *tty, struct file *filp) { struct gsm_dlci *dlci = tty->driver_data; struct tty_port *port = &dlci->port; struct gsm_mux *gsm = dlci->gsm; port->count++; tty_port_tty_set(port, tty); dlci->modem_rx = 0; /* We could in theory open and close before we wait - eg if we get a DM straight back. This is ok as that will have caused a hangup */ tty_port_set_initialized(port, 1); /* Start sending off SABM messages */ if (gsm->initiator) gsm_dlci_begin_open(dlci); else gsm_dlci_set_opening(dlci); /* And wait for virtual carrier */ return tty_port_block_til_ready(port, tty, filp); } static void gsmtty_close(struct tty_struct *tty, struct file *filp) { struct gsm_dlci *dlci = tty->driver_data; if (dlci == NULL) return; if (dlci->state == DLCI_CLOSED) return; mutex_lock(&dlci->mutex); gsm_destroy_network(dlci); mutex_unlock(&dlci->mutex); if (tty_port_close_start(&dlci->port, tty, filp) == 0) return; gsm_dlci_begin_close(dlci); if (tty_port_initialized(&dlci->port) && C_HUPCL(tty)) tty_port_lower_dtr_rts(&dlci->port); tty_port_close_end(&dlci->port, tty); tty_port_tty_set(&dlci->port, NULL); return; } static void gsmtty_hangup(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; if (dlci->state == DLCI_CLOSED) return; tty_port_hangup(&dlci->port); gsm_dlci_begin_close(dlci); } static int gsmtty_write(struct tty_struct *tty, const unsigned char *buf, int len) { int sent; struct gsm_dlci *dlci = tty->driver_data; if (dlci->state == DLCI_CLOSED) return -EINVAL; /* Stuff the bytes into the fifo queue */ sent = kfifo_in_locked(&dlci->fifo, buf, len, &dlci->lock); /* Need to kick the channel */ gsm_dlci_data_kick(dlci); return sent; } static unsigned int gsmtty_write_room(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; if (dlci->state == DLCI_CLOSED) return 0; return kfifo_avail(&dlci->fifo); } static unsigned int gsmtty_chars_in_buffer(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; if (dlci->state == DLCI_CLOSED) return 0; return kfifo_len(&dlci->fifo); } static void gsmtty_flush_buffer(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; unsigned long flags; if (dlci->state == DLCI_CLOSED) return; /* Caution needed: If we implement reliable transport classes then the data being transmitted can't simply be junked once it has first hit the stack. Until then we can just blow it away */ spin_lock_irqsave(&dlci->lock, flags); kfifo_reset(&dlci->fifo); spin_unlock_irqrestore(&dlci->lock, flags); /* Need to unhook this DLCI from the transmit queue logic */ } static void gsmtty_wait_until_sent(struct tty_struct *tty, int timeout) { /* The FIFO handles the queue so the kernel will do the right thing waiting on chars_in_buffer before calling us. 
No work to do here */ } static int gsmtty_tiocmget(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; if (dlci->state == DLCI_CLOSED) return -EINVAL; return dlci->modem_rx; } static int gsmtty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct gsm_dlci *dlci = tty->driver_data; unsigned int modem_tx = dlci->modem_tx; if (dlci->state == DLCI_CLOSED) return -EINVAL; modem_tx &= ~clear; modem_tx |= set; if (modem_tx != dlci->modem_tx) { dlci->modem_tx = modem_tx; return gsm_modem_update(dlci, 0); } return 0; } static int gsmtty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct gsm_dlci *dlci = tty->driver_data; struct gsm_netconfig nc; int index; if (dlci->state == DLCI_CLOSED) return -EINVAL; switch (cmd) { case GSMIOC_ENABLE_NET: if (copy_from_user(&nc, (void __user *)arg, sizeof(nc))) return -EFAULT; nc.if_name[IFNAMSIZ-1] = '\0'; /* return net interface index or error code */ mutex_lock(&dlci->mutex); index = gsm_create_network(dlci, &nc); mutex_unlock(&dlci->mutex); if (copy_to_user((void __user *)arg, &nc, sizeof(nc))) return -EFAULT; return index; case GSMIOC_DISABLE_NET: if (!capable(CAP_NET_ADMIN)) return -EPERM; mutex_lock(&dlci->mutex); gsm_destroy_network(dlci); mutex_unlock(&dlci->mutex); return 0; default: return -ENOIOCTLCMD; } } static void gsmtty_set_termios(struct tty_struct *tty, struct ktermios *old) { struct gsm_dlci *dlci = tty->driver_data; if (dlci->state == DLCI_CLOSED) return; /* For the moment its fixed. In actual fact the speed information for the virtual channel can be propogated in both directions by the RPN control message. This however rapidly gets nasty as we then have to remap modem signals each way according to whether our virtual cable is null modem etc .. 
*/ tty_termios_copy_hw(&tty->termios, old); } static void gsmtty_throttle(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; if (dlci->state == DLCI_CLOSED) return; if (C_CRTSCTS(tty)) dlci->modem_tx &= ~TIOCM_RTS; dlci->throttled = true; /* Send an MSC with RTS cleared */ gsm_modem_update(dlci, 0); } static void gsmtty_unthrottle(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; if (dlci->state == DLCI_CLOSED) return; if (C_CRTSCTS(tty)) dlci->modem_tx |= TIOCM_RTS; dlci->throttled = false; /* Send an MSC with RTS set */ gsm_modem_update(dlci, 0); } static int gsmtty_break_ctl(struct tty_struct *tty, int state) { struct gsm_dlci *dlci = tty->driver_data; int encode = 0; /* Off */ if (dlci->state == DLCI_CLOSED) return -EINVAL; if (state == -1) /* "On indefinitely" - we can't encode this properly */ encode = 0x0F; else if (state > 0) { encode = state / 200; /* mS to encoding */ if (encode > 0x0F) encode = 0x0F; /* Best effort */ } return gsm_modem_update(dlci, encode); } static void gsmtty_cleanup(struct tty_struct *tty) { struct gsm_dlci *dlci = tty->driver_data; struct gsm_mux *gsm = dlci->gsm; dlci_put(dlci); dlci_put(gsm->dlci[0]); mux_put(gsm); } /* Virtual ttys for the demux */ static const struct tty_operations gsmtty_ops = { .install = gsmtty_install, .open = gsmtty_open, .close = gsmtty_close, .write = gsmtty_write, .write_room = gsmtty_write_room, .chars_in_buffer = gsmtty_chars_in_buffer, .flush_buffer = gsmtty_flush_buffer, .ioctl = gsmtty_ioctl, .throttle = gsmtty_throttle, .unthrottle = gsmtty_unthrottle, .set_termios = gsmtty_set_termios, .hangup = gsmtty_hangup, .wait_until_sent = gsmtty_wait_until_sent, .tiocmget = gsmtty_tiocmget, .tiocmset = gsmtty_tiocmset, .break_ctl = gsmtty_break_ctl, .cleanup = gsmtty_cleanup, }; static int __init gsm_init(void) { /* Fill in our line protocol discipline, and register it */ int status = tty_register_ldisc(&tty_ldisc_packet); if (status != 0) { pr_err("n_gsm: can't register line discipline (err = %d)\n", status); return status; } gsm_tty_driver = tty_alloc_driver(256, TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK); if (IS_ERR(gsm_tty_driver)) { pr_err("gsm_init: tty allocation failed.\n"); status = PTR_ERR(gsm_tty_driver); goto err_unreg_ldisc; } gsm_tty_driver->driver_name = "gsmtty"; gsm_tty_driver->name = "gsmtty"; gsm_tty_driver->major = 0; /* Dynamic */ gsm_tty_driver->minor_start = 0; gsm_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; gsm_tty_driver->subtype = SERIAL_TYPE_NORMAL; gsm_tty_driver->init_termios = tty_std_termios; /* Fixme */ gsm_tty_driver->init_termios.c_lflag &= ~ECHO; tty_set_operations(gsm_tty_driver, &gsmtty_ops); if (tty_register_driver(gsm_tty_driver)) { pr_err("gsm_init: tty registration failed.\n"); status = -EBUSY; goto err_put_driver; } pr_debug("gsm_init: loaded as %d,%d.\n", gsm_tty_driver->major, gsm_tty_driver->minor_start); return 0; err_put_driver: tty_driver_kref_put(gsm_tty_driver); err_unreg_ldisc: tty_unregister_ldisc(&tty_ldisc_packet); return status; } static void __exit gsm_exit(void) { tty_unregister_ldisc(&tty_ldisc_packet); tty_unregister_driver(gsm_tty_driver); tty_driver_kref_put(gsm_tty_driver); } module_init(gsm_init); module_exit(gsm_exit); MODULE_LICENSE("GPL"); MODULE_ALIAS_LDISC(N_GSM0710); |
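/*
 * Hedged usage sketch (not part of the driver): one way a userspace daemon
 * might attach the N_GSM0710 line discipline implemented above to a modem
 * tty and reconfigure the mux through gsmld_ioctl().  The ioctl names and
 * struct gsm_config come from <linux/gsmmux.h>; the device path and the
 * chosen parameter values are purely illustrative.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/gsmmux.h>
#include <linux/tty.h>
#include <linux/types.h>

static int attach_gsm_mux(const char *path)
{
	struct gsm_config c;
	int ldisc = N_GSM0710;
	__u32 first;
	int fd;

	fd = open(path, O_RDWR | O_NOCTTY);
	if (fd < 0)
		return -1;
	/* Switch the tty to the GSM 0710 mux line discipline (gsmld_open()). */
	if (ioctl(fd, TIOCSETD, &ldisc) < 0)
		return -1;
	/* Fetch the defaults, adjust, and push the new configuration. */
	if (ioctl(fd, GSMIOC_GETCONF, &c) < 0)
		return -1;
	c.initiator = 1;	/* we bring up DLCI 0 */
	c.encapsulation = 0;	/* basic option mode */
	c.mru = 127;
	c.mtu = 127;
	if (ioctl(fd, GSMIOC_SETCONF, &c) < 0)
		return -1;
	/* First virtual tty for this mux, e.g. /dev/gsmtty<first>. */
	if (ioctl(fd, GSMIOC_GETFIRST, &first) == 0)
		printf("first gsmtty: %u\n", first);
	/* The daemon must keep fd open for as long as the mux is in use. */
	return fd;
}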
// SPDX-License-Identifier: MIT /* * Copyright 2020 Noralf Trønnes */ #include <linux/dma-buf.h> #include <linux/dma-mapping.h> #include <linux/lz4.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/string_helpers.h> #include <linux/usb.h> #include <linux/vmalloc.h> #include <linux/workqueue.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_damage_helper.h> #include <drm/drm_debugfs.h> #include <drm/drm_drv.h> #include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_gem_shmem_helper.h> #include <drm/drm_managed.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> #include <drm/gud.h> #include "gud_internal.h" /* Only used internally */ static const struct drm_format_info gud_drm_format_r1 = { .format = GUD_DRM_FORMAT_R1,
.num_planes = 1, .char_per_block = { 1, 0, 0 }, .block_w = { 8, 0, 0 }, .block_h = { 1, 0, 0 }, .hsub = 1, .vsub = 1, }; static const struct drm_format_info gud_drm_format_xrgb1111 = { .format = GUD_DRM_FORMAT_XRGB1111, .num_planes = 1, .char_per_block = { 1, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 1, 0, 0 }, .hsub = 1, .vsub = 1, }; static int gud_usb_control_msg(struct usb_interface *intf, bool in, u8 request, u16 value, void *buf, size_t len) { u8 requesttype = USB_TYPE_VENDOR | USB_RECIP_INTERFACE; u8 ifnum = intf->cur_altsetting->desc.bInterfaceNumber; struct usb_device *usb = interface_to_usbdev(intf); unsigned int pipe; if (len && !buf) return -EINVAL; if (in) { pipe = usb_rcvctrlpipe(usb, 0); requesttype |= USB_DIR_IN; } else { pipe = usb_sndctrlpipe(usb, 0); requesttype |= USB_DIR_OUT; } return usb_control_msg(usb, pipe, request, requesttype, value, ifnum, buf, len, USB_CTRL_GET_TIMEOUT); } static int gud_get_display_descriptor(struct usb_interface *intf, struct gud_display_descriptor_req *desc) { void *buf; int ret; buf = kmalloc(sizeof(*desc), GFP_KERNEL); if (!buf) return -ENOMEM; ret = gud_usb_control_msg(intf, true, GUD_REQ_GET_DESCRIPTOR, 0, buf, sizeof(*desc)); memcpy(desc, buf, sizeof(*desc)); kfree(buf); if (ret < 0) return ret; if (ret != sizeof(*desc)) return -EIO; if (desc->magic != le32_to_cpu(GUD_DISPLAY_MAGIC)) return -ENODATA; DRM_DEV_DEBUG_DRIVER(&intf->dev, "version=%u flags=0x%x compression=0x%x max_buffer_size=%u\n", desc->version, le32_to_cpu(desc->flags), desc->compression, le32_to_cpu(desc->max_buffer_size)); if (!desc->version || !desc->max_width || !desc->max_height || le32_to_cpu(desc->min_width) > le32_to_cpu(desc->max_width) || le32_to_cpu(desc->min_height) > le32_to_cpu(desc->max_height)) return -EINVAL; return 0; } static int gud_status_to_errno(u8 status) { switch (status) { case GUD_STATUS_OK: return 0; case GUD_STATUS_BUSY: return -EBUSY; case GUD_STATUS_REQUEST_NOT_SUPPORTED: return -EOPNOTSUPP; case GUD_STATUS_PROTOCOL_ERROR: return -EPROTO; case GUD_STATUS_INVALID_PARAMETER: return -EINVAL; case GUD_STATUS_ERROR: return -EREMOTEIO; default: return -EREMOTEIO; } } static int gud_usb_get_status(struct usb_interface *intf) { int ret, status = -EIO; u8 *buf; buf = kmalloc(sizeof(*buf), GFP_KERNEL); if (!buf) return -ENOMEM; ret = gud_usb_control_msg(intf, true, GUD_REQ_GET_STATUS, 0, buf, sizeof(*buf)); if (ret == sizeof(*buf)) status = gud_status_to_errno(*buf); kfree(buf); if (ret < 0) return ret; return status; } static int gud_usb_transfer(struct gud_device *gdrm, bool in, u8 request, u16 index, void *buf, size_t len) { struct usb_interface *intf = to_usb_interface(gdrm->drm.dev); int idx, ret; drm_dbg(&gdrm->drm, "%s: request=0x%x index=%u len=%zu\n", in ? "get" : "set", request, index, len); if (!drm_dev_enter(&gdrm->drm, &idx)) return -ENODEV; mutex_lock(&gdrm->ctrl_lock); ret = gud_usb_control_msg(intf, in, request, index, buf, len); if (ret == -EPIPE || ((gdrm->flags & GUD_DISPLAY_FLAG_STATUS_ON_SET) && !in && ret >= 0)) { int status; status = gud_usb_get_status(intf); if (status < 0) { ret = status; } else if (ret < 0) { dev_err_once(gdrm->drm.dev, "Unexpected status OK for failed transfer\n"); ret = -EPIPE; } } if (ret < 0) { drm_dbg(&gdrm->drm, "ret=%d\n", ret); gdrm->stats_num_errors++; } mutex_unlock(&gdrm->ctrl_lock); drm_dev_exit(idx); return ret; } /* * @buf cannot be allocated on the stack. * Returns number of bytes received or negative error code on failure. 
*/ int gud_usb_get(struct gud_device *gdrm, u8 request, u16 index, void *buf, size_t max_len) { return gud_usb_transfer(gdrm, true, request, index, buf, max_len); } /* * @buf can be allocated on the stack or NULL. * Returns zero on success or negative error code on failure. */ int gud_usb_set(struct gud_device *gdrm, u8 request, u16 index, void *buf, size_t len) { void *trbuf = NULL; int ret; if (buf && len) { trbuf = kmemdup(buf, len, GFP_KERNEL); if (!trbuf) return -ENOMEM; } ret = gud_usb_transfer(gdrm, false, request, index, trbuf, len); kfree(trbuf); if (ret < 0) return ret; return ret != len ? -EIO : 0; } /* * @val can be allocated on the stack. * Returns zero on success or negative error code on failure. */ int gud_usb_get_u8(struct gud_device *gdrm, u8 request, u16 index, u8 *val) { u8 *buf; int ret; buf = kmalloc(sizeof(*val), GFP_KERNEL); if (!buf) return -ENOMEM; ret = gud_usb_get(gdrm, request, index, buf, sizeof(*val)); *val = *buf; kfree(buf); if (ret < 0) return ret; return ret != sizeof(*val) ? -EIO : 0; } /* Returns zero on success or negative error code on failure. */ int gud_usb_set_u8(struct gud_device *gdrm, u8 request, u8 val) { return gud_usb_set(gdrm, request, 0, &val, sizeof(val)); } static int gud_get_properties(struct gud_device *gdrm) { struct gud_property_req *properties; unsigned int i, num_properties; int ret; properties = kcalloc(GUD_PROPERTIES_MAX_NUM, sizeof(*properties), GFP_KERNEL); if (!properties) return -ENOMEM; ret = gud_usb_get(gdrm, GUD_REQ_GET_PROPERTIES, 0, properties, GUD_PROPERTIES_MAX_NUM * sizeof(*properties)); if (ret <= 0) goto out; if (ret % sizeof(*properties)) { ret = -EIO; goto out; } num_properties = ret / sizeof(*properties); ret = 0; gdrm->properties = drmm_kcalloc(&gdrm->drm, num_properties, sizeof(*gdrm->properties), GFP_KERNEL); if (!gdrm->properties) { ret = -ENOMEM; goto out; } for (i = 0; i < num_properties; i++) { u16 prop = le16_to_cpu(properties[i].prop); u64 val = le64_to_cpu(properties[i].val); switch (prop) { case GUD_PROPERTY_ROTATION: /* * DRM UAPI matches the protocol so use the value directly, * but mask out any additions on future devices. */ val &= GUD_ROTATION_MASK; ret = drm_plane_create_rotation_property(&gdrm->pipe.plane, DRM_MODE_ROTATE_0, val); break; default: /* New ones might show up in future devices, skip those we don't know. */ drm_dbg(&gdrm->drm, "Ignoring unknown property: %u\n", prop); continue; } if (ret) goto out; gdrm->properties[gdrm->num_properties++] = prop; } out: kfree(properties); return ret; } /* * FIXME: Dma-buf sharing requires DMA support by the importing device. * This function is a workaround to make USB devices work as well. * See todo.rst for how to fix the issue in the dma-buf framework. 
*/ static struct drm_gem_object *gud_gem_prime_import(struct drm_device *drm, struct dma_buf *dma_buf) { struct gud_device *gdrm = to_gud_device(drm); if (!gdrm->dmadev) return ERR_PTR(-ENODEV); return drm_gem_prime_import_dev(drm, dma_buf, gdrm->dmadev); } static int gud_stats_debugfs(struct seq_file *m, void *data) { struct drm_info_node *node = m->private; struct gud_device *gdrm = to_gud_device(node->minor->dev); char buf[10]; string_get_size(gdrm->bulk_len, 1, STRING_UNITS_2, buf, sizeof(buf)); seq_printf(m, "Max buffer size: %s\n", buf); seq_printf(m, "Number of errors: %u\n", gdrm->stats_num_errors); seq_puts(m, "Compression: "); if (gdrm->compression & GUD_COMPRESSION_LZ4) seq_puts(m, " lz4"); if (!gdrm->compression) seq_puts(m, " none"); seq_puts(m, "\n"); if (gdrm->compression) { u64 remainder; u64 ratio = div64_u64_rem(gdrm->stats_length, gdrm->stats_actual_length, &remainder); u64 ratio_frac = div64_u64(remainder * 10, gdrm->stats_actual_length); seq_printf(m, "Compression ratio: %llu.%llu\n", ratio, ratio_frac); } return 0; } static const struct drm_info_list gud_debugfs_list[] = { { "stats", gud_stats_debugfs, 0, NULL }, }; static void gud_debugfs_init(struct drm_minor *minor) { drm_debugfs_create_files(gud_debugfs_list, ARRAY_SIZE(gud_debugfs_list), minor->debugfs_root, minor); } static const struct drm_simple_display_pipe_funcs gud_pipe_funcs = { .check = gud_pipe_check, .update = gud_pipe_update, }; static const struct drm_mode_config_funcs gud_mode_config_funcs = { .fb_create = drm_gem_fb_create_with_dirty, .atomic_check = drm_atomic_helper_check, .atomic_commit = drm_atomic_helper_commit, }; static const u64 gud_pipe_modifiers[] = { DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_INVALID }; DEFINE_DRM_GEM_FOPS(gud_fops); static const struct drm_driver gud_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .fops = &gud_fops, DRM_GEM_SHMEM_DRIVER_OPS, .gem_prime_import = gud_gem_prime_import, .debugfs_init = gud_debugfs_init, .name = "gud", .desc = "Generic USB Display", .date = "20200422", .major = 1, .minor = 0, }; static int gud_alloc_bulk_buffer(struct gud_device *gdrm) { unsigned int i, num_pages; struct page **pages; void *ptr; int ret; gdrm->bulk_buf = vmalloc_32(gdrm->bulk_len); if (!gdrm->bulk_buf) return -ENOMEM; num_pages = DIV_ROUND_UP(gdrm->bulk_len, PAGE_SIZE); pages = kmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL); if (!pages) return -ENOMEM; for (i = 0, ptr = gdrm->bulk_buf; i < num_pages; i++, ptr += PAGE_SIZE) pages[i] = vmalloc_to_page(ptr); ret = sg_alloc_table_from_pages(&gdrm->bulk_sgt, pages, num_pages, 0, gdrm->bulk_len, GFP_KERNEL); kfree(pages); return ret; } static void gud_free_buffers_and_mutex(void *data) { struct gud_device *gdrm = data; vfree(gdrm->compress_buf); gdrm->compress_buf = NULL; sg_free_table(&gdrm->bulk_sgt); vfree(gdrm->bulk_buf); gdrm->bulk_buf = NULL; mutex_destroy(&gdrm->ctrl_lock); } static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id) { const struct drm_format_info *xrgb8888_emulation_format = NULL; bool rgb565_supported = false, xrgb8888_supported = false; unsigned int num_formats_dev, num_formats = 0; struct usb_endpoint_descriptor *bulk_out; struct gud_display_descriptor_req desc; struct device *dev = &intf->dev; size_t max_buffer_size = 0; struct gud_device *gdrm; struct drm_device *drm; u8 *formats_dev; u32 *formats; int ret, i; ret = usb_find_bulk_out_endpoint(intf->cur_altsetting, &bulk_out); if (ret) return ret; ret = gud_get_display_descriptor(intf, &desc); 
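	/*
	 * A failed or malformed descriptor read most likely means this USB
	 * interface is not a GUD display at all, so decline the probe with
	 * -ENODEV instead of propagating the transfer error.
	 */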
if (ret) { DRM_DEV_DEBUG_DRIVER(dev, "Not a display interface: ret=%d\n", ret); return -ENODEV; } if (desc.version > 1) { dev_err(dev, "Protocol version %u is not supported\n", desc.version); return -ENODEV; } gdrm = devm_drm_dev_alloc(dev, &gud_drm_driver, struct gud_device, drm); if (IS_ERR(gdrm)) return PTR_ERR(gdrm); drm = &gdrm->drm; drm->mode_config.funcs = &gud_mode_config_funcs; ret = drmm_mode_config_init(drm); if (ret) return ret; gdrm->flags = le32_to_cpu(desc.flags); gdrm->compression = desc.compression & GUD_COMPRESSION_LZ4; if (gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE && gdrm->compression) return -EINVAL; mutex_init(&gdrm->ctrl_lock); mutex_init(&gdrm->damage_lock); INIT_WORK(&gdrm->work, gud_flush_work); gud_clear_damage(gdrm); ret = devm_add_action(dev, gud_free_buffers_and_mutex, gdrm); if (ret) return ret; drm->mode_config.min_width = le32_to_cpu(desc.min_width); drm->mode_config.max_width = le32_to_cpu(desc.max_width); drm->mode_config.min_height = le32_to_cpu(desc.min_height); drm->mode_config.max_height = le32_to_cpu(desc.max_height); formats_dev = devm_kmalloc(dev, GUD_FORMATS_MAX_NUM, GFP_KERNEL); /* Add room for emulated XRGB8888 */ formats = devm_kmalloc_array(dev, GUD_FORMATS_MAX_NUM + 1, sizeof(*formats), GFP_KERNEL); if (!formats_dev || !formats) return -ENOMEM; ret = gud_usb_get(gdrm, GUD_REQ_GET_FORMATS, 0, formats_dev, GUD_FORMATS_MAX_NUM); if (ret < 0) return ret; num_formats_dev = ret; for (i = 0; i < num_formats_dev; i++) { const struct drm_format_info *info; size_t fmt_buf_size; u32 format; format = gud_to_fourcc(formats_dev[i]); if (!format) { drm_dbg(drm, "Unsupported format: 0x%02x\n", formats_dev[i]); continue; } if (format == GUD_DRM_FORMAT_R1) info = &gud_drm_format_r1; else if (format == GUD_DRM_FORMAT_XRGB1111) info = &gud_drm_format_xrgb1111; else info = drm_format_info(format); switch (format) { case GUD_DRM_FORMAT_R1: fallthrough; case GUD_DRM_FORMAT_XRGB1111: if (!xrgb8888_emulation_format) xrgb8888_emulation_format = info; break; case DRM_FORMAT_RGB565: rgb565_supported = true; if (!xrgb8888_emulation_format) xrgb8888_emulation_format = info; break; case DRM_FORMAT_XRGB8888: xrgb8888_supported = true; break; } fmt_buf_size = drm_format_info_min_pitch(info, 0, drm->mode_config.max_width) * drm->mode_config.max_height; max_buffer_size = max(max_buffer_size, fmt_buf_size); if (format == GUD_DRM_FORMAT_R1 || format == GUD_DRM_FORMAT_XRGB1111) continue; /* Internal not for userspace */ formats[num_formats++] = format; } if (!num_formats && !xrgb8888_emulation_format) { dev_err(dev, "No supported pixel formats found\n"); return -EINVAL; } /* Prefer speed over color depth */ if (rgb565_supported) drm->mode_config.preferred_depth = 16; if (!xrgb8888_supported && xrgb8888_emulation_format) { gdrm->xrgb8888_emulation_format = xrgb8888_emulation_format; formats[num_formats++] = DRM_FORMAT_XRGB8888; } if (desc.max_buffer_size) max_buffer_size = le32_to_cpu(desc.max_buffer_size); /* Prevent a misbehaving device from allocating loads of RAM. 
4096x4096@XRGB8888 = 64 MB */ if (max_buffer_size > SZ_64M) max_buffer_size = SZ_64M; gdrm->bulk_pipe = usb_sndbulkpipe(interface_to_usbdev(intf), usb_endpoint_num(bulk_out)); gdrm->bulk_len = max_buffer_size; ret = gud_alloc_bulk_buffer(gdrm); if (ret) return ret; if (gdrm->compression & GUD_COMPRESSION_LZ4) { gdrm->lz4_comp_mem = devm_kmalloc(dev, LZ4_MEM_COMPRESS, GFP_KERNEL); if (!gdrm->lz4_comp_mem) return -ENOMEM; gdrm->compress_buf = vmalloc(gdrm->bulk_len); if (!gdrm->compress_buf) return -ENOMEM; } ret = drm_simple_display_pipe_init(drm, &gdrm->pipe, &gud_pipe_funcs, formats, num_formats, gud_pipe_modifiers, NULL); if (ret) return ret; devm_kfree(dev, formats); devm_kfree(dev, formats_dev); ret = gud_get_properties(gdrm); if (ret) { dev_err(dev, "Failed to get properties (error=%d)\n", ret); return ret; } drm_plane_enable_fb_damage_clips(&gdrm->pipe.plane); ret = gud_get_connectors(gdrm); if (ret) { dev_err(dev, "Failed to get connectors (error=%d)\n", ret); return ret; } drm_mode_config_reset(drm); usb_set_intfdata(intf, gdrm); gdrm->dmadev = usb_intf_get_dma_device(intf); if (!gdrm->dmadev) dev_warn(dev, "buffer sharing not supported"); ret = drm_dev_register(drm, 0); if (ret) { put_device(gdrm->dmadev); return ret; } drm_kms_helper_poll_init(drm); drm_fbdev_generic_setup(drm, 0); return 0; } static void gud_disconnect(struct usb_interface *interface) { struct gud_device *gdrm = usb_get_intfdata(interface); struct drm_device *drm = &gdrm->drm; drm_dbg(drm, "%s:\n", __func__); drm_kms_helper_poll_fini(drm); drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); put_device(gdrm->dmadev); gdrm->dmadev = NULL; } static int gud_suspend(struct usb_interface *intf, pm_message_t message) { struct gud_device *gdrm = usb_get_intfdata(intf); return drm_mode_config_helper_suspend(&gdrm->drm); } static int gud_resume(struct usb_interface *intf) { struct gud_device *gdrm = usb_get_intfdata(intf); drm_mode_config_helper_resume(&gdrm->drm); return 0; } static const struct usb_device_id gud_id_table[] = { { USB_DEVICE_INTERFACE_CLASS(0x1d50, 0x614d, USB_CLASS_VENDOR_SPEC) }, { USB_DEVICE_INTERFACE_CLASS(0x16d0, 0x10a9, USB_CLASS_VENDOR_SPEC) }, { } }; MODULE_DEVICE_TABLE(usb, gud_id_table); static struct usb_driver gud_usb_driver = { .name = "gud", .probe = gud_probe, .disconnect = gud_disconnect, .id_table = gud_id_table, .suspend = gud_suspend, .resume = gud_resume, .reset_resume = gud_resume, }; module_usb_driver(gud_usb_driver); MODULE_AUTHOR("Noralf Trønnes"); MODULE_LICENSE("Dual MIT/GPL"); |
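/*
 * Worked example (hypothetical byte counts): how gud_stats_debugfs() above
 * derives the one-decimal "Compression ratio" line from the two length
 * counters using div64_u64_rem()/div64_u64().  gud_example_ratio() is not
 * part of the driver, only an illustration of the fixed-point arithmetic.
 */
#include <linux/compiler.h>
#include <linux/math64.h>
#include <linux/printk.h>
#include <linux/types.h>

static void __maybe_unused gud_example_ratio(void)
{
	u64 length = 1000000;	/* bytes handed to the compressor (hypothetical) */
	u64 actual = 400000;	/* bytes actually sent over USB (hypothetical) */
	u64 remainder;
	u64 ratio = div64_u64_rem(length, actual, &remainder);
	u64 ratio_frac = div64_u64(remainder * 10, actual);

	/* 1000000 / 400000 = 2 remainder 200000 -> prints "2.5" */
	pr_info("Compression ratio: %llu.%llu\n", ratio, ratio_frac);
}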
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_FORMAT_H__
#define __XFS_FORMAT_H__

/*
 * XFS On Disk Format Definitions
 *
 * This header file defines all the on-disk format definitions for
 * general XFS objects. Directory and attribute related objects are defined in
 * xfs_da_format.h, while log and log item formats are defined in
 * xfs_log_format.h. Everything else goes here.
 */

struct xfs_mount;
struct xfs_trans;
struct xfs_inode;
struct xfs_buf;
struct xfs_ifork;

/*
 * Super block
 * Fits into a sector-sized buffer at address 0 of each allocation group.
 * Only the first of these is ever updated except during growfs.
 */
#define XFS_SB_MAGIC		0x58465342	/* 'XFSB' */
#define XFS_SB_VERSION_1	1		/* 5.3, 6.0.1, 6.1 */
#define XFS_SB_VERSION_2	2		/* 6.2 - attributes */
#define XFS_SB_VERSION_3	3		/* 6.2 - new inode version */
#define XFS_SB_VERSION_4	4		/* 6.2+ - bitmask version */
#define XFS_SB_VERSION_5	5		/* CRC enabled filesystem */
#define XFS_SB_VERSION_NUMBITS		0x000f
#define XFS_SB_VERSION_ALLFBITS		0xfff0
#define XFS_SB_VERSION_ATTRBIT		0x0010
#define XFS_SB_VERSION_NLINKBIT		0x0020
#define XFS_SB_VERSION_QUOTABIT		0x0040
#define XFS_SB_VERSION_ALIGNBIT		0x0080
#define XFS_SB_VERSION_DALIGNBIT	0x0100
#define XFS_SB_VERSION_SHAREDBIT	0x0200
#define XFS_SB_VERSION_LOGV2BIT		0x0400
#define XFS_SB_VERSION_SECTORBIT	0x0800
#define XFS_SB_VERSION_EXTFLGBIT	0x1000
#define XFS_SB_VERSION_DIRV2BIT		0x2000
#define XFS_SB_VERSION_BORGBIT		0x4000	/* ASCII only case-insens. */
#define XFS_SB_VERSION_MOREBITSBIT	0x8000

/*
 * The size of a single extended attribute on disk is limited by
 * the size of index values within the attribute entries themselves.
 * These are be16 fields, so we can only support attribute data
 * sizes up to 2^16 bytes in length.
 */
#define XFS_XATTR_SIZE_MAX (1 << 16)

/*
 * Supported feature bit list is just all bits in the versionnum field because
 * we've used them all up and understand them all. Except, of course, for the
 * shared superblock bit, whose purpose nobody knows, and which is therefore
 * unsupported.
 */
#define XFS_SB_VERSION_OKBITS		\
	((XFS_SB_VERSION_NUMBITS | XFS_SB_VERSION_ALLFBITS) & \
		~XFS_SB_VERSION_SHAREDBIT)

/*
 * There are two words to hold XFS "feature" bits: the original
 * word, sb_versionnum, and sb_features2. Whenever a bit is set in
 * sb_features2, the feature bit XFS_SB_VERSION_MOREBITSBIT must be set.
 *
 * These defines represent bits in sb_features2.
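 *
 * For example, enabling the ATTR2 inline attribute layout means setting
 * XFS_SB_VERSION2_ATTR2BIT here and, because any use of sb_features2
 * requires XFS_SB_VERSION_MOREBITSBIT, also setting that bit in
 * sb_versionnum; the xfs_sb_version_addattr2() helper further down keeps
 * the two words consistent.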
*/ #define XFS_SB_VERSION2_RESERVED1BIT 0x00000001 #define XFS_SB_VERSION2_LAZYSBCOUNTBIT 0x00000002 /* Superblk counters */ #define XFS_SB_VERSION2_RESERVED4BIT 0x00000004 #define XFS_SB_VERSION2_ATTR2BIT 0x00000008 /* Inline attr rework */ #define XFS_SB_VERSION2_PARENTBIT 0x00000010 /* parent pointers */ #define XFS_SB_VERSION2_PROJID32BIT 0x00000080 /* 32 bit project id */ #define XFS_SB_VERSION2_CRCBIT 0x00000100 /* metadata CRCs */ #define XFS_SB_VERSION2_FTYPE 0x00000200 /* inode type in dir */ #define XFS_SB_VERSION2_OKBITS \ (XFS_SB_VERSION2_LAZYSBCOUNTBIT | \ XFS_SB_VERSION2_ATTR2BIT | \ XFS_SB_VERSION2_PROJID32BIT | \ XFS_SB_VERSION2_FTYPE) /* Maximum size of the xfs filesystem label, no terminating NULL */ #define XFSLABEL_MAX 12 /* * Superblock - in core version. Must match the ondisk version below. * Must be padded to 64 bit alignment. */ typedef struct xfs_sb { uint32_t sb_magicnum; /* magic number == XFS_SB_MAGIC */ uint32_t sb_blocksize; /* logical block size, bytes */ xfs_rfsblock_t sb_dblocks; /* number of data blocks */ xfs_rfsblock_t sb_rblocks; /* number of realtime blocks */ xfs_rtblock_t sb_rextents; /* number of realtime extents */ uuid_t sb_uuid; /* user-visible file system unique id */ xfs_fsblock_t sb_logstart; /* starting block of log if internal */ xfs_ino_t sb_rootino; /* root inode number */ xfs_ino_t sb_rbmino; /* bitmap inode for realtime extents */ xfs_ino_t sb_rsumino; /* summary inode for rt bitmap */ xfs_agblock_t sb_rextsize; /* realtime extent size, blocks */ xfs_agblock_t sb_agblocks; /* size of an allocation group */ xfs_agnumber_t sb_agcount; /* number of allocation groups */ xfs_extlen_t sb_rbmblocks; /* number of rt bitmap blocks */ xfs_extlen_t sb_logblocks; /* number of log blocks */ uint16_t sb_versionnum; /* header version == XFS_SB_VERSION */ uint16_t sb_sectsize; /* volume sector size, bytes */ uint16_t sb_inodesize; /* inode size, bytes */ uint16_t sb_inopblock; /* inodes per block */ char sb_fname[XFSLABEL_MAX]; /* file system name */ uint8_t sb_blocklog; /* log2 of sb_blocksize */ uint8_t sb_sectlog; /* log2 of sb_sectsize */ uint8_t sb_inodelog; /* log2 of sb_inodesize */ uint8_t sb_inopblog; /* log2 of sb_inopblock */ uint8_t sb_agblklog; /* log2 of sb_agblocks (rounded up) */ uint8_t sb_rextslog; /* log2 of sb_rextents */ uint8_t sb_inprogress; /* mkfs is in progress, don't mount */ uint8_t sb_imax_pct; /* max % of fs for inode space */ /* statistics */ /* * These fields must remain contiguous. If you really * want to change their layout, make sure you fix the * code in xfs_trans_apply_sb_deltas(). */ uint64_t sb_icount; /* allocated inodes */ uint64_t sb_ifree; /* free inodes */ uint64_t sb_fdblocks; /* free data blocks */ uint64_t sb_frextents; /* free realtime extents */ /* * End contiguous fields. */ xfs_ino_t sb_uquotino; /* user quota inode */ xfs_ino_t sb_gquotino; /* group quota inode */ uint16_t sb_qflags; /* quota flags */ uint8_t sb_flags; /* misc. 
flags */ uint8_t sb_shared_vn; /* shared version number */ xfs_extlen_t sb_inoalignmt; /* inode chunk alignment, fsblocks */ uint32_t sb_unit; /* stripe or raid unit */ uint32_t sb_width; /* stripe or raid width */ uint8_t sb_dirblklog; /* log2 of dir block size (fsbs) */ uint8_t sb_logsectlog; /* log2 of the log sector size */ uint16_t sb_logsectsize; /* sector size for the log, bytes */ uint32_t sb_logsunit; /* stripe unit size for the log */ uint32_t sb_features2; /* additional feature bits */ /* * bad features2 field as a result of failing to pad the sb structure to * 64 bits. Some machines will be using this field for features2 bits. * Easiest just to mark it bad and not use it for anything else. * * This is not kept up to date in memory; it is always overwritten by * the value in sb_features2 when formatting the incore superblock to * the disk buffer. */ uint32_t sb_bad_features2; /* version 5 superblock fields start here */ /* feature masks */ uint32_t sb_features_compat; uint32_t sb_features_ro_compat; uint32_t sb_features_incompat; uint32_t sb_features_log_incompat; uint32_t sb_crc; /* superblock crc */ xfs_extlen_t sb_spino_align; /* sparse inode chunk alignment */ xfs_ino_t sb_pquotino; /* project quota inode */ xfs_lsn_t sb_lsn; /* last write sequence */ uuid_t sb_meta_uuid; /* metadata file system unique id */ /* must be padded to 64 bit alignment */ } xfs_sb_t; #define XFS_SB_CRC_OFF offsetof(struct xfs_sb, sb_crc) /* * Superblock - on disk version. Must match the in core version above. * Must be padded to 64 bit alignment. */ typedef struct xfs_dsb { __be32 sb_magicnum; /* magic number == XFS_SB_MAGIC */ __be32 sb_blocksize; /* logical block size, bytes */ __be64 sb_dblocks; /* number of data blocks */ __be64 sb_rblocks; /* number of realtime blocks */ __be64 sb_rextents; /* number of realtime extents */ uuid_t sb_uuid; /* user-visible file system unique id */ __be64 sb_logstart; /* starting block of log if internal */ __be64 sb_rootino; /* root inode number */ __be64 sb_rbmino; /* bitmap inode for realtime extents */ __be64 sb_rsumino; /* summary inode for rt bitmap */ __be32 sb_rextsize; /* realtime extent size, blocks */ __be32 sb_agblocks; /* size of an allocation group */ __be32 sb_agcount; /* number of allocation groups */ __be32 sb_rbmblocks; /* number of rt bitmap blocks */ __be32 sb_logblocks; /* number of log blocks */ __be16 sb_versionnum; /* header version == XFS_SB_VERSION */ __be16 sb_sectsize; /* volume sector size, bytes */ __be16 sb_inodesize; /* inode size, bytes */ __be16 sb_inopblock; /* inodes per block */ char sb_fname[XFSLABEL_MAX]; /* file system name */ __u8 sb_blocklog; /* log2 of sb_blocksize */ __u8 sb_sectlog; /* log2 of sb_sectsize */ __u8 sb_inodelog; /* log2 of sb_inodesize */ __u8 sb_inopblog; /* log2 of sb_inopblock */ __u8 sb_agblklog; /* log2 of sb_agblocks (rounded up) */ __u8 sb_rextslog; /* log2 of sb_rextents */ __u8 sb_inprogress; /* mkfs is in progress, don't mount */ __u8 sb_imax_pct; /* max % of fs for inode space */ /* statistics */ /* * These fields must remain contiguous. If you really * want to change their layout, make sure you fix the * code in xfs_trans_apply_sb_deltas(). */ __be64 sb_icount; /* allocated inodes */ __be64 sb_ifree; /* free inodes */ __be64 sb_fdblocks; /* free data blocks */ __be64 sb_frextents; /* free realtime extents */ /* * End contiguous fields. */ __be64 sb_uquotino; /* user quota inode */ __be64 sb_gquotino; /* group quota inode */ __be16 sb_qflags; /* quota flags */ __u8 sb_flags; /* misc. 
flags */ __u8 sb_shared_vn; /* shared version number */ __be32 sb_inoalignmt; /* inode chunk alignment, fsblocks */ __be32 sb_unit; /* stripe or raid unit */ __be32 sb_width; /* stripe or raid width */ __u8 sb_dirblklog; /* log2 of dir block size (fsbs) */ __u8 sb_logsectlog; /* log2 of the log sector size */ __be16 sb_logsectsize; /* sector size for the log, bytes */ __be32 sb_logsunit; /* stripe unit size for the log */ __be32 sb_features2; /* additional feature bits */ /* * bad features2 field as a result of failing to pad the sb * structure to 64 bits. Some machines will be using this field * for features2 bits. Easiest just to mark it bad and not use * it for anything else. */ __be32 sb_bad_features2; /* version 5 superblock fields start here */ /* feature masks */ __be32 sb_features_compat; __be32 sb_features_ro_compat; __be32 sb_features_incompat; __be32 sb_features_log_incompat; __le32 sb_crc; /* superblock crc */ __be32 sb_spino_align; /* sparse inode chunk alignment */ __be64 sb_pquotino; /* project quota inode */ __be64 sb_lsn; /* last write sequence */ uuid_t sb_meta_uuid; /* metadata file system unique id */ /* must be padded to 64 bit alignment */ } xfs_dsb_t; /* * Misc. Flags - warning - these will be cleared by xfs_repair unless * a feature bit is set when the flag is used. */ #define XFS_SBF_NOFLAGS 0x00 /* no flags set */ #define XFS_SBF_READONLY 0x01 /* only read-only mounts allowed */ /* * define max. shared version we can interoperate with */ #define XFS_SB_MAX_SHARED_VN 0 #define XFS_SB_VERSION_NUM(sbp) ((sbp)->sb_versionnum & XFS_SB_VERSION_NUMBITS) static inline bool xfs_sb_is_v5(struct xfs_sb *sbp) { return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5; } /* * Detect a mismatched features2 field. Older kernels read/wrote * this into the wrong slot, so to be safe we keep them in sync. */ static inline bool xfs_sb_has_mismatched_features2(struct xfs_sb *sbp) { return sbp->sb_bad_features2 != sbp->sb_features2; } static inline bool xfs_sb_version_hasmorebits(struct xfs_sb *sbp) { return xfs_sb_is_v5(sbp) || (sbp->sb_versionnum & XFS_SB_VERSION_MOREBITSBIT); } static inline void xfs_sb_version_addattr(struct xfs_sb *sbp) { sbp->sb_versionnum |= XFS_SB_VERSION_ATTRBIT; } static inline void xfs_sb_version_addquota(struct xfs_sb *sbp) { sbp->sb_versionnum |= XFS_SB_VERSION_QUOTABIT; } static inline void xfs_sb_version_addattr2(struct xfs_sb *sbp) { sbp->sb_versionnum |= XFS_SB_VERSION_MOREBITSBIT; sbp->sb_features2 |= XFS_SB_VERSION2_ATTR2BIT; } static inline void xfs_sb_version_addprojid32(struct xfs_sb *sbp) { sbp->sb_versionnum |= XFS_SB_VERSION_MOREBITSBIT; sbp->sb_features2 |= XFS_SB_VERSION2_PROJID32BIT; } /* * Extended v5 superblock feature masks. These are to be used for new v5 * superblock features only. * * Compat features are new features that old kernels will not notice or affect * and so can mount read-write without issues. * * RO-Compat (read only) are features that old kernels can read but will break * if they write. Hence only read-only mounts of such filesystems are allowed on * kernels that don't support the feature bit. * * InCompat features are features which old kernels will not understand and so * must not mount. * * Log-InCompat features are for changes to log formats or new transactions that * can't be replayed on older kernels. The fields are set when the filesystem is * mounted, and a clean unmount clears the fields. 
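 *
 * For example, a kernel that finds an unrecognised bit set in
 * sb_features_ro_compat (say XFS_SB_FEAT_RO_COMPAT_REFLINK on an older
 * kernel) may still mount the filesystem read-only, while an unrecognised
 * bit in sb_features_incompat must cause the mount to fail outright.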
*/ #define XFS_SB_FEAT_COMPAT_ALL 0 #define XFS_SB_FEAT_COMPAT_UNKNOWN ~XFS_SB_FEAT_COMPAT_ALL static inline bool xfs_sb_has_compat_feature( struct xfs_sb *sbp, uint32_t feature) { return (sbp->sb_features_compat & feature) != 0; } #define XFS_SB_FEAT_RO_COMPAT_FINOBT (1 << 0) /* free inode btree */ #define XFS_SB_FEAT_RO_COMPAT_RMAPBT (1 << 1) /* reverse map btree */ #define XFS_SB_FEAT_RO_COMPAT_REFLINK (1 << 2) /* reflinked files */ #define XFS_SB_FEAT_RO_COMPAT_INOBTCNT (1 << 3) /* inobt block counts */ #define XFS_SB_FEAT_RO_COMPAT_ALL \ (XFS_SB_FEAT_RO_COMPAT_FINOBT | \ XFS_SB_FEAT_RO_COMPAT_RMAPBT | \ XFS_SB_FEAT_RO_COMPAT_REFLINK| \ XFS_SB_FEAT_RO_COMPAT_INOBTCNT) #define XFS_SB_FEAT_RO_COMPAT_UNKNOWN ~XFS_SB_FEAT_RO_COMPAT_ALL static inline bool xfs_sb_has_ro_compat_feature( struct xfs_sb *sbp, uint32_t feature) { return (sbp->sb_features_ro_compat & feature) != 0; } #define XFS_SB_FEAT_INCOMPAT_FTYPE (1 << 0) /* filetype in dirent */ #define XFS_SB_FEAT_INCOMPAT_SPINODES (1 << 1) /* sparse inode chunks */ #define XFS_SB_FEAT_INCOMPAT_META_UUID (1 << 2) /* metadata UUID */ #define XFS_SB_FEAT_INCOMPAT_BIGTIME (1 << 3) /* large timestamps */ #define XFS_SB_FEAT_INCOMPAT_NEEDSREPAIR (1 << 4) /* needs xfs_repair */ #define XFS_SB_FEAT_INCOMPAT_ALL \ (XFS_SB_FEAT_INCOMPAT_FTYPE| \ XFS_SB_FEAT_INCOMPAT_SPINODES| \ XFS_SB_FEAT_INCOMPAT_META_UUID| \ XFS_SB_FEAT_INCOMPAT_BIGTIME| \ XFS_SB_FEAT_INCOMPAT_NEEDSREPAIR) #define XFS_SB_FEAT_INCOMPAT_UNKNOWN ~XFS_SB_FEAT_INCOMPAT_ALL static inline bool xfs_sb_has_incompat_feature( struct xfs_sb *sbp, uint32_t feature) { return (sbp->sb_features_incompat & feature) != 0; } #define XFS_SB_FEAT_INCOMPAT_LOG_ALL 0 #define XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN ~XFS_SB_FEAT_INCOMPAT_LOG_ALL static inline bool xfs_sb_has_incompat_log_feature( struct xfs_sb *sbp, uint32_t feature) { return (sbp->sb_features_log_incompat & feature) != 0; } static inline void xfs_sb_remove_incompat_log_features( struct xfs_sb *sbp) { sbp->sb_features_log_incompat &= ~XFS_SB_FEAT_INCOMPAT_LOG_ALL; } static inline void xfs_sb_add_incompat_log_features( struct xfs_sb *sbp, unsigned int features) { sbp->sb_features_log_incompat |= features; } static inline bool xfs_is_quota_inode(struct xfs_sb *sbp, xfs_ino_t ino) { return (ino == sbp->sb_uquotino || ino == sbp->sb_gquotino || ino == sbp->sb_pquotino); } #define XFS_SB_DADDR ((xfs_daddr_t)0) /* daddr in filesystem/ag */ #define XFS_SB_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_SB_DADDR) #define XFS_HDR_BLOCK(mp,d) ((xfs_agblock_t)XFS_BB_TO_FSBT(mp,d)) #define XFS_DADDR_TO_FSB(mp,d) XFS_AGB_TO_FSB(mp, \ xfs_daddr_to_agno(mp,d), xfs_daddr_to_agbno(mp,d)) #define XFS_FSB_TO_DADDR(mp,fsbno) XFS_AGB_TO_DADDR(mp, \ XFS_FSB_TO_AGNO(mp,fsbno), XFS_FSB_TO_AGBNO(mp,fsbno)) /* * File system sector to basic block conversions. */ #define XFS_FSS_TO_BB(mp,sec) ((sec) << (mp)->m_sectbb_log) /* * File system block to basic block conversions. */ #define XFS_FSB_TO_BB(mp,fsbno) ((fsbno) << (mp)->m_blkbb_log) #define XFS_BB_TO_FSB(mp,bb) \ (((bb) + (XFS_FSB_TO_BB(mp,1) - 1)) >> (mp)->m_blkbb_log) #define XFS_BB_TO_FSBT(mp,bb) ((bb) >> (mp)->m_blkbb_log) /* * File system block to byte conversions. 
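 *
 * As a worked example, assuming a 4096-byte block size (sb_blocklog = 12):
 * XFS_FSB_TO_B(mp, 3) is 3 << 12 = 12288 bytes, XFS_B_TO_FSB(mp, 12289)
 * rounds up to 4 blocks, and XFS_B_TO_FSBT(mp, 12289) truncates to 3.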
*/ #define XFS_FSB_TO_B(mp,fsbno) ((xfs_fsize_t)(fsbno) << (mp)->m_sb.sb_blocklog) #define XFS_B_TO_FSB(mp,b) \ ((((uint64_t)(b)) + (mp)->m_blockmask) >> (mp)->m_sb.sb_blocklog) #define XFS_B_TO_FSBT(mp,b) (((uint64_t)(b)) >> (mp)->m_sb.sb_blocklog) /* * Allocation group header * * This is divided into three structures, placed in sequential 512-byte * buffers after a copy of the superblock (also in a 512-byte buffer). */ #define XFS_AGF_MAGIC 0x58414746 /* 'XAGF' */ #define XFS_AGI_MAGIC 0x58414749 /* 'XAGI' */ #define XFS_AGFL_MAGIC 0x5841464c /* 'XAFL' */ #define XFS_AGF_VERSION 1 #define XFS_AGI_VERSION 1 #define XFS_AGF_GOOD_VERSION(v) ((v) == XFS_AGF_VERSION) #define XFS_AGI_GOOD_VERSION(v) ((v) == XFS_AGI_VERSION) /* * Btree number 0 is bno, 1 is cnt, 2 is rmap. This value gives the size of the * arrays below. */ #define XFS_BTNUM_AGF ((int)XFS_BTNUM_RMAPi + 1) /* * The second word of agf_levels in the first a.g. overlaps the EFS * superblock's magic number. Since the magic numbers valid for EFS * are > 64k, our value cannot be confused for an EFS superblock's. */ typedef struct xfs_agf { /* * Common allocation group header information */ __be32 agf_magicnum; /* magic number == XFS_AGF_MAGIC */ __be32 agf_versionnum; /* header version == XFS_AGF_VERSION */ __be32 agf_seqno; /* sequence # starting from 0 */ __be32 agf_length; /* size in blocks of a.g. */ /* * Freespace and rmap information */ __be32 agf_roots[XFS_BTNUM_AGF]; /* root blocks */ __be32 agf_levels[XFS_BTNUM_AGF]; /* btree levels */ __be32 agf_flfirst; /* first freelist block's index */ __be32 agf_fllast; /* last freelist block's index */ __be32 agf_flcount; /* count of blocks in freelist */ __be32 agf_freeblks; /* total free blocks */ __be32 agf_longest; /* longest free space */ __be32 agf_btreeblks; /* # of blocks held in AGF btrees */ uuid_t agf_uuid; /* uuid of filesystem */ __be32 agf_rmap_blocks; /* rmapbt blocks used */ __be32 agf_refcount_blocks; /* refcountbt blocks used */ __be32 agf_refcount_root; /* refcount tree root block */ __be32 agf_refcount_level; /* refcount btree levels */ /* * reserve some contiguous space for future logged fields before we add * the unlogged fields. This makes the range logging via flags and * structure offsets much simpler. */ __be64 agf_spare64[14]; /* unlogged fields, written during buffer writeback. 
*/ __be64 agf_lsn; /* last write sequence */ __be32 agf_crc; /* crc of agf sector */ __be32 agf_spare2; /* structure must be padded to 64 bit alignment */ } xfs_agf_t; #define XFS_AGF_CRC_OFF offsetof(struct xfs_agf, agf_crc) #define XFS_AGF_MAGICNUM 0x00000001 #define XFS_AGF_VERSIONNUM 0x00000002 #define XFS_AGF_SEQNO 0x00000004 #define XFS_AGF_LENGTH 0x00000008 #define XFS_AGF_ROOTS 0x00000010 #define XFS_AGF_LEVELS 0x00000020 #define XFS_AGF_FLFIRST 0x00000040 #define XFS_AGF_FLLAST 0x00000080 #define XFS_AGF_FLCOUNT 0x00000100 #define XFS_AGF_FREEBLKS 0x00000200 #define XFS_AGF_LONGEST 0x00000400 #define XFS_AGF_BTREEBLKS 0x00000800 #define XFS_AGF_UUID 0x00001000 #define XFS_AGF_RMAP_BLOCKS 0x00002000 #define XFS_AGF_REFCOUNT_BLOCKS 0x00004000 #define XFS_AGF_REFCOUNT_ROOT 0x00008000 #define XFS_AGF_REFCOUNT_LEVEL 0x00010000 #define XFS_AGF_SPARE64 0x00020000 #define XFS_AGF_NUM_BITS 18 #define XFS_AGF_ALL_BITS ((1 << XFS_AGF_NUM_BITS) - 1) #define XFS_AGF_FLAGS \ { XFS_AGF_MAGICNUM, "MAGICNUM" }, \ { XFS_AGF_VERSIONNUM, "VERSIONNUM" }, \ { XFS_AGF_SEQNO, "SEQNO" }, \ { XFS_AGF_LENGTH, "LENGTH" }, \ { XFS_AGF_ROOTS, "ROOTS" }, \ { XFS_AGF_LEVELS, "LEVELS" }, \ { XFS_AGF_FLFIRST, "FLFIRST" }, \ { XFS_AGF_FLLAST, "FLLAST" }, \ { XFS_AGF_FLCOUNT, "FLCOUNT" }, \ { XFS_AGF_FREEBLKS, "FREEBLKS" }, \ { XFS_AGF_LONGEST, "LONGEST" }, \ { XFS_AGF_BTREEBLKS, "BTREEBLKS" }, \ { XFS_AGF_UUID, "UUID" }, \ { XFS_AGF_RMAP_BLOCKS, "RMAP_BLOCKS" }, \ { XFS_AGF_REFCOUNT_BLOCKS, "REFCOUNT_BLOCKS" }, \ { XFS_AGF_REFCOUNT_ROOT, "REFCOUNT_ROOT" }, \ { XFS_AGF_REFCOUNT_LEVEL, "REFCOUNT_LEVEL" }, \ { XFS_AGF_SPARE64, "SPARE64" } /* disk block (xfs_daddr_t) in the AG */ #define XFS_AGF_DADDR(mp) ((xfs_daddr_t)(1 << (mp)->m_sectbb_log)) #define XFS_AGF_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGF_DADDR(mp)) /* * Size of the unlinked inode hash table in the agi. */ #define XFS_AGI_UNLINKED_BUCKETS 64 typedef struct xfs_agi { /* * Common allocation group header information */ __be32 agi_magicnum; /* magic number == XFS_AGI_MAGIC */ __be32 agi_versionnum; /* header version == XFS_AGI_VERSION */ __be32 agi_seqno; /* sequence # starting from 0 */ __be32 agi_length; /* size in blocks of a.g. */ /* * Inode information * Inodes are mapped by interpreting the inode number, so no * mapping data is needed here. */ __be32 agi_count; /* count of allocated inodes */ __be32 agi_root; /* root of inode btree */ __be32 agi_level; /* levels in inode btree */ __be32 agi_freecount; /* number of free inodes */ __be32 agi_newino; /* new inode just allocated */ __be32 agi_dirino; /* last directory inode chunk */ /* * Hash table of inodes which have been unlinked but are * still being referenced. */ __be32 agi_unlinked[XFS_AGI_UNLINKED_BUCKETS]; /* * This marks the end of logging region 1 and start of logging region 2. 
*/ uuid_t agi_uuid; /* uuid of filesystem */ __be32 agi_crc; /* crc of agi sector */ __be32 agi_pad32; __be64 agi_lsn; /* last write sequence */ __be32 agi_free_root; /* root of the free inode btree */ __be32 agi_free_level;/* levels in free inode btree */ __be32 agi_iblocks; /* inobt blocks used */ __be32 agi_fblocks; /* finobt blocks used */ /* structure must be padded to 64 bit alignment */ } xfs_agi_t; #define XFS_AGI_CRC_OFF offsetof(struct xfs_agi, agi_crc) #define XFS_AGI_MAGICNUM (1 << 0) #define XFS_AGI_VERSIONNUM (1 << 1) #define XFS_AGI_SEQNO (1 << 2) #define XFS_AGI_LENGTH (1 << 3) #define XFS_AGI_COUNT (1 << 4) #define XFS_AGI_ROOT (1 << 5) #define XFS_AGI_LEVEL (1 << 6) #define XFS_AGI_FREECOUNT (1 << 7) #define XFS_AGI_NEWINO (1 << 8) #define XFS_AGI_DIRINO (1 << 9) #define XFS_AGI_UNLINKED (1 << 10) #define XFS_AGI_NUM_BITS_R1 11 /* end of the 1st agi logging region */ #define XFS_AGI_ALL_BITS_R1 ((1 << XFS_AGI_NUM_BITS_R1) - 1) #define XFS_AGI_FREE_ROOT (1 << 11) #define XFS_AGI_FREE_LEVEL (1 << 12) #define XFS_AGI_IBLOCKS (1 << 13) /* both inobt/finobt block counters */ #define XFS_AGI_NUM_BITS_R2 14 /* disk block (xfs_daddr_t) in the AG */ #define XFS_AGI_DADDR(mp) ((xfs_daddr_t)(2 << (mp)->m_sectbb_log)) #define XFS_AGI_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGI_DADDR(mp)) /* * The third a.g. block contains the a.g. freelist, an array * of block pointers to blocks owned by the allocation btree code. */ #define XFS_AGFL_DADDR(mp) ((xfs_daddr_t)(3 << (mp)->m_sectbb_log)) #define XFS_AGFL_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGFL_DADDR(mp)) #define XFS_BUF_TO_AGFL(bp) ((struct xfs_agfl *)((bp)->b_addr)) struct xfs_agfl { __be32 agfl_magicnum; __be32 agfl_seqno; uuid_t agfl_uuid; __be64 agfl_lsn; __be32 agfl_crc; } __attribute__((packed)); #define XFS_AGFL_CRC_OFF offsetof(struct xfs_agfl, agfl_crc) #define XFS_AGB_TO_FSB(mp,agno,agbno) \ (((xfs_fsblock_t)(agno) << (mp)->m_sb.sb_agblklog) | (agbno)) #define XFS_FSB_TO_AGNO(mp,fsbno) \ ((xfs_agnumber_t)((fsbno) >> (mp)->m_sb.sb_agblklog)) #define XFS_FSB_TO_AGBNO(mp,fsbno) \ ((xfs_agblock_t)((fsbno) & xfs_mask32lo((mp)->m_sb.sb_agblklog))) #define XFS_AGB_TO_DADDR(mp,agno,agbno) \ ((xfs_daddr_t)XFS_FSB_TO_BB(mp, \ (xfs_fsblock_t)(agno) * (mp)->m_sb.sb_agblocks + (agbno))) #define XFS_AG_DADDR(mp,agno,d) (XFS_AGB_TO_DADDR(mp, agno, 0) + (d)) /* * For checking for bad ranges of xfs_daddr_t's, covering multiple * allocation groups or a single xfs_daddr_t that's a superblock copy. */ #define XFS_AG_CHECK_DADDR(mp,d,len) \ ((len) == 1 ? \ ASSERT((d) == XFS_SB_DADDR || \ xfs_daddr_to_agbno(mp, d) != XFS_SB_DADDR) : \ ASSERT(xfs_daddr_to_agno(mp, d) == \ xfs_daddr_to_agno(mp, (d) + (len) - 1))) /* * XFS Timestamps * ============== * * Traditional ondisk inode timestamps consist of signed 32-bit counters for * seconds and nanoseconds; time zero is the Unix epoch, Jan 1 00:00:00 UTC * 1970, which means that the timestamp epoch is the same as the Unix epoch. * Therefore, the ondisk min and max defined here can be used directly to * constrain the incore timestamps on a Unix system. Note that we actually * encode a __be64 value on disk. * * When the bigtime feature is enabled, ondisk inode timestamps become an * unsigned 64-bit nanoseconds counter. This means that the bigtime inode * timestamp epoch is the start of the classic timestamp range, which is * Dec 31 20:45:52 UTC 1901. Because the epochs are not the same, callers * /must/ use the bigtime conversion functions when encoding and decoding raw * timestamps. 
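 *
 * As an illustration, a Unix timestamp of 0 (the 1970 epoch) is stored as
 * t_sec = 0 in the legacy format, while xfs_unix_to_bigtime(0) below yields
 * 2147483648, i.e. the Unix epoch sits exactly 2^31 seconds into the
 * bigtime range; the nanoseconds are folded in when the full 64-bit ondisk
 * counter is built.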
*/ typedef __be64 xfs_timestamp_t; /* Legacy timestamp encoding format. */ struct xfs_legacy_timestamp { __be32 t_sec; /* timestamp seconds */ __be32 t_nsec; /* timestamp nanoseconds */ }; /* * Smallest possible ondisk seconds value with traditional timestamps. This * corresponds exactly with the incore timestamp Dec 13 20:45:52 UTC 1901. */ #define XFS_LEGACY_TIME_MIN ((int64_t)S32_MIN) /* * Largest possible ondisk seconds value with traditional timestamps. This * corresponds exactly with the incore timestamp Jan 19 03:14:07 UTC 2038. */ #define XFS_LEGACY_TIME_MAX ((int64_t)S32_MAX) /* * Smallest possible ondisk seconds value with bigtime timestamps. This * corresponds (after conversion to a Unix timestamp) with the traditional * minimum timestamp of Dec 13 20:45:52 UTC 1901. */ #define XFS_BIGTIME_TIME_MIN ((int64_t)0) /* * Largest supported ondisk seconds value with bigtime timestamps. This * corresponds (after conversion to a Unix timestamp) with an incore timestamp * of Jul 2 20:20:24 UTC 2486. * * We round down the ondisk limit so that the bigtime quota and inode max * timestamps will be the same. */ #define XFS_BIGTIME_TIME_MAX ((int64_t)((-1ULL / NSEC_PER_SEC) & ~0x3ULL)) /* * Bigtime epoch is set exactly to the minimum time value that a traditional * 32-bit timestamp can represent when using the Unix epoch as a reference. * Hence the Unix epoch is at a fixed offset into the supported bigtime * timestamp range. * * The bigtime epoch also matches the minimum value an on-disk 32-bit XFS * timestamp can represent so we will not lose any fidelity in converting * to/from unix and bigtime timestamps. * * The following conversion factor converts a seconds counter from the Unix * epoch to the bigtime epoch. */ #define XFS_BIGTIME_EPOCH_OFFSET (-(int64_t)S32_MIN) /* Convert a timestamp from the Unix epoch to the bigtime epoch. */ static inline uint64_t xfs_unix_to_bigtime(time64_t unix_seconds) { return (uint64_t)unix_seconds + XFS_BIGTIME_EPOCH_OFFSET; } /* Convert a timestamp from the bigtime epoch to the Unix epoch. */ static inline time64_t xfs_bigtime_to_unix(uint64_t ondisk_seconds) { return (time64_t)ondisk_seconds - XFS_BIGTIME_EPOCH_OFFSET; } /* * On-disk inode structure. * * This is just the header or "dinode core", the inode is expanded to fill a * variable size the leftover area split into a data and an attribute fork. * The format of the data and attribute fork depends on the format of the * inode as indicated by di_format and di_aformat. To access the data and * attribute use the XFS_DFORK_DPTR, XFS_DFORK_APTR, and XFS_DFORK_PTR macros * below. * * There is a very similar struct xfs_log_dinode which matches the layout of * this structure, but is kept in native format instead of big endian. * * Note: di_flushiter is only used by v1/2 inodes - it's effectively a zeroed * padding field for v3 inodes. 
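 *
 * For example, with the layout below a v3 inode core occupies 176 bytes
 * (sizeof(struct xfs_dinode)), while v1/v2 inodes stop just short of di_crc
 * at 100 bytes (see xfs_dinode_size()). The data fork begins immediately
 * after the core, and when an attribute fork exists it begins
 * XFS_DFORK_BOFF(dip), i.e. di_forkoff << 3 bytes, into the fork area.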
*/ #define XFS_DINODE_MAGIC 0x494e /* 'IN' */ typedef struct xfs_dinode { __be16 di_magic; /* inode magic # = XFS_DINODE_MAGIC */ __be16 di_mode; /* mode and type of file */ __u8 di_version; /* inode version */ __u8 di_format; /* format of di_c data */ __be16 di_onlink; /* old number of links to file */ __be32 di_uid; /* owner's user id */ __be32 di_gid; /* owner's group id */ __be32 di_nlink; /* number of links to file */ __be16 di_projid_lo; /* lower part of owner's project id */ __be16 di_projid_hi; /* higher part owner's project id */ __u8 di_pad[6]; /* unused, zeroed space */ __be16 di_flushiter; /* incremented on flush */ xfs_timestamp_t di_atime; /* time last accessed */ xfs_timestamp_t di_mtime; /* time last modified */ xfs_timestamp_t di_ctime; /* time created/inode modified */ __be64 di_size; /* number of bytes in file */ __be64 di_nblocks; /* # of direct & btree blocks used */ __be32 di_extsize; /* basic/minimum extent size for file */ __be32 di_nextents; /* number of extents in data fork */ __be16 di_anextents; /* number of extents in attribute fork*/ __u8 di_forkoff; /* attr fork offs, <<3 for 64b align */ __s8 di_aformat; /* format of attr fork's data */ __be32 di_dmevmask; /* DMIG event mask */ __be16 di_dmstate; /* DMIG state info */ __be16 di_flags; /* random flags, XFS_DIFLAG_... */ __be32 di_gen; /* generation number */ /* di_next_unlinked is the only non-core field in the old dinode */ __be32 di_next_unlinked;/* agi unlinked list ptr */ /* start of the extended dinode, writable fields */ __le32 di_crc; /* CRC of the inode */ __be64 di_changecount; /* number of attribute changes */ __be64 di_lsn; /* flush sequence */ __be64 di_flags2; /* more random flags */ __be32 di_cowextsize; /* basic cow extent size for file */ __u8 di_pad2[12]; /* more padding for future expansion */ /* fields only written to during inode creation */ xfs_timestamp_t di_crtime; /* time created */ __be64 di_ino; /* inode number */ uuid_t di_uuid; /* UUID of the filesystem */ /* structure must be padded to 64 bit alignment */ } xfs_dinode_t; #define XFS_DINODE_CRC_OFF offsetof(struct xfs_dinode, di_crc) #define DI_MAX_FLUSH 0xffff /* * Size of the core inode on disk. Version 1 and 2 inodes have * the same size, but version 3 has grown a few additional fields. */ static inline uint xfs_dinode_size(int version) { if (version == 3) return sizeof(struct xfs_dinode); return offsetof(struct xfs_dinode, di_crc); } /* * The 32 bit link count in the inode theoretically maxes out at UINT_MAX. * Since the pathconf interface is signed, we use 2^31 - 1 instead. */ #define XFS_MAXLINK ((1U << 31) - 1U) /* * Values for di_format * * This enum is used in string mapping in xfs_trace.h; please keep the * TRACE_DEFINE_ENUMs for it up to date. */ enum xfs_dinode_fmt { XFS_DINODE_FMT_DEV, /* xfs_dev_t */ XFS_DINODE_FMT_LOCAL, /* bulk data */ XFS_DINODE_FMT_EXTENTS, /* struct xfs_bmbt_rec */ XFS_DINODE_FMT_BTREE, /* struct xfs_bmdr_block */ XFS_DINODE_FMT_UUID /* added long ago, but never used */ }; #define XFS_INODE_FORMAT_STR \ { XFS_DINODE_FMT_DEV, "dev" }, \ { XFS_DINODE_FMT_LOCAL, "local" }, \ { XFS_DINODE_FMT_EXTENTS, "extent" }, \ { XFS_DINODE_FMT_BTREE, "btree" }, \ { XFS_DINODE_FMT_UUID, "uuid" } /* * Inode minimum and maximum sizes. */ #define XFS_DINODE_MIN_LOG 8 #define XFS_DINODE_MAX_LOG 11 #define XFS_DINODE_MIN_SIZE (1 << XFS_DINODE_MIN_LOG) #define XFS_DINODE_MAX_SIZE (1 << XFS_DINODE_MAX_LOG) /* * Inode size for given fs. */ #define XFS_DINODE_SIZE(mp) \ (xfs_has_v3inodes(mp) ? 
\ sizeof(struct xfs_dinode) : \ offsetof(struct xfs_dinode, di_crc)) #define XFS_LITINO(mp) \ ((mp)->m_sb.sb_inodesize - XFS_DINODE_SIZE(mp)) /* * Inode data & attribute fork sizes, per inode. */ #define XFS_DFORK_BOFF(dip) ((int)((dip)->di_forkoff << 3)) #define XFS_DFORK_DSIZE(dip,mp) \ ((dip)->di_forkoff ? XFS_DFORK_BOFF(dip) : XFS_LITINO(mp)) #define XFS_DFORK_ASIZE(dip,mp) \ ((dip)->di_forkoff ? XFS_LITINO(mp) - XFS_DFORK_BOFF(dip) : 0) #define XFS_DFORK_SIZE(dip,mp,w) \ ((w) == XFS_DATA_FORK ? \ XFS_DFORK_DSIZE(dip, mp) : \ XFS_DFORK_ASIZE(dip, mp)) #define XFS_DFORK_MAXEXT(dip, mp, w) \ (XFS_DFORK_SIZE(dip, mp, w) / sizeof(struct xfs_bmbt_rec)) /* * Return pointers to the data or attribute forks. */ #define XFS_DFORK_DPTR(dip) \ ((char *)dip + xfs_dinode_size(dip->di_version)) #define XFS_DFORK_APTR(dip) \ (XFS_DFORK_DPTR(dip) + XFS_DFORK_BOFF(dip)) #define XFS_DFORK_PTR(dip,w) \ ((w) == XFS_DATA_FORK ? XFS_DFORK_DPTR(dip) : XFS_DFORK_APTR(dip)) #define XFS_DFORK_FORMAT(dip,w) \ ((w) == XFS_DATA_FORK ? \ (dip)->di_format : \ (dip)->di_aformat) #define XFS_DFORK_NEXTENTS(dip,w) \ ((w) == XFS_DATA_FORK ? \ be32_to_cpu((dip)->di_nextents) : \ be16_to_cpu((dip)->di_anextents)) /* * For block and character special files the 32bit dev_t is stored at the * beginning of the data fork. */ static inline xfs_dev_t xfs_dinode_get_rdev(struct xfs_dinode *dip) { return be32_to_cpu(*(__be32 *)XFS_DFORK_DPTR(dip)); } static inline void xfs_dinode_put_rdev(struct xfs_dinode *dip, xfs_dev_t rdev) { *(__be32 *)XFS_DFORK_DPTR(dip) = cpu_to_be32(rdev); } /* * Values for di_flags */ #define XFS_DIFLAG_REALTIME_BIT 0 /* file's blocks come from rt area */ #define XFS_DIFLAG_PREALLOC_BIT 1 /* file space has been preallocated */ #define XFS_DIFLAG_NEWRTBM_BIT 2 /* for rtbitmap inode, new format */ #define XFS_DIFLAG_IMMUTABLE_BIT 3 /* inode is immutable */ #define XFS_DIFLAG_APPEND_BIT 4 /* inode is append-only */ #define XFS_DIFLAG_SYNC_BIT 5 /* inode is written synchronously */ #define XFS_DIFLAG_NOATIME_BIT 6 /* do not update atime */ #define XFS_DIFLAG_NODUMP_BIT 7 /* do not dump */ #define XFS_DIFLAG_RTINHERIT_BIT 8 /* create with realtime bit set */ #define XFS_DIFLAG_PROJINHERIT_BIT 9 /* create with parents projid */ #define XFS_DIFLAG_NOSYMLINKS_BIT 10 /* disallow symlink creation */ #define XFS_DIFLAG_EXTSIZE_BIT 11 /* inode extent size allocator hint */ #define XFS_DIFLAG_EXTSZINHERIT_BIT 12 /* inherit inode extent size */ #define XFS_DIFLAG_NODEFRAG_BIT 13 /* do not reorganize/defragment */ #define XFS_DIFLAG_FILESTREAM_BIT 14 /* use filestream allocator */ /* Do not use bit 15, di_flags is legacy and unchanging now */ #define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT) #define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT) #define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT) #define XFS_DIFLAG_IMMUTABLE (1 << XFS_DIFLAG_IMMUTABLE_BIT) #define XFS_DIFLAG_APPEND (1 << XFS_DIFLAG_APPEND_BIT) #define XFS_DIFLAG_SYNC (1 << XFS_DIFLAG_SYNC_BIT) #define XFS_DIFLAG_NOATIME (1 << XFS_DIFLAG_NOATIME_BIT) #define XFS_DIFLAG_NODUMP (1 << XFS_DIFLAG_NODUMP_BIT) #define XFS_DIFLAG_RTINHERIT (1 << XFS_DIFLAG_RTINHERIT_BIT) #define XFS_DIFLAG_PROJINHERIT (1 << XFS_DIFLAG_PROJINHERIT_BIT) #define XFS_DIFLAG_NOSYMLINKS (1 << XFS_DIFLAG_NOSYMLINKS_BIT) #define XFS_DIFLAG_EXTSIZE (1 << XFS_DIFLAG_EXTSIZE_BIT) #define XFS_DIFLAG_EXTSZINHERIT (1 << XFS_DIFLAG_EXTSZINHERIT_BIT) #define XFS_DIFLAG_NODEFRAG (1 << XFS_DIFLAG_NODEFRAG_BIT) #define XFS_DIFLAG_FILESTREAM (1 << XFS_DIFLAG_FILESTREAM_BIT) 
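/*
 * XFS_DIFLAG_ANY below is the union of all valid di_flags bits. As an
 * example of the inheritance flags, a regular file created in a directory
 * that has XFS_DIFLAG_RTINHERIT set is itself created with
 * XFS_DIFLAG_REALTIME ("create with realtime bit set" above).
 */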
#define XFS_DIFLAG_ANY \ (XFS_DIFLAG_REALTIME | XFS_DIFLAG_PREALLOC | XFS_DIFLAG_NEWRTBM | \ XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND | XFS_DIFLAG_SYNC | \ XFS_DIFLAG_NOATIME | XFS_DIFLAG_NODUMP | XFS_DIFLAG_RTINHERIT | \ XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS | XFS_DIFLAG_EXTSIZE | \ XFS_DIFLAG_EXTSZINHERIT | XFS_DIFLAG_NODEFRAG | XFS_DIFLAG_FILESTREAM) /* * Values for di_flags2 These start by being exposed to userspace in the upper * 16 bits of the XFS_XFLAG_s range. */ #define XFS_DIFLAG2_DAX_BIT 0 /* use DAX for this inode */ #define XFS_DIFLAG2_REFLINK_BIT 1 /* file's blocks may be shared */ #define XFS_DIFLAG2_COWEXTSIZE_BIT 2 /* copy on write extent size hint */ #define XFS_DIFLAG2_BIGTIME_BIT 3 /* big timestamps */ #define XFS_DIFLAG2_DAX (1 << XFS_DIFLAG2_DAX_BIT) #define XFS_DIFLAG2_REFLINK (1 << XFS_DIFLAG2_REFLINK_BIT) #define XFS_DIFLAG2_COWEXTSIZE (1 << XFS_DIFLAG2_COWEXTSIZE_BIT) #define XFS_DIFLAG2_BIGTIME (1 << XFS_DIFLAG2_BIGTIME_BIT) #define XFS_DIFLAG2_ANY \ (XFS_DIFLAG2_DAX | XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE | \ XFS_DIFLAG2_BIGTIME) static inline bool xfs_dinode_has_bigtime(const struct xfs_dinode *dip) { return dip->di_version >= 3 && (dip->di_flags2 & cpu_to_be64(XFS_DIFLAG2_BIGTIME)); } /* * Inode number format: * low inopblog bits - offset in block * next agblklog bits - block number in ag * next agno_log bits - ag number * high agno_log-agblklog-inopblog bits - 0 */ #define XFS_INO_MASK(k) (uint32_t)((1ULL << (k)) - 1) #define XFS_INO_OFFSET_BITS(mp) (mp)->m_sb.sb_inopblog #define XFS_INO_AGBNO_BITS(mp) (mp)->m_sb.sb_agblklog #define XFS_INO_AGINO_BITS(mp) ((mp)->m_ino_geo.agino_log) #define XFS_INO_AGNO_BITS(mp) (mp)->m_agno_log #define XFS_INO_BITS(mp) \ XFS_INO_AGNO_BITS(mp) + XFS_INO_AGINO_BITS(mp) #define XFS_INO_TO_AGNO(mp,i) \ ((xfs_agnumber_t)((i) >> XFS_INO_AGINO_BITS(mp))) #define XFS_INO_TO_AGINO(mp,i) \ ((xfs_agino_t)(i) & XFS_INO_MASK(XFS_INO_AGINO_BITS(mp))) #define XFS_INO_TO_AGBNO(mp,i) \ (((xfs_agblock_t)(i) >> XFS_INO_OFFSET_BITS(mp)) & \ XFS_INO_MASK(XFS_INO_AGBNO_BITS(mp))) #define XFS_INO_TO_OFFSET(mp,i) \ ((int)(i) & XFS_INO_MASK(XFS_INO_OFFSET_BITS(mp))) #define XFS_INO_TO_FSB(mp,i) \ XFS_AGB_TO_FSB(mp, XFS_INO_TO_AGNO(mp,i), XFS_INO_TO_AGBNO(mp,i)) #define XFS_AGINO_TO_INO(mp,a,i) \ (((xfs_ino_t)(a) << XFS_INO_AGINO_BITS(mp)) | (i)) #define XFS_AGINO_TO_AGBNO(mp,i) ((i) >> XFS_INO_OFFSET_BITS(mp)) #define XFS_AGINO_TO_OFFSET(mp,i) \ ((i) & XFS_INO_MASK(XFS_INO_OFFSET_BITS(mp))) #define XFS_OFFBNO_TO_AGINO(mp,b,o) \ ((xfs_agino_t)(((b) << XFS_INO_OFFSET_BITS(mp)) | (o))) #define XFS_FSB_TO_INO(mp, b) ((xfs_ino_t)((b) << XFS_INO_OFFSET_BITS(mp))) #define XFS_AGB_TO_AGINO(mp, b) ((xfs_agino_t)((b) << XFS_INO_OFFSET_BITS(mp))) #define XFS_MAXINUMBER ((xfs_ino_t)((1ULL << 56) - 1ULL)) #define XFS_MAXINUMBER_32 ((xfs_ino_t)((1ULL << 32) - 1ULL)) /* * RealTime Device format definitions */ /* Min and max rt extent sizes, specified in bytes */ #define XFS_MAX_RTEXTSIZE (1024 * 1024 * 1024) /* 1GB */ #define XFS_DFL_RTEXTSIZE (64 * 1024) /* 64kB */ #define XFS_MIN_RTEXTSIZE (4 * 1024) /* 4kB */ #define XFS_BLOCKSIZE(mp) ((mp)->m_sb.sb_blocksize) #define XFS_BLOCKMASK(mp) ((mp)->m_blockmask) #define XFS_BLOCKWSIZE(mp) ((mp)->m_blockwsize) #define XFS_BLOCKWMASK(mp) ((mp)->m_blockwmask) /* * RT Summary and bit manipulation macros. 
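 *
 * For example, with 4096-byte blocks each realtime bitmap block carries
 * 32768 bits (m_blkbit_log = 15), so XFS_BITTOBLOCK(mp, 40000) is bitmap
 * block 1 and XFS_BLOCKTOBIT(mp, 1) gives back that block's first bit
 * number, 32768.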
*/ #define XFS_SUMOFFS(mp,ls,bb) ((int)((ls) * (mp)->m_sb.sb_rbmblocks + (bb))) #define XFS_SUMOFFSTOBLOCK(mp,s) \ (((s) * (uint)sizeof(xfs_suminfo_t)) >> (mp)->m_sb.sb_blocklog) #define XFS_SUMPTR(mp,bp,so) \ ((xfs_suminfo_t *)((bp)->b_addr + \ (((so) * (uint)sizeof(xfs_suminfo_t)) & XFS_BLOCKMASK(mp)))) #define XFS_BITTOBLOCK(mp,bi) ((bi) >> (mp)->m_blkbit_log) #define XFS_BLOCKTOBIT(mp,bb) ((bb) << (mp)->m_blkbit_log) #define XFS_BITTOWORD(mp,bi) \ ((int)(((bi) >> XFS_NBWORDLOG) & XFS_BLOCKWMASK(mp))) #define XFS_RTMIN(a,b) ((a) < (b) ? (a) : (b)) #define XFS_RTMAX(a,b) ((a) > (b) ? (a) : (b)) #define XFS_RTLOBIT(w) xfs_lowbit32(w) #define XFS_RTHIBIT(w) xfs_highbit32(w) #define XFS_RTBLOCKLOG(b) xfs_highbit64(b) /* * Dquot and dquot block format definitions */ #define XFS_DQUOT_MAGIC 0x4451 /* 'DQ' */ #define XFS_DQUOT_VERSION (uint8_t)0x01 /* latest version number */ #define XFS_DQTYPE_USER 0x01 /* user dquot record */ #define XFS_DQTYPE_PROJ 0x02 /* project dquot record */ #define XFS_DQTYPE_GROUP 0x04 /* group dquot record */ #define XFS_DQTYPE_BIGTIME 0x80 /* large expiry timestamps */ /* bitmask to determine if this is a user/group/project dquot */ #define XFS_DQTYPE_REC_MASK (XFS_DQTYPE_USER | \ XFS_DQTYPE_PROJ | \ XFS_DQTYPE_GROUP) #define XFS_DQTYPE_ANY (XFS_DQTYPE_REC_MASK | \ XFS_DQTYPE_BIGTIME) /* * XFS Quota Timers * ================ * * Traditional quota grace period expiration timers are an unsigned 32-bit * seconds counter; time zero is the Unix epoch, Jan 1 00:00:01 UTC 1970. * Note that an expiration value of zero means that the quota limit has not * been reached, and therefore no expiration has been set. Therefore, the * ondisk min and max defined here can be used directly to constrain the incore * quota expiration timestamps on a Unix system. * * When bigtime is enabled, we trade two bits of precision to expand the * expiration timeout range to match that of big inode timestamps. The min and * max recorded here are the on-disk limits, not a Unix timestamp. * * The grace period for each quota type is stored in the root dquot (id = 0) * and is applied to a non-root dquot when it exceeds the soft or hard limits. * The length of quota grace periods are unsigned 32-bit quantities measured in * units of seconds. A value of zero means to use the default period. */ /* * Smallest possible ondisk quota expiration value with traditional timestamps. * This corresponds exactly with the incore expiration Jan 1 00:00:01 UTC 1970. */ #define XFS_DQ_LEGACY_EXPIRY_MIN ((int64_t)1) /* * Largest possible ondisk quota expiration value with traditional timestamps. * This corresponds exactly with the incore expiration Feb 7 06:28:15 UTC 2106. */ #define XFS_DQ_LEGACY_EXPIRY_MAX ((int64_t)U32_MAX) /* * Smallest possible ondisk quota expiration value with bigtime timestamps. * This corresponds (after conversion to a Unix timestamp) with the incore * expiration of Jan 1 00:00:04 UTC 1970. */ #define XFS_DQ_BIGTIME_EXPIRY_MIN (XFS_DQ_LEGACY_EXPIRY_MIN) /* * Largest supported ondisk quota expiration value with bigtime timestamps. * This corresponds (after conversion to a Unix timestamp) with an incore * expiration of Jul 2 20:20:24 UTC 2486. * * The ondisk field supports values up to -1U, which corresponds to an incore * expiration in 2514. This is beyond the maximum the bigtime inode timestamp, * so we cap the maximum bigtime quota expiration to the max inode timestamp. 
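 *
 * For example, an incore expiration of 101 seconds is encoded below as
 * (101 + 3) >> 2 = 26 and decodes back to 26 << 2 = 104 seconds:
 * expirations are rounded up to the next 4-second granule so that a grace
 * period is never shortened by the encoding.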
*/ #define XFS_DQ_BIGTIME_EXPIRY_MAX ((int64_t)4074815106U) /* * The following conversion factors assist in converting a quota expiration * timestamp between the incore and ondisk formats. */ #define XFS_DQ_BIGTIME_SHIFT (2) #define XFS_DQ_BIGTIME_SLACK ((int64_t)(1ULL << XFS_DQ_BIGTIME_SHIFT) - 1) /* Convert an incore quota expiration timestamp to an ondisk bigtime value. */ static inline uint32_t xfs_dq_unix_to_bigtime(time64_t unix_seconds) { /* * Round the expiration timestamp up to the nearest bigtime timestamp * that we can store, to give users the most time to fix problems. */ return ((uint64_t)unix_seconds + XFS_DQ_BIGTIME_SLACK) >> XFS_DQ_BIGTIME_SHIFT; } /* Convert an ondisk bigtime quota expiration value to an incore timestamp. */ static inline time64_t xfs_dq_bigtime_to_unix(uint32_t ondisk_seconds) { return (time64_t)ondisk_seconds << XFS_DQ_BIGTIME_SHIFT; } /* * Default quota grace periods, ranging from zero (use the compiled defaults) * to ~136 years. These are applied to a non-root dquot that has exceeded * either limit. */ #define XFS_DQ_GRACE_MIN ((int64_t)0) #define XFS_DQ_GRACE_MAX ((int64_t)U32_MAX) /* * This is the main portion of the on-disk representation of quota information * for a user. We pad this with some more expansion room to construct the on * disk structure. */ struct xfs_disk_dquot { __be16 d_magic; /* dquot magic = XFS_DQUOT_MAGIC */ __u8 d_version; /* dquot version */ __u8 d_type; /* XFS_DQTYPE_USER/PROJ/GROUP */ __be32 d_id; /* user,project,group id */ __be64 d_blk_hardlimit;/* absolute limit on disk blks */ __be64 d_blk_softlimit;/* preferred limit on disk blks */ __be64 d_ino_hardlimit;/* maximum # allocated inodes */ __be64 d_ino_softlimit;/* preferred inode limit */ __be64 d_bcount; /* disk blocks owned by the user */ __be64 d_icount; /* inodes owned by the user */ __be32 d_itimer; /* zero if within inode limits if not, this is when we refuse service */ __be32 d_btimer; /* similar to above; for disk blocks */ __be16 d_iwarns; /* warnings issued wrt num inodes */ __be16 d_bwarns; /* warnings issued wrt disk blocks */ __be32 d_pad0; /* 64 bit align */ __be64 d_rtb_hardlimit;/* absolute limit on realtime blks */ __be64 d_rtb_softlimit;/* preferred limit on RT disk blks */ __be64 d_rtbcount; /* realtime blocks owned */ __be32 d_rtbtimer; /* similar to above; for RT disk blocks */ __be16 d_rtbwarns; /* warnings issued wrt RT disk blocks */ __be16 d_pad; }; /* * This is what goes on disk. This is separated from the xfs_disk_dquot because * carrying the unnecessary padding would be a waste of memory. */ typedef struct xfs_dqblk { struct xfs_disk_dquot dd_diskdq; /* portion living incore as well */ char dd_fill[4];/* filling for posterity */ /* * These two are only present on filesystems with the CRC bits set. */ __be32 dd_crc; /* checksum */ __be64 dd_lsn; /* last modification in log */ uuid_t dd_uuid; /* location information */ } xfs_dqblk_t; #define XFS_DQUOT_CRC_OFF offsetof(struct xfs_dqblk, dd_crc) /* * This defines the unit of allocation of dquots. * * Currently, it is just one file system block, and a 4K blk contains 30 * (136 * 30 = 4080) dquots. It's probably not worth trying to make * this more dynamic. * * However, if this number is changed, we have to make sure that we don't * implicitly assume that we do allocations in chunks of a single filesystem * block in the dquot/xqm code. * * This is part of the ondisk format because the structure size is not a power * of two, which leaves slack at the end of the disk block. 
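 *
 * For example, struct xfs_disk_dquot above is 104 bytes, and the fill, CRC,
 * LSN and UUID of struct xfs_dqblk bring the ondisk record to the 136 bytes
 * quoted above, so a 4096-byte block holds 30 dquots with
 * 4096 - 4080 = 16 bytes of slack.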
*/ #define XFS_DQUOT_CLUSTER_SIZE_FSB (xfs_filblks_t)1 /* * Remote symlink format and access functions. */ #define XFS_SYMLINK_MAGIC 0x58534c4d /* XSLM */ struct xfs_dsymlink_hdr { __be32 sl_magic; __be32 sl_offset; __be32 sl_bytes; __be32 sl_crc; uuid_t sl_uuid; __be64 sl_owner; __be64 sl_blkno; __be64 sl_lsn; }; #define XFS_SYMLINK_CRC_OFF offsetof(struct xfs_dsymlink_hdr, sl_crc) #define XFS_SYMLINK_MAXLEN 1024 /* * The maximum pathlen is 1024 bytes. Since the minimum file system * blocksize is 512 bytes, we can get a max of 3 extents back from * bmapi when crc headers are taken into account. */ #define XFS_SYMLINK_MAPS 3 #define XFS_SYMLINK_BUF_SPACE(mp, bufsize) \ ((bufsize) - (xfs_has_crc((mp)) ? \ sizeof(struct xfs_dsymlink_hdr) : 0)) /* * Allocation Btree format definitions * * There are two on-disk btrees, one sorted by blockno and one sorted * by blockcount and blockno. All blocks look the same to make the code * simpler; if we have time later, we'll make the optimizations. */ #define XFS_ABTB_MAGIC 0x41425442 /* 'ABTB' for bno tree */ #define XFS_ABTB_CRC_MAGIC 0x41423342 /* 'AB3B' */ #define XFS_ABTC_MAGIC 0x41425443 /* 'ABTC' for cnt tree */ #define XFS_ABTC_CRC_MAGIC 0x41423343 /* 'AB3C' */ /* * Data record/key structure */ typedef struct xfs_alloc_rec { __be32 ar_startblock; /* starting block number */ __be32 ar_blockcount; /* count of free blocks */ } xfs_alloc_rec_t, xfs_alloc_key_t; typedef struct xfs_alloc_rec_incore { xfs_agblock_t ar_startblock; /* starting block number */ xfs_extlen_t ar_blockcount; /* count of free blocks */ } xfs_alloc_rec_incore_t; /* btree pointer type */ typedef __be32 xfs_alloc_ptr_t; /* * Block numbers in the AG: * SB is sector 0, AGF is sector 1, AGI is sector 2, AGFL is sector 3. */ #define XFS_BNO_BLOCK(mp) ((xfs_agblock_t)(XFS_AGFL_BLOCK(mp) + 1)) #define XFS_CNT_BLOCK(mp) ((xfs_agblock_t)(XFS_BNO_BLOCK(mp) + 1)) /* * Inode Allocation Btree format definitions * * There is a btree for the inode map per allocation group. */ #define XFS_IBT_MAGIC 0x49414254 /* 'IABT' */ #define XFS_IBT_CRC_MAGIC 0x49414233 /* 'IAB3' */ #define XFS_FIBT_MAGIC 0x46494254 /* 'FIBT' */ #define XFS_FIBT_CRC_MAGIC 0x46494233 /* 'FIB3' */ typedef uint64_t xfs_inofree_t; #define XFS_INODES_PER_CHUNK (NBBY * sizeof(xfs_inofree_t)) #define XFS_INODES_PER_CHUNK_LOG (XFS_NBBYLOG + 3) #define XFS_INOBT_ALL_FREE ((xfs_inofree_t)-1) #define XFS_INOBT_MASK(i) ((xfs_inofree_t)1 << (i)) #define XFS_INOBT_HOLEMASK_FULL 0 /* holemask for full chunk */ #define XFS_INOBT_HOLEMASK_BITS (NBBY * sizeof(uint16_t)) #define XFS_INODES_PER_HOLEMASK_BIT \ (XFS_INODES_PER_CHUNK / (NBBY * sizeof(uint16_t))) static inline xfs_inofree_t xfs_inobt_maskn(int i, int n) { return ((n >= XFS_INODES_PER_CHUNK ? 0 : XFS_INOBT_MASK(n)) - 1) << i; } /* * The on-disk inode record structure has two formats. The original "full" * format uses a 4-byte freecount. The "sparse" format uses a 1-byte freecount * and replaces the 3 high-order freecount bytes wth the holemask and inode * count. * * The holemask of the sparse record format allows an inode chunk to have holes * that refer to blocks not owned by the inode record. This facilitates inode * allocation in the event of severe free space fragmentation. 
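 *
 * For example, each of the 16 holemask bits covers
 * XFS_INODES_PER_HOLEMASK_BIT = 4 inodes, so a sparse record with
 * ir_holemask = 0x000f describes a 64-inode chunk whose first 16 inodes are
 * not physically allocated, and ir_count covers only the remaining 48.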
*/ typedef struct xfs_inobt_rec { __be32 ir_startino; /* starting inode number */ union { struct { __be32 ir_freecount; /* count of free inodes */ } f; struct { __be16 ir_holemask;/* hole mask for sparse chunks */ __u8 ir_count; /* total inode count */ __u8 ir_freecount; /* count of free inodes */ } sp; } ir_u; __be64 ir_free; /* free inode mask */ } xfs_inobt_rec_t; typedef struct xfs_inobt_rec_incore { xfs_agino_t ir_startino; /* starting inode number */ uint16_t ir_holemask; /* hole mask for sparse chunks */ uint8_t ir_count; /* total inode count */ uint8_t ir_freecount; /* count of free inodes (set bits) */ xfs_inofree_t ir_free; /* free inode mask */ } xfs_inobt_rec_incore_t; static inline bool xfs_inobt_issparse(uint16_t holemask) { /* non-zero holemask represents a sparse rec. */ return holemask; } /* * Key structure */ typedef struct xfs_inobt_key { __be32 ir_startino; /* starting inode number */ } xfs_inobt_key_t; /* btree pointer type */ typedef __be32 xfs_inobt_ptr_t; /* * block numbers in the AG. */ #define XFS_IBT_BLOCK(mp) ((xfs_agblock_t)(XFS_CNT_BLOCK(mp) + 1)) #define XFS_FIBT_BLOCK(mp) ((xfs_agblock_t)(XFS_IBT_BLOCK(mp) + 1)) /* * Reverse mapping btree format definitions * * There is a btree for the reverse map per allocation group */ #define XFS_RMAP_CRC_MAGIC 0x524d4233 /* 'RMB3' */ /* * Ownership info for an extent. This is used to create reverse-mapping * entries. */ #define XFS_OWNER_INFO_ATTR_FORK (1 << 0) #define XFS_OWNER_INFO_BMBT_BLOCK (1 << 1) struct xfs_owner_info { uint64_t oi_owner; xfs_fileoff_t oi_offset; unsigned int oi_flags; }; /* * Special owner types. * * Seeing as we only support up to 8EB, we have the upper bit of the owner field * to tell us we have a special owner value. We use these for static metadata * allocated at mkfs/growfs time, as well as for freespace management metadata. 
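 *
 * For example, blocks of the AG free space btrees are tracked with owner
 * XFS_RMAP_OWN_AG and inode chunks with XFS_RMAP_OWN_INODES;
 * XFS_RMAP_NON_INODE_OWNER() below keys off bit 63 to tell these apart from
 * real inode numbers, which never reach the high bit (XFS_MAXINUMBER is
 * 2^56 - 1).
 */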
*/ #define XFS_RMAP_OWN_NULL (-1ULL) /* No owner, for growfs */ #define XFS_RMAP_OWN_UNKNOWN (-2ULL) /* Unknown owner, for EFI recovery */ #define XFS_RMAP_OWN_FS (-3ULL) /* static fs metadata */ #define XFS_RMAP_OWN_LOG (-4ULL) /* static fs metadata */ #define XFS_RMAP_OWN_AG (-5ULL) /* AG freespace btree blocks */ #define XFS_RMAP_OWN_INOBT (-6ULL) /* Inode btree blocks */ #define XFS_RMAP_OWN_INODES (-7ULL) /* Inode chunk */ #define XFS_RMAP_OWN_REFC (-8ULL) /* refcount tree */ #define XFS_RMAP_OWN_COW (-9ULL) /* cow allocations */ #define XFS_RMAP_OWN_MIN (-10ULL) /* guard */ #define XFS_RMAP_NON_INODE_OWNER(owner) (!!((owner) & (1ULL << 63))) /* * Data record structure */ struct xfs_rmap_rec { __be32 rm_startblock; /* extent start block */ __be32 rm_blockcount; /* extent length */ __be64 rm_owner; /* extent owner */ __be64 rm_offset; /* offset within the owner */ }; /* * rmap btree record * rm_offset:63 is the attribute fork flag * rm_offset:62 is the bmbt block flag * rm_offset:61 is the unwritten extent flag (same as l0:63 in bmbt) * rm_offset:54-60 aren't used and should be zero * rm_offset:0-53 is the block offset within the inode */ #define XFS_RMAP_OFF_ATTR_FORK ((uint64_t)1ULL << 63) #define XFS_RMAP_OFF_BMBT_BLOCK ((uint64_t)1ULL << 62) #define XFS_RMAP_OFF_UNWRITTEN ((uint64_t)1ULL << 61) #define XFS_RMAP_LEN_MAX ((uint32_t)~0U) #define XFS_RMAP_OFF_FLAGS (XFS_RMAP_OFF_ATTR_FORK | \ XFS_RMAP_OFF_BMBT_BLOCK | \ XFS_RMAP_OFF_UNWRITTEN) #define XFS_RMAP_OFF_MASK ((uint64_t)0x3FFFFFFFFFFFFFULL) #define XFS_RMAP_OFF(off) ((off) & XFS_RMAP_OFF_MASK) #define XFS_RMAP_IS_BMBT_BLOCK(off) (!!((off) & XFS_RMAP_OFF_BMBT_BLOCK)) #define XFS_RMAP_IS_ATTR_FORK(off) (!!((off) & XFS_RMAP_OFF_ATTR_FORK)) #define XFS_RMAP_IS_UNWRITTEN(len) (!!((off) & XFS_RMAP_OFF_UNWRITTEN)) #define RMAPBT_STARTBLOCK_BITLEN 32 #define RMAPBT_BLOCKCOUNT_BITLEN 32 #define RMAPBT_OWNER_BITLEN 64 #define RMAPBT_ATTRFLAG_BITLEN 1 #define RMAPBT_BMBTFLAG_BITLEN 1 #define RMAPBT_EXNTFLAG_BITLEN 1 #define RMAPBT_UNUSED_OFFSET_BITLEN 7 #define RMAPBT_OFFSET_BITLEN 54 #define XFS_RMAP_ATTR_FORK (1 << 0) #define XFS_RMAP_BMBT_BLOCK (1 << 1) #define XFS_RMAP_UNWRITTEN (1 << 2) #define XFS_RMAP_KEY_FLAGS (XFS_RMAP_ATTR_FORK | \ XFS_RMAP_BMBT_BLOCK) #define XFS_RMAP_REC_FLAGS (XFS_RMAP_UNWRITTEN) struct xfs_rmap_irec { xfs_agblock_t rm_startblock; /* extent start block */ xfs_extlen_t rm_blockcount; /* extent length */ uint64_t rm_owner; /* extent owner */ uint64_t rm_offset; /* offset within the owner */ unsigned int rm_flags; /* state flags */ }; /* * Key structure * * We don't use the length for lookups */ struct xfs_rmap_key { __be32 rm_startblock; /* extent start block */ __be64 rm_owner; /* extent owner */ __be64 rm_offset; /* offset within the owner */ } __attribute__((packed)); /* btree pointer type */ typedef __be32 xfs_rmap_ptr_t; #define XFS_RMAP_BLOCK(mp) \ (xfs_has_finobt(((mp))) ? \ XFS_FIBT_BLOCK(mp) + 1 : \ XFS_IBT_BLOCK(mp) + 1) /* * Reference Count Btree format definitions * */ #define XFS_REFC_CRC_MAGIC 0x52334643 /* 'R3FC' */ unsigned int xfs_refc_block(struct xfs_mount *mp); /* * Data record/key structure * * Each record associates a range of physical blocks (starting at * rc_startblock and ending rc_blockcount blocks later) with a reference * count (rc_refcount). Extents that are being used to stage a copy on * write (CoW) operation are recorded in the refcount btree with a * refcount of 1. All other records must have a refcount > 1 and must * track an extent mapped only by file data forks. 
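 *
 * For example, an AG extent mapped into three reflinked files carries
 * rc_refcount == 3; once only one mapping remains, the record is deleted
 * rather than left behind with a refcount of 1.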
* * Extents with a single owner (attributes, metadata, non-shared file * data) are not tracked here. Free space is also not tracked here. * This is consistent with pre-reflink XFS. */ /* * Extents that are being used to stage a copy on write are stored * in the refcount btree with a refcount of 1 and the upper bit set * on the startblock. This speeds up mount time deletion of stale * staging extents because they're all at the right side of the tree. */ #define XFS_REFC_COW_START ((xfs_agblock_t)(1U << 31)) #define REFCNTBT_COWFLAG_BITLEN 1 #define REFCNTBT_AGBLOCK_BITLEN 31 struct xfs_refcount_rec { __be32 rc_startblock; /* starting block number */ __be32 rc_blockcount; /* count of blocks */ __be32 rc_refcount; /* number of inodes linked here */ }; struct xfs_refcount_key { __be32 rc_startblock; /* starting block number */ }; struct xfs_refcount_irec { xfs_agblock_t rc_startblock; /* starting block number */ xfs_extlen_t rc_blockcount; /* count of free blocks */ xfs_nlink_t rc_refcount; /* number of inodes linked here */ }; #define MAXREFCOUNT ((xfs_nlink_t)~0U) #define MAXREFCEXTLEN ((xfs_extlen_t)~0U) /* btree pointer type */ typedef __be32 xfs_refcount_ptr_t; /* * BMAP Btree format definitions * * This includes both the root block definition that sits inside an inode fork * and the record/pointer formats for the leaf/node in the blocks. */ #define XFS_BMAP_MAGIC 0x424d4150 /* 'BMAP' */ #define XFS_BMAP_CRC_MAGIC 0x424d4133 /* 'BMA3' */ /* * Bmap root header, on-disk form only. */ typedef struct xfs_bmdr_block { __be16 bb_level; /* 0 is a leaf */ __be16 bb_numrecs; /* current # of data records */ } xfs_bmdr_block_t; /* * Bmap btree record and extent descriptor. * l0:63 is an extent flag (value 1 indicates non-normal). * l0:9-62 are startoff. * l0:0-8 and l1:21-63 are startblock. * l1:0-20 are blockcount. */ #define BMBT_EXNTFLAG_BITLEN 1 #define BMBT_STARTOFF_BITLEN 54 #define BMBT_STARTBLOCK_BITLEN 52 #define BMBT_BLOCKCOUNT_BITLEN 21 #define BMBT_STARTOFF_MASK ((1ULL << BMBT_STARTOFF_BITLEN) - 1) #define BMBT_BLOCKCOUNT_MASK ((1ULL << BMBT_BLOCKCOUNT_BITLEN) - 1) /* * bmbt records have a file offset (block) field that is 54 bits wide, so this * is the largest xfs_fileoff_t that we ever expect to see. */ #define XFS_MAX_FILEOFF (BMBT_STARTOFF_MASK + BMBT_BLOCKCOUNT_MASK) typedef struct xfs_bmbt_rec { __be64 l0, l1; } xfs_bmbt_rec_t; typedef uint64_t xfs_bmbt_rec_base_t; /* use this for casts */ typedef xfs_bmbt_rec_t xfs_bmdr_rec_t; /* * Values and macros for delayed-allocation startblock fields. */ #define STARTBLOCKVALBITS 17 #define STARTBLOCKMASKBITS (15 + 20) #define STARTBLOCKMASK \ (((((xfs_fsblock_t)1) << STARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS) static inline int isnullstartblock(xfs_fsblock_t x) { return ((x) & STARTBLOCKMASK) == STARTBLOCKMASK; } static inline xfs_fsblock_t nullstartblock(int k) { ASSERT(k < (1 << STARTBLOCKVALBITS)); return STARTBLOCKMASK | (k); } static inline xfs_filblks_t startblockval(xfs_fsblock_t x) { return (xfs_filblks_t)((x) & ~STARTBLOCKMASK); } /* * Key structure for non-leaf levels of the tree. */ typedef struct xfs_bmbt_key { __be64 br_startoff; /* starting file offset */ } xfs_bmbt_key_t, xfs_bmdr_key_t; /* btree pointer type */ typedef __be64 xfs_bmbt_ptr_t, xfs_bmdr_ptr_t; /* * Generic Btree block format definitions * * This is a combination of the actual format used on disk for short and long * format btrees. The first three fields are shared by both format, but the * pointers are different and should be used with care. 
* * To get the size of the actual short or long form headers please use the size * macros below. Never use sizeof(xfs_btree_block). * * The blkno, crc, lsn, owner and uuid fields are only available in filesystems * with the crc feature bit, and all accesses to them must be conditional on * that flag. */ /* short form block header */ struct xfs_btree_block_shdr { __be32 bb_leftsib; __be32 bb_rightsib; __be64 bb_blkno; __be64 bb_lsn; uuid_t bb_uuid; __be32 bb_owner; __le32 bb_crc; }; /* long form block header */ struct xfs_btree_block_lhdr { __be64 bb_leftsib; __be64 bb_rightsib; __be64 bb_blkno; __be64 bb_lsn; uuid_t bb_uuid; __be64 bb_owner; __le32 bb_crc; __be32 bb_pad; /* padding for alignment */ }; struct xfs_btree_block { __be32 bb_magic; /* magic number for block type */ __be16 bb_level; /* 0 is a leaf */ __be16 bb_numrecs; /* current # of data records */ union { struct xfs_btree_block_shdr s; struct xfs_btree_block_lhdr l; } bb_u; /* rest */ }; /* size of a short form block */ #define XFS_BTREE_SBLOCK_LEN \ (offsetof(struct xfs_btree_block, bb_u) + \ offsetof(struct xfs_btree_block_shdr, bb_blkno)) /* size of a long form block */ #define XFS_BTREE_LBLOCK_LEN \ (offsetof(struct xfs_btree_block, bb_u) + \ offsetof(struct xfs_btree_block_lhdr, bb_blkno)) /* sizes of CRC enabled btree blocks */ #define XFS_BTREE_SBLOCK_CRC_LEN \ (offsetof(struct xfs_btree_block, bb_u) + \ sizeof(struct xfs_btree_block_shdr)) #define XFS_BTREE_LBLOCK_CRC_LEN \ (offsetof(struct xfs_btree_block, bb_u) + \ sizeof(struct xfs_btree_block_lhdr)) #define XFS_BTREE_SBLOCK_CRC_OFF \ offsetof(struct xfs_btree_block, bb_u.s.bb_crc) #define XFS_BTREE_LBLOCK_CRC_OFF \ offsetof(struct xfs_btree_block, bb_u.l.bb_crc) /* * On-disk XFS access control list structure. */ struct xfs_acl_entry { __be32 ae_tag; __be32 ae_id; __be16 ae_perm; __be16 ae_pad; /* fill the implicit hole in the structure */ }; struct xfs_acl { __be32 acl_cnt; struct xfs_acl_entry acl_entry[]; }; /* * The number of ACL entries allowed is defined by the on-disk format. * For v4 superblocks, that is limited to 25 entries. For v5 superblocks, it is * limited only by the maximum size of the xattr that stores the information. */ #define XFS_ACL_MAX_ENTRIES(mp) \ (xfs_has_crc(mp) \ ? (XFS_XATTR_SIZE_MAX - sizeof(struct xfs_acl)) / \ sizeof(struct xfs_acl_entry) \ : 25) #define XFS_ACL_SIZE(cnt) \ (sizeof(struct xfs_acl) + \ sizeof(struct xfs_acl_entry) * cnt) #define XFS_ACL_MAX_SIZE(mp) \ XFS_ACL_SIZE(XFS_ACL_MAX_ENTRIES((mp))) /* On-disk XFS extended attribute names */ #define SGI_ACL_FILE "SGI_ACL_FILE" #define SGI_ACL_DEFAULT "SGI_ACL_DEFAULT" #define SGI_ACL_FILE_SIZE (sizeof(SGI_ACL_FILE)-1) #define SGI_ACL_DEFAULT_SIZE (sizeof(SGI_ACL_DEFAULT)-1) #endif /* __XFS_FORMAT_H__ */ |
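/*
 * Editor's note: the helper below is an illustrative sketch only, not part of
 * the XFS format header above. It shows how the rm_offset bit layout
 * documented with struct xfs_rmap_rec (attr-fork flag in bit 63, bmbt-block
 * flag in bit 62, unwritten flag in bit 61, file offset in bits 0-53) can be
 * unpacked into the incore struct xfs_rmap_irec using the XFS_RMAP_OFF_*
 * masks and XFS_RMAP_* flag bits defined there. The function name is
 * hypothetical; the kernel's real conversion helpers live elsewhere in the
 * XFS rmap code.
 */
static inline void
xfs_rmap_rec_unpack_example(const struct xfs_rmap_rec *rec,
			    struct xfs_rmap_irec *irec)
{
	uint64_t off = be64_to_cpu(rec->rm_offset);

	irec->rm_startblock = be32_to_cpu(rec->rm_startblock);
	irec->rm_blockcount = be32_to_cpu(rec->rm_blockcount);
	irec->rm_owner = be64_to_cpu(rec->rm_owner);
	irec->rm_offset = XFS_RMAP_OFF(off);		/* bits 0-53 */
	irec->rm_flags = 0;
	if (XFS_RMAP_IS_ATTR_FORK(off))			/* bit 63 */
		irec->rm_flags |= XFS_RMAP_ATTR_FORK;
	if (XFS_RMAP_IS_BMBT_BLOCK(off))		/* bit 62 */
		irec->rm_flags |= XFS_RMAP_BMBT_BLOCK;
	if (off & XFS_RMAP_OFF_UNWRITTEN)		/* bit 61 */
		irec->rm_flags |= XFS_RMAP_UNWRITTEN;
}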
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/proc/array.c * * Copyright (C) 1992 by Linus Torvalds * based on ideas by Darren Senn * * Fixes: * Michael. K. Johnson: stat,statm extensions.
* <johnsonm@stolaf.edu> * * Pauline Middelink : Made cmdline,envline only break at '\0's, to * make sure SET_PROCTITLE works. Also removed * bad '!' which forced address recalculation for * EVERY character on the current page. * <middelin@polyware.iaf.nl> * * Danny ter Haar : added cpuinfo * <dth@cistron.nl> * * Alessandro Rubini : profile extension. * <rubini@ipvvis.unipv.it> * * Jeff Tranter : added BogoMips field to cpuinfo * <Jeff_Tranter@Mitel.COM> * * Bruno Haible : remove 4K limit for the maps file * <haible@ma2s2.mathematik.uni-karlsruhe.de> * * Yves Arrouye : remove removal of trailing spaces in get_array. * <Yves.Arrouye@marin.fdn.fr> * * Jerome Forissier : added per-CPU time information to /proc/stat * and /proc/<pid>/cpu extension * <forissier@isia.cma.fr> * - Incorporation and non-SMP safe operation * of forissier patch in 2.1.78 by * Hans Marcus <crowbar@concepts.nl> * * aeb@cwi.nl : /proc/partitions * * * Alan Cox : security fixes. * <alan@lxorguk.ukuu.org.uk> * * Al Viro : safe handling of mm_struct * * Gerhard Wichert : added BIGMEM support * Siemens AG <Gerhard.Wichert@pdb.siemens.de> * * Al Viro & Jeff Garzik : moved most of the thing into base.c and * : proc_misc.c. The rest may eventually go into * : base.c too. */ #include <linux/types.h> #include <linux/errno.h> #include <linux/time.h> #include <linux/time_namespace.h> #include <linux/kernel.h> #include <linux/kernel_stat.h> #include <linux/tty.h> #include <linux/string.h> #include <linux/mman.h> #include <linux/sched/mm.h> #include <linux/sched/numa_balancing.h> #include <linux/sched/task_stack.h> #include <linux/sched/task.h> #include <linux/sched/cputime.h> #include <linux/proc_fs.h> #include <linux/ioport.h> #include <linux/uaccess.h> #include <linux/io.h> #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/pagemap.h> #include <linux/swap.h> #include <linux/smp.h> #include <linux/signal.h> #include <linux/highmem.h> #include <linux/file.h> #include <linux/fdtable.h> #include <linux/times.h> #include <linux/cpuset.h> #include <linux/rcupdate.h> #include <linux/delayacct.h> #include <linux/seq_file.h> #include <linux/pid_namespace.h> #include <linux/prctl.h> #include <linux/ptrace.h> #include <linux/tracehook.h> #include <linux/string_helpers.h> #include <linux/user_namespace.h> #include <linux/fs_struct.h> #include <asm/processor.h> #include "internal.h" void proc_task_name(struct seq_file *m, struct task_struct *p, bool escape) { char tcomm[64]; if (p->flags & PF_WQ_WORKER) wq_worker_comm(tcomm, sizeof(tcomm), p); else __get_task_comm(tcomm, sizeof(tcomm), p); if (escape) seq_escape_str(m, tcomm, ESCAPE_SPACE | ESCAPE_SPECIAL, "\n\\"); else seq_printf(m, "%.64s", tcomm); } /* * The task state array is a strange "bitmap" of * reasons to sleep. Thus "running" is zero, and * you can test for combinations of others with * simple bit tests. 
*/ static const char * const task_state_array[] = { /* states in TASK_REPORT: */ "R (running)", /* 0x00 */ "S (sleeping)", /* 0x01 */ "D (disk sleep)", /* 0x02 */ "T (stopped)", /* 0x04 */ "t (tracing stop)", /* 0x08 */ "X (dead)", /* 0x10 */ "Z (zombie)", /* 0x20 */ "P (parked)", /* 0x40 */ /* states beyond TASK_REPORT: */ "I (idle)", /* 0x80 */ }; static inline const char *get_task_state(struct task_struct *tsk) { BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != ARRAY_SIZE(task_state_array)); return task_state_array[task_state_index(tsk)]; } static inline void task_state(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *p) { struct user_namespace *user_ns = seq_user_ns(m); struct group_info *group_info; int g, umask = -1; struct task_struct *tracer; const struct cred *cred; pid_t ppid, tpid = 0, tgid, ngid; unsigned int max_fds = 0; rcu_read_lock(); ppid = pid_alive(p) ? task_tgid_nr_ns(rcu_dereference(p->real_parent), ns) : 0; tracer = ptrace_parent(p); if (tracer) tpid = task_pid_nr_ns(tracer, ns); tgid = task_tgid_nr_ns(p, ns); ngid = task_numa_group_id(p); cred = get_task_cred(p); task_lock(p); if (p->fs) umask = p->fs->umask; if (p->files) max_fds = files_fdtable(p->files)->max_fds; task_unlock(p); rcu_read_unlock(); if (umask >= 0) seq_printf(m, "Umask:\t%#04o\n", umask); seq_puts(m, "State:\t"); seq_puts(m, get_task_state(p)); seq_put_decimal_ull(m, "\nTgid:\t", tgid); seq_put_decimal_ull(m, "\nNgid:\t", ngid); seq_put_decimal_ull(m, "\nPid:\t", pid_nr_ns(pid, ns)); seq_put_decimal_ull(m, "\nPPid:\t", ppid); seq_put_decimal_ull(m, "\nTracerPid:\t", tpid); seq_put_decimal_ull(m, "\nUid:\t", from_kuid_munged(user_ns, cred->uid)); seq_put_decimal_ull(m, "\t", from_kuid_munged(user_ns, cred->euid)); seq_put_decimal_ull(m, "\t", from_kuid_munged(user_ns, cred->suid)); seq_put_decimal_ull(m, "\t", from_kuid_munged(user_ns, cred->fsuid)); seq_put_decimal_ull(m, "\nGid:\t", from_kgid_munged(user_ns, cred->gid)); seq_put_decimal_ull(m, "\t", from_kgid_munged(user_ns, cred->egid)); seq_put_decimal_ull(m, "\t", from_kgid_munged(user_ns, cred->sgid)); seq_put_decimal_ull(m, "\t", from_kgid_munged(user_ns, cred->fsgid)); seq_put_decimal_ull(m, "\nFDSize:\t", max_fds); seq_puts(m, "\nGroups:\t"); group_info = cred->group_info; for (g = 0; g < group_info->ngroups; g++) seq_put_decimal_ull(m, g ? " " : "", from_kgid_munged(user_ns, group_info->gid[g])); put_cred(cred); /* Trailing space shouldn't have been added in the first place. 
*/ seq_putc(m, ' '); #ifdef CONFIG_PID_NS seq_puts(m, "\nNStgid:"); for (g = ns->level; g <= pid->level; g++) seq_put_decimal_ull(m, "\t", task_tgid_nr_ns(p, pid->numbers[g].ns)); seq_puts(m, "\nNSpid:"); for (g = ns->level; g <= pid->level; g++) seq_put_decimal_ull(m, "\t", task_pid_nr_ns(p, pid->numbers[g].ns)); seq_puts(m, "\nNSpgid:"); for (g = ns->level; g <= pid->level; g++) seq_put_decimal_ull(m, "\t", task_pgrp_nr_ns(p, pid->numbers[g].ns)); seq_puts(m, "\nNSsid:"); for (g = ns->level; g <= pid->level; g++) seq_put_decimal_ull(m, "\t", task_session_nr_ns(p, pid->numbers[g].ns)); #endif seq_putc(m, '\n'); } void render_sigset_t(struct seq_file *m, const char *header, sigset_t *set) { int i; seq_puts(m, header); i = _NSIG; do { int x = 0; i -= 4; if (sigismember(set, i+1)) x |= 1; if (sigismember(set, i+2)) x |= 2; if (sigismember(set, i+3)) x |= 4; if (sigismember(set, i+4)) x |= 8; seq_putc(m, hex_asc[x]); } while (i >= 4); seq_putc(m, '\n'); } static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *sigign, sigset_t *sigcatch) { struct k_sigaction *k; int i; k = p->sighand->action; for (i = 1; i <= _NSIG; ++i, ++k) { if (k->sa.sa_handler == SIG_IGN) sigaddset(sigign, i); else if (k->sa.sa_handler != SIG_DFL) sigaddset(sigcatch, i); } } static inline void task_sig(struct seq_file *m, struct task_struct *p) { unsigned long flags; sigset_t pending, shpending, blocked, ignored, caught; int num_threads = 0; unsigned int qsize = 0; unsigned long qlim = 0; sigemptyset(&pending); sigemptyset(&shpending); sigemptyset(&blocked); sigemptyset(&ignored); sigemptyset(&caught); if (lock_task_sighand(p, &flags)) { pending = p->pending.signal; shpending = p->signal->shared_pending.signal; blocked = p->blocked; collect_sigign_sigcatch(p, &ignored, &caught); num_threads = get_nr_threads(p); rcu_read_lock(); /* FIXME: is this correct? 
*/ qsize = get_ucounts_value(task_ucounts(p), UCOUNT_RLIMIT_SIGPENDING); rcu_read_unlock(); qlim = task_rlimit(p, RLIMIT_SIGPENDING); unlock_task_sighand(p, &flags); } seq_put_decimal_ull(m, "Threads:\t", num_threads); seq_put_decimal_ull(m, "\nSigQ:\t", qsize); seq_put_decimal_ull(m, "/", qlim); /* render them all */ render_sigset_t(m, "\nSigPnd:\t", &pending); render_sigset_t(m, "ShdPnd:\t", &shpending); render_sigset_t(m, "SigBlk:\t", &blocked); render_sigset_t(m, "SigIgn:\t", &ignored); render_sigset_t(m, "SigCgt:\t", &caught); } static void render_cap_t(struct seq_file *m, const char *header, kernel_cap_t *a) { unsigned __capi; seq_puts(m, header); CAP_FOR_EACH_U32(__capi) { seq_put_hex_ll(m, NULL, a->cap[CAP_LAST_U32 - __capi], 8); } seq_putc(m, '\n'); } static inline void task_cap(struct seq_file *m, struct task_struct *p) { const struct cred *cred; kernel_cap_t cap_inheritable, cap_permitted, cap_effective, cap_bset, cap_ambient; rcu_read_lock(); cred = __task_cred(p); cap_inheritable = cred->cap_inheritable; cap_permitted = cred->cap_permitted; cap_effective = cred->cap_effective; cap_bset = cred->cap_bset; cap_ambient = cred->cap_ambient; rcu_read_unlock(); render_cap_t(m, "CapInh:\t", &cap_inheritable); render_cap_t(m, "CapPrm:\t", &cap_permitted); render_cap_t(m, "CapEff:\t", &cap_effective); render_cap_t(m, "CapBnd:\t", &cap_bset); render_cap_t(m, "CapAmb:\t", &cap_ambient); } static inline void task_seccomp(struct seq_file *m, struct task_struct *p) { seq_put_decimal_ull(m, "NoNewPrivs:\t", task_no_new_privs(p)); #ifdef CONFIG_SECCOMP seq_put_decimal_ull(m, "\nSeccomp:\t", p->seccomp.mode); #ifdef CONFIG_SECCOMP_FILTER seq_put_decimal_ull(m, "\nSeccomp_filters:\t", atomic_read(&p->seccomp.filter_count)); #endif #endif seq_puts(m, "\nSpeculation_Store_Bypass:\t"); switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) { case -EINVAL: seq_puts(m, "unknown"); break; case PR_SPEC_NOT_AFFECTED: seq_puts(m, "not vulnerable"); break; case PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE: seq_puts(m, "thread force mitigated"); break; case PR_SPEC_PRCTL | PR_SPEC_DISABLE: seq_puts(m, "thread mitigated"); break; case PR_SPEC_PRCTL | PR_SPEC_ENABLE: seq_puts(m, "thread vulnerable"); break; case PR_SPEC_DISABLE: seq_puts(m, "globally mitigated"); break; default: seq_puts(m, "vulnerable"); break; } seq_puts(m, "\nSpeculationIndirectBranch:\t"); switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_INDIRECT_BRANCH)) { case -EINVAL: seq_puts(m, "unsupported"); break; case PR_SPEC_NOT_AFFECTED: seq_puts(m, "not affected"); break; case PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE: seq_puts(m, "conditional force disabled"); break; case PR_SPEC_PRCTL | PR_SPEC_DISABLE: seq_puts(m, "conditional disabled"); break; case PR_SPEC_PRCTL | PR_SPEC_ENABLE: seq_puts(m, "conditional enabled"); break; case PR_SPEC_ENABLE: seq_puts(m, "always enabled"); break; case PR_SPEC_DISABLE: seq_puts(m, "always disabled"); break; default: seq_puts(m, "unknown"); break; } seq_putc(m, '\n'); } static inline void task_context_switch_counts(struct seq_file *m, struct task_struct *p) { seq_put_decimal_ull(m, "voluntary_ctxt_switches:\t", p->nvcsw); seq_put_decimal_ull(m, "\nnonvoluntary_ctxt_switches:\t", p->nivcsw); seq_putc(m, '\n'); } static void task_cpus_allowed(struct seq_file *m, struct task_struct *task) { seq_printf(m, "Cpus_allowed:\t%*pb\n", cpumask_pr_args(&task->cpus_mask)); seq_printf(m, "Cpus_allowed_list:\t%*pbl\n", cpumask_pr_args(&task->cpus_mask)); } static inline void task_core_dumping(struct seq_file *m, struct mm_struct 
*mm) { seq_put_decimal_ull(m, "CoreDumping:\t", !!mm->core_state); seq_putc(m, '\n'); } static inline void task_thp_status(struct seq_file *m, struct mm_struct *mm) { bool thp_enabled = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE); if (thp_enabled) thp_enabled = !test_bit(MMF_DISABLE_THP, &mm->flags); seq_printf(m, "THP_enabled:\t%d\n", thp_enabled); } int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { struct mm_struct *mm = get_task_mm(task); seq_puts(m, "Name:\t"); proc_task_name(m, task, true); seq_putc(m, '\n'); task_state(m, ns, pid, task); if (mm) { task_mem(m, mm); task_core_dumping(m, mm); task_thp_status(m, mm); mmput(mm); } task_sig(m, task); task_cap(m, task); task_seccomp(m, task); task_cpus_allowed(m, task); cpuset_task_status_allowed(m, task); task_context_switch_counts(m, task); return 0; } static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task, int whole) { unsigned long vsize, eip, esp, wchan = 0; int priority, nice; int tty_pgrp = -1, tty_nr = 0; sigset_t sigign, sigcatch; char state; pid_t ppid = 0, pgid = -1, sid = -1; int num_threads = 0; int permitted; struct mm_struct *mm; unsigned long long start_time; unsigned long cmin_flt = 0, cmaj_flt = 0; unsigned long min_flt = 0, maj_flt = 0; u64 cutime, cstime, utime, stime; u64 cgtime, gtime; unsigned long rsslim = 0; unsigned long flags; state = *get_task_state(task); vsize = eip = esp = 0; permitted = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS | PTRACE_MODE_NOAUDIT); mm = get_task_mm(task); if (mm) { vsize = task_vsize(mm); /* * esp and eip are intentionally zeroed out. There is no * non-racy way to read them without freezing the task. * Programs that need reliable values can use ptrace(2). * * The only exception is if the task is core dumping because * a program is not able to use ptrace(2) in that case. It is * safe because the task has stopped executing permanently. 
*/ if (permitted && (task->flags & (PF_EXITING|PF_DUMPCORE))) { if (try_get_task_stack(task)) { eip = KSTK_EIP(task); esp = KSTK_ESP(task); put_task_stack(task); } } } sigemptyset(&sigign); sigemptyset(&sigcatch); cutime = cstime = 0; cgtime = gtime = 0; if (lock_task_sighand(task, &flags)) { struct signal_struct *sig = task->signal; if (sig->tty) { struct pid *pgrp = tty_get_pgrp(sig->tty); tty_pgrp = pid_nr_ns(pgrp, ns); put_pid(pgrp); tty_nr = new_encode_dev(tty_devnum(sig->tty)); } num_threads = get_nr_threads(task); collect_sigign_sigcatch(task, &sigign, &sigcatch); cmin_flt = sig->cmin_flt; cmaj_flt = sig->cmaj_flt; cutime = sig->cutime; cstime = sig->cstime; cgtime = sig->cgtime; rsslim = READ_ONCE(sig->rlim[RLIMIT_RSS].rlim_cur); /* add up live thread stats at the group level */ if (whole) { struct task_struct *t = task; do { min_flt += t->min_flt; maj_flt += t->maj_flt; gtime += task_gtime(t); } while_each_thread(task, t); min_flt += sig->min_flt; maj_flt += sig->maj_flt; gtime += sig->gtime; } sid = task_session_nr_ns(task, ns); ppid = task_tgid_nr_ns(task->real_parent, ns); pgid = task_pgrp_nr_ns(task, ns); unlock_task_sighand(task, &flags); } if (permitted && (!whole || num_threads < 2)) wchan = !task_is_running(task); if (whole) { thread_group_cputime_adjusted(task, &utime, &stime); } else { task_cputime_adjusted(task, &utime, &stime); min_flt = task->min_flt; maj_flt = task->maj_flt; gtime = task_gtime(task); } /* scale priority and nice values from timeslices to -20..20 */ /* to make it look like a "normal" Unix priority/nice value */ priority = task_prio(task); nice = task_nice(task); /* apply timens offset for boottime and convert nsec -> ticks */ start_time = nsec_to_clock_t(timens_add_boottime_ns(task->start_boottime)); seq_put_decimal_ull(m, "", pid_nr_ns(pid, ns)); seq_puts(m, " ("); proc_task_name(m, task, false); seq_puts(m, ") "); seq_putc(m, state); seq_put_decimal_ll(m, " ", ppid); seq_put_decimal_ll(m, " ", pgid); seq_put_decimal_ll(m, " ", sid); seq_put_decimal_ll(m, " ", tty_nr); seq_put_decimal_ll(m, " ", tty_pgrp); seq_put_decimal_ull(m, " ", task->flags); seq_put_decimal_ull(m, " ", min_flt); seq_put_decimal_ull(m, " ", cmin_flt); seq_put_decimal_ull(m, " ", maj_flt); seq_put_decimal_ull(m, " ", cmaj_flt); seq_put_decimal_ull(m, " ", nsec_to_clock_t(utime)); seq_put_decimal_ull(m, " ", nsec_to_clock_t(stime)); seq_put_decimal_ll(m, " ", nsec_to_clock_t(cutime)); seq_put_decimal_ll(m, " ", nsec_to_clock_t(cstime)); seq_put_decimal_ll(m, " ", priority); seq_put_decimal_ll(m, " ", nice); seq_put_decimal_ll(m, " ", num_threads); seq_put_decimal_ull(m, " ", 0); seq_put_decimal_ull(m, " ", start_time); seq_put_decimal_ull(m, " ", vsize); seq_put_decimal_ull(m, " ", mm ? get_mm_rss(mm) : 0); seq_put_decimal_ull(m, " ", rsslim); seq_put_decimal_ull(m, " ", mm ? (permitted ? mm->start_code : 1) : 0); seq_put_decimal_ull(m, " ", mm ? (permitted ? mm->end_code : 1) : 0); seq_put_decimal_ull(m, " ", (permitted && mm) ? mm->start_stack : 0); seq_put_decimal_ull(m, " ", esp); seq_put_decimal_ull(m, " ", eip); /* The signal information here is obsolete. * It must be decimal for Linux 2.0 compatibility. * Use /proc/#/status for real-time signals. 
*/ seq_put_decimal_ull(m, " ", task->pending.signal.sig[0] & 0x7fffffffUL); seq_put_decimal_ull(m, " ", task->blocked.sig[0] & 0x7fffffffUL); seq_put_decimal_ull(m, " ", sigign.sig[0] & 0x7fffffffUL); seq_put_decimal_ull(m, " ", sigcatch.sig[0] & 0x7fffffffUL); /* * We used to output the absolute kernel address, but that's an * information leak - so instead we show a 0/1 flag here, to signal * to user-space whether there's a wchan field in /proc/PID/wchan. * * This works with older implementations of procps as well. */ seq_put_decimal_ull(m, " ", wchan); seq_put_decimal_ull(m, " ", 0); seq_put_decimal_ull(m, " ", 0); seq_put_decimal_ll(m, " ", task->exit_signal); seq_put_decimal_ll(m, " ", task_cpu(task)); seq_put_decimal_ull(m, " ", task->rt_priority); seq_put_decimal_ull(m, " ", task->policy); seq_put_decimal_ull(m, " ", delayacct_blkio_ticks(task)); seq_put_decimal_ull(m, " ", nsec_to_clock_t(gtime)); seq_put_decimal_ll(m, " ", nsec_to_clock_t(cgtime)); if (mm && permitted) { seq_put_decimal_ull(m, " ", mm->start_data); seq_put_decimal_ull(m, " ", mm->end_data); seq_put_decimal_ull(m, " ", mm->start_brk); seq_put_decimal_ull(m, " ", mm->arg_start); seq_put_decimal_ull(m, " ", mm->arg_end); seq_put_decimal_ull(m, " ", mm->env_start); seq_put_decimal_ull(m, " ", mm->env_end); } else seq_puts(m, " 0 0 0 0 0 0 0"); if (permitted) seq_put_decimal_ll(m, " ", task->exit_code); else seq_puts(m, " 0"); seq_putc(m, '\n'); if (mm) mmput(mm); return 0; } int proc_tid_stat(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { return do_task_stat(m, ns, pid, task, 0); } int proc_tgid_stat(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { return do_task_stat(m, ns, pid, task, 1); } int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { struct mm_struct *mm = get_task_mm(task); if (mm) { unsigned long size; unsigned long resident = 0; unsigned long shared = 0; unsigned long text = 0; unsigned long data = 0; size = task_statm(mm, &shared, &text, &data, &resident); mmput(mm); /* * For quick read, open code by putting numbers directly * expected format is * seq_printf(m, "%lu %lu %lu %lu 0 %lu 0\n", * size, resident, shared, text, data); */ seq_put_decimal_ull(m, "", size); seq_put_decimal_ull(m, " ", resident); seq_put_decimal_ull(m, " ", shared); seq_put_decimal_ull(m, " ", text); seq_put_decimal_ull(m, " ", 0); seq_put_decimal_ull(m, " ", data); seq_put_decimal_ull(m, " ", 0); seq_putc(m, '\n'); } else { seq_write(m, "0 0 0 0 0 0 0\n", 14); } return 0; } #ifdef CONFIG_PROC_CHILDREN static struct pid * get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos) { struct task_struct *start, *task; struct pid *pid = NULL; read_lock(&tasklist_lock); start = pid_task(proc_pid(inode), PIDTYPE_PID); if (!start) goto out; /* * Lets try to continue searching first, this gives * us significant speedup on children-rich processes. */ if (pid_prev) { task = pid_task(pid_prev, PIDTYPE_PID); if (task && task->real_parent == start && !(list_empty(&task->sibling))) { if (list_is_last(&task->sibling, &start->children)) goto out; task = list_first_entry(&task->sibling, struct task_struct, sibling); pid = get_pid(task_pid(task)); goto out; } } /* * Slow search case. * * We might miss some children here if children * are exited while we were not holding the lock, * but it was never promised to be accurate that * much. 
* * "Just suppose that the parent sleeps, but N children * exit after we printed their tids. Now the slow paths * skips N extra children, we miss N tasks." (c) * * So one need to stop or freeze the leader and all * its children to get a precise result. */ list_for_each_entry(task, &start->children, sibling) { if (pos-- == 0) { pid = get_pid(task_pid(task)); break; } } out: read_unlock(&tasklist_lock); return pid; } static int children_seq_show(struct seq_file *seq, void *v) { struct inode *inode = file_inode(seq->file); seq_printf(seq, "%d ", pid_nr_ns(v, proc_pid_ns(inode->i_sb))); return 0; } static void *children_seq_start(struct seq_file *seq, loff_t *pos) { return get_children_pid(file_inode(seq->file), NULL, *pos); } static void *children_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct pid *pid; pid = get_children_pid(file_inode(seq->file), v, *pos + 1); put_pid(v); ++*pos; return pid; } static void children_seq_stop(struct seq_file *seq, void *v) { put_pid(v); } static const struct seq_operations children_seq_ops = { .start = children_seq_start, .next = children_seq_next, .stop = children_seq_stop, .show = children_seq_show, }; static int children_seq_open(struct inode *inode, struct file *file) { return seq_open(file, &children_seq_ops); } const struct file_operations proc_tid_children_operations = { .open = children_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #endif /* CONFIG_PROC_CHILDREN */ |
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _IPV6_FRAG_H #define _IPV6_FRAG_H #include <linux/kernel.h> #include <net/addrconf.h> #include <net/ipv6.h> #include <net/inet_frag.h> enum ip6_defrag_users { IP6_DEFRAG_LOCAL_DELIVER, IP6_DEFRAG_CONNTRACK_IN, __IP6_DEFRAG_CONNTRACK_IN = IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX, IP6_DEFRAG_CONNTRACK_OUT, __IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX, IP6_DEFRAG_CONNTRACK_BRIDGE_IN, __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX, }; /* * Equivalent of ipv4 struct ip */ struct frag_queue { struct inet_frag_queue q; int iif; __u16 nhoffset; u8 ecn; }; #if IS_ENABLED(CONFIG_IPV6) static inline void ip6frag_init(struct inet_frag_queue *q, const void *a) { struct frag_queue *fq = container_of(q, struct frag_queue, q); const struct frag_v6_compare_key *key = a; q->key.v6 = *key; fq->ecn = 0; } static inline u32 ip6frag_key_hashfn(const void *data, u32 len, u32 seed) { return jhash2(data, sizeof(struct frag_v6_compare_key) / sizeof(u32), seed); } static inline u32 ip6frag_obj_hashfn(const void *data, u32 len, u32 seed) { const struct inet_frag_queue *fq = data; return jhash2((const u32 *)&fq->key.v6, sizeof(struct frag_v6_compare_key) / sizeof(u32), seed); } static inline int ip6frag_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr) { const struct frag_v6_compare_key *key = arg->key; const struct inet_frag_queue *fq = ptr; return !!memcmp(&fq->key, key, sizeof(*key)); } static inline void ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq) { struct net_device *dev = NULL; struct sk_buff *head; rcu_read_lock(); /* Paired with the WRITE_ONCE() in fqdir_pre_exit(). */ if (READ_ONCE(fq->q.fqdir->dead)) goto out_rcu_unlock; spin_lock(&fq->q.lock); if (fq->q.flags & INET_FRAG_COMPLETE) goto out; inet_frag_kill(&fq->q); dev = dev_get_by_index_rcu(net, fq->iif); if (!dev) goto out; __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT); /* Don't send error if the first segment did not arrive. */ if (!(fq->q.flags & INET_FRAG_FIRST_IN)) goto out; /* sk_buff::dev and sk_buff::rbnode are unionized. So we * pull the head out of the tree in order to be able to * deal with head->dev. */ head = inet_frag_pull_head(&fq->q); if (!head) goto out; head->dev = dev; spin_unlock(&fq->q.lock); icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0); kfree_skb(head); goto out_rcu_unlock; out: spin_unlock(&fq->q.lock); out_rcu_unlock: rcu_read_unlock(); inet_frag_put(&fq->q); } /* Check if the upper layer header is truncated in the first fragment.
*/ static inline bool ipv6frag_thdr_truncated(struct sk_buff *skb, int start, u8 *nexthdrp) { u8 nexthdr = *nexthdrp; __be16 frag_off; int offset; offset = ipv6_skip_exthdr(skb, start, &nexthdr, &frag_off); if (offset < 0 || (frag_off & htons(IP6_OFFSET))) return false; switch (nexthdr) { case NEXTHDR_TCP: offset += sizeof(struct tcphdr); break; case NEXTHDR_UDP: offset += sizeof(struct udphdr); break; case NEXTHDR_ICMP: offset += sizeof(struct icmp6hdr); break; default: offset += 1; } if (offset > skb->len) return true; return false; } #endif #endif |
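/*
 * Editor's note: an illustrative sketch of how a consumer is expected to wire
 * the three helpers above (ip6frag_key_hashfn, ip6frag_obj_hashfn and
 * ip6frag_obj_cmpfn) into a struct rhashtable_params, hooked to the
 * rhash_head embedded in struct inet_frag_queue. This mirrors the pattern
 * used by the in-tree callers (IPv6 reassembly and the netfilter conntrack
 * reassembler); the variable name is hypothetical.
 */
static const struct rhashtable_params ip6frag_rhash_params_example = {
	.head_offset		= offsetof(struct inet_frag_queue, node),
	.hashfn			= ip6frag_key_hashfn,
	.obj_hashfn		= ip6frag_obj_hashfn,
	.obj_cmpfn		= ip6frag_obj_cmpfn,
	.automatic_shrinking	= true,
};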
/* SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef __SOUND_CORE_H #define __SOUND_CORE_H /* * Main header file for the ALSA driver * Copyright (c) 1994-2001 by Jaroslav Kysela <perex@perex.cz> */ #include <linux/device.h> #include <linux/sched.h> /* wake_up() */ #include <linux/mutex.h> /* struct mutex */ #include <linux/rwsem.h> /* struct rw_semaphore */ #include <linux/pm.h> /* pm_message_t */ #include <linux/stringify.h> #include <linux/printk.h> /* number of supported soundcards */ #ifdef CONFIG_SND_DYNAMIC_MINORS #define SNDRV_CARDS CONFIG_SND_MAX_CARDS #else #define SNDRV_CARDS 8 /* don't change - minor numbers */ #endif #define CONFIG_SND_MAJOR 116 /* standard configuration */ /* forward declarations */ struct pci_dev; struct module; struct completion; /* device allocation stuff */ /* type of the object used in snd_device_*() * this also defines the calling order */ enum snd_device_type { SNDRV_DEV_LOWLEVEL, SNDRV_DEV_INFO, SNDRV_DEV_BUS, SNDRV_DEV_CODEC, SNDRV_DEV_PCM, SNDRV_DEV_COMPRESS, SNDRV_DEV_RAWMIDI, SNDRV_DEV_TIMER, SNDRV_DEV_SEQUENCER, SNDRV_DEV_HWDEP, SNDRV_DEV_JACK, SNDRV_DEV_CONTROL, /* NOTE: this must be the last one */ }; enum snd_device_state { SNDRV_DEV_BUILD, SNDRV_DEV_REGISTERED, SNDRV_DEV_DISCONNECTED, }; struct snd_device; struct snd_device_ops { int (*dev_free)(struct snd_device *dev); int (*dev_register)(struct snd_device *dev); int (*dev_disconnect)(struct snd_device *dev); }; struct snd_device { struct list_head list; /* list of registered devices */ struct snd_card *card; /* card which holds this device */ enum snd_device_state state; /*
state of the device */ enum snd_device_type type; /* device type */ void *device_data; /* device structure */ const struct snd_device_ops *ops; /* operations */ }; #define snd_device(n) list_entry(n, struct snd_device, list) /* main structure for soundcard */ struct snd_card { int number; /* number of soundcard (index to snd_cards) */ char id[16]; /* id string of this card */ char driver[16]; /* driver name */ char shortname[32]; /* short name of this soundcard */ char longname[80]; /* name of this soundcard */ char irq_descr[32]; /* Interrupt description */ char mixername[80]; /* mixer name */ char components[128]; /* card components delimited with space */ struct module *module; /* top-level module */ void *private_data; /* private data for soundcard */ void (*private_free) (struct snd_card *card); /* callback for freeing of private data */ struct list_head devices; /* devices */ struct device ctl_dev; /* control device */ unsigned int last_numid; /* last used numeric ID */ struct rw_semaphore controls_rwsem; /* controls list lock */ rwlock_t ctl_files_rwlock; /* ctl_files list lock */ int controls_count; /* count of all controls */ size_t user_ctl_alloc_size; // current memory allocation by user controls. struct list_head controls; /* all controls for this card */ struct list_head ctl_files; /* active control files */ struct snd_info_entry *proc_root; /* root for soundcard specific files */ struct proc_dir_entry *proc_root_link; /* number link to real id */ struct list_head files_list; /* all files associated to this card */ struct snd_shutdown_f_ops *s_f_ops; /* file operations in the shutdown state */ spinlock_t files_lock; /* lock the files for this card */ int shutdown; /* this card is going down */ struct completion *release_completion; struct device *dev; /* device assigned to this card */ struct device card_dev; /* cardX object for sysfs */ const struct attribute_group *dev_groups[4]; /* assigned sysfs attr */ bool registered; /* card_dev is registered? */ bool managed; /* managed via devres */ bool releasing; /* during card free process */ int sync_irq; /* assigned irq, used for PCM sync */ wait_queue_head_t remove_sleep; size_t total_pcm_alloc_bytes; /* total amount of allocated buffers */ struct mutex memory_mutex; /* protection for the above */ #ifdef CONFIG_SND_DEBUG struct dentry *debugfs_root; /* debugfs root for card */ #endif #ifdef CONFIG_PM unsigned int power_state; /* power state */ atomic_t power_ref; wait_queue_head_t power_sleep; wait_queue_head_t power_ref_sleep; #endif #if IS_ENABLED(CONFIG_SND_MIXER_OSS) struct snd_mixer_oss *mixer_oss; int mixer_oss_change_count; #endif }; #define dev_to_snd_card(p) container_of(p, struct snd_card, card_dev) #ifdef CONFIG_PM static inline unsigned int snd_power_get_state(struct snd_card *card) { return READ_ONCE(card->power_state); } static inline void snd_power_change_state(struct snd_card *card, unsigned int state) { WRITE_ONCE(card->power_state, state); wake_up(&card->power_sleep); } /** * snd_power_ref - Take the reference count for power control * @card: sound card object * * The power_ref reference of the card is used for managing to block * the snd_power_sync_ref() operation. This function increments the reference. * The counterpart snd_power_unref() has to be called appropriately later. 
*/ static inline void snd_power_ref(struct snd_card *card) { atomic_inc(&card->power_ref); } /** * snd_power_unref - Release the reference count for power control * @card: sound card object */ static inline void snd_power_unref(struct snd_card *card) { if (atomic_dec_and_test(&card->power_ref)) wake_up(&card->power_ref_sleep); } /** * snd_power_sync_ref - wait until the card power_ref is freed * @card: sound card object * * This function is used to synchronize with the pending power_ref being * released. */ static inline void snd_power_sync_ref(struct snd_card *card) { wait_event(card->power_ref_sleep, !atomic_read(&card->power_ref)); } /* init.c */ int snd_power_wait(struct snd_card *card); int snd_power_ref_and_wait(struct snd_card *card); #else /* ! CONFIG_PM */ static inline int snd_power_wait(struct snd_card *card) { return 0; } static inline void snd_power_ref(struct snd_card *card) {} static inline void snd_power_unref(struct snd_card *card) {} static inline int snd_power_ref_and_wait(struct snd_card *card) { return 0; } static inline void snd_power_sync_ref(struct snd_card *card) {} #define snd_power_get_state(card) ({ (void)(card); SNDRV_CTL_POWER_D0; }) #define snd_power_change_state(card, state) do { (void)(card); } while (0) #endif /* CONFIG_PM */ struct snd_minor { int type; /* SNDRV_DEVICE_TYPE_XXX */ int card; /* card number */ int device; /* device number */ const struct file_operations *f_ops; /* file operations */ void *private_data; /* private data for f_ops->open */ struct device *dev; /* device for sysfs */ struct snd_card *card_ptr; /* assigned card instance */ }; /* return a device pointer linked to each sound device as a parent */ static inline struct device *snd_card_get_device_link(struct snd_card *card) { return card ? &card->card_dev : NULL; } /* sound.c */ extern int snd_major; extern int snd_ecards_limit; extern struct class *sound_class; #ifdef CONFIG_SND_DEBUG extern struct dentry *sound_debugfs_root; #endif void snd_request_card(int card); void snd_device_initialize(struct device *dev, struct snd_card *card); int snd_register_device(int type, struct snd_card *card, int dev, const struct file_operations *f_ops, void *private_data, struct device *device); int snd_unregister_device(struct device *dev); void *snd_lookup_minor_data(unsigned int minor, int type); #ifdef CONFIG_SND_OSSEMUL int snd_register_oss_device(int type, struct snd_card *card, int dev, const struct file_operations *f_ops, void *private_data); int snd_unregister_oss_device(int type, struct snd_card *card, int dev); void *snd_lookup_oss_minor_data(unsigned int minor, int type); #endif int snd_minor_info_init(void); /* sound_oss.c */ #ifdef CONFIG_SND_OSSEMUL int snd_minor_info_oss_init(void); #else static inline int snd_minor_info_oss_init(void) { return 0; } #endif /* memory.c */ int copy_to_user_fromio(void __user *dst, const volatile void __iomem *src, size_t count); int copy_from_user_toio(volatile void __iomem *dst, const void __user *src, size_t count); /* init.c */ int snd_card_locked(int card); #if IS_ENABLED(CONFIG_SND_MIXER_OSS) #define SND_MIXER_OSS_NOTIFY_REGISTER 0 #define SND_MIXER_OSS_NOTIFY_DISCONNECT 1 #define SND_MIXER_OSS_NOTIFY_FREE 2 extern int (*snd_mixer_oss_notify_callback)(struct snd_card *card, int cmd); #endif int snd_card_new(struct device *parent, int idx, const char *xid, struct module *module, int extra_size, struct snd_card **card_ret); int snd_devm_card_new(struct device *parent, int idx, const char *xid, struct module *module, size_t extra_size, struct 
snd_card **card_ret); int snd_card_disconnect(struct snd_card *card); void snd_card_disconnect_sync(struct snd_card *card); int snd_card_free(struct snd_card *card); int snd_card_free_when_closed(struct snd_card *card); int snd_card_free_on_error(struct device *dev, int ret); void snd_card_set_id(struct snd_card *card, const char *id); int snd_card_register(struct snd_card *card); int snd_card_info_init(void); int snd_card_add_dev_attr(struct snd_card *card, const struct attribute_group *group); int snd_component_add(struct snd_card *card, const char *component); int snd_card_file_add(struct snd_card *card, struct file *file); int snd_card_file_remove(struct snd_card *card, struct file *file); struct snd_card *snd_card_ref(int card); /** * snd_card_unref - Unreference the card object * @card: the card object to unreference * * Call this function for the card object that was obtained via snd_card_ref() * or snd_lookup_minor_data(). */ static inline void snd_card_unref(struct snd_card *card) { put_device(&card->card_dev); } #define snd_card_set_dev(card, devptr) ((card)->dev = (devptr)) /* device.c */ int snd_device_new(struct snd_card *card, enum snd_device_type type, void *device_data, const struct snd_device_ops *ops); int snd_device_register(struct snd_card *card, void *device_data); int snd_device_register_all(struct snd_card *card); void snd_device_disconnect(struct snd_card *card, void *device_data); void snd_device_disconnect_all(struct snd_card *card); void snd_device_free(struct snd_card *card, void *device_data); void snd_device_free_all(struct snd_card *card); int snd_device_get_state(struct snd_card *card, void *device_data); /* isadma.c */ #ifdef CONFIG_ISA_DMA_API #define DMA_MODE_NO_ENABLE 0x0100 void snd_dma_program(unsigned long dma, unsigned long addr, unsigned int size, unsigned short mode); void snd_dma_disable(unsigned long dma); unsigned int snd_dma_pointer(unsigned long dma, unsigned int size); int snd_devm_request_dma(struct device *dev, int dma, const char *name); #endif /* misc.c */ struct resource; void release_and_free_resource(struct resource *res); /* --- */ /* sound printk debug levels */ enum { SND_PR_ALWAYS, SND_PR_DEBUG, SND_PR_VERBOSE, }; #if defined(CONFIG_SND_DEBUG) || defined(CONFIG_SND_VERBOSE_PRINTK) __printf(4, 5) void __snd_printk(unsigned int level, const char *file, int line, const char *format, ...); #else #define __snd_printk(level, file, line, format, ...) \ printk(format, ##__VA_ARGS__) #endif /** * snd_printk - printk wrapper * @fmt: format string * * Works like printk() but prints the file and the line of the caller * when configured with CONFIG_SND_VERBOSE_PRINTK. */ #define snd_printk(fmt, ...) \ __snd_printk(0, __FILE__, __LINE__, fmt, ##__VA_ARGS__) #ifdef CONFIG_SND_DEBUG /** * snd_printd - debug printk * @fmt: format string * * Works like snd_printk() for debugging purposes. * Ignored when CONFIG_SND_DEBUG is not set. */ #define snd_printd(fmt, ...) \ __snd_printk(1, __FILE__, __LINE__, fmt, ##__VA_ARGS__) #define _snd_printd(level, fmt, ...) \ __snd_printk(level, __FILE__, __LINE__, fmt, ##__VA_ARGS__) /** * snd_BUG - give a BUG warning message and stack trace * * Calls WARN() if CONFIG_SND_DEBUG is set. * Ignored when CONFIG_SND_DEBUG is not set. */ #define snd_BUG() WARN(1, "BUG?\n") /** * snd_printd_ratelimit - Suppress high rates of output when * CONFIG_SND_DEBUG is enabled. 
*/ #define snd_printd_ratelimit() printk_ratelimit() /** * snd_BUG_ON - debugging check macro * @cond: condition to evaluate * * Has the same behavior as WARN_ON when CONFIG_SND_DEBUG is set, * otherwise just evaluates the conditional and returns the value. */ #define snd_BUG_ON(cond) WARN_ON((cond)) #else /* !CONFIG_SND_DEBUG */ __printf(1, 2) static inline void snd_printd(const char *format, ...) {} __printf(2, 3) static inline void _snd_printd(int level, const char *format, ...) {} #define snd_BUG() do { } while (0) #define snd_BUG_ON(condition) ({ \ int __ret_warn_on = !!(condition); \ unlikely(__ret_warn_on); \ }) static inline bool snd_printd_ratelimit(void) { return false; } #endif /* CONFIG_SND_DEBUG */ #ifdef CONFIG_SND_DEBUG_VERBOSE /** * snd_printdd - debug printk * @format: format string * * Works like snd_printk() for debugging purposes. * Ignored when CONFIG_SND_DEBUG_VERBOSE is not set. */ #define snd_printdd(format, ...) \ __snd_printk(2, __FILE__, __LINE__, format, ##__VA_ARGS__) #else __printf(1, 2) static inline void snd_printdd(const char *format, ...) {} #endif #define SNDRV_OSS_VERSION ((3<<16)|(8<<8)|(1<<4)|(0)) /* 3.8.1a */ /* for easier backward-porting */ #if IS_ENABLED(CONFIG_GAMEPORT) #define gameport_set_dev_parent(gp,xdev) ((gp)->dev.parent = (xdev)) #define gameport_set_port_data(gp,r) ((gp)->port_data = (r)) #define gameport_get_port_data(gp) (gp)->port_data #endif /* PCI quirk list helper */ struct snd_pci_quirk { unsigned short subvendor; /* PCI subvendor ID */ unsigned short subdevice; /* PCI subdevice ID */ unsigned short subdevice_mask; /* bitmask to match */ int value; /* value */ #ifdef CONFIG_SND_DEBUG_VERBOSE const char *name; /* name of the device (optional) */ #endif }; #define _SND_PCI_QUIRK_ID_MASK(vend, mask, dev) \ .subvendor = (vend), .subdevice = (dev), .subdevice_mask = (mask) #define _SND_PCI_QUIRK_ID(vend, dev) \ _SND_PCI_QUIRK_ID_MASK(vend, 0xffff, dev) #define SND_PCI_QUIRK_ID(vend,dev) {_SND_PCI_QUIRK_ID(vend, dev)} #ifdef CONFIG_SND_DEBUG_VERBOSE #define SND_PCI_QUIRK(vend,dev,xname,val) \ {_SND_PCI_QUIRK_ID(vend, dev), .value = (val), .name = (xname)} #define SND_PCI_QUIRK_VENDOR(vend, xname, val) \ {_SND_PCI_QUIRK_ID_MASK(vend, 0, 0), .value = (val), .name = (xname)} #define SND_PCI_QUIRK_MASK(vend, mask, dev, xname, val) \ {_SND_PCI_QUIRK_ID_MASK(vend, mask, dev), \ .value = (val), .name = (xname)} #define snd_pci_quirk_name(q) ((q)->name) #else #define SND_PCI_QUIRK(vend,dev,xname,val) \ {_SND_PCI_QUIRK_ID(vend, dev), .value = (val)} #define SND_PCI_QUIRK_MASK(vend, mask, dev, xname, val) \ {_SND_PCI_QUIRK_ID_MASK(vend, mask, dev), .value = (val)} #define SND_PCI_QUIRK_VENDOR(vend, xname, val) \ {_SND_PCI_QUIRK_ID_MASK(vend, 0, 0), .value = (val)} #define snd_pci_quirk_name(q) "" #endif #ifdef CONFIG_PCI const struct snd_pci_quirk * snd_pci_quirk_lookup(struct pci_dev *pci, const struct snd_pci_quirk *list); const struct snd_pci_quirk * snd_pci_quirk_lookup_id(u16 vendor, u16 device, const struct snd_pci_quirk *list); #else static inline const struct snd_pci_quirk * snd_pci_quirk_lookup(struct pci_dev *pci, const struct snd_pci_quirk *list) { return NULL; } static inline const struct snd_pci_quirk * snd_pci_quirk_lookup_id(u16 vendor, u16 device, const struct snd_pci_quirk *list) { return NULL; } #endif /* async signal helpers */ struct snd_fasync; int snd_fasync_helper(int fd, struct file *file, int on, struct snd_fasync **fasyncp); void snd_kill_fasync(struct snd_fasync *fasync, int signal, int poll); void snd_fasync_free(struct 
snd_fasync *fasync); #endif /* __SOUND_CORE_H */ |
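/*
 * Editor's note: an illustrative sketch, not part of <sound/core.h>. It shows
 * the pairing documented above for the power-management reference count: take
 * the reference with snd_power_ref() so snd_power_sync_ref() is blocked while
 * the hardware is touched, check the power state, and always drop the
 * reference with snd_power_unref(). The function name and the -EAGAIN policy
 * are hypothetical.
 */
static inline int example_power_guarded_io(struct snd_card *card)
{
	int err = 0;

	snd_power_ref(card);		/* blocks snd_power_sync_ref() */
	if (snd_power_get_state(card) == SNDRV_CTL_POWER_D0) {
		/* ... safe to access the hardware here ... */
	} else {
		err = -EAGAIN;		/* card suspended; caller may retry */
	}
	snd_power_unref(card);		/* wakes a waiter in snd_power_sync_ref() */
	return err;
}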
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) International Business Machines Corp., 2000-2002 * Portions Copyright (C) Christoph Hellwig, 2001-2002 */ #include <linux/mm.h> #include <linux/fs.h> #include <linux/posix_acl.h> #include <linux/quotaops.h> #include "jfs_incore.h" #include "jfs_inode.h" #include "jfs_dmap.h" #include "jfs_txnmgr.h" #include "jfs_xattr.h" #include "jfs_acl.h" #include "jfs_debug.h" int jfs_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct inode *inode = file->f_mapping->host; int rc = 0; rc = file_write_and_wait_range(file, start, end); if (rc) return rc; inode_lock(inode); if (!(inode->i_state & I_DIRTY_ALL) || (datasync && !(inode->i_state & I_DIRTY_DATASYNC))) { /* Make sure committed changes hit the disk */ jfs_flush_journal(JFS_SBI(inode->i_sb)->log, 1); inode_unlock(inode); return rc; } rc |= jfs_commit_inode(inode, 1); inode_unlock(inode); return rc ? -EIO : 0; } static int jfs_open(struct inode *inode, struct file *file) { int rc; if ((rc = dquot_file_open(inode, file))) return rc; /* * We attempt to allow only one "active" file open per aggregate * group. Otherwise, appending to files in parallel can cause * fragmentation within the files. * * If the file is empty, it was probably just created and going * to be written to. If it has a size, we'll hold off until the * file is actually grown.
*/ if (S_ISREG(inode->i_mode) && file->f_mode & FMODE_WRITE && (inode->i_size == 0)) { struct jfs_inode_info *ji = JFS_IP(inode); spin_lock_irq(&ji->ag_lock); if (ji->active_ag == -1) { struct jfs_sb_info *jfs_sb = JFS_SBI(inode->i_sb); ji->active_ag = BLKTOAG(addressPXD(&ji->ixpxd), jfs_sb); atomic_inc(&jfs_sb->bmap->db_active[ji->active_ag]); } spin_unlock_irq(&ji->ag_lock); } return 0; } static int jfs_release(struct inode *inode, struct file *file) { struct jfs_inode_info *ji = JFS_IP(inode); spin_lock_irq(&ji->ag_lock); if (ji->active_ag != -1) { struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap; atomic_dec(&bmap->db_active[ji->active_ag]); ji->active_ag = -1; } spin_unlock_irq(&ji->ag_lock); return 0; } int jfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry, struct iattr *iattr) { struct inode *inode = d_inode(dentry); int rc; rc = setattr_prepare(&init_user_ns, dentry, iattr); if (rc) return rc; if (is_quota_modification(inode, iattr)) { rc = dquot_initialize(inode); if (rc) return rc; } if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) || (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) { rc = dquot_transfer(inode, iattr); if (rc) return rc; } if ((iattr->ia_valid & ATTR_SIZE) && iattr->ia_size != i_size_read(inode)) { inode_dio_wait(inode); rc = inode_newsize_ok(inode, iattr->ia_size); if (rc) return rc; truncate_setsize(inode, iattr->ia_size); jfs_truncate(inode); } setattr_copy(&init_user_ns, inode, iattr); mark_inode_dirty(inode); if (iattr->ia_valid & ATTR_MODE) rc = posix_acl_chmod(&init_user_ns, inode, inode->i_mode); return rc; } const struct inode_operations jfs_file_inode_operations = { .listxattr = jfs_listxattr, .setattr = jfs_setattr, .fileattr_get = jfs_fileattr_get, .fileattr_set = jfs_fileattr_set, #ifdef CONFIG_JFS_POSIX_ACL .get_acl = jfs_get_acl, .set_acl = jfs_set_acl, #endif }; const struct file_operations jfs_file_operations = { .open = jfs_open, .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, .write_iter = generic_file_write_iter, .mmap = generic_file_mmap, .splice_read = generic_file_splice_read, .splice_write = iter_file_splice_write, .fsync = jfs_fsync, .release = jfs_release, .unlocked_ioctl = jfs_ioctl, .compat_ioctl = compat_ptr_ioctl, }; |
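/*
 * Editor's note: a self-contained userspace model, not JFS code. It restates
 * the accounting that jfs_open() and jfs_release() above perform on
 * bmap->db_active[]: the first write-open of an empty regular file claims the
 * inode's allocation group and bumps that group's active counter, and release
 * drops the claim again, presumably so the block allocator can keep
 * concurrently growing files apart. All names below (ag_lock, db_active,
 * struct file_model, ...) are hypothetical.
 */
#include <pthread.h>

#define MODEL_AG_COUNT 16

static pthread_mutex_t ag_lock = PTHREAD_MUTEX_INITIALIZER;
static int db_active[MODEL_AG_COUNT];	/* active opens per allocation group */

struct file_model {
	int home_ag;		/* group holding the inode's extents */
	int active_ag;		/* claimed group, or -1 if none */
};

static void model_open_for_append(struct file_model *f)
{
	pthread_mutex_lock(&ag_lock);
	if (f->active_ag == -1) {
		f->active_ag = f->home_ag;
		db_active[f->active_ag]++;
	}
	pthread_mutex_unlock(&ag_lock);
}

static void model_release(struct file_model *f)
{
	pthread_mutex_lock(&ag_lock);
	if (f->active_ag != -1) {
		db_active[f->active_ag]--;
		f->active_ag = -1;
	}
	pthread_mutex_unlock(&ag_lock);
}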
// SPDX-License-Identifier: GPL-2.0-or-later /* * GRE over IPv4 demultiplexer driver * * Authors: Dmitry Kozlov (xeb@mail.ru) */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/if.h> #include <linux/icmp.h> #include <linux/kernel.h> #include <linux/kmod.h> #include <linux/skbuff.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/netdevice.h> #include <linux/if_tunnel.h> #include <linux/spinlock.h> #include <net/protocol.h> #include <net/gre.h> #include <net/erspan.h> #include <net/icmp.h> #include <net/route.h> #include <net/xfrm.h> static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly; int gre_add_protocol(const struct gre_protocol *proto, u8 version) { if (version >= GREPROTO_MAX) return -EINVAL; return (cmpxchg((const struct gre_protocol **)&gre_proto[version], NULL, proto) == NULL) ? 0 : -EBUSY; } EXPORT_SYMBOL_GPL(gre_add_protocol); int gre_del_protocol(const struct gre_protocol *proto, u8 version) { int ret; if (version >= GREPROTO_MAX) return -EINVAL; ret = (cmpxchg((const struct gre_protocol **)&gre_proto[version], proto, NULL) == proto) ? 0 : -EBUSY; if (ret) return ret; synchronize_rcu(); return 0; } EXPORT_SYMBOL_GPL(gre_del_protocol); /* Fills in tpi and returns header length to be pulled. * Note that caller must use pskb_may_pull() before pulling GRE header. */ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, bool *csum_err, __be16 proto, int nhs) { const struct gre_base_hdr *greh; __be32 *options; int hdr_len; if (unlikely(!pskb_may_pull(skb, nhs + sizeof(struct gre_base_hdr)))) return -EINVAL; greh = (struct gre_base_hdr *)(skb->data + nhs); if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING))) return -EINVAL; tpi->flags = gre_flags_to_tnl_flags(greh->flags); hdr_len = gre_calc_hlen(tpi->flags); if (!pskb_may_pull(skb, nhs + hdr_len)) return -EINVAL; greh = (struct gre_base_hdr *)(skb->data + nhs); tpi->proto = greh->protocol; options = (__be32 *)(greh + 1); if (greh->flags & GRE_CSUM) { if (!skb_checksum_simple_validate(skb)) { skb_checksum_try_convert(skb, IPPROTO_GRE, null_compute_pseudo); } else if (csum_err) { *csum_err = true; return -EINVAL; } options++; } if (greh->flags & GRE_KEY) { tpi->key = *options; options++; } else { tpi->key = 0; } if (unlikely(greh->flags & GRE_SEQ)) { tpi->seq = *options; options++; } else { tpi->seq = 0; } /* WCCP version 1 and 2 protocol decoding.
* - Change protocol to IPv4/IPv6 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header */ if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) { u8 _val, *val; val = skb_header_pointer(skb, nhs + hdr_len, sizeof(_val), &_val); if (!val) return -EINVAL; tpi->proto = proto; if ((*val & 0xF0) != 0x40) hdr_len += 4; } tpi->hdr_len = hdr_len; /* ERSPAN ver 1 and 2 protocol sets GRE key field * to 0 and sets the configured key in the * inner erspan header field */ if ((greh->protocol == htons(ETH_P_ERSPAN) && hdr_len != 4) || greh->protocol == htons(ETH_P_ERSPAN2)) { struct erspan_base_hdr *ershdr; if (!pskb_may_pull(skb, nhs + hdr_len + sizeof(*ershdr))) return -EINVAL; ershdr = (struct erspan_base_hdr *)(skb->data + nhs + hdr_len); tpi->key = cpu_to_be32(get_session_id(ershdr)); } return hdr_len; } EXPORT_SYMBOL(gre_parse_header); static int gre_rcv(struct sk_buff *skb) { const struct gre_protocol *proto; u8 ver; int ret; if (!pskb_may_pull(skb, 12)) goto drop; ver = skb->data[1]&0x7f; if (ver >= GREPROTO_MAX) goto drop; rcu_read_lock(); proto = rcu_dereference(gre_proto[ver]); if (!proto || !proto->handler) goto drop_unlock; ret = proto->handler(skb); rcu_read_unlock(); return ret; drop_unlock: rcu_read_unlock(); drop: kfree_skb(skb); return NET_RX_DROP; } static int gre_err(struct sk_buff *skb, u32 info) { const struct gre_protocol *proto; const struct iphdr *iph = (const struct iphdr *)skb->data; u8 ver = skb->data[(iph->ihl<<2) + 1]&0x7f; int err = 0; if (ver >= GREPROTO_MAX) return -EINVAL; rcu_read_lock(); proto = rcu_dereference(gre_proto[ver]); if (proto && proto->err_handler) proto->err_handler(skb, info); else err = -EPROTONOSUPPORT; rcu_read_unlock(); return err; } static const struct net_protocol net_gre_protocol = { .handler = gre_rcv, .err_handler = gre_err, }; static int __init gre_init(void) { pr_info("GRE over IPv4 demultiplexor driver\n"); if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) { pr_err("can't add protocol\n"); return -EAGAIN; } return 0; } static void __exit gre_exit(void) { inet_del_protocol(&net_gre_protocol, IPPROTO_GRE); } module_init(gre_init); module_exit(gre_exit); MODULE_DESCRIPTION("GRE over IPv4 demultiplexer driver"); MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)"); MODULE_LICENSE("GPL"); |
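/*
 * Illustrative sketch (not taken from gre_demux.c): gre_parse_header() above
 * pulls a header whose length gre_calc_hlen() derives from the flag bits in
 * the 4-byte GRE base header. The standalone helper below restates that
 * arithmetic for the standard RFC 2784/2890 layout; the SKETCH_* names and
 * constants are assumptions of this example only, not kernel definitions.
 */
#include <stddef.h>
#include <stdint.h>

#define SKETCH_GRE_CSUM	0x8000	/* C bit: checksum (+ reserved1) present */
#define SKETCH_GRE_KEY	0x2000	/* K bit: key present */
#define SKETCH_GRE_SEQ	0x1000	/* S bit: sequence number present */

static size_t sketch_gre_hdr_len(uint16_t flags)
{
	size_t len = 4;			/* base header: flags + protocol */

	if (flags & SKETCH_GRE_CSUM)
		len += 4;		/* checksum + reserved1 */
	if (flags & SKETCH_GRE_KEY)
		len += 4;		/* key */
	if (flags & SKETCH_GRE_SEQ)
		len += 4;		/* sequence number */
	return len;
}
/* e.g. flags with only the K bit set give an 8-byte header, matching the
 * "(__be32 *)(greh + 1)" options walk in gre_parse_header(). */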
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) /* gw.c - CAN frame Gateway/Router/Bridge with netlink interface * * Copyright (c) 2019 Volkswagen Group Electronic Research * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of Volkswagen nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * Alternatively, provided that this notice is retained in full, this * software may be distributed under the terms of the GNU General * Public License ("GPL") version 2, in which case the provisions of the * GPL apply INSTEAD OF those given above. * * The provided data structures and external interfaces from this code * are not restricted to be used by modules with a GPL compatible license.
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/rcupdate.h> #include <linux/rculist.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <linux/can.h> #include <linux/can/core.h> #include <linux/can/skb.h> #include <linux/can/gw.h> #include <net/rtnetlink.h> #include <net/net_namespace.h> #include <net/sock.h> #define CAN_GW_NAME "can-gw" MODULE_DESCRIPTION("PF_CAN netlink gateway"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>"); MODULE_ALIAS(CAN_GW_NAME); #define CGW_MIN_HOPS 1 #define CGW_MAX_HOPS 6 #define CGW_DEFAULT_HOPS 1 static unsigned int max_hops __read_mostly = CGW_DEFAULT_HOPS; module_param(max_hops, uint, 0444); MODULE_PARM_DESC(max_hops, "maximum " CAN_GW_NAME " routing hops for CAN frames " "(valid values: " __stringify(CGW_MIN_HOPS) "-" __stringify(CGW_MAX_HOPS) " hops, " "default: " __stringify(CGW_DEFAULT_HOPS) ")"); static struct notifier_block notifier; static struct kmem_cache *cgw_cache __read_mostly; /* structure that contains the (on-the-fly) CAN frame modifications */ struct cf_mod { struct { struct canfd_frame and; struct canfd_frame or; struct canfd_frame xor; struct canfd_frame set; } modframe; struct { u8 and; u8 or; u8 xor; u8 set; } modtype; void (*modfunc[MAX_MODFUNCTIONS])(struct canfd_frame *cf, struct cf_mod *mod); /* CAN frame checksum calculation after CAN frame modifications */ struct { struct cgw_csum_xor xor; struct cgw_csum_crc8 crc8; } csum; struct { void (*xor)(struct canfd_frame *cf, struct cgw_csum_xor *xor); void (*crc8)(struct canfd_frame *cf, struct cgw_csum_crc8 *crc8); } csumfunc; u32 uid; }; /* So far we just support CAN -> CAN routing and frame modifications. * * The internal can_can_gw structure contains data and attributes for * a CAN -> CAN gateway job. 
*/ struct can_can_gw { struct can_filter filter; int src_idx; int dst_idx; }; /* list entry for CAN gateways jobs */ struct cgw_job { struct hlist_node list; struct rcu_head rcu; u32 handled_frames; u32 dropped_frames; u32 deleted_frames; struct cf_mod mod; union { /* CAN frame data source */ struct net_device *dev; } src; union { /* CAN frame data destination */ struct net_device *dev; } dst; union { struct can_can_gw ccgw; /* tbc */ }; u8 gwtype; u8 limit_hops; u16 flags; }; /* modification functions that are invoked in the hot path in can_can_gw_rcv */ #define MODFUNC(func, op) static void func(struct canfd_frame *cf, \ struct cf_mod *mod) { op ; } MODFUNC(mod_and_id, cf->can_id &= mod->modframe.and.can_id) MODFUNC(mod_and_len, cf->len &= mod->modframe.and.len) MODFUNC(mod_and_flags, cf->flags &= mod->modframe.and.flags) MODFUNC(mod_and_data, *(u64 *)cf->data &= *(u64 *)mod->modframe.and.data) MODFUNC(mod_or_id, cf->can_id |= mod->modframe.or.can_id) MODFUNC(mod_or_len, cf->len |= mod->modframe.or.len) MODFUNC(mod_or_flags, cf->flags |= mod->modframe.or.flags) MODFUNC(mod_or_data, *(u64 *)cf->data |= *(u64 *)mod->modframe.or.data) MODFUNC(mod_xor_id, cf->can_id ^= mod->modframe.xor.can_id) MODFUNC(mod_xor_len, cf->len ^= mod->modframe.xor.len) MODFUNC(mod_xor_flags, cf->flags ^= mod->modframe.xor.flags) MODFUNC(mod_xor_data, *(u64 *)cf->data ^= *(u64 *)mod->modframe.xor.data) MODFUNC(mod_set_id, cf->can_id = mod->modframe.set.can_id) MODFUNC(mod_set_len, cf->len = mod->modframe.set.len) MODFUNC(mod_set_flags, cf->flags = mod->modframe.set.flags) MODFUNC(mod_set_data, *(u64 *)cf->data = *(u64 *)mod->modframe.set.data) static void mod_and_fddata(struct canfd_frame *cf, struct cf_mod *mod) { int i; for (i = 0; i < CANFD_MAX_DLEN; i += 8) *(u64 *)(cf->data + i) &= *(u64 *)(mod->modframe.and.data + i); } static void mod_or_fddata(struct canfd_frame *cf, struct cf_mod *mod) { int i; for (i = 0; i < CANFD_MAX_DLEN; i += 8) *(u64 *)(cf->data + i) |= *(u64 *)(mod->modframe.or.data + i); } static void mod_xor_fddata(struct canfd_frame *cf, struct cf_mod *mod) { int i; for (i = 0; i < CANFD_MAX_DLEN; i += 8) *(u64 *)(cf->data + i) ^= *(u64 *)(mod->modframe.xor.data + i); } static void mod_set_fddata(struct canfd_frame *cf, struct cf_mod *mod) { memcpy(cf->data, mod->modframe.set.data, CANFD_MAX_DLEN); } /* retrieve valid CC DLC value and store it into 'len' */ static void mod_retrieve_ccdlc(struct canfd_frame *cf) { struct can_frame *ccf = (struct can_frame *)cf; /* len8_dlc is only valid if len == CAN_MAX_DLEN */ if (ccf->len != CAN_MAX_DLEN) return; /* do we have a valid len8_dlc value from 9 .. 15 ? */ if (ccf->len8_dlc > CAN_MAX_DLEN && ccf->len8_dlc <= CAN_MAX_RAW_DLC) ccf->len = ccf->len8_dlc; } /* convert valid CC DLC value in 'len' into struct can_frame elements */ static void mod_store_ccdlc(struct canfd_frame *cf) { struct can_frame *ccf = (struct can_frame *)cf; /* clear potential leftovers */ ccf->len8_dlc = 0; /* plain data length 0 .. 8 - that was easy */ if (ccf->len <= CAN_MAX_DLEN) return; /* potentially broken values are caught in can_can_gw_rcv() */ if (ccf->len > CAN_MAX_RAW_DLC) return; /* we have a valid dlc value from 9 .. 
15 in ccf->len */ ccf->len8_dlc = ccf->len; ccf->len = CAN_MAX_DLEN; } static void mod_and_ccdlc(struct canfd_frame *cf, struct cf_mod *mod) { mod_retrieve_ccdlc(cf); mod_and_len(cf, mod); mod_store_ccdlc(cf); } static void mod_or_ccdlc(struct canfd_frame *cf, struct cf_mod *mod) { mod_retrieve_ccdlc(cf); mod_or_len(cf, mod); mod_store_ccdlc(cf); } static void mod_xor_ccdlc(struct canfd_frame *cf, struct cf_mod *mod) { mod_retrieve_ccdlc(cf); mod_xor_len(cf, mod); mod_store_ccdlc(cf); } static void mod_set_ccdlc(struct canfd_frame *cf, struct cf_mod *mod) { mod_set_len(cf, mod); mod_store_ccdlc(cf); } static void canframecpy(struct canfd_frame *dst, struct can_frame *src) { /* Copy the struct members separately to ensure that no uninitialized * data are copied in the 3 bytes hole of the struct. This is needed * to make easy compares of the data in the struct cf_mod. */ dst->can_id = src->can_id; dst->len = src->len; *(u64 *)dst->data = *(u64 *)src->data; } static void canfdframecpy(struct canfd_frame *dst, struct canfd_frame *src) { /* Copy the struct members separately to ensure that no uninitialized * data are copied in the 2 bytes hole of the struct. This is needed * to make easy compares of the data in the struct cf_mod. */ dst->can_id = src->can_id; dst->flags = src->flags; dst->len = src->len; memcpy(dst->data, src->data, CANFD_MAX_DLEN); } static int cgw_chk_csum_parms(s8 fr, s8 to, s8 re, struct rtcanmsg *r) { s8 dlen = CAN_MAX_DLEN; if (r->flags & CGW_FLAGS_CAN_FD) dlen = CANFD_MAX_DLEN; /* absolute dlc values 0 .. 7 => 0 .. 7, e.g. data [0] * relative to received dlc -1 .. -8 : * e.g. for received dlc = 8 * -1 => index = 7 (data[7]) * -3 => index = 5 (data[5]) * -8 => index = 0 (data[0]) */ if (fr >= -dlen && fr < dlen && to >= -dlen && to < dlen && re >= -dlen && re < dlen) return 0; else return -EINVAL; } static inline int calc_idx(int idx, int rx_len) { if (idx < 0) return rx_len + idx; else return idx; } static void cgw_csum_xor_rel(struct canfd_frame *cf, struct cgw_csum_xor *xor) { int from = calc_idx(xor->from_idx, cf->len); int to = calc_idx(xor->to_idx, cf->len); int res = calc_idx(xor->result_idx, cf->len); u8 val = xor->init_xor_val; int i; if (from < 0 || to < 0 || res < 0) return; if (from <= to) { for (i = from; i <= to; i++) val ^= cf->data[i]; } else { for (i = from; i >= to; i--) val ^= cf->data[i]; } cf->data[res] = val; } static void cgw_csum_xor_pos(struct canfd_frame *cf, struct cgw_csum_xor *xor) { u8 val = xor->init_xor_val; int i; for (i = xor->from_idx; i <= xor->to_idx; i++) val ^= cf->data[i]; cf->data[xor->result_idx] = val; } static void cgw_csum_xor_neg(struct canfd_frame *cf, struct cgw_csum_xor *xor) { u8 val = xor->init_xor_val; int i; for (i = xor->from_idx; i >= xor->to_idx; i--) val ^= cf->data[i]; cf->data[xor->result_idx] = val; } static void cgw_csum_crc8_rel(struct canfd_frame *cf, struct cgw_csum_crc8 *crc8) { int from = calc_idx(crc8->from_idx, cf->len); int to = calc_idx(crc8->to_idx, cf->len); int res = calc_idx(crc8->result_idx, cf->len); u8 crc = crc8->init_crc_val; int i; if (from < 0 || to < 0 || res < 0) return; if (from <= to) { for (i = crc8->from_idx; i <= crc8->to_idx; i++) crc = crc8->crctab[crc ^ cf->data[i]]; } else { for (i = crc8->from_idx; i >= crc8->to_idx; i--) crc = crc8->crctab[crc ^ cf->data[i]]; } switch (crc8->profile) { case CGW_CRC8PRF_1U8: crc = crc8->crctab[crc ^ crc8->profile_data[0]]; break; case CGW_CRC8PRF_16U8: crc = crc8->crctab[crc ^ crc8->profile_data[cf->data[1] & 0xF]]; break; case 
CGW_CRC8PRF_SFFID_XOR: crc = crc8->crctab[crc ^ (cf->can_id & 0xFF) ^ (cf->can_id >> 8 & 0xFF)]; break; } cf->data[crc8->result_idx] = crc ^ crc8->final_xor_val; } static void cgw_csum_crc8_pos(struct canfd_frame *cf, struct cgw_csum_crc8 *crc8) { u8 crc = crc8->init_crc_val; int i; for (i = crc8->from_idx; i <= crc8->to_idx; i++) crc = crc8->crctab[crc ^ cf->data[i]]; switch (crc8->profile) { case CGW_CRC8PRF_1U8: crc = crc8->crctab[crc ^ crc8->profile_data[0]]; break; case CGW_CRC8PRF_16U8: crc = crc8->crctab[crc ^ crc8->profile_data[cf->data[1] & 0xF]]; break; case CGW_CRC8PRF_SFFID_XOR: crc = crc8->crctab[crc ^ (cf->can_id & 0xFF) ^ (cf->can_id >> 8 & 0xFF)]; break; } cf->data[crc8->result_idx] = crc ^ crc8->final_xor_val; } static void cgw_csum_crc8_neg(struct canfd_frame *cf, struct cgw_csum_crc8 *crc8) { u8 crc = crc8->init_crc_val; int i; for (i = crc8->from_idx; i >= crc8->to_idx; i--) crc = crc8->crctab[crc ^ cf->data[i]]; switch (crc8->profile) { case CGW_CRC8PRF_1U8: crc = crc8->crctab[crc ^ crc8->profile_data[0]]; break; case CGW_CRC8PRF_16U8: crc = crc8->crctab[crc ^ crc8->profile_data[cf->data[1] & 0xF]]; break; case CGW_CRC8PRF_SFFID_XOR: crc = crc8->crctab[crc ^ (cf->can_id & 0xFF) ^ (cf->can_id >> 8 & 0xFF)]; break; } cf->data[crc8->result_idx] = crc ^ crc8->final_xor_val; } /* the receive & process & send function */ static void can_can_gw_rcv(struct sk_buff *skb, void *data) { struct cgw_job *gwj = (struct cgw_job *)data; struct canfd_frame *cf; struct sk_buff *nskb; int modidx = 0; /* process strictly Classic CAN or CAN FD frames */ if (gwj->flags & CGW_FLAGS_CAN_FD) { if (skb->len != CANFD_MTU) return; } else { if (skb->len != CAN_MTU) return; } /* Do not handle CAN frames routed more than 'max_hops' times. * In general we should never catch this delimiter which is intended * to cover a misconfiguration protection (e.g. circular CAN routes). * * The Controller Area Network controllers only accept CAN frames with * correct CRCs - which are not visible in the controller registers. * According to skbuff.h documentation the csum_start element for IP * checksums is undefined/unused when ip_summed == CHECKSUM_UNNECESSARY. * Only CAN skbs can be processed here which already have this property. */ #define cgw_hops(skb) ((skb)->csum_start) BUG_ON(skb->ip_summed != CHECKSUM_UNNECESSARY); if (cgw_hops(skb) >= max_hops) { /* indicate deleted frames due to misconfiguration */ gwj->deleted_frames++; return; } if (!(gwj->dst.dev->flags & IFF_UP)) { gwj->dropped_frames++; return; } /* is sending the skb back to the incoming interface not allowed? */ if (!(gwj->flags & CGW_FLAGS_CAN_IIF_TX_OK) && can_skb_prv(skb)->ifindex == gwj->dst.dev->ifindex) return; /* clone the given skb, which has not been done in can_rcv() * * When there is at least one modification function activated, * we need to copy the skb as we want to modify skb->data. 
*/ if (gwj->mod.modfunc[0]) nskb = skb_copy(skb, GFP_ATOMIC); else nskb = skb_clone(skb, GFP_ATOMIC); if (!nskb) { gwj->dropped_frames++; return; } /* put the incremented hop counter in the cloned skb */ cgw_hops(nskb) = cgw_hops(skb) + 1; /* first processing of this CAN frame -> adjust to private hop limit */ if (gwj->limit_hops && cgw_hops(nskb) == 1) cgw_hops(nskb) = max_hops - gwj->limit_hops + 1; nskb->dev = gwj->dst.dev; /* pointer to modifiable CAN frame */ cf = (struct canfd_frame *)nskb->data; /* perform preprocessed modification functions if there are any */ while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx]) (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod); /* Has the CAN frame been modified? */ if (modidx) { /* get available space for the processed CAN frame type */ int max_len = nskb->len - offsetof(struct canfd_frame, data); /* dlc may have changed, make sure it fits to the CAN frame */ if (cf->len > max_len) { /* delete frame due to misconfiguration */ gwj->deleted_frames++; kfree_skb(nskb); return; } /* check for checksum updates */ if (gwj->mod.csumfunc.crc8) (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8); if (gwj->mod.csumfunc.xor) (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor); } /* clear the skb timestamp if not configured the other way */ if (!(gwj->flags & CGW_FLAGS_CAN_SRC_TSTAMP)) nskb->tstamp = 0; /* send to netdevice */ if (can_send(nskb, gwj->flags & CGW_FLAGS_CAN_ECHO)) gwj->dropped_frames++; else gwj->handled_frames++; } static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj) { return can_rx_register(net, gwj->src.dev, gwj->ccgw.filter.can_id, gwj->ccgw.filter.can_mask, can_can_gw_rcv, gwj, "gw", NULL); } static inline void cgw_unregister_filter(struct net *net, struct cgw_job *gwj) { can_rx_unregister(net, gwj->src.dev, gwj->ccgw.filter.can_id, gwj->ccgw.filter.can_mask, can_can_gw_rcv, gwj); } static int cgw_notifier(struct notifier_block *nb, unsigned long msg, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); if (dev->type != ARPHRD_CAN) return NOTIFY_DONE; if (msg == NETDEV_UNREGISTER) { struct cgw_job *gwj = NULL; struct hlist_node *nx; ASSERT_RTNL(); hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) { if (gwj->src.dev == dev || gwj->dst.dev == dev) { hlist_del(&gwj->list); cgw_unregister_filter(net, gwj); synchronize_rcu(); kmem_cache_free(cgw_cache, gwj); } } } return NOTIFY_DONE; } static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type, u32 pid, u32 seq, int flags) { struct rtcanmsg *rtcan; struct nlmsghdr *nlh; nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtcan), flags); if (!nlh) return -EMSGSIZE; rtcan = nlmsg_data(nlh); rtcan->can_family = AF_CAN; rtcan->gwtype = gwj->gwtype; rtcan->flags = gwj->flags; /* add statistics if available */ if (gwj->handled_frames) { if (nla_put_u32(skb, CGW_HANDLED, gwj->handled_frames) < 0) goto cancel; } if (gwj->dropped_frames) { if (nla_put_u32(skb, CGW_DROPPED, gwj->dropped_frames) < 0) goto cancel; } if (gwj->deleted_frames) { if (nla_put_u32(skb, CGW_DELETED, gwj->deleted_frames) < 0) goto cancel; } /* check non default settings of attributes */ if (gwj->limit_hops) { if (nla_put_u8(skb, CGW_LIM_HOPS, gwj->limit_hops) < 0) goto cancel; } if (gwj->flags & CGW_FLAGS_CAN_FD) { struct cgw_fdframe_mod mb; if (gwj->mod.modtype.and) { memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf)); mb.modtype = gwj->mod.modtype.and; if (nla_put(skb, CGW_FDMOD_AND, sizeof(mb), &mb) < 0) goto cancel; } if 
(gwj->mod.modtype.or) { memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf)); mb.modtype = gwj->mod.modtype.or; if (nla_put(skb, CGW_FDMOD_OR, sizeof(mb), &mb) < 0) goto cancel; } if (gwj->mod.modtype.xor) { memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf)); mb.modtype = gwj->mod.modtype.xor; if (nla_put(skb, CGW_FDMOD_XOR, sizeof(mb), &mb) < 0) goto cancel; } if (gwj->mod.modtype.set) { memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf)); mb.modtype = gwj->mod.modtype.set; if (nla_put(skb, CGW_FDMOD_SET, sizeof(mb), &mb) < 0) goto cancel; } } else { struct cgw_frame_mod mb; if (gwj->mod.modtype.and) { memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf)); mb.modtype = gwj->mod.modtype.and; if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0) goto cancel; } if (gwj->mod.modtype.or) { memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf)); mb.modtype = gwj->mod.modtype.or; if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0) goto cancel; } if (gwj->mod.modtype.xor) { memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf)); mb.modtype = gwj->mod.modtype.xor; if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0) goto cancel; } if (gwj->mod.modtype.set) { memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf)); mb.modtype = gwj->mod.modtype.set; if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0) goto cancel; } } if (gwj->mod.uid) { if (nla_put_u32(skb, CGW_MOD_UID, gwj->mod.uid) < 0) goto cancel; } if (gwj->mod.csumfunc.crc8) { if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN, &gwj->mod.csum.crc8) < 0) goto cancel; } if (gwj->mod.csumfunc.xor) { if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN, &gwj->mod.csum.xor) < 0) goto cancel; } if (gwj->gwtype == CGW_TYPE_CAN_CAN) { if (gwj->ccgw.filter.can_id || gwj->ccgw.filter.can_mask) { if (nla_put(skb, CGW_FILTER, sizeof(struct can_filter), &gwj->ccgw.filter) < 0) goto cancel; } if (nla_put_u32(skb, CGW_SRC_IF, gwj->ccgw.src_idx) < 0) goto cancel; if (nla_put_u32(skb, CGW_DST_IF, gwj->ccgw.dst_idx) < 0) goto cancel; } nlmsg_end(skb, nlh); return 0; cancel: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } /* Dump information about all CAN gateway jobs, in response to RTM_GETROUTE */ static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); struct cgw_job *gwj = NULL; int idx = 0; int s_idx = cb->args[0]; rcu_read_lock(); hlist_for_each_entry_rcu(gwj, &net->can.cgw_list, list) { if (idx < s_idx) goto cont; if (cgw_put_job(skb, gwj, RTM_NEWROUTE, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI) < 0) break; cont: idx++; } rcu_read_unlock(); cb->args[0] = idx; return skb->len; } static const struct nla_policy cgw_policy[CGW_MAX + 1] = { [CGW_MOD_AND] = { .len = sizeof(struct cgw_frame_mod) }, [CGW_MOD_OR] = { .len = sizeof(struct cgw_frame_mod) }, [CGW_MOD_XOR] = { .len = sizeof(struct cgw_frame_mod) }, [CGW_MOD_SET] = { .len = sizeof(struct cgw_frame_mod) }, [CGW_CS_XOR] = { .len = sizeof(struct cgw_csum_xor) }, [CGW_CS_CRC8] = { .len = sizeof(struct cgw_csum_crc8) }, [CGW_SRC_IF] = { .type = NLA_U32 }, [CGW_DST_IF] = { .type = NLA_U32 }, [CGW_FILTER] = { .len = sizeof(struct can_filter) }, [CGW_LIM_HOPS] = { .type = NLA_U8 }, [CGW_MOD_UID] = { .type = NLA_U32 }, [CGW_FDMOD_AND] = { .len = sizeof(struct cgw_fdframe_mod) }, [CGW_FDMOD_OR] = { .len = sizeof(struct cgw_fdframe_mod) }, [CGW_FDMOD_XOR] = { .len = sizeof(struct cgw_fdframe_mod) }, [CGW_FDMOD_SET] = { .len = sizeof(struct cgw_fdframe_mod) }, }; /* check for common and gwtype specific attributes */ static int cgw_parse_attr(struct nlmsghdr 
*nlh, struct cf_mod *mod, u8 gwtype, void *gwtypeattr, u8 *limhops) { struct nlattr *tb[CGW_MAX + 1]; struct rtcanmsg *r = nlmsg_data(nlh); int modidx = 0; int err = 0; /* initialize modification & checksum data space */ memset(mod, 0, sizeof(*mod)); err = nlmsg_parse_deprecated(nlh, sizeof(struct rtcanmsg), tb, CGW_MAX, cgw_policy, NULL); if (err < 0) return err; if (tb[CGW_LIM_HOPS]) { *limhops = nla_get_u8(tb[CGW_LIM_HOPS]); if (*limhops < 1 || *limhops > max_hops) return -EINVAL; } /* check for AND/OR/XOR/SET modifications */ if (r->flags & CGW_FLAGS_CAN_FD) { struct cgw_fdframe_mod mb; if (tb[CGW_FDMOD_AND]) { nla_memcpy(&mb, tb[CGW_FDMOD_AND], CGW_FDMODATTR_LEN); canfdframecpy(&mod->modframe.and, &mb.cf); mod->modtype.and = mb.modtype; if (mb.modtype & CGW_MOD_ID) mod->modfunc[modidx++] = mod_and_id; if (mb.modtype & CGW_MOD_LEN) mod->modfunc[modidx++] = mod_and_len; if (mb.modtype & CGW_MOD_FLAGS) mod->modfunc[modidx++] = mod_and_flags; if (mb.modtype & CGW_MOD_DATA) mod->modfunc[modidx++] = mod_and_fddata; } if (tb[CGW_FDMOD_OR]) { nla_memcpy(&mb, tb[CGW_FDMOD_OR], CGW_FDMODATTR_LEN); canfdframecpy(&mod->modframe.or, &mb.cf); mod->modtype.or = mb.modtype; if (mb.modtype & CGW_MOD_ID) mod->modfunc[modidx++] = mod_or_id; if (mb.modtype & CGW_MOD_LEN) mod->modfunc[modidx++] = mod_or_len; if (mb.modtype & CGW_MOD_FLAGS) mod->modfunc[modidx++] = mod_or_flags; if (mb.modtype & CGW_MOD_DATA) mod->modfunc[modidx++] = mod_or_fddata; } if (tb[CGW_FDMOD_XOR]) { nla_memcpy(&mb, tb[CGW_FDMOD_XOR], CGW_FDMODATTR_LEN); canfdframecpy(&mod->modframe.xor, &mb.cf); mod->modtype.xor = mb.modtype; if (mb.modtype & CGW_MOD_ID) mod->modfunc[modidx++] = mod_xor_id; if (mb.modtype & CGW_MOD_LEN) mod->modfunc[modidx++] = mod_xor_len; if (mb.modtype & CGW_MOD_FLAGS) mod->modfunc[modidx++] = mod_xor_flags; if (mb.modtype & CGW_MOD_DATA) mod->modfunc[modidx++] = mod_xor_fddata; } if (tb[CGW_FDMOD_SET]) { nla_memcpy(&mb, tb[CGW_FDMOD_SET], CGW_FDMODATTR_LEN); canfdframecpy(&mod->modframe.set, &mb.cf); mod->modtype.set = mb.modtype; if (mb.modtype & CGW_MOD_ID) mod->modfunc[modidx++] = mod_set_id; if (mb.modtype & CGW_MOD_LEN) mod->modfunc[modidx++] = mod_set_len; if (mb.modtype & CGW_MOD_FLAGS) mod->modfunc[modidx++] = mod_set_flags; if (mb.modtype & CGW_MOD_DATA) mod->modfunc[modidx++] = mod_set_fddata; } } else { struct cgw_frame_mod mb; if (tb[CGW_MOD_AND]) { nla_memcpy(&mb, tb[CGW_MOD_AND], CGW_MODATTR_LEN); canframecpy(&mod->modframe.and, &mb.cf); mod->modtype.and = mb.modtype; if (mb.modtype & CGW_MOD_ID) mod->modfunc[modidx++] = mod_and_id; if (mb.modtype & CGW_MOD_DLC) mod->modfunc[modidx++] = mod_and_ccdlc; if (mb.modtype & CGW_MOD_DATA) mod->modfunc[modidx++] = mod_and_data; } if (tb[CGW_MOD_OR]) { nla_memcpy(&mb, tb[CGW_MOD_OR], CGW_MODATTR_LEN); canframecpy(&mod->modframe.or, &mb.cf); mod->modtype.or = mb.modtype; if (mb.modtype & CGW_MOD_ID) mod->modfunc[modidx++] = mod_or_id; if (mb.modtype & CGW_MOD_DLC) mod->modfunc[modidx++] = mod_or_ccdlc; if (mb.modtype & CGW_MOD_DATA) mod->modfunc[modidx++] = mod_or_data; } if (tb[CGW_MOD_XOR]) { nla_memcpy(&mb, tb[CGW_MOD_XOR], CGW_MODATTR_LEN); canframecpy(&mod->modframe.xor, &mb.cf); mod->modtype.xor = mb.modtype; if (mb.modtype & CGW_MOD_ID) mod->modfunc[modidx++] = mod_xor_id; if (mb.modtype & CGW_MOD_DLC) mod->modfunc[modidx++] = mod_xor_ccdlc; if (mb.modtype & CGW_MOD_DATA) mod->modfunc[modidx++] = mod_xor_data; } if (tb[CGW_MOD_SET]) { nla_memcpy(&mb, tb[CGW_MOD_SET], CGW_MODATTR_LEN); canframecpy(&mod->modframe.set, &mb.cf); mod->modtype.set = 
mb.modtype; if (mb.modtype & CGW_MOD_ID) mod->modfunc[modidx++] = mod_set_id; if (mb.modtype & CGW_MOD_DLC) mod->modfunc[modidx++] = mod_set_ccdlc; if (mb.modtype & CGW_MOD_DATA) mod->modfunc[modidx++] = mod_set_data; } } /* check for checksum operations after CAN frame modifications */ if (modidx) { if (tb[CGW_CS_CRC8]) { struct cgw_csum_crc8 *c = nla_data(tb[CGW_CS_CRC8]); err = cgw_chk_csum_parms(c->from_idx, c->to_idx, c->result_idx, r); if (err) return err; nla_memcpy(&mod->csum.crc8, tb[CGW_CS_CRC8], CGW_CS_CRC8_LEN); /* select dedicated processing function to reduce * runtime operations in receive hot path. */ if (c->from_idx < 0 || c->to_idx < 0 || c->result_idx < 0) mod->csumfunc.crc8 = cgw_csum_crc8_rel; else if (c->from_idx <= c->to_idx) mod->csumfunc.crc8 = cgw_csum_crc8_pos; else mod->csumfunc.crc8 = cgw_csum_crc8_neg; } if (tb[CGW_CS_XOR]) { struct cgw_csum_xor *c = nla_data(tb[CGW_CS_XOR]); err = cgw_chk_csum_parms(c->from_idx, c->to_idx, c->result_idx, r); if (err) return err; nla_memcpy(&mod->csum.xor, tb[CGW_CS_XOR], CGW_CS_XOR_LEN); /* select dedicated processing function to reduce * runtime operations in receive hot path. */ if (c->from_idx < 0 || c->to_idx < 0 || c->result_idx < 0) mod->csumfunc.xor = cgw_csum_xor_rel; else if (c->from_idx <= c->to_idx) mod->csumfunc.xor = cgw_csum_xor_pos; else mod->csumfunc.xor = cgw_csum_xor_neg; } if (tb[CGW_MOD_UID]) nla_memcpy(&mod->uid, tb[CGW_MOD_UID], sizeof(u32)); } if (gwtype == CGW_TYPE_CAN_CAN) { /* check CGW_TYPE_CAN_CAN specific attributes */ struct can_can_gw *ccgw = (struct can_can_gw *)gwtypeattr; memset(ccgw, 0, sizeof(*ccgw)); /* check for can_filter in attributes */ if (tb[CGW_FILTER]) nla_memcpy(&ccgw->filter, tb[CGW_FILTER], sizeof(struct can_filter)); err = -ENODEV; /* specifying two interfaces is mandatory */ if (!tb[CGW_SRC_IF] || !tb[CGW_DST_IF]) return err; ccgw->src_idx = nla_get_u32(tb[CGW_SRC_IF]); ccgw->dst_idx = nla_get_u32(tb[CGW_DST_IF]); /* both indices set to 0 for flushing all routing entries */ if (!ccgw->src_idx && !ccgw->dst_idx) return 0; /* only one index set to 0 is an error */ if (!ccgw->src_idx || !ccgw->dst_idx) return err; } /* add the checks for other gwtypes here */ return 0; } static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct rtcanmsg *r; struct cgw_job *gwj; struct cf_mod mod; struct can_can_gw ccgw; u8 limhops = 0; int err = 0; if (!netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM; if (nlmsg_len(nlh) < sizeof(*r)) return -EINVAL; r = nlmsg_data(nlh); if (r->can_family != AF_CAN) return -EPFNOSUPPORT; /* so far we only support CAN -> CAN routings */ if (r->gwtype != CGW_TYPE_CAN_CAN) return -EINVAL; err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops); if (err < 0) return err; if (mod.uid) { ASSERT_RTNL(); /* check for updating an existing job with identical uid */ hlist_for_each_entry(gwj, &net->can.cgw_list, list) { if (gwj->mod.uid != mod.uid) continue; /* interfaces & filters must be identical */ if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw))) return -EINVAL; /* update modifications with disabled softirq & quit */ local_bh_disable(); memcpy(&gwj->mod, &mod, sizeof(mod)); local_bh_enable(); return 0; } } /* ifindex == 0 is not allowed for job creation */ if (!ccgw.src_idx || !ccgw.dst_idx) return -ENODEV; gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL); if (!gwj) return -ENOMEM; gwj->handled_frames = 0; gwj->dropped_frames = 0; gwj->deleted_frames = 0; gwj->flags = r->flags; 
gwj->gwtype = r->gwtype; gwj->limit_hops = limhops; /* insert already parsed information */ memcpy(&gwj->mod, &mod, sizeof(mod)); memcpy(&gwj->ccgw, &ccgw, sizeof(ccgw)); err = -ENODEV; gwj->src.dev = __dev_get_by_index(net, gwj->ccgw.src_idx); if (!gwj->src.dev) goto out; if (gwj->src.dev->type != ARPHRD_CAN) goto out; gwj->dst.dev = __dev_get_by_index(net, gwj->ccgw.dst_idx); if (!gwj->dst.dev) goto out; if (gwj->dst.dev->type != ARPHRD_CAN) goto out; ASSERT_RTNL(); err = cgw_register_filter(net, gwj); if (!err) hlist_add_head_rcu(&gwj->list, &net->can.cgw_list); out: if (err) kmem_cache_free(cgw_cache, gwj); return err; } static void cgw_remove_all_jobs(struct net *net) { struct cgw_job *gwj = NULL; struct hlist_node *nx; ASSERT_RTNL(); hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) { hlist_del(&gwj->list); cgw_unregister_filter(net, gwj); synchronize_rcu(); kmem_cache_free(cgw_cache, gwj); } } static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct cgw_job *gwj = NULL; struct hlist_node *nx; struct rtcanmsg *r; struct cf_mod mod; struct can_can_gw ccgw; u8 limhops = 0; int err = 0; if (!netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM; if (nlmsg_len(nlh) < sizeof(*r)) return -EINVAL; r = nlmsg_data(nlh); if (r->can_family != AF_CAN) return -EPFNOSUPPORT; /* so far we only support CAN -> CAN routings */ if (r->gwtype != CGW_TYPE_CAN_CAN) return -EINVAL; err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops); if (err < 0) return err; /* two interface indices both set to 0 => remove all entries */ if (!ccgw.src_idx && !ccgw.dst_idx) { cgw_remove_all_jobs(net); return 0; } err = -EINVAL; ASSERT_RTNL(); /* remove only the first matching entry */ hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) { if (gwj->flags != r->flags) continue; if (gwj->limit_hops != limhops) continue; /* we have a match when uid is enabled and identical */ if (gwj->mod.uid || mod.uid) { if (gwj->mod.uid != mod.uid) continue; } else { /* no uid => check for identical modifications */ if (memcmp(&gwj->mod, &mod, sizeof(mod))) continue; } /* if (r->gwtype == CGW_TYPE_CAN_CAN) - is made sure here */ if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw))) continue; hlist_del(&gwj->list); cgw_unregister_filter(net, gwj); synchronize_rcu(); kmem_cache_free(cgw_cache, gwj); err = 0; break; } return err; } static int __net_init cangw_pernet_init(struct net *net) { INIT_HLIST_HEAD(&net->can.cgw_list); return 0; } static void __net_exit cangw_pernet_exit(struct net *net) { rtnl_lock(); cgw_remove_all_jobs(net); rtnl_unlock(); } static struct pernet_operations cangw_pernet_ops = { .init = cangw_pernet_init, .exit = cangw_pernet_exit, }; static __init int cgw_module_init(void) { int ret; /* sanitize given module parameter */ max_hops = clamp_t(unsigned int, max_hops, CGW_MIN_HOPS, CGW_MAX_HOPS); pr_info("can: netlink gateway - max_hops=%d\n", max_hops); ret = register_pernet_subsys(&cangw_pernet_ops); if (ret) return ret; ret = -ENOMEM; cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job), 0, 0, NULL); if (!cgw_cache) goto out_cache_create; /* set notifier */ notifier.notifier_call = cgw_notifier; ret = register_netdevice_notifier(&notifier); if (ret) goto out_register_notifier; ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, 0); if (ret) goto out_rtnl_register1; ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE, cgw_create_job, NULL, 0); if (ret) goto
out_rtnl_register2; ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE, cgw_remove_job, NULL, 0); if (ret) goto out_rtnl_register3; return 0; out_rtnl_register3: rtnl_unregister(PF_CAN, RTM_NEWROUTE); out_rtnl_register2: rtnl_unregister(PF_CAN, RTM_GETROUTE); out_rtnl_register1: unregister_netdevice_notifier(&notifier); out_register_notifier: kmem_cache_destroy(cgw_cache); out_cache_create: unregister_pernet_subsys(&cangw_pernet_ops); return ret; } static __exit void cgw_module_exit(void) { rtnl_unregister_all(PF_CAN); unregister_netdevice_notifier(&notifier); unregister_pernet_subsys(&cangw_pernet_ops); rcu_barrier(); /* Wait for completion of call_rcu()'s */ kmem_cache_destroy(cgw_cache); } module_init(cgw_module_init); module_exit(cgw_module_exit);
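/*
 * Illustrative sketch (not taken from gw.c): the XOR checksum update applied
 * by cgw_csum_xor_pos()/cgw_csum_xor_neg()/cgw_csum_xor_rel() above, written
 * as one self-contained helper. Negative indices count back from the received
 * data length, mirroring calc_idx(); the sketch_* names and the explicit
 * bounds check are assumptions of this example, not kernel code.
 */
#include <stdint.h>

static int sketch_calc_idx(int idx, int data_len)
{
	return idx < 0 ? data_len + idx : idx;
}

/* XOR data[from..to] (in either direction) into data[result], seeded with init_val */
static void sketch_cgw_xor_csum(uint8_t *data, int data_len,
				int from_idx, int to_idx, int result_idx,
				uint8_t init_val)
{
	int from = sketch_calc_idx(from_idx, data_len);
	int to = sketch_calc_idx(to_idx, data_len);
	int res = sketch_calc_idx(result_idx, data_len);
	uint8_t val = init_val;
	int i;

	if (from < 0 || to < 0 || res < 0 ||
	    from >= data_len || to >= data_len || res >= data_len)
		return;

	if (from <= to)
		for (i = from; i <= to; i++)
			val ^= data[i];
	else
		for (i = from; i >= to; i--)
			val ^= data[i];

	data[res] = val;
}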
/* SPDX-License-Identifier: GPL-2.0 */ #include <linux/cryptouser.h> #include <net/netlink.h> struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact); #ifdef CONFIG_CRYPTO_STATS int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs); #else static inline int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs) { return -ENOTSUPP; } #endif
// SPDX-License-Identifier: GPL-2.0 /* * fs/partitions/sun.c * * Code extracted from drivers/block/genhd.c * * Copyright (C) 1991-1998 Linus Torvalds * Re-organised Feb 1998 Russell King */ #include "check.h" #define SUN_LABEL_MAGIC 0xDABE #define SUN_VTOC_SANITY 0x600DDEEE enum { SUN_WHOLE_DISK = 5, LINUX_RAID_PARTITION = 0xfd, /* autodetect RAID partition */ }; int sun_partition(struct parsed_partitions *state) { int i; __be16 csum; int slot = 1; __be16 *ush; Sector sect; struct sun_disklabel { unsigned char info[128]; /* Informative text string */ struct sun_vtoc { __be32 version; /* Layout version */ char volume[8]; /* Volume name */ __be16 nparts; /* Number of partitions */ struct sun_info { /* Partition hdrs, sec 2 */ __be16 id; __be16 flags; } infos[8]; __be16 padding; /* Alignment padding */ __be32 bootinfo[3]; /* Info needed by mboot */ __be32 sanity; /* To verify vtoc sanity */ __be32 reserved[10]; /* Free space */ __be32 timestamp[8]; /* Partition timestamp */ } vtoc; __be32 write_reinstruct; /* sectors to skip, writes */ __be32 read_reinstruct; /* sectors to skip, reads */ unsigned char spare[148]; /* Padding */ __be16 rspeed; /* Disk rotational speed */ __be16 pcylcount; /* Physical cylinder count */ __be16 sparecyl; /* extra sects per cylinder */ __be16 obs1; /* gap1 */ __be16 obs2; /* gap2 */ __be16 ilfact; /* Interleave factor */ __be16 ncyl; /* Data cylinder count */ __be16 nacyl; /* Alt. cylinder count */ __be16 ntrks; /* Tracks per cylinder */ __be16 nsect; /* Sectors per track */ __be16 obs3; /* bhead - Label head offset */ __be16 obs4; /* ppart - Physical Partition */ struct sun_partition { __be32 start_cylinder; __be32 num_sectors; } partitions[8]; __be16 magic; /* Magic number */ __be16 csum; /* Label xor'd checksum */ } * label; struct sun_partition *p; unsigned long spc; int use_vtoc; int nparts; label = read_part_sector(state, 0, &sect); if (!label) return -1; p = label->partitions; if (be16_to_cpu(label->magic) != SUN_LABEL_MAGIC) { /* printk(KERN_INFO "Dev %s Sun disklabel: bad magic %04x\n", state->disk->disk_name, be16_to_cpu(label->magic)); */ put_dev_sector(sect); return 0; } /* Look at the checksum */ ush = ((__be16 *) (label+1)) - 1; for (csum = 0; ush >= ((__be16 *) label);) csum ^= *ush--; if (csum) { printk("Dev %s Sun disklabel: Csum bad, label corrupted\n", state->disk->disk_name); put_dev_sector(sect); return 0; } /* Check to see if we can use the VTOC table */ use_vtoc = ((be32_to_cpu(label->vtoc.sanity) == SUN_VTOC_SANITY) && (be32_to_cpu(label->vtoc.version) == 1) && (be16_to_cpu(label->vtoc.nparts) <= 8)); /* Use 8 partition entries if not specified in validated VTOC */ nparts = (use_vtoc) ? be16_to_cpu(label->vtoc.nparts) : 8; /* * So that old Linux-Sun partitions continue to work, * allow the VTOC to be used under the additional condition ...
*/ use_vtoc = use_vtoc || !(label->vtoc.sanity || label->vtoc.version || label->vtoc.nparts); spc = be16_to_cpu(label->ntrks) * be16_to_cpu(label->nsect); for (i = 0; i < nparts; i++, p++) { unsigned long st_sector; unsigned int num_sectors; st_sector = be32_to_cpu(p->start_cylinder) * spc; num_sectors = be32_to_cpu(p->num_sectors); if (num_sectors) { put_partition(state, slot, st_sector, num_sectors); state->parts[slot].flags = 0; if (use_vtoc) { if (be16_to_cpu(label->vtoc.infos[i].id) == LINUX_RAID_PARTITION) state->parts[slot].flags |= ADDPART_FLAG_RAID; else if (be16_to_cpu(label->vtoc.infos[i].id) == SUN_WHOLE_DISK) state->parts[slot].flags |= ADDPART_FLAG_WHOLEDISK; } } slot++; } strlcat(state->pp_buf, "\n", PAGE_SIZE); put_dev_sector(sect); return 1; } |
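/*
 * Illustrative sketch (not taken from sun.c): the label validity rule that the
 * checksum loop above enforces. The 512-byte Sun disklabel is treated as 256
 * big-endian 16-bit words, csum field included, and XORed together; a valid
 * label XORs to zero. The sketch_* helper and its raw-buffer parameter are
 * assumptions of this example.
 */
#include <stdint.h>
#include <stddef.h>

/* Returns 1 when a raw 512-byte label sector passes the XOR check. */
static int sketch_sun_label_csum_ok(const uint8_t *raw, size_t len)
{
	uint16_t csum = 0;
	size_t i;

	if (len != 512)
		return 0;

	for (i = 0; i < len; i += 2)
		csum ^= (uint16_t)((raw[i] << 8) | raw[i + 1]);	/* big-endian word */

	return csum == 0;
}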
// SPDX-License-Identifier: GPL-2.0 #include <linux/ceph/ceph_debug.h> #include <linux/module.h> #include <linux/types.h> #include
<linux/slab.h> #include <linux/random.h> #include <linux/sched.h> #include <linux/ceph/ceph_features.h> #include <linux/ceph/mon_client.h> #include <linux/ceph/libceph.h> #include <linux/ceph/debugfs.h> #include <linux/ceph/decode.h> #include <linux/ceph/auth.h> /* * Interact with Ceph monitor cluster. Handle requests for new map * versions, and periodically resend as needed. Also implement * statfs() and umount(). * * A small cluster of Ceph "monitors" are responsible for managing critical * cluster configuration and state information. An odd number (e.g., 3, 5) * of cmon daemons use a modified version of the Paxos part-time parliament * algorithm to manage the MDS map (mds cluster membership), OSD map, and * list of clients who have mounted the file system. * * We maintain an open, active session with a monitor at all times in order to * receive timely MDSMap updates. We periodically send a keepalive byte on the * TCP socket to ensure we detect a failure. If the connection does break, we * randomly hunt for a new monitor. Once the connection is reestablished, we * resend any outstanding requests. */ static const struct ceph_connection_operations mon_con_ops; static int __validate_auth(struct ceph_mon_client *monc); static int decode_mon_info(void **p, void *end, bool msgr2, struct ceph_entity_addr *addr) { void *mon_info_end; u32 struct_len; u8 struct_v; int ret; ret = ceph_start_decoding(p, end, 1, "mon_info_t", &struct_v, &struct_len); if (ret) return ret; mon_info_end = *p + struct_len; ceph_decode_skip_string(p, end, e_inval); /* skip mon name */ ret = ceph_decode_entity_addrvec(p, end, msgr2, addr); if (ret) return ret; *p = mon_info_end; return 0; e_inval: return -EINVAL; } /* * Decode a monmap blob (e.g., during mount). * * Assume MonMap v3 (i.e. encoding with MONNAMES and MONENC). 
*/ static struct ceph_monmap *ceph_monmap_decode(void **p, void *end, bool msgr2) { struct ceph_monmap *monmap = NULL; struct ceph_fsid fsid; u32 struct_len; int blob_len; int num_mon; u8 struct_v; u32 epoch; int ret; int i; ceph_decode_32_safe(p, end, blob_len, e_inval); ceph_decode_need(p, end, blob_len, e_inval); ret = ceph_start_decoding(p, end, 6, "monmap", &struct_v, &struct_len); if (ret) goto fail; dout("%s struct_v %d\n", __func__, struct_v); ceph_decode_copy_safe(p, end, &fsid, sizeof(fsid), e_inval); ceph_decode_32_safe(p, end, epoch, e_inval); if (struct_v >= 6) { u32 feat_struct_len; u8 feat_struct_v; *p += sizeof(struct ceph_timespec); /* skip last_changed */ *p += sizeof(struct ceph_timespec); /* skip created */ ret = ceph_start_decoding(p, end, 1, "mon_feature_t", &feat_struct_v, &feat_struct_len); if (ret) goto fail; *p += feat_struct_len; /* skip persistent_features */ ret = ceph_start_decoding(p, end, 1, "mon_feature_t", &feat_struct_v, &feat_struct_len); if (ret) goto fail; *p += feat_struct_len; /* skip optional_features */ } ceph_decode_32_safe(p, end, num_mon, e_inval); dout("%s fsid %pU epoch %u num_mon %d\n", __func__, &fsid, epoch, num_mon); if (num_mon > CEPH_MAX_MON) goto e_inval; monmap = kmalloc(struct_size(monmap, mon_inst, num_mon), GFP_NOIO); if (!monmap) { ret = -ENOMEM; goto fail; } monmap->fsid = fsid; monmap->epoch = epoch; monmap->num_mon = num_mon; /* legacy_mon_addr map or mon_info map */ for (i = 0; i < num_mon; i++) { struct ceph_entity_inst *inst = &monmap->mon_inst[i]; ceph_decode_skip_string(p, end, e_inval); /* skip mon name */ inst->name.type = CEPH_ENTITY_TYPE_MON; inst->name.num = cpu_to_le64(i); if (struct_v >= 6) ret = decode_mon_info(p, end, msgr2, &inst->addr); else ret = ceph_decode_entity_addr(p, end, &inst->addr); if (ret) goto fail; dout("%s mon%d addr %s\n", __func__, i, ceph_pr_addr(&inst->addr)); } return monmap; e_inval: ret = -EINVAL; fail: kfree(monmap); return ERR_PTR(ret); } /* * return true if *addr is included in the monmap. */ int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr) { int i; for (i = 0; i < m->num_mon; i++) { if (ceph_addr_equal_no_type(addr, &m->mon_inst[i].addr)) return 1; } return 0; } /* * Send an auth request. */ static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len) { monc->pending_auth = 1; monc->m_auth->front.iov_len = len; monc->m_auth->hdr.front_len = cpu_to_le32(len); ceph_msg_revoke(monc->m_auth); ceph_msg_get(monc->m_auth); /* keep our ref */ ceph_con_send(&monc->con, monc->m_auth); } /* * Close monitor session, if any. */ static void __close_session(struct ceph_mon_client *monc) { dout("__close_session closing mon%d\n", monc->cur_mon); ceph_msg_revoke(monc->m_auth); ceph_msg_revoke_incoming(monc->m_auth_reply); ceph_msg_revoke(monc->m_subscribe); ceph_msg_revoke_incoming(monc->m_subscribe_ack); ceph_con_close(&monc->con); monc->pending_auth = 0; ceph_auth_reset(monc->auth); } /* * Pick a new monitor at random and set cur_mon. If we are repicking * (i.e. cur_mon is already set), be sure to pick a different one. 
*/ static void pick_new_mon(struct ceph_mon_client *monc) { int old_mon = monc->cur_mon; BUG_ON(monc->monmap->num_mon < 1); if (monc->monmap->num_mon == 1) { monc->cur_mon = 0; } else { int max = monc->monmap->num_mon; int o = -1; int n; if (monc->cur_mon >= 0) { if (monc->cur_mon < monc->monmap->num_mon) o = monc->cur_mon; if (o >= 0) max--; } n = prandom_u32() % max; if (o >= 0 && n >= o) n++; monc->cur_mon = n; } dout("%s mon%d -> mon%d out of %d mons\n", __func__, old_mon, monc->cur_mon, monc->monmap->num_mon); } /* * Open a session with a new monitor. */ static void __open_session(struct ceph_mon_client *monc) { int ret; pick_new_mon(monc); monc->hunting = true; if (monc->had_a_connection) { monc->hunt_mult *= CEPH_MONC_HUNT_BACKOFF; if (monc->hunt_mult > CEPH_MONC_HUNT_MAX_MULT) monc->hunt_mult = CEPH_MONC_HUNT_MAX_MULT; } monc->sub_renew_after = jiffies; /* i.e., expired */ monc->sub_renew_sent = 0; dout("%s opening mon%d\n", __func__, monc->cur_mon); ceph_con_open(&monc->con, CEPH_ENTITY_TYPE_MON, monc->cur_mon, &monc->monmap->mon_inst[monc->cur_mon].addr); /* * Queue a keepalive to ensure that in case of an early fault * the messenger doesn't put us into STANDBY state and instead * retries. This also ensures that our timestamp is valid by * the time we finish hunting and delayed_work() checks it. */ ceph_con_keepalive(&monc->con); if (ceph_msgr2(monc->client)) { monc->pending_auth = 1; return; } /* initiate authentication handshake */ ret = ceph_auth_build_hello(monc->auth, monc->m_auth->front.iov_base, monc->m_auth->front_alloc_len); BUG_ON(ret <= 0); __send_prepared_auth_request(monc, ret); } static void reopen_session(struct ceph_mon_client *monc) { if (!monc->hunting) pr_info("mon%d %s session lost, hunting for new mon\n", monc->cur_mon, ceph_pr_addr(&monc->con.peer_addr)); __close_session(monc); __open_session(monc); } void ceph_monc_reopen_session(struct ceph_mon_client *monc) { mutex_lock(&monc->mutex); reopen_session(monc); mutex_unlock(&monc->mutex); } static void un_backoff(struct ceph_mon_client *monc) { monc->hunt_mult /= 2; /* reduce by 50% */ if (monc->hunt_mult < 1) monc->hunt_mult = 1; dout("%s hunt_mult now %d\n", __func__, monc->hunt_mult); } /* * Reschedule delayed work timer. */ static void __schedule_delayed(struct ceph_mon_client *monc) { unsigned long delay; if (monc->hunting) delay = CEPH_MONC_HUNT_INTERVAL * monc->hunt_mult; else delay = CEPH_MONC_PING_INTERVAL; dout("__schedule_delayed after %lu\n", delay); mod_delayed_work(system_wq, &monc->delayed_work, round_jiffies_relative(delay)); } const char *ceph_sub_str[] = { [CEPH_SUB_MONMAP] = "monmap", [CEPH_SUB_OSDMAP] = "osdmap", [CEPH_SUB_FSMAP] = "fsmap.user", [CEPH_SUB_MDSMAP] = "mdsmap", }; /* * Send subscribe request for one or more maps, according to * monc->subs. 
*/ static void __send_subscribe(struct ceph_mon_client *monc) { struct ceph_msg *msg = monc->m_subscribe; void *p = msg->front.iov_base; void *const end = p + msg->front_alloc_len; int num = 0; int i; dout("%s sent %lu\n", __func__, monc->sub_renew_sent); BUG_ON(monc->cur_mon < 0); if (!monc->sub_renew_sent) monc->sub_renew_sent = jiffies | 1; /* never 0 */ msg->hdr.version = cpu_to_le16(2); for (i = 0; i < ARRAY_SIZE(monc->subs); i++) { if (monc->subs[i].want) num++; } BUG_ON(num < 1); /* monmap sub is always there */ ceph_encode_32(&p, num); for (i = 0; i < ARRAY_SIZE(monc->subs); i++) { char buf[32]; int len; if (!monc->subs[i].want) continue; len = sprintf(buf, "%s", ceph_sub_str[i]); if (i == CEPH_SUB_MDSMAP && monc->fs_cluster_id != CEPH_FS_CLUSTER_ID_NONE) len += sprintf(buf + len, ".%d", monc->fs_cluster_id); dout("%s %s start %llu flags 0x%x\n", __func__, buf, le64_to_cpu(monc->subs[i].item.start), monc->subs[i].item.flags); ceph_encode_string(&p, end, buf, len); memcpy(p, &monc->subs[i].item, sizeof(monc->subs[i].item)); p += sizeof(monc->subs[i].item); } BUG_ON(p > end); msg->front.iov_len = p - msg->front.iov_base; msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); ceph_msg_revoke(msg); ceph_con_send(&monc->con, ceph_msg_get(msg)); } static void handle_subscribe_ack(struct ceph_mon_client *monc, struct ceph_msg *msg) { unsigned int seconds; struct ceph_mon_subscribe_ack *h = msg->front.iov_base; if (msg->front.iov_len < sizeof(*h)) goto bad; seconds = le32_to_cpu(h->duration); mutex_lock(&monc->mutex); if (monc->sub_renew_sent) { /* * This is only needed for legacy (infernalis or older) * MONs -- see delayed_work(). */ monc->sub_renew_after = monc->sub_renew_sent + (seconds >> 1) * HZ - 1; dout("%s sent %lu duration %d renew after %lu\n", __func__, monc->sub_renew_sent, seconds, monc->sub_renew_after); monc->sub_renew_sent = 0; } else { dout("%s sent %lu renew after %lu, ignoring\n", __func__, monc->sub_renew_sent, monc->sub_renew_after); } mutex_unlock(&monc->mutex); return; bad: pr_err("got corrupt subscribe-ack msg\n"); ceph_msg_dump(msg); } /* * Register interest in a map * * @sub: one of CEPH_SUB_* * @epoch: X for "every map since X", or 0 for "just the latest" */ static bool __ceph_monc_want_map(struct ceph_mon_client *monc, int sub, u32 epoch, bool continuous) { __le64 start = cpu_to_le64(epoch); u8 flags = !continuous ? 
CEPH_SUBSCRIBE_ONETIME : 0; dout("%s %s epoch %u continuous %d\n", __func__, ceph_sub_str[sub], epoch, continuous); if (monc->subs[sub].want && monc->subs[sub].item.start == start && monc->subs[sub].item.flags == flags) return false; monc->subs[sub].item.start = start; monc->subs[sub].item.flags = flags; monc->subs[sub].want = true; return true; } bool ceph_monc_want_map(struct ceph_mon_client *monc, int sub, u32 epoch, bool continuous) { bool need_request; mutex_lock(&monc->mutex); need_request = __ceph_monc_want_map(monc, sub, epoch, continuous); mutex_unlock(&monc->mutex); return need_request; } EXPORT_SYMBOL(ceph_monc_want_map); /* * Keep track of which maps we have * * @sub: one of CEPH_SUB_* */ static void __ceph_monc_got_map(struct ceph_mon_client *monc, int sub, u32 epoch) { dout("%s %s epoch %u\n", __func__, ceph_sub_str[sub], epoch); if (monc->subs[sub].want) { if (monc->subs[sub].item.flags & CEPH_SUBSCRIBE_ONETIME) monc->subs[sub].want = false; else monc->subs[sub].item.start = cpu_to_le64(epoch + 1); } monc->subs[sub].have = epoch; } void ceph_monc_got_map(struct ceph_mon_client *monc, int sub, u32 epoch) { mutex_lock(&monc->mutex); __ceph_monc_got_map(monc, sub, epoch); mutex_unlock(&monc->mutex); } EXPORT_SYMBOL(ceph_monc_got_map); void ceph_monc_renew_subs(struct ceph_mon_client *monc) { mutex_lock(&monc->mutex); __send_subscribe(monc); mutex_unlock(&monc->mutex); } EXPORT_SYMBOL(ceph_monc_renew_subs); /* * Wait for an osdmap with a given epoch. * * @epoch: epoch to wait for * @timeout: in jiffies, 0 means "wait forever" */ int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch, unsigned long timeout) { unsigned long started = jiffies; long ret; mutex_lock(&monc->mutex); while (monc->subs[CEPH_SUB_OSDMAP].have < epoch) { mutex_unlock(&monc->mutex); if (timeout && time_after_eq(jiffies, started + timeout)) return -ETIMEDOUT; ret = wait_event_interruptible_timeout(monc->client->auth_wq, monc->subs[CEPH_SUB_OSDMAP].have >= epoch, ceph_timeout_jiffies(timeout)); if (ret < 0) return ret; mutex_lock(&monc->mutex); } mutex_unlock(&monc->mutex); return 0; } EXPORT_SYMBOL(ceph_monc_wait_osdmap); /* * Open a session with a random monitor. Request monmap and osdmap, * which are waited upon in __ceph_open_session(). 
*/ int ceph_monc_open_session(struct ceph_mon_client *monc) { mutex_lock(&monc->mutex); __ceph_monc_want_map(monc, CEPH_SUB_MONMAP, 0, true); __ceph_monc_want_map(monc, CEPH_SUB_OSDMAP, 0, false); __open_session(monc); __schedule_delayed(monc); mutex_unlock(&monc->mutex); return 0; } EXPORT_SYMBOL(ceph_monc_open_session); static void ceph_monc_handle_map(struct ceph_mon_client *monc, struct ceph_msg *msg) { struct ceph_client *client = monc->client; struct ceph_monmap *monmap; void *p, *end; mutex_lock(&monc->mutex); dout("handle_monmap\n"); p = msg->front.iov_base; end = p + msg->front.iov_len; monmap = ceph_monmap_decode(&p, end, ceph_msgr2(client)); if (IS_ERR(monmap)) { pr_err("problem decoding monmap, %d\n", (int)PTR_ERR(monmap)); ceph_msg_dump(msg); goto out; } if (ceph_check_fsid(client, &monmap->fsid) < 0) { kfree(monmap); goto out; } kfree(monc->monmap); monc->monmap = monmap; __ceph_monc_got_map(monc, CEPH_SUB_MONMAP, monc->monmap->epoch); client->have_fsid = true; out: mutex_unlock(&monc->mutex); wake_up_all(&client->auth_wq); } /* * generic requests (currently statfs, mon_get_version) */ DEFINE_RB_FUNCS(generic_request, struct ceph_mon_generic_request, tid, node) static void release_generic_request(struct kref *kref) { struct ceph_mon_generic_request *req = container_of(kref, struct ceph_mon_generic_request, kref); dout("%s greq %p request %p reply %p\n", __func__, req, req->request, req->reply); WARN_ON(!RB_EMPTY_NODE(&req->node)); if (req->reply) ceph_msg_put(req->reply); if (req->request) ceph_msg_put(req->request); kfree(req); } static void put_generic_request(struct ceph_mon_generic_request *req) { if (req) kref_put(&req->kref, release_generic_request); } static void get_generic_request(struct ceph_mon_generic_request *req) { kref_get(&req->kref); } static struct ceph_mon_generic_request * alloc_generic_request(struct ceph_mon_client *monc, gfp_t gfp) { struct ceph_mon_generic_request *req; req = kzalloc(sizeof(*req), gfp); if (!req) return NULL; req->monc = monc; kref_init(&req->kref); RB_CLEAR_NODE(&req->node); init_completion(&req->completion); dout("%s greq %p\n", __func__, req); return req; } static void register_generic_request(struct ceph_mon_generic_request *req) { struct ceph_mon_client *monc = req->monc; WARN_ON(req->tid); get_generic_request(req); req->tid = ++monc->last_tid; insert_generic_request(&monc->generic_request_tree, req); } static void send_generic_request(struct ceph_mon_client *monc, struct ceph_mon_generic_request *req) { WARN_ON(!req->tid); dout("%s greq %p tid %llu\n", __func__, req, req->tid); req->request->hdr.tid = cpu_to_le64(req->tid); ceph_con_send(&monc->con, ceph_msg_get(req->request)); } static void __finish_generic_request(struct ceph_mon_generic_request *req) { struct ceph_mon_client *monc = req->monc; dout("%s greq %p tid %llu\n", __func__, req, req->tid); erase_generic_request(&monc->generic_request_tree, req); ceph_msg_revoke(req->request); ceph_msg_revoke_incoming(req->reply); } static void finish_generic_request(struct ceph_mon_generic_request *req) { __finish_generic_request(req); put_generic_request(req); } static void complete_generic_request(struct ceph_mon_generic_request *req) { if (req->complete_cb) req->complete_cb(req); else complete_all(&req->completion); put_generic_request(req); } static void cancel_generic_request(struct ceph_mon_generic_request *req) { struct ceph_mon_client *monc = req->monc; struct ceph_mon_generic_request *lookup_req; dout("%s greq %p tid %llu\n", __func__, req, req->tid); 
mutex_lock(&monc->mutex); lookup_req = lookup_generic_request(&monc->generic_request_tree, req->tid); if (lookup_req) { WARN_ON(lookup_req != req); finish_generic_request(req); } mutex_unlock(&monc->mutex); } static int wait_generic_request(struct ceph_mon_generic_request *req) { int ret; dout("%s greq %p tid %llu\n", __func__, req, req->tid); ret = wait_for_completion_interruptible(&req->completion); if (ret) cancel_generic_request(req); else ret = req->result; /* completed */ return ret; } static struct ceph_msg *get_generic_reply(struct ceph_connection *con, struct ceph_msg_header *hdr, int *skip) { struct ceph_mon_client *monc = con->private; struct ceph_mon_generic_request *req; u64 tid = le64_to_cpu(hdr->tid); struct ceph_msg *m; mutex_lock(&monc->mutex); req = lookup_generic_request(&monc->generic_request_tree, tid); if (!req) { dout("get_generic_reply %lld dne\n", tid); *skip = 1; m = NULL; } else { dout("get_generic_reply %lld got %p\n", tid, req->reply); *skip = 0; m = ceph_msg_get(req->reply); /* * we don't need to track the connection reading into * this reply because we only have one open connection * at a time, ever. */ } mutex_unlock(&monc->mutex); return m; } /* * statfs */ static void handle_statfs_reply(struct ceph_mon_client *monc, struct ceph_msg *msg) { struct ceph_mon_generic_request *req; struct ceph_mon_statfs_reply *reply = msg->front.iov_base; u64 tid = le64_to_cpu(msg->hdr.tid); dout("%s msg %p tid %llu\n", __func__, msg, tid); if (msg->front.iov_len != sizeof(*reply)) goto bad; mutex_lock(&monc->mutex); req = lookup_generic_request(&monc->generic_request_tree, tid); if (!req) { mutex_unlock(&monc->mutex); return; } req->result = 0; *req->u.st = reply->st; /* struct */ __finish_generic_request(req); mutex_unlock(&monc->mutex); complete_generic_request(req); return; bad: pr_err("corrupt statfs reply, tid %llu\n", tid); ceph_msg_dump(msg); } /* * Do a synchronous statfs(). 
*/ int ceph_monc_do_statfs(struct ceph_mon_client *monc, u64 data_pool, struct ceph_statfs *buf) { struct ceph_mon_generic_request *req; struct ceph_mon_statfs *h; int ret = -ENOMEM; req = alloc_generic_request(monc, GFP_NOFS); if (!req) goto out; req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS, true); if (!req->request) goto out; req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 64, GFP_NOFS, true); if (!req->reply) goto out; req->u.st = buf; req->request->hdr.version = cpu_to_le16(2); mutex_lock(&monc->mutex); register_generic_request(req); /* fill out request */ h = req->request->front.iov_base; h->monhdr.have_version = 0; h->monhdr.session_mon = cpu_to_le16(-1); h->monhdr.session_mon_tid = 0; h->fsid = monc->monmap->fsid; h->contains_data_pool = (data_pool != CEPH_NOPOOL); h->data_pool = cpu_to_le64(data_pool); send_generic_request(monc, req); mutex_unlock(&monc->mutex); ret = wait_generic_request(req); out: put_generic_request(req); return ret; } EXPORT_SYMBOL(ceph_monc_do_statfs); static void handle_get_version_reply(struct ceph_mon_client *monc, struct ceph_msg *msg) { struct ceph_mon_generic_request *req; u64 tid = le64_to_cpu(msg->hdr.tid); void *p = msg->front.iov_base; void *end = p + msg->front_alloc_len; u64 handle; dout("%s msg %p tid %llu\n", __func__, msg, tid); ceph_decode_need(&p, end, 2*sizeof(u64), bad); handle = ceph_decode_64(&p); if (tid != 0 && tid != handle) goto bad; mutex_lock(&monc->mutex); req = lookup_generic_request(&monc->generic_request_tree, handle); if (!req) { mutex_unlock(&monc->mutex); return; } req->result = 0; req->u.newest = ceph_decode_64(&p); __finish_generic_request(req); mutex_unlock(&monc->mutex); complete_generic_request(req); return; bad: pr_err("corrupt mon_get_version reply, tid %llu\n", tid); ceph_msg_dump(msg); } static struct ceph_mon_generic_request * __ceph_monc_get_version(struct ceph_mon_client *monc, const char *what, ceph_monc_callback_t cb, u64 private_data) { struct ceph_mon_generic_request *req; req = alloc_generic_request(monc, GFP_NOIO); if (!req) goto err_put_req; req->request = ceph_msg_new(CEPH_MSG_MON_GET_VERSION, sizeof(u64) + sizeof(u32) + strlen(what), GFP_NOIO, true); if (!req->request) goto err_put_req; req->reply = ceph_msg_new(CEPH_MSG_MON_GET_VERSION_REPLY, 32, GFP_NOIO, true); if (!req->reply) goto err_put_req; req->complete_cb = cb; req->private_data = private_data; mutex_lock(&monc->mutex); register_generic_request(req); { void *p = req->request->front.iov_base; void *const end = p + req->request->front_alloc_len; ceph_encode_64(&p, req->tid); /* handle */ ceph_encode_string(&p, end, what, strlen(what)); WARN_ON(p != end); } send_generic_request(monc, req); mutex_unlock(&monc->mutex); return req; err_put_req: put_generic_request(req); return ERR_PTR(-ENOMEM); } /* * Send MMonGetVersion and wait for the reply. 
* * @what: one of "mdsmap", "osdmap" or "monmap" */ int ceph_monc_get_version(struct ceph_mon_client *monc, const char *what, u64 *newest) { struct ceph_mon_generic_request *req; int ret; req = __ceph_monc_get_version(monc, what, NULL, 0); if (IS_ERR(req)) return PTR_ERR(req); ret = wait_generic_request(req); if (!ret) *newest = req->u.newest; put_generic_request(req); return ret; } EXPORT_SYMBOL(ceph_monc_get_version); /* * Send MMonGetVersion, * * @what: one of "mdsmap", "osdmap" or "monmap" */ int ceph_monc_get_version_async(struct ceph_mon_client *monc, const char *what, ceph_monc_callback_t cb, u64 private_data) { struct ceph_mon_generic_request *req; req = __ceph_monc_get_version(monc, what, cb, private_data); if (IS_ERR(req)) return PTR_ERR(req); put_generic_request(req); return 0; } EXPORT_SYMBOL(ceph_monc_get_version_async); static void handle_command_ack(struct ceph_mon_client *monc, struct ceph_msg *msg) { struct ceph_mon_generic_request *req; void *p = msg->front.iov_base; void *const end = p + msg->front_alloc_len; u64 tid = le64_to_cpu(msg->hdr.tid); dout("%s msg %p tid %llu\n", __func__, msg, tid); ceph_decode_need(&p, end, sizeof(struct ceph_mon_request_header) + sizeof(u32), bad); p += sizeof(struct ceph_mon_request_header); mutex_lock(&monc->mutex); req = lookup_generic_request(&monc->generic_request_tree, tid); if (!req) { mutex_unlock(&monc->mutex); return; } req->result = ceph_decode_32(&p); __finish_generic_request(req); mutex_unlock(&monc->mutex); complete_generic_request(req); return; bad: pr_err("corrupt mon_command ack, tid %llu\n", tid); ceph_msg_dump(msg); } static __printf(2, 0) int do_mon_command_vargs(struct ceph_mon_client *monc, const char *fmt, va_list ap) { struct ceph_mon_generic_request *req; struct ceph_mon_command *h; int ret = -ENOMEM; int len; req = alloc_generic_request(monc, GFP_NOIO); if (!req) goto out; req->request = ceph_msg_new(CEPH_MSG_MON_COMMAND, 256, GFP_NOIO, true); if (!req->request) goto out; req->reply = ceph_msg_new(CEPH_MSG_MON_COMMAND_ACK, 512, GFP_NOIO, true); if (!req->reply) goto out; mutex_lock(&monc->mutex); register_generic_request(req); h = req->request->front.iov_base; h->monhdr.have_version = 0; h->monhdr.session_mon = cpu_to_le16(-1); h->monhdr.session_mon_tid = 0; h->fsid = monc->monmap->fsid; h->num_strs = cpu_to_le32(1); len = vsprintf(h->str, fmt, ap); h->str_len = cpu_to_le32(len); send_generic_request(monc, req); mutex_unlock(&monc->mutex); ret = wait_generic_request(req); out: put_generic_request(req); return ret; } static __printf(2, 3) int do_mon_command(struct ceph_mon_client *monc, const char *fmt, ...) { va_list ap; int ret; va_start(ap, fmt); ret = do_mon_command_vargs(monc, fmt, ap); va_end(ap); return ret; } int ceph_monc_blocklist_add(struct ceph_mon_client *monc, struct ceph_entity_addr *client_addr) { int ret; ret = do_mon_command(monc, "{ \"prefix\": \"osd blocklist\", \ \"blocklistop\": \"add\", \ \"addr\": \"%pISpc/%u\" }", &client_addr->in_addr, le32_to_cpu(client_addr->nonce)); if (ret == -EINVAL) { /* * The monitor returns EINVAL on an unrecognized command. * Try the legacy command -- it is exactly the same except * for the name. */ ret = do_mon_command(monc, "{ \"prefix\": \"osd blacklist\", \ \"blacklistop\": \"add\", \ \"addr\": \"%pISpc/%u\" }", &client_addr->in_addr, le32_to_cpu(client_addr->nonce)); } if (ret) return ret; /* * Make sure we have the osdmap that includes the blocklist * entry. 
This is needed to ensure that the OSDs pick up the * new blocklist before processing any future requests from * this client. */ return ceph_wait_for_latest_osdmap(monc->client, 0); } EXPORT_SYMBOL(ceph_monc_blocklist_add); /* * Resend pending generic requests. */ static void __resend_generic_request(struct ceph_mon_client *monc) { struct ceph_mon_generic_request *req; struct rb_node *p; for (p = rb_first(&monc->generic_request_tree); p; p = rb_next(p)) { req = rb_entry(p, struct ceph_mon_generic_request, node); ceph_msg_revoke(req->request); ceph_msg_revoke_incoming(req->reply); ceph_con_send(&monc->con, ceph_msg_get(req->request)); } } /* * Delayed work. If we haven't mounted yet, retry. Otherwise, * renew/retry subscription as needed (in case it is timing out, or we * got an ENOMEM). And keep the monitor connection alive. */ static void delayed_work(struct work_struct *work) { struct ceph_mon_client *monc = container_of(work, struct ceph_mon_client, delayed_work.work); mutex_lock(&monc->mutex); dout("%s mon%d\n", __func__, monc->cur_mon); if (monc->cur_mon < 0) { goto out; } if (monc->hunting) { dout("%s continuing hunt\n", __func__); reopen_session(monc); } else { int is_auth = ceph_auth_is_authenticated(monc->auth); dout("%s is_authed %d\n", __func__, is_auth); if (ceph_con_keepalive_expired(&monc->con, CEPH_MONC_PING_TIMEOUT)) { dout("monc keepalive timeout\n"); is_auth = 0; reopen_session(monc); } if (!monc->hunting) { ceph_con_keepalive(&monc->con); __validate_auth(monc); un_backoff(monc); } if (is_auth && !(monc->con.peer_features & CEPH_FEATURE_MON_STATEFUL_SUB)) { unsigned long now = jiffies; dout("%s renew subs? now %lu renew after %lu\n", __func__, now, monc->sub_renew_after); if (time_after_eq(now, monc->sub_renew_after)) __send_subscribe(monc); } } __schedule_delayed(monc); out: mutex_unlock(&monc->mutex); } /* * On startup, we build a temporary monmap populated with the IPs * provided by mount(2). */ static int build_initial_monmap(struct ceph_mon_client *monc) { __le32 my_type = ceph_msgr2(monc->client) ? 
CEPH_ENTITY_ADDR_TYPE_MSGR2 : CEPH_ENTITY_ADDR_TYPE_LEGACY; struct ceph_options *opt = monc->client->options; int num_mon = opt->num_mon; int i; /* build initial monmap */ monc->monmap = kzalloc(struct_size(monc->monmap, mon_inst, num_mon), GFP_KERNEL); if (!monc->monmap) return -ENOMEM; for (i = 0; i < num_mon; i++) { struct ceph_entity_inst *inst = &monc->monmap->mon_inst[i]; memcpy(&inst->addr.in_addr, &opt->mon_addr[i].in_addr, sizeof(inst->addr.in_addr)); inst->addr.type = my_type; inst->addr.nonce = 0; inst->name.type = CEPH_ENTITY_TYPE_MON; inst->name.num = cpu_to_le64(i); } monc->monmap->num_mon = num_mon; return 0; } int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl) { int err = 0; dout("init\n"); memset(monc, 0, sizeof(*monc)); monc->client = cl; monc->monmap = NULL; mutex_init(&monc->mutex); err = build_initial_monmap(monc); if (err) goto out; /* connection */ /* authentication */ monc->auth = ceph_auth_init(cl->options->name, cl->options->key, cl->options->con_modes); if (IS_ERR(monc->auth)) { err = PTR_ERR(monc->auth); goto out_monmap; } monc->auth->want_keys = CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON | CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MDS; /* msgs */ err = -ENOMEM; monc->m_subscribe_ack = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE_ACK, sizeof(struct ceph_mon_subscribe_ack), GFP_KERNEL, true); if (!monc->m_subscribe_ack) goto out_auth; monc->m_subscribe = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 128, GFP_KERNEL, true); if (!monc->m_subscribe) goto out_subscribe_ack; monc->m_auth_reply = ceph_msg_new(CEPH_MSG_AUTH_REPLY, 4096, GFP_KERNEL, true); if (!monc->m_auth_reply) goto out_subscribe; monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, GFP_KERNEL, true); monc->pending_auth = 0; if (!monc->m_auth) goto out_auth_reply; ceph_con_init(&monc->con, monc, &mon_con_ops, &monc->client->msgr); monc->cur_mon = -1; monc->had_a_connection = false; monc->hunt_mult = 1; INIT_DELAYED_WORK(&monc->delayed_work, delayed_work); monc->generic_request_tree = RB_ROOT; monc->last_tid = 0; monc->fs_cluster_id = CEPH_FS_CLUSTER_ID_NONE; return 0; out_auth_reply: ceph_msg_put(monc->m_auth_reply); out_subscribe: ceph_msg_put(monc->m_subscribe); out_subscribe_ack: ceph_msg_put(monc->m_subscribe_ack); out_auth: ceph_auth_destroy(monc->auth); out_monmap: kfree(monc->monmap); out: return err; } EXPORT_SYMBOL(ceph_monc_init); void ceph_monc_stop(struct ceph_mon_client *monc) { dout("stop\n"); mutex_lock(&monc->mutex); __close_session(monc); monc->hunting = false; monc->cur_mon = -1; mutex_unlock(&monc->mutex); cancel_delayed_work_sync(&monc->delayed_work); /* * flush msgr queue before we destroy ourselves to ensure that: * - any work that references our embedded con is finished. * - any osd_client or other work that may reference an authorizer * finishes before we shut down the auth subsystem. 
*/ ceph_msgr_flush(); ceph_auth_destroy(monc->auth); WARN_ON(!RB_EMPTY_ROOT(&monc->generic_request_tree)); ceph_msg_put(monc->m_auth); ceph_msg_put(monc->m_auth_reply); ceph_msg_put(monc->m_subscribe); ceph_msg_put(monc->m_subscribe_ack); kfree(monc->monmap); } EXPORT_SYMBOL(ceph_monc_stop); static void finish_hunting(struct ceph_mon_client *monc) { if (monc->hunting) { dout("%s found mon%d\n", __func__, monc->cur_mon); monc->hunting = false; monc->had_a_connection = true; un_backoff(monc); __schedule_delayed(monc); } } static void finish_auth(struct ceph_mon_client *monc, int auth_err, bool was_authed) { dout("%s auth_err %d was_authed %d\n", __func__, auth_err, was_authed); WARN_ON(auth_err > 0); monc->pending_auth = 0; if (auth_err) { monc->client->auth_err = auth_err; wake_up_all(&monc->client->auth_wq); return; } if (!was_authed && ceph_auth_is_authenticated(monc->auth)) { dout("%s authenticated, starting session global_id %llu\n", __func__, monc->auth->global_id); monc->client->msgr.inst.name.type = CEPH_ENTITY_TYPE_CLIENT; monc->client->msgr.inst.name.num = cpu_to_le64(monc->auth->global_id); __send_subscribe(monc); __resend_generic_request(monc); pr_info("mon%d %s session established\n", monc->cur_mon, ceph_pr_addr(&monc->con.peer_addr)); } } static void handle_auth_reply(struct ceph_mon_client *monc, struct ceph_msg *msg) { bool was_authed; int ret; mutex_lock(&monc->mutex); was_authed = ceph_auth_is_authenticated(monc->auth); ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base, msg->front.iov_len, monc->m_auth->front.iov_base, monc->m_auth->front_alloc_len); if (ret > 0) { __send_prepared_auth_request(monc, ret); } else { finish_auth(monc, ret, was_authed); finish_hunting(monc); } mutex_unlock(&monc->mutex); } static int __validate_auth(struct ceph_mon_client *monc) { int ret; if (monc->pending_auth) return 0; ret = ceph_build_auth(monc->auth, monc->m_auth->front.iov_base, monc->m_auth->front_alloc_len); if (ret <= 0) return ret; /* either an error, or no need to authenticate */ __send_prepared_auth_request(monc, ret); return 0; } int ceph_monc_validate_auth(struct ceph_mon_client *monc) { int ret; mutex_lock(&monc->mutex); ret = __validate_auth(monc); mutex_unlock(&monc->mutex); return ret; } EXPORT_SYMBOL(ceph_monc_validate_auth); static int mon_get_auth_request(struct ceph_connection *con, void *buf, int *buf_len, void **authorizer, int *authorizer_len) { struct ceph_mon_client *monc = con->private; int ret; mutex_lock(&monc->mutex); ret = ceph_auth_get_request(monc->auth, buf, *buf_len); mutex_unlock(&monc->mutex); if (ret < 0) return ret; *buf_len = ret; *authorizer = NULL; *authorizer_len = 0; return 0; } static int mon_handle_auth_reply_more(struct ceph_connection *con, void *reply, int reply_len, void *buf, int *buf_len, void **authorizer, int *authorizer_len) { struct ceph_mon_client *monc = con->private; int ret; mutex_lock(&monc->mutex); ret = ceph_auth_handle_reply_more(monc->auth, reply, reply_len, buf, *buf_len); mutex_unlock(&monc->mutex); if (ret < 0) return ret; *buf_len = ret; *authorizer = NULL; *authorizer_len = 0; return 0; } static int mon_handle_auth_done(struct ceph_connection *con, u64 global_id, void *reply, int reply_len, u8 *session_key, int *session_key_len, u8 *con_secret, int *con_secret_len) { struct ceph_mon_client *monc = con->private; bool was_authed; int ret; mutex_lock(&monc->mutex); WARN_ON(!monc->hunting); was_authed = ceph_auth_is_authenticated(monc->auth); ret = ceph_auth_handle_reply_done(monc->auth, global_id, reply, reply_len, 
session_key, session_key_len, con_secret, con_secret_len); finish_auth(monc, ret, was_authed); if (!ret) finish_hunting(monc); mutex_unlock(&monc->mutex); return 0; } static int mon_handle_auth_bad_method(struct ceph_connection *con, int used_proto, int result, const int *allowed_protos, int proto_cnt, const int *allowed_modes, int mode_cnt) { struct ceph_mon_client *monc = con->private; bool was_authed; mutex_lock(&monc->mutex); WARN_ON(!monc->hunting); was_authed = ceph_auth_is_authenticated(monc->auth); ceph_auth_handle_bad_method(monc->auth, used_proto, result, allowed_protos, proto_cnt, allowed_modes, mode_cnt); finish_auth(monc, -EACCES, was_authed); mutex_unlock(&monc->mutex); return 0; } /* * handle incoming message */ static void mon_dispatch(struct ceph_connection *con, struct ceph_msg *msg) { struct ceph_mon_client *monc = con->private; int type = le16_to_cpu(msg->hdr.type); switch (type) { case CEPH_MSG_AUTH_REPLY: handle_auth_reply(monc, msg); break; case CEPH_MSG_MON_SUBSCRIBE_ACK: handle_subscribe_ack(monc, msg); break; case CEPH_MSG_STATFS_REPLY: handle_statfs_reply(monc, msg); break; case CEPH_MSG_MON_GET_VERSION_REPLY: handle_get_version_reply(monc, msg); break; case CEPH_MSG_MON_COMMAND_ACK: handle_command_ack(monc, msg); break; case CEPH_MSG_MON_MAP: ceph_monc_handle_map(monc, msg); break; case CEPH_MSG_OSD_MAP: ceph_osdc_handle_map(&monc->client->osdc, msg); break; default: /* can the chained handler handle it? */ if (monc->client->extra_mon_dispatch && monc->client->extra_mon_dispatch(monc->client, msg) == 0) break; pr_err("received unknown message type %d %s\n", type, ceph_msg_type_name(type)); } ceph_msg_put(msg); } /* * Allocate memory for incoming message */ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con, struct ceph_msg_header *hdr, int *skip) { struct ceph_mon_client *monc = con->private; int type = le16_to_cpu(hdr->type); int front_len = le32_to_cpu(hdr->front_len); struct ceph_msg *m = NULL; *skip = 0; switch (type) { case CEPH_MSG_MON_SUBSCRIBE_ACK: m = ceph_msg_get(monc->m_subscribe_ack); break; case CEPH_MSG_STATFS_REPLY: case CEPH_MSG_MON_COMMAND_ACK: return get_generic_reply(con, hdr, skip); case CEPH_MSG_AUTH_REPLY: m = ceph_msg_get(monc->m_auth_reply); break; case CEPH_MSG_MON_GET_VERSION_REPLY: if (le64_to_cpu(hdr->tid) != 0) return get_generic_reply(con, hdr, skip); /* * Older OSDs don't set reply tid even if the original * request had a non-zero tid. Work around this weirdness * by allocating a new message. */ fallthrough; case CEPH_MSG_MON_MAP: case CEPH_MSG_MDS_MAP: case CEPH_MSG_OSD_MAP: case CEPH_MSG_FS_MAP_USER: m = ceph_msg_new(type, front_len, GFP_NOFS, false); if (!m) return NULL; /* ENOMEM--return skip == 0 */ break; } if (!m) { pr_info("alloc_msg unknown type %d\n", type); *skip = 1; } else if (front_len > m->front_alloc_len) { pr_warn("mon_alloc_msg front %d > prealloc %d (%u#%llu)\n", front_len, m->front_alloc_len, (unsigned int)con->peer_name.type, le64_to_cpu(con->peer_name.num)); ceph_msg_put(m); m = ceph_msg_new(type, front_len, GFP_NOFS, false); } return m; } /* * If the monitor connection resets, pick a new monitor and resubmit * any pending requests. 
*/ static void mon_fault(struct ceph_connection *con) { struct ceph_mon_client *monc = con->private; mutex_lock(&monc->mutex); dout("%s mon%d\n", __func__, monc->cur_mon); if (monc->cur_mon >= 0) { if (!monc->hunting) { dout("%s hunting for new mon\n", __func__); reopen_session(monc); __schedule_delayed(monc); } else { dout("%s already hunting\n", __func__); } } mutex_unlock(&monc->mutex); } /* * We can ignore refcounting on the connection struct, as all references * will come from the messenger workqueue, which is drained prior to * mon_client destruction. */ static struct ceph_connection *mon_get_con(struct ceph_connection *con) { return con; } static void mon_put_con(struct ceph_connection *con) { } static const struct ceph_connection_operations mon_con_ops = { .get = mon_get_con, .put = mon_put_con, .alloc_msg = mon_alloc_msg, .dispatch = mon_dispatch, .fault = mon_fault, .get_auth_request = mon_get_auth_request, .handle_auth_reply_more = mon_handle_auth_reply_more, .handle_auth_done = mon_handle_auth_done, .handle_auth_bad_method = mon_handle_auth_bad_method, };
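/*
 * Illustrative sketch, not part of the kernel sources above or below:
 * pick_new_mon() draws a random monitor index while excluding the one
 * currently in use. The standalone program below reproduces that
 * selection logic in plain userspace C so the off-by-one adjustment
 * (skipping over the old index) is easy to see. The helper name
 * pick_different() and the use of rand() are assumptions made for this
 * example only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Pick an index in [0, num_mon) that differs from old (any index if old < 0). */
static int pick_different(int old, int num_mon)
{
	int max = num_mon;
	int n;

	if (num_mon == 1)
		return 0;
	if (old >= 0 && old < num_mon)
		max--;		/* leave the current monitor out of the draw */
	n = rand() % max;
	if (old >= 0 && n >= old)
		n++;		/* shift past the excluded slot */
	return n;
}

int main(void)
{
	int i;

	srand((unsigned int)time(NULL));
	for (i = 0; i < 5; i++)
		printf("picked mon%d\n", pick_different(2, 5));	/* never mon2 */
	return 0;
}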
/* * Mapping of UID/GIDs to name and vice versa. * * Copyright (c) 2002, 2003 The Regents of the University of * Michigan. All rights reserved. * * Marius Aamodt Eriksen <marius@umich.edu> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission.
* * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <linux/module.h> #include <linux/seq_file.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/sunrpc/svc_xprt.h> #include <net/net_namespace.h> #include "idmap.h" #include "nfsd.h" #include "netns.h" #include "vfs.h" /* * Turn off idmapping when using AUTH_SYS. */ static bool nfs4_disable_idmapping = true; module_param(nfs4_disable_idmapping, bool, 0644); MODULE_PARM_DESC(nfs4_disable_idmapping, "Turn off server's NFSv4 idmapping when using 'sec=sys'"); /* * Cache entry */ /* * XXX we know that IDMAP_NAMESZ < PAGE_SIZE, but it's ugly to rely on * that. */ struct ent { struct cache_head h; int type; /* User / Group */ u32 id; char name[IDMAP_NAMESZ]; char authname[IDMAP_NAMESZ]; struct rcu_head rcu_head; }; /* Common entry handling */ #define ENT_HASHBITS 8 #define ENT_HASHMAX (1 << ENT_HASHBITS) static void ent_init(struct cache_head *cnew, struct cache_head *citm) { struct ent *new = container_of(cnew, struct ent, h); struct ent *itm = container_of(citm, struct ent, h); new->id = itm->id; new->type = itm->type; strscpy(new->name, itm->name, sizeof(new->name)); strscpy(new->authname, itm->authname, sizeof(new->authname)); } static void ent_put(struct kref *ref) { struct ent *map = container_of(ref, struct ent, h.ref); kfree_rcu(map, rcu_head); } static struct cache_head * ent_alloc(void) { struct ent *e = kmalloc(sizeof(*e), GFP_KERNEL); if (e) return &e->h; else return NULL; } /* * ID -> Name cache */ static uint32_t idtoname_hash(struct ent *ent) { uint32_t hash; hash = hash_str(ent->authname, ENT_HASHBITS); hash = hash_long(hash ^ ent->id, ENT_HASHBITS); /* Flip LSB for user/group */ if (ent->type == IDMAP_TYPE_GROUP) hash ^= 1; return hash; } static int idtoname_upcall(struct cache_detail *cd, struct cache_head *h) { return sunrpc_cache_pipe_upcall_timeout(cd, h); } static void idtoname_request(struct cache_detail *cd, struct cache_head *ch, char **bpp, int *blen) { struct ent *ent = container_of(ch, struct ent, h); char idstr[11]; qword_add(bpp, blen, ent->authname); snprintf(idstr, sizeof(idstr), "%u", ent->id); qword_add(bpp, blen, ent->type == IDMAP_TYPE_GROUP ? "group" : "user"); qword_add(bpp, blen, idstr); (*bpp)[-1] = '\n'; } static int idtoname_match(struct cache_head *ca, struct cache_head *cb) { struct ent *a = container_of(ca, struct ent, h); struct ent *b = container_of(cb, struct ent, h); return (a->id == b->id && a->type == b->type && strcmp(a->authname, b->authname) == 0); } static int idtoname_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h) { struct ent *ent; if (h == NULL) { seq_puts(m, "#domain type id [name]\n"); return 0; } ent = container_of(h, struct ent, h); seq_printf(m, "%s %s %u", ent->authname, ent->type == IDMAP_TYPE_GROUP ? 
"group" : "user", ent->id); if (test_bit(CACHE_VALID, &h->flags)) seq_printf(m, " %s", ent->name); seq_putc(m, '\n'); return 0; } static void warn_no_idmapd(struct cache_detail *detail, int has_died) { printk("nfsd: nfsv4 idmapping failing: has idmapd %s?\n", has_died ? "died" : "not been started"); } static int idtoname_parse(struct cache_detail *, char *, int); static struct ent *idtoname_lookup(struct cache_detail *, struct ent *); static struct ent *idtoname_update(struct cache_detail *, struct ent *, struct ent *); static const struct cache_detail idtoname_cache_template = { .owner = THIS_MODULE, .hash_size = ENT_HASHMAX, .name = "nfs4.idtoname", .cache_put = ent_put, .cache_upcall = idtoname_upcall, .cache_request = idtoname_request, .cache_parse = idtoname_parse, .cache_show = idtoname_show, .warn_no_listener = warn_no_idmapd, .match = idtoname_match, .init = ent_init, .update = ent_init, .alloc = ent_alloc, }; static int idtoname_parse(struct cache_detail *cd, char *buf, int buflen) { struct ent ent, *res; char *buf1, *bp; int len; int error = -EINVAL; if (buf[buflen - 1] != '\n') return (-EINVAL); buf[buflen - 1]= '\0'; buf1 = kmalloc(PAGE_SIZE, GFP_KERNEL); if (buf1 == NULL) return (-ENOMEM); memset(&ent, 0, sizeof(ent)); /* Authentication name */ len = qword_get(&buf, buf1, PAGE_SIZE); if (len <= 0 || len >= IDMAP_NAMESZ) goto out; memcpy(ent.authname, buf1, sizeof(ent.authname)); /* Type */ if (qword_get(&buf, buf1, PAGE_SIZE) <= 0) goto out; ent.type = strcmp(buf1, "user") == 0 ? IDMAP_TYPE_USER : IDMAP_TYPE_GROUP; /* ID */ if (qword_get(&buf, buf1, PAGE_SIZE) <= 0) goto out; ent.id = simple_strtoul(buf1, &bp, 10); if (bp == buf1) goto out; /* expiry */ ent.h.expiry_time = get_expiry(&buf); if (ent.h.expiry_time == 0) goto out; error = -ENOMEM; res = idtoname_lookup(cd, &ent); if (!res) goto out; /* Name */ error = -EINVAL; len = qword_get(&buf, buf1, PAGE_SIZE); if (len < 0 || len >= IDMAP_NAMESZ) goto out; if (len == 0) set_bit(CACHE_NEGATIVE, &ent.h.flags); else memcpy(ent.name, buf1, sizeof(ent.name)); error = -ENOMEM; res = idtoname_update(cd, &ent, res); if (res == NULL) goto out; cache_put(&res->h, cd); error = 0; out: kfree(buf1); return error; } static struct ent * idtoname_lookup(struct cache_detail *cd, struct ent *item) { struct cache_head *ch = sunrpc_cache_lookup_rcu(cd, &item->h, idtoname_hash(item)); if (ch) return container_of(ch, struct ent, h); else return NULL; } static struct ent * idtoname_update(struct cache_detail *cd, struct ent *new, struct ent *old) { struct cache_head *ch = sunrpc_cache_update(cd, &new->h, &old->h, idtoname_hash(new)); if (ch) return container_of(ch, struct ent, h); else return NULL; } /* * Name -> ID cache */ static inline int nametoid_hash(struct ent *ent) { return hash_str(ent->name, ENT_HASHBITS); } static int nametoid_upcall(struct cache_detail *cd, struct cache_head *h) { return sunrpc_cache_pipe_upcall_timeout(cd, h); } static void nametoid_request(struct cache_detail *cd, struct cache_head *ch, char **bpp, int *blen) { struct ent *ent = container_of(ch, struct ent, h); qword_add(bpp, blen, ent->authname); qword_add(bpp, blen, ent->type == IDMAP_TYPE_GROUP ? 
"group" : "user"); qword_add(bpp, blen, ent->name); (*bpp)[-1] = '\n'; } static int nametoid_match(struct cache_head *ca, struct cache_head *cb) { struct ent *a = container_of(ca, struct ent, h); struct ent *b = container_of(cb, struct ent, h); return (a->type == b->type && strcmp(a->name, b->name) == 0 && strcmp(a->authname, b->authname) == 0); } static int nametoid_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h) { struct ent *ent; if (h == NULL) { seq_puts(m, "#domain type name [id]\n"); return 0; } ent = container_of(h, struct ent, h); seq_printf(m, "%s %s %s", ent->authname, ent->type == IDMAP_TYPE_GROUP ? "group" : "user", ent->name); if (test_bit(CACHE_VALID, &h->flags)) seq_printf(m, " %u", ent->id); seq_putc(m, '\n'); return 0; } static struct ent *nametoid_lookup(struct cache_detail *, struct ent *); static struct ent *nametoid_update(struct cache_detail *, struct ent *, struct ent *); static int nametoid_parse(struct cache_detail *, char *, int); static const struct cache_detail nametoid_cache_template = { .owner = THIS_MODULE, .hash_size = ENT_HASHMAX, .name = "nfs4.nametoid", .cache_put = ent_put, .cache_upcall = nametoid_upcall, .cache_request = nametoid_request, .cache_parse = nametoid_parse, .cache_show = nametoid_show, .warn_no_listener = warn_no_idmapd, .match = nametoid_match, .init = ent_init, .update = ent_init, .alloc = ent_alloc, }; static int nametoid_parse(struct cache_detail *cd, char *buf, int buflen) { struct ent ent, *res; char *buf1; int len, error = -EINVAL; if (buf[buflen - 1] != '\n') return (-EINVAL); buf[buflen - 1]= '\0'; buf1 = kmalloc(PAGE_SIZE, GFP_KERNEL); if (buf1 == NULL) return (-ENOMEM); memset(&ent, 0, sizeof(ent)); /* Authentication name */ len = qword_get(&buf, buf1, PAGE_SIZE); if (len <= 0 || len >= IDMAP_NAMESZ) goto out; memcpy(ent.authname, buf1, sizeof(ent.authname)); /* Type */ if (qword_get(&buf, buf1, PAGE_SIZE) <= 0) goto out; ent.type = strcmp(buf1, "user") == 0 ? 
IDMAP_TYPE_USER : IDMAP_TYPE_GROUP; /* Name */ len = qword_get(&buf, buf1, PAGE_SIZE); if (len <= 0 || len >= IDMAP_NAMESZ) goto out; memcpy(ent.name, buf1, sizeof(ent.name)); /* expiry */ ent.h.expiry_time = get_expiry(&buf); if (ent.h.expiry_time == 0) goto out; /* ID */ error = get_int(&buf, &ent.id); if (error == -EINVAL) goto out; if (error == -ENOENT) set_bit(CACHE_NEGATIVE, &ent.h.flags); error = -ENOMEM; res = nametoid_lookup(cd, &ent); if (res == NULL) goto out; res = nametoid_update(cd, &ent, res); if (res == NULL) goto out; cache_put(&res->h, cd); error = 0; out: kfree(buf1); return (error); } static struct ent * nametoid_lookup(struct cache_detail *cd, struct ent *item) { struct cache_head *ch = sunrpc_cache_lookup_rcu(cd, &item->h, nametoid_hash(item)); if (ch) return container_of(ch, struct ent, h); else return NULL; } static struct ent * nametoid_update(struct cache_detail *cd, struct ent *new, struct ent *old) { struct cache_head *ch = sunrpc_cache_update(cd, &new->h, &old->h, nametoid_hash(new)); if (ch) return container_of(ch, struct ent, h); else return NULL; } /* * Exported API */ int nfsd_idmap_init(struct net *net) { int rv; struct nfsd_net *nn = net_generic(net, nfsd_net_id); nn->idtoname_cache = cache_create_net(&idtoname_cache_template, net); if (IS_ERR(nn->idtoname_cache)) return PTR_ERR(nn->idtoname_cache); rv = cache_register_net(nn->idtoname_cache, net); if (rv) goto destroy_idtoname_cache; nn->nametoid_cache = cache_create_net(&nametoid_cache_template, net); if (IS_ERR(nn->nametoid_cache)) { rv = PTR_ERR(nn->nametoid_cache); goto unregister_idtoname_cache; } rv = cache_register_net(nn->nametoid_cache, net); if (rv) goto destroy_nametoid_cache; return 0; destroy_nametoid_cache: cache_destroy_net(nn->nametoid_cache, net); unregister_idtoname_cache: cache_unregister_net(nn->idtoname_cache, net); destroy_idtoname_cache: cache_destroy_net(nn->idtoname_cache, net); return rv; } void nfsd_idmap_shutdown(struct net *net) { struct nfsd_net *nn = net_generic(net, nfsd_net_id); cache_unregister_net(nn->idtoname_cache, net); cache_unregister_net(nn->nametoid_cache, net); cache_destroy_net(nn->idtoname_cache, net); cache_destroy_net(nn->nametoid_cache, net); } static int idmap_lookup(struct svc_rqst *rqstp, struct ent *(*lookup_fn)(struct cache_detail *, struct ent *), struct ent *key, struct cache_detail *detail, struct ent **item) { int ret; *item = lookup_fn(detail, key); if (!*item) return -ENOMEM; retry: ret = cache_check(detail, &(*item)->h, &rqstp->rq_chandle); if (ret == -ETIMEDOUT) { struct ent *prev_item = *item; *item = lookup_fn(detail, key); if (*item != prev_item) goto retry; cache_put(&(*item)->h, detail); } return ret; } static char * rqst_authname(struct svc_rqst *rqstp) { struct auth_domain *clp; clp = rqstp->rq_gssclient ? 
rqstp->rq_gssclient : rqstp->rq_client; return clp->name; } static __be32 idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, u32 *id) { struct ent *item, key = { .type = type, }; int ret; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); if (namelen + 1 > sizeof(key.name)) return nfserr_badowner; memcpy(key.name, name, namelen); key.name[namelen] = '\0'; strscpy(key.authname, rqst_authname(rqstp), sizeof(key.authname)); ret = idmap_lookup(rqstp, nametoid_lookup, &key, nn->nametoid_cache, &item); if (ret == -ENOENT) return nfserr_badowner; if (ret) return nfserrno(ret); *id = item->id; cache_put(&item->h, nn->nametoid_cache); return 0; } static __be32 encode_ascii_id(struct xdr_stream *xdr, u32 id) { char buf[11]; int len; __be32 *p; len = sprintf(buf, "%u", id); p = xdr_reserve_space(xdr, len + 4); if (!p) return nfserr_resource; p = xdr_encode_opaque(p, buf, len); return 0; } static __be32 idmap_id_to_name(struct xdr_stream *xdr, struct svc_rqst *rqstp, int type, u32 id) { struct ent *item, key = { .id = id, .type = type, }; __be32 status = nfs_ok; __be32 *p; int ret; struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); strscpy(key.authname, rqst_authname(rqstp), sizeof(key.authname)); ret = idmap_lookup(rqstp, idtoname_lookup, &key, nn->idtoname_cache, &item); if (ret == -ENOENT) return encode_ascii_id(xdr, id); if (ret) return nfserrno(ret); ret = strlen(item->name); WARN_ON_ONCE(ret > IDMAP_NAMESZ); p = xdr_reserve_space(xdr, ret + 4); if (unlikely(!p)) { status = nfserr_resource; goto out_put; } xdr_encode_opaque(p, item->name, ret); out_put: cache_put(&item->h, nn->idtoname_cache); return status; } static bool numeric_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, u32 *id) { int ret; char buf[11]; if (namelen + 1 > sizeof(buf)) /* too long to represent a 32-bit id: */ return false; /* Just to make sure it's null-terminated: */ memcpy(buf, name, namelen); buf[namelen] = '\0'; ret = kstrtouint(buf, 10, id); return ret == 0; } static __be32 do_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, u32 *id) { if (nfs4_disable_idmapping && rqstp->rq_cred.cr_flavor < RPC_AUTH_GSS) if (numeric_name_to_id(rqstp, type, name, namelen, id)) return 0; /* * otherwise, fall through and try idmapping, for * backwards compatibility with clients sending names: */ return idmap_name_to_id(rqstp, type, name, namelen, id); } static __be32 encode_name_from_id(struct xdr_stream *xdr, struct svc_rqst *rqstp, int type, u32 id) { if (nfs4_disable_idmapping && rqstp->rq_cred.cr_flavor < RPC_AUTH_GSS) return encode_ascii_id(xdr, id); return idmap_id_to_name(xdr, rqstp, type, id); } __be32 nfsd_map_name_to_uid(struct svc_rqst *rqstp, const char *name, size_t namelen, kuid_t *uid) { __be32 status; u32 id = -1; if (name == NULL || namelen == 0) return nfserr_inval; status = do_name_to_id(rqstp, IDMAP_TYPE_USER, name, namelen, &id); *uid = make_kuid(nfsd_user_namespace(rqstp), id); if (!uid_valid(*uid)) status = nfserr_badowner; return status; } __be32 nfsd_map_name_to_gid(struct svc_rqst *rqstp, const char *name, size_t namelen, kgid_t *gid) { __be32 status; u32 id = -1; if (name == NULL || namelen == 0) return nfserr_inval; status = do_name_to_id(rqstp, IDMAP_TYPE_GROUP, name, namelen, &id); *gid = make_kgid(nfsd_user_namespace(rqstp), id); if (!gid_valid(*gid)) status = nfserr_badowner; return status; } __be32 nfsd4_encode_user(struct xdr_stream *xdr, struct svc_rqst *rqstp, kuid_t uid) { u32 id = 
from_kuid_munged(nfsd_user_namespace(rqstp), uid); return encode_name_from_id(xdr, rqstp, IDMAP_TYPE_USER, id); } __be32 nfsd4_encode_group(struct xdr_stream *xdr, struct svc_rqst *rqstp, kgid_t gid) { u32 id = from_kgid_munged(nfsd_user_namespace(rqstp), gid); return encode_name_from_id(xdr, rqstp, IDMAP_TYPE_GROUP, id); }
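/*
 * Illustrative sketch, not part of the kernel sources above or below:
 * with idmapping disabled for AUTH_SYS, do_name_to_id() first tries to
 * treat the owner string as a plain decimal id (numeric_name_to_id()),
 * accepting at most 10 digits so the value fits in 32 bits. The
 * userspace program below mimics that fallback; the helper name
 * parse_numeric_owner() and the use of strtoull() are assumptions made
 * for this example only.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return 1 and store *id if name is a plain decimal 32-bit value, else 0. */
static int parse_numeric_owner(const char *name, size_t namelen, uint32_t *id)
{
	char buf[11];		/* 10 digits + NUL covers every 32-bit id */
	char *end;
	unsigned long long val;

	if (namelen + 1 > sizeof(buf))
		return 0;	/* too long to represent a 32-bit id */
	memcpy(buf, name, namelen);
	buf[namelen] = '\0';
	errno = 0;
	val = strtoull(buf, &end, 10);
	if (errno || end == buf || *end != '\0' || val > UINT32_MAX)
		return 0;
	*id = (uint32_t)val;
	return 1;
}

int main(void)
{
	uint32_t id = 0;

	printf("\"1000\"  -> %d (id %u)\n", parse_numeric_owner("1000", 4, &id), id);
	printf("\"alice\" -> %d\n", parse_numeric_owner("alice", 5, &id));
	return 0;
}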
"in" : "out", urb->actual_length, urb->transfer_buffer_length); } else retval = ctx.status; out: if (actual_length) *actual_length = urb->actual_length; usb_free_urb(urb); return retval; } /*-------------------------------------------------------------------*/ /* returns status (negative) or length (positive) */ static int usb_internal_control_msg(struct usb_device *usb_dev, unsigned int pipe, struct usb_ctrlrequest *cmd, void *data, int len, int timeout) { struct urb *urb; int retv; int length; urb = usb_alloc_urb(0, GFP_NOIO); if (!urb) return -ENOMEM; usb_fill_control_urb(urb, usb_dev, pipe, (unsigned char *)cmd, data, len, usb_api_blocking_completion, NULL); retv = usb_start_wait_urb(urb, timeout, &length); if (retv < 0) return retv; else return length; } /** * usb_control_msg - Builds a control urb, sends it off and waits for completion * @dev: pointer to the usb device to send the message to * @pipe: endpoint "pipe" to send the message to * @request: USB message request value * @requesttype: USB message request type value * @value: USB message value * @index: USB message index value * @data: pointer to the data to send * @size: length in bytes of the data to send * @timeout: time in msecs to wait for the message to complete before timing * out (if 0 the wait is forever) * * Context: task context, might sleep. * * This function sends a simple control message to a specified endpoint and * waits for the message to complete, or timeout. * * Don't use this function from within an interrupt context. If you need * an asynchronous message, or need to send a message from within interrupt * context, use usb_submit_urb(). If a thread in your driver uses this call, * make sure your disconnect() method can wait for it to complete. Since you * don't have a handle on the URB used, you can't cancel the request. * * Return: If successful, the number of bytes transferred. Otherwise, a negative * error number. */ int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request, __u8 requesttype, __u16 value, __u16 index, void *data, __u16 size, int timeout) { struct usb_ctrlrequest *dr; int ret; dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO); if (!dr) return -ENOMEM; dr->bRequestType = requesttype; dr->bRequest = request; dr->wValue = cpu_to_le16(value); dr->wIndex = cpu_to_le16(index); dr->wLength = cpu_to_le16(size); ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout); /* Linger a bit, prior to the next control message. */ if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG) msleep(200); kfree(dr); return ret; } EXPORT_SYMBOL_GPL(usb_control_msg); /** * usb_control_msg_send - Builds a control "send" message, sends it off and waits for completion * @dev: pointer to the usb device to send the message to * @endpoint: endpoint to send the message to * @request: USB message request value * @requesttype: USB message request type value * @value: USB message value * @index: USB message index value * @driver_data: pointer to the data to send * @size: length in bytes of the data to send * @timeout: time in msecs to wait for the message to complete before timing * out (if 0 the wait is forever) * @memflags: the flags for memory allocation for buffers * * Context: !in_interrupt () * * This function sends a control message to a specified endpoint that is not * expected to fill in a response (i.e. a "send message") and waits for the * message to complete, or timeout. * * Do not use this function from within an interrupt context. 
If you need * an asynchronous message, or need to send a message from within interrupt * context, use usb_submit_urb(). If a thread in your driver uses this call, * make sure your disconnect() method can wait for it to complete. Since you * don't have a handle on the URB used, you can't cancel the request. * * The data pointer can be made to a reference on the stack, or anywhere else, * as it will not be modified at all. This does not have the restriction that * usb_control_msg() has where the data pointer must be to dynamically allocated * memory (i.e. memory that can be successfully DMAed to a device). * * Return: If successful, 0 is returned, Otherwise, a negative error number. */ int usb_control_msg_send(struct usb_device *dev, __u8 endpoint, __u8 request, __u8 requesttype, __u16 value, __u16 index, const void *driver_data, __u16 size, int timeout, gfp_t memflags) { unsigned int pipe = usb_sndctrlpipe(dev, endpoint); int ret; u8 *data = NULL; if (size) { data = kmemdup(driver_data, size, memflags); if (!data) return -ENOMEM; } ret = usb_control_msg(dev, pipe, request, requesttype, value, index, data, size, timeout); kfree(data); if (ret < 0) return ret; return 0; } EXPORT_SYMBOL_GPL(usb_control_msg_send); /** * usb_control_msg_recv - Builds a control "receive" message, sends it off and waits for completion * @dev: pointer to the usb device to send the message to * @endpoint: endpoint to send the message to * @request: USB message request value * @requesttype: USB message request type value * @value: USB message value * @index: USB message index value * @driver_data: pointer to the data to be filled in by the message * @size: length in bytes of the data to be received * @timeout: time in msecs to wait for the message to complete before timing * out (if 0 the wait is forever) * @memflags: the flags for memory allocation for buffers * * Context: !in_interrupt () * * This function sends a control message to a specified endpoint that is * expected to fill in a response (i.e. a "receive message") and waits for the * message to complete, or timeout. * * Do not use this function from within an interrupt context. If you need * an asynchronous message, or need to send a message from within interrupt * context, use usb_submit_urb(). If a thread in your driver uses this call, * make sure your disconnect() method can wait for it to complete. Since you * don't have a handle on the URB used, you can't cancel the request. * * The data pointer can be made to a reference on the stack, or anywhere else * that can be successfully written to. This function does not have the * restriction that usb_control_msg() has where the data pointer must be to * dynamically allocated memory (i.e. memory that can be successfully DMAed to a * device). * * The "whole" message must be properly received from the device in order for * this function to be successful. If a device returns less than the expected * amount of data, then the function will fail. Do not use this for messages * where a variable amount of data might be returned. * * Return: If successful, 0 is returned, Otherwise, a negative error number. 
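 *
 * Illustrative usage sketch (hypothetical driver code; the particular
 * request shown is only an example and is not taken from this file).
 * Because short reads are turned into -EREMOTEIO, a zero return means the
 * full @size bytes were received:
 *
 *	__le16 status;
 *	int ret;
 *
 *	ret = usb_control_msg_recv(udev, 0, USB_REQ_GET_STATUS,
 *				   USB_DIR_IN | USB_TYPE_STANDARD |
 *				   USB_RECIP_DEVICE, 0, 0,
 *				   &status, sizeof(status),
 *				   USB_CTRL_GET_TIMEOUT, GFP_KERNEL);
 *	if (ret)
 *		return ret;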
*/ int usb_control_msg_recv(struct usb_device *dev, __u8 endpoint, __u8 request, __u8 requesttype, __u16 value, __u16 index, void *driver_data, __u16 size, int timeout, gfp_t memflags) { unsigned int pipe = usb_rcvctrlpipe(dev, endpoint); int ret; u8 *data; if (!size || !driver_data) return -EINVAL; data = kmalloc(size, memflags); if (!data) return -ENOMEM; ret = usb_control_msg(dev, pipe, request, requesttype, value, index, data, size, timeout); if (ret < 0) goto exit; if (ret == size) { memcpy(driver_data, data, size); ret = 0; } else { ret = -EREMOTEIO; } exit: kfree(data); return ret; } EXPORT_SYMBOL_GPL(usb_control_msg_recv); /** * usb_interrupt_msg - Builds an interrupt urb, sends it off and waits for completion * @usb_dev: pointer to the usb device to send the message to * @pipe: endpoint "pipe" to send the message to * @data: pointer to the data to send * @len: length in bytes of the data to send * @actual_length: pointer to a location to put the actual length transferred * in bytes * @timeout: time in msecs to wait for the message to complete before * timing out (if 0 the wait is forever) * * Context: task context, might sleep. * * This function sends a simple interrupt message to a specified endpoint and * waits for the message to complete, or timeout. * * Don't use this function from within an interrupt context. If you need * an asynchronous message, or need to send a message from within interrupt * context, use usb_submit_urb() If a thread in your driver uses this call, * make sure your disconnect() method can wait for it to complete. Since you * don't have a handle on the URB used, you can't cancel the request. * * Return: * If successful, 0. Otherwise a negative error number. The number of actual * bytes transferred will be stored in the @actual_length parameter. */ int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe, void *data, int len, int *actual_length, int timeout) { return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout); } EXPORT_SYMBOL_GPL(usb_interrupt_msg); /** * usb_bulk_msg - Builds a bulk urb, sends it off and waits for completion * @usb_dev: pointer to the usb device to send the message to * @pipe: endpoint "pipe" to send the message to * @data: pointer to the data to send * @len: length in bytes of the data to send * @actual_length: pointer to a location to put the actual length transferred * in bytes * @timeout: time in msecs to wait for the message to complete before * timing out (if 0 the wait is forever) * * Context: task context, might sleep. * * This function sends a simple bulk message to a specified endpoint * and waits for the message to complete, or timeout. * * Don't use this function from within an interrupt context. If you need * an asynchronous message, or need to send a message from within interrupt * context, use usb_submit_urb() If a thread in your driver uses this call, * make sure your disconnect() method can wait for it to complete. Since you * don't have a handle on the URB used, you can't cancel the request. * * Because there is no usb_interrupt_msg() and no USBDEVFS_INTERRUPT ioctl, * users are forced to abuse this routine by using it to submit URBs for * interrupt endpoints. We will take the liberty of creating an interrupt URB * (with the default interval) if the target is an interrupt endpoint. * * Return: * If successful, 0. Otherwise a negative error number. The number of actual * bytes transferred will be stored in the @actual_length parameter. 
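 *
 * Illustrative usage sketch (hypothetical driver code; "buf", "len" and the
 * endpoint number are assumptions). As with usb_control_msg(), @data must
 * be a DMA-able buffer (e.g. kmalloc'ed), not stack memory:
 *
 *	int actual = 0;
 *	int ret;
 *
 *	ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 2), buf, len,
 *			   &actual, 5000);
 *	if (ret)
 *		dev_err(&udev->dev, "bulk out failed: %d\n", ret);
 *	else
 *		dev_dbg(&udev->dev, "wrote %d of %d bytes\n", actual, len);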
* */ int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe, void *data, int len, int *actual_length, int timeout) { struct urb *urb; struct usb_host_endpoint *ep; ep = usb_pipe_endpoint(usb_dev, pipe); if (!ep || len < 0) return -EINVAL; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) return -ENOMEM; if ((ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT) { pipe = (pipe & ~(3 << 30)) | (PIPE_INTERRUPT << 30); usb_fill_int_urb(urb, usb_dev, pipe, data, len, usb_api_blocking_completion, NULL, ep->desc.bInterval); } else usb_fill_bulk_urb(urb, usb_dev, pipe, data, len, usb_api_blocking_completion, NULL); return usb_start_wait_urb(urb, timeout, actual_length); } EXPORT_SYMBOL_GPL(usb_bulk_msg); /*-------------------------------------------------------------------*/ static void sg_clean(struct usb_sg_request *io) { if (io->urbs) { while (io->entries--) usb_free_urb(io->urbs[io->entries]); kfree(io->urbs); io->urbs = NULL; } io->dev = NULL; } static void sg_complete(struct urb *urb) { unsigned long flags; struct usb_sg_request *io = urb->context; int status = urb->status; spin_lock_irqsave(&io->lock, flags); /* In 2.5 we require hcds' endpoint queues not to progress after fault * reports, until the completion callback (this!) returns. That lets * device driver code (like this routine) unlink queued urbs first, * if it needs to, since the HC won't work on them at all. So it's * not possible for page N+1 to overwrite page N, and so on. * * That's only for "hard" faults; "soft" faults (unlinks) sometimes * complete before the HCD can get requests away from hardware, * though never during cleanup after a hard fault. */ if (io->status && (io->status != -ECONNRESET || status != -ECONNRESET) && urb->actual_length) { dev_err(io->dev->bus->controller, "dev %s ep%d%s scatterlist error %d/%d\n", io->dev->devpath, usb_endpoint_num(&urb->ep->desc), usb_urb_dir_in(urb) ? "in" : "out", status, io->status); /* BUG (); */ } if (io->status == 0 && status && status != -ECONNRESET) { int i, found, retval; io->status = status; /* the previous urbs, and this one, completed already. * unlink pending urbs so they won't rx/tx bad data. * careful: unlink can sometimes be synchronous... */ spin_unlock_irqrestore(&io->lock, flags); for (i = 0, found = 0; i < io->entries; i++) { if (!io->urbs[i]) continue; if (found) { usb_block_urb(io->urbs[i]); retval = usb_unlink_urb(io->urbs[i]); if (retval != -EINPROGRESS && retval != -ENODEV && retval != -EBUSY && retval != -EIDRM) dev_err(&io->dev->dev, "%s, unlink --> %d\n", __func__, retval); } else if (urb == io->urbs[i]) found = 1; } spin_lock_irqsave(&io->lock, flags); } /* on the last completion, signal usb_sg_wait() */ io->bytes += urb->actual_length; io->count--; if (!io->count) complete(&io->complete); spin_unlock_irqrestore(&io->lock, flags); } /** * usb_sg_init - initializes scatterlist-based bulk/interrupt I/O request * @io: request block being initialized. until usb_sg_wait() returns, * treat this as a pointer to an opaque block of memory, * @dev: the usb device that will send or receive the data * @pipe: endpoint "pipe" used to transfer the data * @period: polling rate for interrupt endpoints, in frames or * (for high speed endpoints) microframes; ignored for bulk * @sg: scatterlist entries * @nents: how many entries in the scatterlist * @length: how many bytes to send from the scatterlist, or zero to * send every byte identified in the list. 
* @mem_flags: SLAB_* flags affecting memory allocations in this call * * This initializes a scatter/gather request, allocating resources such as * I/O mappings and urb memory (except maybe memory used by USB controller * drivers). * * The request must be issued using usb_sg_wait(), which waits for the I/O to * complete (or to be canceled) and then cleans up all resources allocated by * usb_sg_init(). * * The request may be canceled with usb_sg_cancel(), either before or after * usb_sg_wait() is called. * * Return: Zero for success, else a negative errno value. */ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev, unsigned pipe, unsigned period, struct scatterlist *sg, int nents, size_t length, gfp_t mem_flags) { int i; int urb_flags; int use_sg; if (!io || !dev || !sg || usb_pipecontrol(pipe) || usb_pipeisoc(pipe) || nents <= 0) return -EINVAL; spin_lock_init(&io->lock); io->dev = dev; io->pipe = pipe; if (dev->bus->sg_tablesize > 0) { use_sg = true; io->entries = 1; } else { use_sg = false; io->entries = nents; } /* initialize all the urbs we'll use */ io->urbs = kmalloc_array(io->entries, sizeof(*io->urbs), mem_flags); if (!io->urbs) goto nomem; urb_flags = URB_NO_INTERRUPT; if (usb_pipein(pipe)) urb_flags |= URB_SHORT_NOT_OK; for_each_sg(sg, sg, io->entries, i) { struct urb *urb; unsigned len; urb = usb_alloc_urb(0, mem_flags); if (!urb) { io->entries = i; goto nomem; } io->urbs[i] = urb; urb->dev = NULL; urb->pipe = pipe; urb->interval = period; urb->transfer_flags = urb_flags; urb->complete = sg_complete; urb->context = io; urb->sg = sg; if (use_sg) { /* There is no single transfer buffer */ urb->transfer_buffer = NULL; urb->num_sgs = nents; /* A length of zero means transfer the whole sg list */ len = length; if (len == 0) { struct scatterlist *sg2; int j; for_each_sg(sg, sg2, nents, j) len += sg2->length; } } else { /* * Some systems can't use DMA; they use PIO instead. * For their sakes, transfer_buffer is set whenever * possible. */ if (!PageHighMem(sg_page(sg))) urb->transfer_buffer = sg_virt(sg); else urb->transfer_buffer = NULL; len = sg->length; if (length) { len = min_t(size_t, len, length); length -= len; if (length == 0) io->entries = i + 1; } } urb->transfer_buffer_length = len; } io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT; /* transaction state */ io->count = io->entries; io->status = 0; io->bytes = 0; init_completion(&io->complete); return 0; nomem: sg_clean(io); return -ENOMEM; } EXPORT_SYMBOL_GPL(usb_sg_init); /** * usb_sg_wait - synchronously execute scatter/gather request * @io: request block handle, as initialized with usb_sg_init(). * some fields become accessible when this call returns. * * Context: task context, might sleep. * * This function blocks until the specified I/O operation completes. It * leverages the grouping of the related I/O requests to get good transfer * rates, by queueing the requests. At higher speeds, such queuing can * significantly improve USB throughput. * * There are three kinds of completion for this function. * * (1) success, where io->status is zero. The number of io->bytes * transferred is as requested. * (2) error, where io->status is a negative errno value. The number * of io->bytes transferred before the error is usually less * than requested, and can be nonzero. * (3) cancellation, a type of error with status -ECONNRESET that * is initiated by usb_sg_cancel(). * * When this function returns, all memory allocated through usb_sg_init() or * this call will have been freed. 
The request block parameter may still be * passed to usb_sg_cancel(), or it may be freed. It could also be * reinitialized and then reused. * * Data Transfer Rates: * * Bulk transfers are valid for full or high speed endpoints. * The best full speed data rate is 19 packets of 64 bytes each * per frame, or 1216 bytes per millisecond. * The best high speed data rate is 13 packets of 512 bytes each * per microframe, or 52 KBytes per millisecond. * * The reason to use interrupt transfers through this API would most likely * be to reserve high speed bandwidth, where up to 24 KBytes per millisecond * could be transferred. That capability is less useful for low or full * speed interrupt endpoints, which allow at most one packet per millisecond, * of at most 8 or 64 bytes (respectively). * * It is not necessary to call this function to reserve bandwidth for devices * under an xHCI host controller, as the bandwidth is reserved when the * configuration or interface alt setting is selected. */ void usb_sg_wait(struct usb_sg_request *io) { int i; int entries = io->entries; /* queue the urbs. */ spin_lock_irq(&io->lock); i = 0; while (i < entries && !io->status) { int retval; io->urbs[i]->dev = io->dev; spin_unlock_irq(&io->lock); retval = usb_submit_urb(io->urbs[i], GFP_NOIO); switch (retval) { /* maybe we retrying will recover */ case -ENXIO: /* hc didn't queue this one */ case -EAGAIN: case -ENOMEM: retval = 0; yield(); break; /* no error? continue immediately. * * NOTE: to work better with UHCI (4K I/O buffer may * need 3K of TDs) it may be good to limit how many * URBs are queued at once; N milliseconds? */ case 0: ++i; cpu_relax(); break; /* fail any uncompleted urbs */ default: io->urbs[i]->status = retval; dev_dbg(&io->dev->dev, "%s, submit --> %d\n", __func__, retval); usb_sg_cancel(io); } spin_lock_irq(&io->lock); if (retval && (io->status == 0 || io->status == -ECONNRESET)) io->status = retval; } io->count -= entries - i; if (io->count == 0) complete(&io->complete); spin_unlock_irq(&io->lock); /* OK, yes, this could be packaged as non-blocking. * So could the submit loop above ... but it's easier to * solve neither problem than to solve both! */ wait_for_completion(&io->complete); sg_clean(io); } EXPORT_SYMBOL_GPL(usb_sg_wait); /** * usb_sg_cancel - stop scatter/gather i/o issued by usb_sg_wait() * @io: request block, initialized with usb_sg_init() * * This stops a request after it has been started by usb_sg_wait(). * It can also prevents one initialized by usb_sg_init() from starting, * so that call just frees resources allocated to the request. 
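 *
 * Illustrative scatter/gather workflow sketch (hypothetical caller; the
 * sg_table "sgt" and the endpoint number are assumptions). A concurrent
 * usb_sg_cancel(&io) from another context would make usb_sg_wait() return
 * with io.status set to -ECONNRESET:
 *
 *	struct usb_sg_request io;
 *	int ret;
 *
 *	ret = usb_sg_init(&io, udev, usb_rcvbulkpipe(udev, 1), 0,
 *			  sgt.sgl, sgt.nents, 0, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	usb_sg_wait(&io);
 *	if (io.status)
 *		return io.status;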
*/ void usb_sg_cancel(struct usb_sg_request *io) { unsigned long flags; int i, retval; spin_lock_irqsave(&io->lock, flags); if (io->status || io->count == 0) { spin_unlock_irqrestore(&io->lock, flags); return; } /* shut everything down */ io->status = -ECONNRESET; io->count++; /* Keep the request alive until we're done */ spin_unlock_irqrestore(&io->lock, flags); for (i = io->entries - 1; i >= 0; --i) { usb_block_urb(io->urbs[i]); retval = usb_unlink_urb(io->urbs[i]); if (retval != -EINPROGRESS && retval != -ENODEV && retval != -EBUSY && retval != -EIDRM) dev_warn(&io->dev->dev, "%s, unlink --> %d\n", __func__, retval); } spin_lock_irqsave(&io->lock, flags); io->count--; if (!io->count) complete(&io->complete); spin_unlock_irqrestore(&io->lock, flags); } EXPORT_SYMBOL_GPL(usb_sg_cancel); /*-------------------------------------------------------------------*/ /** * usb_get_descriptor - issues a generic GET_DESCRIPTOR request * @dev: the device whose descriptor is being retrieved * @type: the descriptor type (USB_DT_*) * @index: the number of the descriptor * @buf: where to put the descriptor * @size: how big is "buf"? * * Context: task context, might sleep. * * Gets a USB descriptor. Convenience functions exist to simplify * getting some types of descriptors. Use * usb_get_string() or usb_string() for USB_DT_STRING. * Device (USB_DT_DEVICE) and configuration descriptors (USB_DT_CONFIG) * are part of the device structure. * In addition to a number of USB-standard descriptors, some * devices also use class-specific or vendor-specific descriptors. * * This call is synchronous, and may not be used in an interrupt context. * * Return: The number of bytes received on success, or else the status code * returned by the underlying usb_control_msg() call. */ int usb_get_descriptor(struct usb_device *dev, unsigned char type, unsigned char index, void *buf, int size) { int i; int result; if (size <= 0) /* No point in asking for no data */ return -EINVAL; memset(buf, 0, size); /* Make sure we parse really received data */ for (i = 0; i < 3; ++i) { /* retry on length 0 or error; some devices are flakey */ result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, (type << 8) + index, 0, buf, size, USB_CTRL_GET_TIMEOUT); if (result <= 0 && result != -ETIMEDOUT) continue; if (result > 1 && ((u8 *)buf)[1] != type) { result = -ENODATA; continue; } break; } return result; } EXPORT_SYMBOL_GPL(usb_get_descriptor); /** * usb_get_string - gets a string descriptor * @dev: the device whose string descriptor is being retrieved * @langid: code for language chosen (from string descriptor zero) * @index: the number of the descriptor * @buf: where to put the string * @size: how big is "buf"? * * Context: task context, might sleep. * * Retrieves a string, encoded using UTF-16LE (Unicode, 16 bits per character, * in little-endian byte order). * The usb_string() function will often be a convenient way to turn * these strings into kernel-printable form. * * Strings may be referenced in device, configuration, interface, or other * descriptors, and could also be used in vendor-specific ways. * * This call is synchronous, and may not be used in an interrupt context. * * Return: The number of bytes received on success, or else the status code * returned by the underlying usb_control_msg() call. 
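 *
 * As required for any string GET_DESCRIPTOR request, the descriptor type
 * and index are packed into wValue ((USB_DT_STRING << 8) + index) and the
 * LANGID is passed in wIndex, which is exactly how the usb_control_msg()
 * call below builds the request.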
*/ static int usb_get_string(struct usb_device *dev, unsigned short langid, unsigned char index, void *buf, int size) { int i; int result; if (size <= 0) /* No point in asking for no data */ return -EINVAL; for (i = 0; i < 3; ++i) { /* retry on length 0 or stall; some devices are flakey */ result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, (USB_DT_STRING << 8) + index, langid, buf, size, USB_CTRL_GET_TIMEOUT); if (result == 0 || result == -EPIPE) continue; if (result > 1 && ((u8 *) buf)[1] != USB_DT_STRING) { result = -ENODATA; continue; } break; } return result; } static void usb_try_string_workarounds(unsigned char *buf, int *length) { int newlength, oldlength = *length; for (newlength = 2; newlength + 1 < oldlength; newlength += 2) if (!isprint(buf[newlength]) || buf[newlength + 1]) break; if (newlength > 2) { buf[0] = newlength; *length = newlength; } } static int usb_string_sub(struct usb_device *dev, unsigned int langid, unsigned int index, unsigned char *buf) { int rc; /* Try to read the string descriptor by asking for the maximum * possible number of bytes */ if (dev->quirks & USB_QUIRK_STRING_FETCH_255) rc = -EIO; else rc = usb_get_string(dev, langid, index, buf, 255); /* If that failed try to read the descriptor length, then * ask for just that many bytes */ if (rc < 2) { rc = usb_get_string(dev, langid, index, buf, 2); if (rc == 2) rc = usb_get_string(dev, langid, index, buf, buf[0]); } if (rc >= 2) { if (!buf[0] && !buf[1]) usb_try_string_workarounds(buf, &rc); /* There might be extra junk at the end of the descriptor */ if (buf[0] < rc) rc = buf[0]; rc = rc - (rc & 1); /* force a multiple of two */ } if (rc < 2) rc = (rc < 0 ? rc : -EINVAL); return rc; } static int usb_get_langid(struct usb_device *dev, unsigned char *tbuf) { int err; if (dev->have_langid) return 0; if (dev->string_langid < 0) return -EPIPE; err = usb_string_sub(dev, 0, 0, tbuf); /* If the string was reported but is malformed, default to english * (0x0409) */ if (err == -ENODATA || (err > 0 && err < 4)) { dev->string_langid = 0x0409; dev->have_langid = 1; dev_err(&dev->dev, "language id specifier not provided by device, defaulting to English\n"); return 0; } /* In case of all other errors, we assume the device is not able to * deal with strings at all. Set string_langid to -1 in order to * prevent any string to be retrieved from the device */ if (err < 0) { dev_info(&dev->dev, "string descriptor 0 read error: %d\n", err); dev->string_langid = -1; return -EPIPE; } /* always use the first langid listed */ dev->string_langid = tbuf[2] | (tbuf[3] << 8); dev->have_langid = 1; dev_dbg(&dev->dev, "default language 0x%04x\n", dev->string_langid); return 0; } /** * usb_string - returns UTF-8 version of a string descriptor * @dev: the device whose string descriptor is being retrieved * @index: the number of the descriptor * @buf: where to put the string * @size: how big is "buf"? * * Context: task context, might sleep. * * This converts the UTF-16LE encoded strings returned by devices, from * usb_get_string_descriptor(), to null-terminated UTF-8 encoded ones * that are more usable in most kernel contexts. Note that this function * chooses strings in the first language supported by the device. * * This call is synchronous, and may not be used in an interrupt context. * * Return: length of the string (>= 0) or usb_control_msg status (< 0). 
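 *
 * Illustrative usage sketch (hypothetical driver code; the buffer size is
 * an arbitrary choice):
 *
 *	char product[64];
 *
 *	if (udev->descriptor.iProduct &&
 *	    usb_string(udev, udev->descriptor.iProduct,
 *		       product, sizeof(product)) > 0)
 *		dev_info(&udev->dev, "product: %s\n", product);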
*/ int usb_string(struct usb_device *dev, int index, char *buf, size_t size) { unsigned char *tbuf; int err; if (dev->state == USB_STATE_SUSPENDED) return -EHOSTUNREACH; if (size <= 0 || !buf) return -EINVAL; buf[0] = 0; if (index <= 0 || index >= 256) return -EINVAL; tbuf = kmalloc(256, GFP_NOIO); if (!tbuf) return -ENOMEM; err = usb_get_langid(dev, tbuf); if (err < 0) goto errout; err = usb_string_sub(dev, dev->string_langid, index, tbuf); if (err < 0) goto errout; size--; /* leave room for trailing NULL char in output buffer */ err = utf16s_to_utf8s((wchar_t *) &tbuf[2], (err - 2) / 2, UTF16_LITTLE_ENDIAN, buf, size); buf[err] = 0; if (tbuf[1] != USB_DT_STRING) dev_dbg(&dev->dev, "wrong descriptor type %02x for string %d (\"%s\")\n", tbuf[1], index, buf); errout: kfree(tbuf); return err; } EXPORT_SYMBOL_GPL(usb_string); /* one UTF-8-encoded 16-bit character has at most three bytes */ #define MAX_USB_STRING_SIZE (127 * 3 + 1) /** * usb_cache_string - read a string descriptor and cache it for later use * @udev: the device whose string descriptor is being read * @index: the descriptor index * * Return: A pointer to a kmalloc'ed buffer containing the descriptor string, * or %NULL if the index is 0 or the string could not be read. */ char *usb_cache_string(struct usb_device *udev, int index) { char *buf; char *smallbuf = NULL; int len; if (index <= 0) return NULL; buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO); if (buf) { len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE); if (len > 0) { smallbuf = kmalloc(++len, GFP_NOIO); if (!smallbuf) return buf; memcpy(smallbuf, buf, len); } kfree(buf); } return smallbuf; } /* * usb_get_device_descriptor - read the device descriptor * @udev: the device whose device descriptor should be read * * Context: task context, might sleep. * * Not exported, only for use by the core. If drivers really want to read * the device descriptor directly, they can call usb_get_descriptor() with * type = USB_DT_DEVICE and index = 0. * * Returns: a pointer to a dynamically allocated usb_device_descriptor * structure (which the caller must deallocate), or an ERR_PTR value. */ struct usb_device_descriptor *usb_get_device_descriptor(struct usb_device *udev) { struct usb_device_descriptor *desc; int ret; desc = kmalloc(sizeof(*desc), GFP_NOIO); if (!desc) return ERR_PTR(-ENOMEM); ret = usb_get_descriptor(udev, USB_DT_DEVICE, 0, desc, sizeof(*desc)); if (ret == sizeof(*desc)) return desc; if (ret >= 0) ret = -EMSGSIZE; kfree(desc); return ERR_PTR(ret); } /* * usb_set_isoch_delay - informs the device of the packet transmit delay * @dev: the device whose delay is to be informed * Context: task context, might sleep * * Since this is an optional request, we don't bother if it fails. */ int usb_set_isoch_delay(struct usb_device *dev) { /* skip hub devices */ if (dev->descriptor.bDeviceClass == USB_CLASS_HUB) return 0; /* skip non-SS/non-SSP devices */ if (dev->speed < USB_SPEED_SUPER) return 0; return usb_control_msg_send(dev, 0, USB_REQ_SET_ISOCH_DELAY, USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, dev->hub_delay, 0, NULL, 0, USB_CTRL_SET_TIMEOUT, GFP_NOIO); } /** * usb_get_status - issues a GET_STATUS call * @dev: the device whose status is being checked * @recip: USB_RECIP_*; for device, interface, or endpoint * @type: USB_STATUS_TYPE_*; for standard or PTM status types * @target: zero (for device), else interface or endpoint number * @data: pointer to two bytes of bitmap data * * Context: task context, might sleep. * * Returns device, interface, or endpoint status. 
Normally only of * interest to see if the device is self powered, or has enabled the * remote wakeup facility; or whether a bulk or interrupt endpoint * is halted ("stalled"). * * Bits in these status bitmaps are set using the SET_FEATURE request, * and cleared using the CLEAR_FEATURE request. The usb_clear_halt() * function should be used to clear halt ("stall") status. * * This call is synchronous, and may not be used in an interrupt context. * * Returns 0 and the status value in *@data (in host byte order) on success, * or else the status code from the underlying usb_control_msg() call. */ int usb_get_status(struct usb_device *dev, int recip, int type, int target, void *data) { int ret; void *status; int length; switch (type) { case USB_STATUS_TYPE_STANDARD: length = 2; break; case USB_STATUS_TYPE_PTM: if (recip != USB_RECIP_DEVICE) return -EINVAL; length = 4; break; default: return -EINVAL; } status = kmalloc(length, GFP_KERNEL); if (!status) return -ENOMEM; ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), USB_REQ_GET_STATUS, USB_DIR_IN | recip, USB_STATUS_TYPE_STANDARD, target, status, length, USB_CTRL_GET_TIMEOUT); switch (ret) { case 4: if (type != USB_STATUS_TYPE_PTM) { ret = -EIO; break; } *(u32 *) data = le32_to_cpu(*(__le32 *) status); ret = 0; break; case 2: if (type != USB_STATUS_TYPE_STANDARD) { ret = -EIO; break; } *(u16 *) data = le16_to_cpu(*(__le16 *) status); ret = 0; break; default: ret = -EIO; } kfree(status); return ret; } EXPORT_SYMBOL_GPL(usb_get_status); /** * usb_clear_halt - tells device to clear endpoint halt/stall condition * @dev: device whose endpoint is halted * @pipe: endpoint "pipe" being cleared * * Context: task context, might sleep. * * This is used to clear halt conditions for bulk and interrupt endpoints, * as reported by URB completion status. Endpoints that are halted are * sometimes referred to as being "stalled". Such endpoints are unable * to transmit or receive data until the halt status is cleared. Any URBs * queued for such an endpoint should normally be unlinked by the driver * before clearing the halt condition, as described in sections 5.7.5 * and 5.8.5 of the USB 2.0 spec. * * Note that control and isochronous endpoints don't halt, although control * endpoints report "protocol stall" (for unsupported requests) using the * same status code used to report a true stall. * * This call is synchronous, and may not be used in an interrupt context. * * Return: Zero on success, or else the status code returned by the * underlying usb_control_msg() call. */ int usb_clear_halt(struct usb_device *dev, int pipe) { int result; int endp = usb_pipeendpoint(pipe); if (usb_pipein(pipe)) endp |= USB_DIR_IN; /* we don't care if it wasn't halted first. in fact some devices * (like some ibmcam model 1 units) seem to expect hosts to make * this request for iso endpoints, which can't halt! */ result = usb_control_msg_send(dev, 0, USB_REQ_CLEAR_FEATURE, USB_RECIP_ENDPOINT, USB_ENDPOINT_HALT, endp, NULL, 0, USB_CTRL_SET_TIMEOUT, GFP_NOIO); /* don't un-halt or force to DATA0 except on success */ if (result) return result; /* NOTE: seems like Microsoft and Apple don't bother verifying * the clear "took", so some devices could lock up if you check... * such as the Hagiwara FlashGate DUAL. So we won't bother. * * NOTE: make sure the logic here doesn't diverge much from * the copy in usb-storage, for as long as we need two copies. 
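 *
 * On success the device side has cleared the halt and reset its data
 * toggle to DATA0; the usb_reset_endpoint() call below resets the matching
 * host-side endpoint state so both ends stay in sync.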
*/ usb_reset_endpoint(dev, endp); return 0; } EXPORT_SYMBOL_GPL(usb_clear_halt); static int create_intf_ep_devs(struct usb_interface *intf) { struct usb_device *udev = interface_to_usbdev(intf); struct usb_host_interface *alt = intf->cur_altsetting; int i; if (intf->ep_devs_created || intf->unregistering) return 0; for (i = 0; i < alt->desc.bNumEndpoints; ++i) (void) usb_create_ep_devs(&intf->dev, &alt->endpoint[i], udev); intf->ep_devs_created = 1; return 0; } static void remove_intf_ep_devs(struct usb_interface *intf) { struct usb_host_interface *alt = intf->cur_altsetting; int i; if (!intf->ep_devs_created) return; for (i = 0; i < alt->desc.bNumEndpoints; ++i) usb_remove_ep_devs(&alt->endpoint[i]); intf->ep_devs_created = 0; } /** * usb_disable_endpoint -- Disable an endpoint by address * @dev: the device whose endpoint is being disabled * @epaddr: the endpoint's address. Endpoint number for output, * endpoint number + USB_DIR_IN for input * @reset_hardware: flag to erase any endpoint state stored in the * controller hardware * * Disables the endpoint for URB submission and nukes all pending URBs. * If @reset_hardware is set then also deallocates hcd/hardware state * for the endpoint. */ void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr, bool reset_hardware) { unsigned int epnum = epaddr & USB_ENDPOINT_NUMBER_MASK; struct usb_host_endpoint *ep; if (!dev) return; if (usb_endpoint_out(epaddr)) { ep = dev->ep_out[epnum]; if (reset_hardware && epnum != 0) dev->ep_out[epnum] = NULL; } else { ep = dev->ep_in[epnum]; if (reset_hardware && epnum != 0) dev->ep_in[epnum] = NULL; } if (ep) { ep->enabled = 0; usb_hcd_flush_endpoint(dev, ep); if (reset_hardware) usb_hcd_disable_endpoint(dev, ep); } } /** * usb_reset_endpoint - Reset an endpoint's state. * @dev: the device whose endpoint is to be reset * @epaddr: the endpoint's address. Endpoint number for output, * endpoint number + USB_DIR_IN for input * * Resets any host-side endpoint state such as the toggle bit, * sequence number or current window. */ void usb_reset_endpoint(struct usb_device *dev, unsigned int epaddr) { unsigned int epnum = epaddr & USB_ENDPOINT_NUMBER_MASK; struct usb_host_endpoint *ep; if (usb_endpoint_out(epaddr)) ep = dev->ep_out[epnum]; else ep = dev->ep_in[epnum]; if (ep) usb_hcd_reset_endpoint(dev, ep); } EXPORT_SYMBOL_GPL(usb_reset_endpoint); /** * usb_disable_interface -- Disable all endpoints for an interface * @dev: the device whose interface is being disabled * @intf: pointer to the interface descriptor * @reset_hardware: flag to erase any endpoint state stored in the * controller hardware * * Disables all the endpoints for the interface's current altsetting. */ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf, bool reset_hardware) { struct usb_host_interface *alt = intf->cur_altsetting; int i; for (i = 0; i < alt->desc.bNumEndpoints; ++i) { usb_disable_endpoint(dev, alt->endpoint[i].desc.bEndpointAddress, reset_hardware); } } /* * usb_disable_device_endpoints -- Disable all endpoints for a device * @dev: the device whose endpoints are being disabled * @skip_ep0: 0 to disable endpoint 0, 1 to skip it. */ static void usb_disable_device_endpoints(struct usb_device *dev, int skip_ep0) { struct usb_hcd *hcd = bus_to_hcd(dev->bus); int i; if (hcd->driver->check_bandwidth) { /* First pass: Cancel URBs, leave endpoint pointers intact. 
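 * The endpoint pointers must stay populated during this pass so that
 * usb_hcd_alloc_bandwidth() below can still find the endpoints it has to
 * drop from the host controller's schedule; the second pass then clears
 * the pointers.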
*/ for (i = skip_ep0; i < 16; ++i) { usb_disable_endpoint(dev, i, false); usb_disable_endpoint(dev, i + USB_DIR_IN, false); } /* Remove endpoints from the host controller internal state */ mutex_lock(hcd->bandwidth_mutex); usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); mutex_unlock(hcd->bandwidth_mutex); } /* Second pass: remove endpoint pointers */ for (i = skip_ep0; i < 16; ++i) { usb_disable_endpoint(dev, i, true); usb_disable_endpoint(dev, i + USB_DIR_IN, true); } } /** * usb_disable_device - Disable all the endpoints for a USB device * @dev: the device whose endpoints are being disabled * @skip_ep0: 0 to disable endpoint 0, 1 to skip it. * * Disables all the device's endpoints, potentially including endpoint 0. * Deallocates hcd/hardware state for the endpoints (nuking all or most * pending urbs) and usbcore state for the interfaces, so that usbcore * must usb_set_configuration() before any interfaces could be used. */ void usb_disable_device(struct usb_device *dev, int skip_ep0) { int i; /* getting rid of interfaces will disconnect * any drivers bound to them (a key side effect) */ if (dev->actconfig) { /* * FIXME: In order to avoid self-deadlock involving the * bandwidth_mutex, we have to mark all the interfaces * before unregistering any of them. */ for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) dev->actconfig->interface[i]->unregistering = 1; for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) { struct usb_interface *interface; /* remove this interface if it has been registered */ interface = dev->actconfig->interface[i]; if (!device_is_registered(&interface->dev)) continue; dev_dbg(&dev->dev, "unregistering interface %s\n", dev_name(&interface->dev)); remove_intf_ep_devs(interface); device_del(&interface->dev); } /* Now that the interfaces are unbound, nobody should * try to access them. */ for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) { put_device(&dev->actconfig->interface[i]->dev); dev->actconfig->interface[i] = NULL; } usb_disable_usb2_hardware_lpm(dev); usb_unlocked_disable_lpm(dev); usb_disable_ltm(dev); dev->actconfig = NULL; if (dev->state == USB_STATE_CONFIGURED) usb_set_device_state(dev, USB_STATE_ADDRESS); } dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__, skip_ep0 ? "non-ep0" : "all"); usb_disable_device_endpoints(dev, skip_ep0); } /** * usb_enable_endpoint - Enable an endpoint for USB communications * @dev: the device whose interface is being enabled * @ep: the endpoint * @reset_ep: flag to reset the endpoint state * * Resets the endpoint state if asked, and sets dev->ep_{in,out} pointers. * For control endpoints, both the input and output sides are handled. */ void usb_enable_endpoint(struct usb_device *dev, struct usb_host_endpoint *ep, bool reset_ep) { int epnum = usb_endpoint_num(&ep->desc); int is_out = usb_endpoint_dir_out(&ep->desc); int is_control = usb_endpoint_xfer_control(&ep->desc); if (reset_ep) usb_hcd_reset_endpoint(dev, ep); if (is_out || is_control) dev->ep_out[epnum] = ep; if (!is_out || is_control) dev->ep_in[epnum] = ep; ep->enabled = 1; } /** * usb_enable_interface - Enable all the endpoints for an interface * @dev: the device whose interface is being enabled * @intf: pointer to the interface descriptor * @reset_eps: flag to reset the endpoints' state * * Enables all the endpoints for the interface's current altsetting. 
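 *
 * In this file the callers (usb_set_interface(), usb_reset_configuration()
 * and usb_set_configuration()) only invoke this once the host controller
 * has accepted the new setting via usb_hcd_alloc_bandwidth(), so there is
 * no failure case to report here.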
*/ void usb_enable_interface(struct usb_device *dev, struct usb_interface *intf, bool reset_eps) { struct usb_host_interface *alt = intf->cur_altsetting; int i; for (i = 0; i < alt->desc.bNumEndpoints; ++i) usb_enable_endpoint(dev, &alt->endpoint[i], reset_eps); } /** * usb_set_interface - Makes a particular alternate setting be current * @dev: the device whose interface is being updated * @interface: the interface being updated * @alternate: the setting being chosen. * * Context: task context, might sleep. * * This is used to enable data transfers on interfaces that may not * be enabled by default. Not all devices support such configurability. * Only the driver bound to an interface may change its setting. * * Within any given configuration, each interface may have several * alternative settings. These are often used to control levels of * bandwidth consumption. For example, the default setting for a high * speed interrupt endpoint may not send more than 64 bytes per microframe, * while interrupt transfers of up to 3KBytes per microframe are legal. * Also, isochronous endpoints may never be part of an * interface's default setting. To access such bandwidth, alternate * interface settings must be made current. * * Note that in the Linux USB subsystem, bandwidth associated with * an endpoint in a given alternate setting is not reserved until an URB * is submitted that needs that bandwidth. Some other operating systems * allocate bandwidth early, when a configuration is chosen. * * xHCI reserves bandwidth and configures the alternate setting in * usb_hcd_alloc_bandwidth(). If it fails the original interface altsetting * may be disabled. Drivers cannot rely on any particular alternate * setting being in effect after a failure. * * This call is synchronous, and may not be used in an interrupt context. * Also, drivers must not change altsettings while urbs are scheduled for * endpoints in that interface; all such urbs must first be completed * (perhaps forced by unlinking). * * Return: Zero on success, or else the status code returned by the * underlying usb_control_msg() call. */ int usb_set_interface(struct usb_device *dev, int interface, int alternate) { struct usb_interface *iface; struct usb_host_interface *alt; struct usb_hcd *hcd = bus_to_hcd(dev->bus); int i, ret, manual = 0; unsigned int epaddr; unsigned int pipe; if (dev->state == USB_STATE_SUSPENDED) return -EHOSTUNREACH; iface = usb_ifnum_to_if(dev, interface); if (!iface) { dev_dbg(&dev->dev, "selecting invalid interface %d\n", interface); return -EINVAL; } if (iface->unregistering) return -ENODEV; alt = usb_altnum_to_altsetting(iface, alternate); if (!alt) { dev_warn(&dev->dev, "selecting invalid altsetting %d\n", alternate); return -EINVAL; } /* * usb3 hosts configure the interface in usb_hcd_alloc_bandwidth, * including freeing dropped endpoint ring buffers. * Make sure the interface endpoints are flushed before that */ usb_disable_interface(dev, iface, false); /* Make sure we have enough bandwidth for this alternate interface. * Remove the current alt setting and add the new alt setting. */ mutex_lock(hcd->bandwidth_mutex); /* Disable LPM, and re-enable it once the new alt setting is installed, * so that the xHCI driver can recalculate the U1/U2 timeouts. 
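 * Note that hcd->bandwidth_mutex, taken just above, is held across the LPM
 * toggling and the bandwidth check so that the xHCI timeout recalculation
 * sees a consistent endpoint configuration; every exit path from this
 * region drops the mutex.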
*/ if (usb_disable_lpm(dev)) { dev_err(&iface->dev, "%s Failed to disable LPM\n", __func__); mutex_unlock(hcd->bandwidth_mutex); return -ENOMEM; } /* Changing alt-setting also frees any allocated streams */ for (i = 0; i < iface->cur_altsetting->desc.bNumEndpoints; i++) iface->cur_altsetting->endpoint[i].streams = 0; ret = usb_hcd_alloc_bandwidth(dev, NULL, iface->cur_altsetting, alt); if (ret < 0) { dev_info(&dev->dev, "Not enough bandwidth for altsetting %d\n", alternate); usb_enable_lpm(dev); mutex_unlock(hcd->bandwidth_mutex); return ret; } if (dev->quirks & USB_QUIRK_NO_SET_INTF) ret = -EPIPE; else ret = usb_control_msg_send(dev, 0, USB_REQ_SET_INTERFACE, USB_RECIP_INTERFACE, alternate, interface, NULL, 0, 5000, GFP_NOIO); /* 9.4.10 says devices don't need this and are free to STALL the * request if the interface only has one alternate setting. */ if (ret == -EPIPE && iface->num_altsetting == 1) { dev_dbg(&dev->dev, "manual set_interface for iface %d, alt %d\n", interface, alternate); manual = 1; } else if (ret) { /* Re-instate the old alt setting */ usb_hcd_alloc_bandwidth(dev, NULL, alt, iface->cur_altsetting); usb_enable_lpm(dev); mutex_unlock(hcd->bandwidth_mutex); return ret; } mutex_unlock(hcd->bandwidth_mutex); /* FIXME drivers shouldn't need to replicate/bugfix the logic here * when they implement async or easily-killable versions of this or * other "should-be-internal" functions (like clear_halt). * should hcd+usbcore postprocess control requests? */ /* prevent submissions using previous endpoint settings */ if (iface->cur_altsetting != alt) { remove_intf_ep_devs(iface); usb_remove_sysfs_intf_files(iface); } usb_disable_interface(dev, iface, true); iface->cur_altsetting = alt; /* Now that the interface is installed, re-enable LPM. */ usb_unlocked_enable_lpm(dev); /* If the interface only has one altsetting and the device didn't * accept the request, we attempt to carry out the equivalent action * by manually clearing the HALT feature for each endpoint in the * new altsetting. */ if (manual) { for (i = 0; i < alt->desc.bNumEndpoints; i++) { epaddr = alt->endpoint[i].desc.bEndpointAddress; pipe = __create_pipe(dev, USB_ENDPOINT_NUMBER_MASK & epaddr) | (usb_endpoint_out(epaddr) ? USB_DIR_OUT : USB_DIR_IN); usb_clear_halt(dev, pipe); } } /* 9.1.1.5: reset toggles for all endpoints in the new altsetting * * Note: * Despite EP0 is always present in all interfaces/AS, the list of * endpoints from the descriptor does not contain EP0. Due to its * omnipresence one might expect EP0 being considered "affected" by * any SetInterface request and hence assume toggles need to be reset. * However, EP0 toggles are re-synced for every individual transfer * during the SETUP stage - hence EP0 toggles are "don't care" here. * (Likewise, EP0 never "halts" on well designed devices.) */ usb_enable_interface(dev, iface, true); if (device_is_registered(&iface->dev)) { usb_create_sysfs_intf_files(iface); create_intf_ep_devs(iface); } return 0; } EXPORT_SYMBOL_GPL(usb_set_interface); /** * usb_reset_configuration - lightweight device reset * @dev: the device whose configuration is being reset * * This issues a standard SET_CONFIGURATION request to the device using * the current configuration. The effect is to reset most USB-related * state in the device, including interface altsettings (reset to zero), * endpoint halts (cleared), and endpoint state (only for bulk and interrupt * endpoints). Other usbcore state is unchanged, including bindings of * usb device drivers to interfaces. 
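 *
 * Illustrative sketch (hypothetical single-interface driver, assuming the
 * device lock is already held, as it is during probe(); see the caveat
 * about composite devices below):
 *
 *	retval = usb_reset_configuration(interface_to_usbdev(intf));
 *	if (retval)
 *		dev_warn(&intf->dev, "reset_configuration failed: %d\n", retval);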
* * Because this affects multiple interfaces, avoid using this with composite * (multi-interface) devices. Instead, the driver for each interface may * use usb_set_interface() on the interfaces it claims. Be careful though; * some devices don't support the SET_INTERFACE request, and others won't * reset all the interface state (notably endpoint state). Resetting the whole * configuration would affect other drivers' interfaces. * * The caller must own the device lock. * * Return: Zero on success, else a negative error code. * * If this routine fails the device will probably be in an unusable state * with endpoints disabled, and interfaces only partially enabled. */ int usb_reset_configuration(struct usb_device *dev) { int i, retval; struct usb_host_config *config; struct usb_hcd *hcd = bus_to_hcd(dev->bus); if (dev->state == USB_STATE_SUSPENDED) return -EHOSTUNREACH; /* caller must have locked the device and must own * the usb bus readlock (so driver bindings are stable); * calls during probe() are fine */ usb_disable_device_endpoints(dev, 1); /* skip ep0*/ config = dev->actconfig; retval = 0; mutex_lock(hcd->bandwidth_mutex); /* Disable LPM, and re-enable it once the configuration is reset, so * that the xHCI driver can recalculate the U1/U2 timeouts. */ if (usb_disable_lpm(dev)) { dev_err(&dev->dev, "%s Failed to disable LPM\n", __func__); mutex_unlock(hcd->bandwidth_mutex); return -ENOMEM; } /* xHCI adds all endpoints in usb_hcd_alloc_bandwidth */ retval = usb_hcd_alloc_bandwidth(dev, config, NULL, NULL); if (retval < 0) { usb_enable_lpm(dev); mutex_unlock(hcd->bandwidth_mutex); return retval; } retval = usb_control_msg_send(dev, 0, USB_REQ_SET_CONFIGURATION, 0, config->desc.bConfigurationValue, 0, NULL, 0, USB_CTRL_SET_TIMEOUT, GFP_NOIO); if (retval) { usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); usb_enable_lpm(dev); mutex_unlock(hcd->bandwidth_mutex); return retval; } mutex_unlock(hcd->bandwidth_mutex); /* re-init hc/hcd interface/endpoint state */ for (i = 0; i < config->desc.bNumInterfaces; i++) { struct usb_interface *intf = config->interface[i]; struct usb_host_interface *alt; alt = usb_altnum_to_altsetting(intf, 0); /* No altsetting 0? We'll assume the first altsetting. * We could use a GetInterface call, but if a device is * so non-compliant that it doesn't have altsetting 0 * then I wouldn't trust its reply anyway. */ if (!alt) alt = &intf->altsetting[0]; if (alt != intf->cur_altsetting) { remove_intf_ep_devs(intf); usb_remove_sysfs_intf_files(intf); } intf->cur_altsetting = alt; usb_enable_interface(dev, intf, true); if (device_is_registered(&intf->dev)) { usb_create_sysfs_intf_files(intf); create_intf_ep_devs(intf); } } /* Now that the interfaces are installed, re-enable LPM. 
*/ usb_unlocked_enable_lpm(dev); return 0; } EXPORT_SYMBOL_GPL(usb_reset_configuration); static void usb_release_interface(struct device *dev) { struct usb_interface *intf = to_usb_interface(dev); struct usb_interface_cache *intfc = altsetting_to_usb_interface_cache(intf->altsetting); kref_put(&intfc->ref, usb_release_interface_cache); usb_put_dev(interface_to_usbdev(intf)); of_node_put(dev->of_node); kfree(intf); } /* * usb_deauthorize_interface - deauthorize an USB interface * * @intf: USB interface structure */ void usb_deauthorize_interface(struct usb_interface *intf) { struct device *dev = &intf->dev; device_lock(dev->parent); if (intf->authorized) { device_lock(dev); intf->authorized = 0; device_unlock(dev); usb_forced_unbind_intf(intf); } device_unlock(dev->parent); } /* * usb_authorize_interface - authorize an USB interface * * @intf: USB interface structure */ void usb_authorize_interface(struct usb_interface *intf) { struct device *dev = &intf->dev; if (!intf->authorized) { device_lock(dev); intf->authorized = 1; /* authorize interface */ device_unlock(dev); } } static int usb_if_uevent(struct device *dev, struct kobj_uevent_env *env) { struct usb_device *usb_dev; struct usb_interface *intf; struct usb_host_interface *alt; intf = to_usb_interface(dev); usb_dev = interface_to_usbdev(intf); alt = intf->cur_altsetting; if (add_uevent_var(env, "INTERFACE=%d/%d/%d", alt->desc.bInterfaceClass, alt->desc.bInterfaceSubClass, alt->desc.bInterfaceProtocol)) return -ENOMEM; if (add_uevent_var(env, "MODALIAS=usb:" "v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02Xic%02Xisc%02Xip%02Xin%02X", le16_to_cpu(usb_dev->descriptor.idVendor), le16_to_cpu(usb_dev->descriptor.idProduct), le16_to_cpu(usb_dev->descriptor.bcdDevice), usb_dev->descriptor.bDeviceClass, usb_dev->descriptor.bDeviceSubClass, usb_dev->descriptor.bDeviceProtocol, alt->desc.bInterfaceClass, alt->desc.bInterfaceSubClass, alt->desc.bInterfaceProtocol, alt->desc.bInterfaceNumber)) return -ENOMEM; return 0; } struct device_type usb_if_device_type = { .name = "usb_interface", .release = usb_release_interface, .uevent = usb_if_uevent, }; static struct usb_interface_assoc_descriptor *find_iad(struct usb_device *dev, struct usb_host_config *config, u8 inum) { struct usb_interface_assoc_descriptor *retval = NULL; struct usb_interface_assoc_descriptor *intf_assoc; int first_intf; int last_intf; int i; for (i = 0; (i < USB_MAXIADS && config->intf_assoc[i]); i++) { intf_assoc = config->intf_assoc[i]; if (intf_assoc->bInterfaceCount == 0) continue; first_intf = intf_assoc->bFirstInterface; last_intf = first_intf + (intf_assoc->bInterfaceCount - 1); if (inum >= first_intf && inum <= last_intf) { if (!retval) retval = intf_assoc; else dev_err(&dev->dev, "Interface #%d referenced" " by multiple IADs\n", inum); } } return retval; } /* * Internal function to queue a device reset * See usb_queue_reset_device() for more details */ static void __usb_queue_reset_device(struct work_struct *ws) { int rc; struct usb_interface *iface = container_of(ws, struct usb_interface, reset_ws); struct usb_device *udev = interface_to_usbdev(iface); rc = usb_lock_device_for_reset(udev, iface); if (rc >= 0) { usb_reset_device(udev); usb_unlock_device(udev); } usb_put_intf(iface); /* Undo _get_ in usb_queue_reset_device() */ } /* * usb_set_configuration - Makes a particular device setting be current * @dev: the device whose configuration is being updated * @configuration: the configuration being chosen. * * Context: task context, might sleep. Caller holds device lock. 
* * This is used to enable non-default device modes. Not all devices * use this kind of configurability; many devices only have one * configuration. * * @configuration is the value of the configuration to be installed. * According to the USB spec (e.g. section 9.1.1.5), configuration values * must be non-zero; a value of zero indicates that the device in * unconfigured. However some devices erroneously use 0 as one of their * configuration values. To help manage such devices, this routine will * accept @configuration = -1 as indicating the device should be put in * an unconfigured state. * * USB device configurations may affect Linux interoperability, * power consumption and the functionality available. For example, * the default configuration is limited to using 100mA of bus power, * so that when certain device functionality requires more power, * and the device is bus powered, that functionality should be in some * non-default device configuration. Other device modes may also be * reflected as configuration options, such as whether two ISDN * channels are available independently; and choosing between open * standard device protocols (like CDC) or proprietary ones. * * Note that a non-authorized device (dev->authorized == 0) will only * be put in unconfigured mode. * * Note that USB has an additional level of device configurability, * associated with interfaces. That configurability is accessed using * usb_set_interface(). * * This call is synchronous. The calling context must be able to sleep, * must own the device lock, and must not hold the driver model's USB * bus mutex; usb interface driver probe() methods cannot use this routine. * * Returns zero on success, or else the status code returned by the * underlying call that failed. On successful completion, each interface * in the original device configuration has been destroyed, and each one * in the new configuration has been probed by all relevant usb device * drivers currently known to the kernel. */ int usb_set_configuration(struct usb_device *dev, int configuration) { int i, ret; struct usb_host_config *cp = NULL; struct usb_interface **new_interfaces = NULL; struct usb_hcd *hcd = bus_to_hcd(dev->bus); int n, nintf; if (dev->authorized == 0 || configuration == -1) configuration = 0; else { for (i = 0; i < dev->descriptor.bNumConfigurations; i++) { if (dev->config[i].desc.bConfigurationValue == configuration) { cp = &dev->config[i]; break; } } } if ((!cp && configuration != 0)) return -EINVAL; /* The USB spec says configuration 0 means unconfigured. * But if a device includes a configuration numbered 0, * we will accept it as a correctly configured state. * Use -1 if you really want to unconfigure the device. */ if (cp && configuration == 0) dev_warn(&dev->dev, "config 0 descriptor??\n"); /* Allocate memory for new interfaces before doing anything else, * so that if we run out then nothing will have changed. 
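 * If any of the allocations below fail, the partially filled
 * new_interfaces[] array is unwound at the free_interfaces label and the
 * device itself is left untouched.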
*/ n = nintf = 0; if (cp) { nintf = cp->desc.bNumInterfaces; new_interfaces = kmalloc_array(nintf, sizeof(*new_interfaces), GFP_NOIO); if (!new_interfaces) return -ENOMEM; for (; n < nintf; ++n) { new_interfaces[n] = kzalloc( sizeof(struct usb_interface), GFP_NOIO); if (!new_interfaces[n]) { ret = -ENOMEM; free_interfaces: while (--n >= 0) kfree(new_interfaces[n]); kfree(new_interfaces); return ret; } } i = dev->bus_mA - usb_get_max_power(dev, cp); if (i < 0) dev_warn(&dev->dev, "new config #%d exceeds power " "limit by %dmA\n", configuration, -i); } /* Wake up the device so we can send it the Set-Config request */ ret = usb_autoresume_device(dev); if (ret) goto free_interfaces; /* if it's already configured, clear out old state first. * getting rid of old interfaces means unbinding their drivers. */ if (dev->state != USB_STATE_ADDRESS) usb_disable_device(dev, 1); /* Skip ep0 */ /* Get rid of pending async Set-Config requests for this device */ cancel_async_set_config(dev); /* Make sure we have bandwidth (and available HCD resources) for this * configuration. Remove endpoints from the schedule if we're dropping * this configuration to set configuration 0. After this point, the * host controller will not allow submissions to dropped endpoints. If * this call fails, the device state is unchanged. */ mutex_lock(hcd->bandwidth_mutex); /* Disable LPM, and re-enable it once the new configuration is * installed, so that the xHCI driver can recalculate the U1/U2 * timeouts. */ if (dev->actconfig && usb_disable_lpm(dev)) { dev_err(&dev->dev, "%s Failed to disable LPM\n", __func__); mutex_unlock(hcd->bandwidth_mutex); ret = -ENOMEM; goto free_interfaces; } ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL); if (ret < 0) { if (dev->actconfig) usb_enable_lpm(dev); mutex_unlock(hcd->bandwidth_mutex); usb_autosuspend_device(dev); goto free_interfaces; } /* * Initialize the new interface structures and the * hc/hcd/usbcore interface/endpoint state. */ for (i = 0; i < nintf; ++i) { struct usb_interface_cache *intfc; struct usb_interface *intf; struct usb_host_interface *alt; u8 ifnum; cp->interface[i] = intf = new_interfaces[i]; intfc = cp->intf_cache[i]; intf->altsetting = intfc->altsetting; intf->num_altsetting = intfc->num_altsetting; intf->authorized = !!HCD_INTF_AUTHORIZED(hcd); kref_get(&intfc->ref); alt = usb_altnum_to_altsetting(intf, 0); /* No altsetting 0? We'll assume the first altsetting. * We could use a GetInterface call, but if a device is * so non-compliant that it doesn't have altsetting 0 * then I wouldn't trust its reply anyway. 
*/ if (!alt) alt = &intf->altsetting[0]; ifnum = alt->desc.bInterfaceNumber; intf->intf_assoc = find_iad(dev, cp, ifnum); intf->cur_altsetting = alt; usb_enable_interface(dev, intf, true); intf->dev.parent = &dev->dev; if (usb_of_has_combined_node(dev)) { device_set_of_node_from_dev(&intf->dev, &dev->dev); } else { intf->dev.of_node = usb_of_get_interface_node(dev, configuration, ifnum); } ACPI_COMPANION_SET(&intf->dev, ACPI_COMPANION(&dev->dev)); intf->dev.driver = NULL; intf->dev.bus = &usb_bus_type; intf->dev.type = &usb_if_device_type; intf->dev.groups = usb_interface_groups; INIT_WORK(&intf->reset_ws, __usb_queue_reset_device); intf->minor = -1; device_initialize(&intf->dev); pm_runtime_no_callbacks(&intf->dev); dev_set_name(&intf->dev, "%d-%s:%d.%d", dev->bus->busnum, dev->devpath, configuration, ifnum); usb_get_dev(dev); } kfree(new_interfaces); ret = usb_control_msg_send(dev, 0, USB_REQ_SET_CONFIGURATION, 0, configuration, 0, NULL, 0, USB_CTRL_SET_TIMEOUT, GFP_NOIO); if (ret && cp) { /* * All the old state is gone, so what else can we do? * The device is probably useless now anyway. */ usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); for (i = 0; i < nintf; ++i) { usb_disable_interface(dev, cp->interface[i], true); put_device(&cp->interface[i]->dev); cp->interface[i] = NULL; } cp = NULL; } dev->actconfig = cp; mutex_unlock(hcd->bandwidth_mutex); if (!cp) { usb_set_device_state(dev, USB_STATE_ADDRESS); /* Leave LPM disabled while the device is unconfigured. */ usb_autosuspend_device(dev); return ret; } usb_set_device_state(dev, USB_STATE_CONFIGURED); if (cp->string == NULL && !(dev->quirks & USB_QUIRK_CONFIG_INTF_STRINGS)) cp->string = usb_cache_string(dev, cp->desc.iConfiguration); /* Now that the interfaces are installed, re-enable LPM. */ usb_unlocked_enable_lpm(dev); /* Enable LTM if it was turned off by usb_disable_device. */ usb_enable_ltm(dev); /* Now that all the interfaces are set up, register them * to trigger binding of drivers to interfaces. probe() * routines may install different altsettings and may * claim() any interfaces not yet bound. Many class drivers * need that: CDC, audio, video, etc. */ for (i = 0; i < nintf; ++i) { struct usb_interface *intf = cp->interface[i]; if (intf->dev.of_node && !of_device_is_available(intf->dev.of_node)) { dev_info(&dev->dev, "skipping disabled interface %d\n", intf->cur_altsetting->desc.bInterfaceNumber); continue; } dev_dbg(&dev->dev, "adding %s (config #%d, interface %d)\n", dev_name(&intf->dev), configuration, intf->cur_altsetting->desc.bInterfaceNumber); device_enable_async_suspend(&intf->dev); ret = device_add(&intf->dev); if (ret != 0) { dev_err(&dev->dev, "device_add(%s) --> %d\n", dev_name(&intf->dev), ret); continue; } create_intf_ep_devs(intf); } usb_autosuspend_device(dev); return 0; } EXPORT_SYMBOL_GPL(usb_set_configuration); static LIST_HEAD(set_config_list); static DEFINE_SPINLOCK(set_config_lock); struct set_config_request { struct usb_device *udev; int config; struct work_struct work; struct list_head node; }; /* Worker routine for usb_driver_set_configuration() */ static void driver_set_config_work(struct work_struct *work) { struct set_config_request *req = container_of(work, struct set_config_request, work); struct usb_device *udev = req->udev; usb_lock_device(udev); spin_lock(&set_config_lock); list_del(&req->node); spin_unlock(&set_config_lock); if (req->config >= -1) /* Is req still valid? 
*/ usb_set_configuration(udev, req->config); usb_unlock_device(udev); usb_put_dev(udev); kfree(req); } /* Cancel pending Set-Config requests for a device whose configuration * was just changed */ static void cancel_async_set_config(struct usb_device *udev) { struct set_config_request *req; spin_lock(&set_config_lock); list_for_each_entry(req, &set_config_list, node) { if (req->udev == udev) req->config = -999; /* Mark as cancelled */ } spin_unlock(&set_config_lock); } /** * usb_driver_set_configuration - Provide a way for drivers to change device configurations * @udev: the device whose configuration is being updated * @config: the configuration being chosen. * Context: In process context, must be able to sleep * * Device interface drivers are not allowed to change device configurations. * This is because changing configurations will destroy the interface the * driver is bound to and create new ones; it would be like a floppy-disk * driver telling the computer to replace the floppy-disk drive with a * tape drive! * * Still, in certain specialized circumstances the need may arise. This * routine gets around the normal restrictions by using a work thread to * submit the change-config request. * * Return: 0 if the request was successfully queued, error code otherwise. * The caller has no way to know whether the queued request will eventually * succeed. */ int usb_driver_set_configuration(struct usb_device *udev, int config) { struct set_config_request *req; req = kmalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; req->udev = udev; req->config = config; INIT_WORK(&req->work, driver_set_config_work); spin_lock(&set_config_lock); list_add(&req->node, &set_config_list); spin_unlock(&set_config_lock); usb_get_dev(udev); schedule_work(&req->work); return 0; } EXPORT_SYMBOL_GPL(usb_driver_set_configuration); /** * cdc_parse_cdc_header - parse the extra headers present in CDC devices * @hdr: the place to put the results of the parsing * @intf: the interface for which parsing is requested * @buffer: pointer to the extra headers to be parsed * @buflen: length of the extra headers * * This evaluates the extra headers present in CDC devices which * bind the interfaces for data and control and provide details * about the capabilities of the device. 
* * Return: number of descriptors parsed or -EINVAL * if the header is contradictory beyond salvage */ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr, struct usb_interface *intf, u8 *buffer, int buflen) { /* duplicates are ignored */ struct usb_cdc_union_desc *union_header = NULL; /* duplicates are not tolerated */ struct usb_cdc_header_desc *header = NULL; struct usb_cdc_ether_desc *ether = NULL; struct usb_cdc_mdlm_detail_desc *detail = NULL; struct usb_cdc_mdlm_desc *desc = NULL; unsigned int elength; int cnt = 0; memset(hdr, 0x00, sizeof(struct usb_cdc_parsed_header)); hdr->phonet_magic_present = false; while (buflen > 0) { elength = buffer[0]; if (!elength) { dev_err(&intf->dev, "skipping garbage byte\n"); elength = 1; goto next_desc; } if ((buflen < elength) || (elength < 3)) { dev_err(&intf->dev, "invalid descriptor buffer length\n"); break; } if (buffer[1] != USB_DT_CS_INTERFACE) { dev_err(&intf->dev, "skipping garbage\n"); goto next_desc; } switch (buffer[2]) { case USB_CDC_UNION_TYPE: /* we've found it */ if (elength < sizeof(struct usb_cdc_union_desc)) goto next_desc; if (union_header) { dev_err(&intf->dev, "More than one union descriptor, skipping ...\n"); goto next_desc; } union_header = (struct usb_cdc_union_desc *)buffer; break; case USB_CDC_COUNTRY_TYPE: if (elength < sizeof(struct usb_cdc_country_functional_desc)) goto next_desc; hdr->usb_cdc_country_functional_desc = (struct usb_cdc_country_functional_desc *)buffer; break; case USB_CDC_HEADER_TYPE: if (elength != sizeof(struct usb_cdc_header_desc)) goto next_desc; if (header) return -EINVAL; header = (struct usb_cdc_header_desc *)buffer; break; case USB_CDC_ACM_TYPE: if (elength < sizeof(struct usb_cdc_acm_descriptor)) goto next_desc; hdr->usb_cdc_acm_descriptor = (struct usb_cdc_acm_descriptor *)buffer; break; case USB_CDC_ETHERNET_TYPE: if (elength != sizeof(struct usb_cdc_ether_desc)) goto next_desc; if (ether) return -EINVAL; ether = (struct usb_cdc_ether_desc *)buffer; break; case USB_CDC_CALL_MANAGEMENT_TYPE: if (elength < sizeof(struct usb_cdc_call_mgmt_descriptor)) goto next_desc; hdr->usb_cdc_call_mgmt_descriptor = (struct usb_cdc_call_mgmt_descriptor *)buffer; break; case USB_CDC_DMM_TYPE: if (elength < sizeof(struct usb_cdc_dmm_desc)) goto next_desc; hdr->usb_cdc_dmm_desc = (struct usb_cdc_dmm_desc *)buffer; break; case USB_CDC_MDLM_TYPE: if (elength < sizeof(struct usb_cdc_mdlm_desc)) goto next_desc; if (desc) return -EINVAL; desc = (struct usb_cdc_mdlm_desc *)buffer; break; case USB_CDC_MDLM_DETAIL_TYPE: if (elength < sizeof(struct usb_cdc_mdlm_detail_desc)) goto next_desc; if (detail) return -EINVAL; detail = (struct usb_cdc_mdlm_detail_desc *)buffer; break; case USB_CDC_NCM_TYPE: if (elength < sizeof(struct usb_cdc_ncm_desc)) goto next_desc; hdr->usb_cdc_ncm_desc = (struct usb_cdc_ncm_desc *)buffer; break; case USB_CDC_MBIM_TYPE: if (elength < sizeof(struct usb_cdc_mbim_desc)) goto next_desc; hdr->usb_cdc_mbim_desc = (struct usb_cdc_mbim_desc *)buffer; break; case USB_CDC_MBIM_EXTENDED_TYPE: if (elength < sizeof(struct usb_cdc_mbim_extended_desc)) break; hdr->usb_cdc_mbim_extended_desc = (struct usb_cdc_mbim_extended_desc *)buffer; break; case CDC_PHONET_MAGIC_NUMBER: hdr->phonet_magic_present = true; break; default: /* * there are LOTS more CDC descriptors that * could legitimately be found here. 
*/ dev_dbg(&intf->dev, "Ignoring descriptor: type %02x, length %u\n", buffer[2], elength); goto next_desc; } cnt++; next_desc: buflen -= elength; buffer += elength; } hdr->usb_cdc_union_desc = union_header; hdr->usb_cdc_header_desc = header; hdr->usb_cdc_mdlm_detail_desc = detail; hdr->usb_cdc_mdlm_desc = desc; hdr->usb_cdc_ether_desc = ether; return cnt; } EXPORT_SYMBOL(cdc_parse_cdc_header);
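/*
 * Hedged usage sketch (not part of this file): as the comments above explain,
 * an interface driver must not switch configurations directly with
 * usb_set_configuration(), but it can queue the change through
 * usb_driver_set_configuration(). The driver function below and the
 * configuration value 2 are purely illustrative.
 */
static int example_request_config_change(struct usb_interface *intf)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	int ret;

	ret = usb_driver_set_configuration(udev, 2);
	if (ret)
		return ret;	/* the request could not even be queued */

	/*
	 * The actual Set-Config happens later in a work thread; this driver
	 * will be unbound when its interface is destroyed, so it must not
	 * assume the old configuration is still installed after this point.
	 */
	return 0;
}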
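/*
 * Hedged usage sketch (not part of this file): how a CDC-style driver's
 * probe() might run cdc_parse_cdc_header() over the interface's extra
 * descriptors. The function name is illustrative; cdc-acm and the usbnet
 * CDC binding do essentially this.
 */
static int example_cdc_probe_parse(struct usb_interface *intf)
{
	struct usb_cdc_parsed_header hdr;
	int ret;

	ret = cdc_parse_cdc_header(&hdr, intf, intf->altsetting->extra,
				   intf->altsetting->extralen);
	if (ret < 0)
		return ret;	/* headers contradictory beyond salvage */
	if (!hdr.usb_cdc_union_desc)
		return -ENODEV;	/* this example insists on a union descriptor */

	/* hdr.usb_cdc_union_desc->bSlaveInterface0 now names the data interface. */
	return 0;
}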
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. */ #include "queueing.h" #include <linux/skb_array.h> struct multicore_worker __percpu * wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr) { int cpu; struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker); if (!worker) return NULL; for_each_possible_cpu(cpu) { per_cpu_ptr(worker, cpu)->ptr = ptr; INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function); } return worker; } int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, unsigned int len) { int ret; memset(queue, 0, sizeof(*queue)); queue->last_cpu = -1; ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL); if (ret) return ret; queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue); if (!queue->worker) { ptr_ring_cleanup(&queue->ring, NULL); return -ENOMEM; } return 0; } void wg_packet_queue_free(struct crypt_queue *queue, bool purge) { free_percpu(queue->worker); WARN_ON(!purge && !__ptr_ring_empty(&queue->ring)); ptr_ring_cleanup(&queue->ring, purge ? __skb_array_destroy_skb : NULL); } #define NEXT(skb) ((skb)->prev) #define STUB(queue) ((struct sk_buff *)&queue->empty) void wg_prev_queue_init(struct prev_queue *queue) { NEXT(STUB(queue)) = NULL; queue->head = queue->tail = STUB(queue); queue->peeked = NULL; atomic_set(&queue->count, 0); BUILD_BUG_ON( offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) - offsetof(struct prev_queue, empty) || offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) - offsetof(struct prev_queue, empty)); } static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb) { WRITE_ONCE(NEXT(skb), NULL); WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb); } bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb) { if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS)) return false; __wg_prev_queue_enqueue(queue, skb); return true; } struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue) { struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail)); if (tail == STUB(queue)) { if (!next) return NULL; queue->tail = next; tail = next; next = smp_load_acquire(&NEXT(next)); } if (next) { queue->tail = next; atomic_dec(&queue->count); return tail; } if (tail != READ_ONCE(queue->head)) return NULL; __wg_prev_queue_enqueue(queue, STUB(queue)); next = smp_load_acquire(&NEXT(tail)); if (next) { queue->tail = next; atomic_dec(&queue->count); return tail; } return NULL; } #undef NEXT #undef STUB
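/*
 * Hedged lifecycle sketch (not part of this file): how a caller is expected
 * to pair wg_packet_queue_init() with wg_packet_queue_free(), and what the
 * work function handed to the per-cpu workers typically looks like.
 * example_worker(), example_setup() and example_teardown() are hypothetical
 * names; a real worker would hand packets to crypto rather than free them.
 */
static void example_worker(struct work_struct *work)
{
	struct multicore_worker *w = container_of(work, struct multicore_worker, work);
	struct crypt_queue *queue = w->ptr;	/* ->ptr was set to the queue at init */
	struct sk_buff *skb;

	while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL)
		kfree_skb(skb);
}

static int example_setup(struct crypt_queue *queue)
{
	return wg_packet_queue_init(queue, example_worker, 1024);	/* 1024 ring slots */
}

static void example_teardown(struct crypt_queue *queue)
{
	wg_packet_queue_free(queue, true);	/* purge anything still queued */
}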
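/*
 * Hedged usage sketch for the prev_queue above (not part of this file):
 * any context may enqueue, but only a single consumer may call
 * wg_prev_queue_dequeue(), which can transiently return NULL while a
 * concurrent enqueue is still linking its node. Names are illustrative.
 */
static bool example_submit(struct prev_queue *queue, struct sk_buff *skb)
{
	if (!wg_prev_queue_enqueue(queue, skb)) {
		kfree_skb(skb);		/* queue already holds MAX_QUEUED_PACKETS */
		return false;
	}
	return true;
}

static void example_drain(struct prev_queue *queue)
{
	struct sk_buff *skb;

	while ((skb = wg_prev_queue_dequeue(queue)) != NULL)
		kfree_skb(skb);		/* a real consumer would process the packet */
}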
/* SPDX-License-Identifier: GPL-2.0 */ /* IP Virtual Server * data structure and functionality definitions */ #ifndef _NET_IP_VS_H #define _NET_IP_VS_H #include <linux/ip_vs.h> /* definitions shared with userland */ #include <asm/types.h> /* for __uXX types */ #include <linux/list.h> /* for struct list_head */ #include <linux/spinlock.h> /* for struct rwlock_t */ #include <linux/atomic.h> /* for struct atomic_t */ #include <linux/refcount.h> /* for struct refcount_t */ #include <linux/workqueue.h> #include <linux/compiler.h> #include <linux/timer.h> #include <linux/bug.h> #include <net/checksum.h> #include <linux/netfilter.h> /* for union nf_inet_addr */ #include <linux/ip.h> #include <linux/ipv6.h> /* for struct ipv6hdr */ #include <net/ipv6.h> #if IS_ENABLED(CONFIG_NF_CONNTRACK) #include <net/netfilter/nf_conntrack.h> #endif #include <net/net_namespace.h> /* Net namespace */ #define IP_VS_HDR_INVERSE 1 #define IP_VS_HDR_ICMP 2 /* Generic access of ipvs struct */ static inline struct netns_ipvs *net_ipvs(struct net *net) { return net->ipvs; } /* Connections' size value needed by ip_vs_ctl.c */ extern int ip_vs_conn_tab_size; struct ip_vs_iphdr { int hdr_flags; /* ipvs flags */ __u32 off; /* Where IP or IPv4 header starts */ __u32 len; /* IPv4 simply where L4 starts * IPv6 where L4 Transport Header starts */ __u16 fragoffs; /* IPv6 fragment offset, 0 if first frag (or not frag)*/ __s16 protocol; __s32 flags; union nf_inet_addr saddr; union nf_inet_addr daddr; }; static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset, int len, void *buffer) { return skb_header_pointer(skb, offset, len, buffer); } /* This function handles filling *ip_vs_iphdr, both for IPv4 and IPv6. * IPv6 requires some extra work, as finding the proper header position depends on the IPv6 extension headers.
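 */

/*
 * Hedged usage sketch (not part of this header): how a caller in the packet
 * path might fill an ip_vs_iphdr using the ip_vs_fill_iph_skb() wrapper
 * defined just below. The function name is illustrative.
 */
static inline int example_fill_and_get_protocol(int af, struct sk_buff *skb)
{
	struct ip_vs_iphdr iph;

	if (!ip_vs_fill_iph_skb(af, skb, false, &iph))
		return -1;	/* header truncated or otherwise unparsable */

	/*
	 * iph.saddr, iph.daddr and iph.protocol are now valid for both
	 * address families, and iph.len points at the transport header.
	 */
	return iph.protocol;
}

/* ip_vs_fill_iph_skb_off() and the wrappers used above follow.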
*/ static inline int ip_vs_fill_iph_skb_off(int af, const struct sk_buff *skb, int offset, int hdr_flags, struct ip_vs_iphdr *iphdr) { iphdr->hdr_flags = hdr_flags; iphdr->off = offset; #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) { struct ipv6hdr _iph; const struct ipv6hdr *iph = skb_header_pointer( skb, offset, sizeof(_iph), &_iph); if (!iph) return 0; iphdr->saddr.in6 = iph->saddr; iphdr->daddr.in6 = iph->daddr; /* ipv6_find_hdr() updates len, flags */ iphdr->len = offset; iphdr->flags = 0; iphdr->protocol = ipv6_find_hdr(skb, &iphdr->len, -1, &iphdr->fragoffs, &iphdr->flags); if (iphdr->protocol < 0) return 0; } else #endif { struct iphdr _iph; const struct iphdr *iph = skb_header_pointer( skb, offset, sizeof(_iph), &_iph); if (!iph) return 0; iphdr->len = offset + iph->ihl * 4; iphdr->fragoffs = 0; iphdr->protocol = iph->protocol; iphdr->saddr.ip = iph->saddr; iphdr->daddr.ip = iph->daddr; } return 1; } static inline int ip_vs_fill_iph_skb_icmp(int af, const struct sk_buff *skb, int offset, bool inverse, struct ip_vs_iphdr *iphdr) { int hdr_flags = IP_VS_HDR_ICMP; if (inverse) hdr_flags |= IP_VS_HDR_INVERSE; return ip_vs_fill_iph_skb_off(af, skb, offset, hdr_flags, iphdr); } static inline int ip_vs_fill_iph_skb(int af, const struct sk_buff *skb, bool inverse, struct ip_vs_iphdr *iphdr) { int hdr_flags = 0; if (inverse) hdr_flags |= IP_VS_HDR_INVERSE; return ip_vs_fill_iph_skb_off(af, skb, skb_network_offset(skb), hdr_flags, iphdr); } static inline bool ip_vs_iph_inverse(const struct ip_vs_iphdr *iph) { return !!(iph->hdr_flags & IP_VS_HDR_INVERSE); } static inline bool ip_vs_iph_icmp(const struct ip_vs_iphdr *iph) { return !!(iph->hdr_flags & IP_VS_HDR_ICMP); } static inline void ip_vs_addr_copy(int af, union nf_inet_addr *dst, const union nf_inet_addr *src) { #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) dst->in6 = src->in6; else #endif dst->ip = src->ip; } static inline void ip_vs_addr_set(int af, union nf_inet_addr *dst, const union nf_inet_addr *src) { #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) { dst->in6 = src->in6; return; } #endif dst->ip = src->ip; dst->all[1] = 0; dst->all[2] = 0; dst->all[3] = 0; } static inline int ip_vs_addr_equal(int af, const union nf_inet_addr *a, const union nf_inet_addr *b) { #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) return ipv6_addr_equal(&a->in6, &b->in6); #endif return a->ip == b->ip; } #ifdef CONFIG_IP_VS_DEBUG #include <linux/net.h> int ip_vs_get_debug_level(void); static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len, const union nf_inet_addr *addr, int *idx) { int len; #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) len = snprintf(&buf[*idx], buf_len - *idx, "[%pI6c]", &addr->in6) + 1; else #endif len = snprintf(&buf[*idx], buf_len - *idx, "%pI4", &addr->ip) + 1; *idx += len; BUG_ON(*idx > buf_len + 1); return &buf[*idx - len]; } #define IP_VS_DBG_BUF(level, msg, ...) \ do { \ char ip_vs_dbg_buf[160]; \ int ip_vs_dbg_idx = 0; \ if (level <= ip_vs_get_debug_level()) \ printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__); \ } while (0) #define IP_VS_ERR_BUF(msg...) \ do { \ char ip_vs_dbg_buf[160]; \ int ip_vs_dbg_idx = 0; \ pr_err(msg); \ } while (0) /* Only use from within IP_VS_DBG_BUF() or IP_VS_ERR_BUF macros */ #define IP_VS_DBG_ADDR(af, addr) \ ip_vs_dbg_addr(af, ip_vs_dbg_buf, \ sizeof(ip_vs_dbg_buf), addr, \ &ip_vs_dbg_idx) #define IP_VS_DBG(level, msg, ...) \ do { \ if (level <= ip_vs_get_debug_level()) \ printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__); \ } while (0) #define IP_VS_DBG_RL(msg, ...) 
\ do { \ if (net_ratelimit()) \ printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__); \ } while (0) #define IP_VS_DBG_PKT(level, af, pp, skb, ofs, msg) \ do { \ if (level <= ip_vs_get_debug_level()) \ pp->debug_packet(af, pp, skb, ofs, msg); \ } while (0) #define IP_VS_DBG_RL_PKT(level, af, pp, skb, ofs, msg) \ do { \ if (level <= ip_vs_get_debug_level() && \ net_ratelimit()) \ pp->debug_packet(af, pp, skb, ofs, msg); \ } while (0) #else /* NO DEBUGGING at ALL */ #define IP_VS_DBG_BUF(level, msg...) do {} while (0) #define IP_VS_ERR_BUF(msg...) do {} while (0) #define IP_VS_DBG(level, msg...) do {} while (0) #define IP_VS_DBG_RL(msg...) do {} while (0) #define IP_VS_DBG_PKT(level, af, pp, skb, ofs, msg) do {} while (0) #define IP_VS_DBG_RL_PKT(level, af, pp, skb, ofs, msg) do {} while (0) #endif #define IP_VS_BUG() BUG() #define IP_VS_ERR_RL(msg, ...) \ do { \ if (net_ratelimit()) \ pr_err(msg, ##__VA_ARGS__); \ } while (0) #ifdef CONFIG_IP_VS_DEBUG #define EnterFunction(level) \ do { \ if (level <= ip_vs_get_debug_level()) \ printk(KERN_DEBUG \ pr_fmt("Enter: %s, %s line %i\n"), \ __func__, __FILE__, __LINE__); \ } while (0) #define LeaveFunction(level) \ do { \ if (level <= ip_vs_get_debug_level()) \ printk(KERN_DEBUG \ pr_fmt("Leave: %s, %s line %i\n"), \ __func__, __FILE__, __LINE__); \ } while (0) #else #define EnterFunction(level) do {} while (0) #define LeaveFunction(level) do {} while (0) #endif /* The port number of FTP service (in network order). */ #define FTPPORT cpu_to_be16(21) #define FTPDATA cpu_to_be16(20) /* TCP State Values */ enum { IP_VS_TCP_S_NONE = 0, IP_VS_TCP_S_ESTABLISHED, IP_VS_TCP_S_SYN_SENT, IP_VS_TCP_S_SYN_RECV, IP_VS_TCP_S_FIN_WAIT, IP_VS_TCP_S_TIME_WAIT, IP_VS_TCP_S_CLOSE, IP_VS_TCP_S_CLOSE_WAIT, IP_VS_TCP_S_LAST_ACK, IP_VS_TCP_S_LISTEN, IP_VS_TCP_S_SYNACK, IP_VS_TCP_S_LAST }; /* UDP State Values */ enum { IP_VS_UDP_S_NORMAL, IP_VS_UDP_S_LAST, }; /* ICMP State Values */ enum { IP_VS_ICMP_S_NORMAL, IP_VS_ICMP_S_LAST, }; /* SCTP State Values */ enum ip_vs_sctp_states { IP_VS_SCTP_S_NONE, IP_VS_SCTP_S_INIT1, IP_VS_SCTP_S_INIT, IP_VS_SCTP_S_COOKIE_SENT, IP_VS_SCTP_S_COOKIE_REPLIED, IP_VS_SCTP_S_COOKIE_WAIT, IP_VS_SCTP_S_COOKIE, IP_VS_SCTP_S_COOKIE_ECHOED, IP_VS_SCTP_S_ESTABLISHED, IP_VS_SCTP_S_SHUTDOWN_SENT, IP_VS_SCTP_S_SHUTDOWN_RECEIVED, IP_VS_SCTP_S_SHUTDOWN_ACK_SENT, IP_VS_SCTP_S_REJECTED, IP_VS_SCTP_S_CLOSED, IP_VS_SCTP_S_LAST }; /* Connection templates use bits from state */ #define IP_VS_CTPL_S_NONE 0x0000 #define IP_VS_CTPL_S_ASSURED 0x0001 #define IP_VS_CTPL_S_LAST 0x0002 /* Delta sequence info structure * Each ip_vs_conn has 2 (output AND input seq. changes). * Only used in the VS/NAT. 
*/ struct ip_vs_seq { __u32 init_seq; /* Add delta from this seq */ __u32 delta; /* Delta in sequence numbers */ __u32 previous_delta; /* Delta in sequence numbers * before last resized pkt */ }; /* counters per cpu */ struct ip_vs_counters { __u64 conns; /* connections scheduled */ __u64 inpkts; /* incoming packets */ __u64 outpkts; /* outgoing packets */ __u64 inbytes; /* incoming bytes */ __u64 outbytes; /* outgoing bytes */ }; /* Stats per cpu */ struct ip_vs_cpu_stats { struct ip_vs_counters cnt; struct u64_stats_sync syncp; }; /* IPVS statistics objects */ struct ip_vs_estimator { struct list_head list; u64 last_inbytes; u64 last_outbytes; u64 last_conns; u64 last_inpkts; u64 last_outpkts; u64 cps; u64 inpps; u64 outpps; u64 inbps; u64 outbps; }; /* * IPVS statistics object, 64-bit kernel version of struct ip_vs_stats_user */ struct ip_vs_kstats { u64 conns; /* connections scheduled */ u64 inpkts; /* incoming packets */ u64 outpkts; /* outgoing packets */ u64 inbytes; /* incoming bytes */ u64 outbytes; /* outgoing bytes */ u64 cps; /* current connection rate */ u64 inpps; /* current in packet rate */ u64 outpps; /* current out packet rate */ u64 inbps; /* current in byte rate */ u64 outbps; /* current out byte rate */ }; struct ip_vs_stats { struct ip_vs_kstats kstats; /* kernel statistics */ struct ip_vs_estimator est; /* estimator */ struct ip_vs_cpu_stats __percpu *cpustats; /* per cpu counters */ spinlock_t lock; /* spin lock */ struct ip_vs_kstats kstats0; /* reset values */ }; struct dst_entry; struct iphdr; struct ip_vs_conn; struct ip_vs_app; struct sk_buff; struct ip_vs_proto_data; struct ip_vs_protocol { struct ip_vs_protocol *next; char *name; u16 protocol; u16 num_states; int dont_defrag; void (*init)(struct ip_vs_protocol *pp); void (*exit)(struct ip_vs_protocol *pp); int (*init_netns)(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd); void (*exit_netns)(struct netns_ipvs *ipvs, struct ip_vs_proto_data *pd); int (*conn_schedule)(struct netns_ipvs *ipvs, int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, int *verdict, struct ip_vs_conn **cpp, struct ip_vs_iphdr *iph); struct ip_vs_conn * (*conn_in_get)(struct netns_ipvs *ipvs, int af, const struct sk_buff *skb, const struct ip_vs_iphdr *iph); struct ip_vs_conn * (*conn_out_get)(struct netns_ipvs *ipvs, int af, const struct sk_buff *skb, const struct ip_vs_iphdr *iph); int (*snat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp, struct ip_vs_iphdr *iph); int (*dnat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp, struct ip_vs_iphdr *iph); const char *(*state_name)(int state); void (*state_transition)(struct ip_vs_conn *cp, int direction, const struct sk_buff *skb, struct ip_vs_proto_data *pd); int (*register_app)(struct netns_ipvs *ipvs, struct ip_vs_app *inc); void (*unregister_app)(struct netns_ipvs *ipvs, struct ip_vs_app *inc); int (*app_conn_bind)(struct ip_vs_conn *cp); void (*debug_packet)(int af, struct ip_vs_protocol *pp, const struct sk_buff *skb, int offset, const char *msg); void (*timeout_change)(struct ip_vs_proto_data *pd, int flags); }; /* protocol data per netns */ struct ip_vs_proto_data { struct ip_vs_proto_data *next; struct ip_vs_protocol *pp; int *timeout_table; /* protocol timeout table */ atomic_t appcnt; /* counter of proto app incs. 
*/ struct tcp_states_t *tcp_state_table; }; struct ip_vs_protocol *ip_vs_proto_get(unsigned short proto); struct ip_vs_proto_data *ip_vs_proto_data_get(struct netns_ipvs *ipvs, unsigned short proto); struct ip_vs_conn_param { struct netns_ipvs *ipvs; const union nf_inet_addr *caddr; const union nf_inet_addr *vaddr; __be16 cport; __be16 vport; __u16 protocol; u16 af; const struct ip_vs_pe *pe; char *pe_data; __u8 pe_data_len; }; /* IP_VS structure allocated for each dynamically scheduled connection */ struct ip_vs_conn { struct hlist_node c_list; /* hashed list heads */ /* Protocol, addresses and port numbers */ __be16 cport; __be16 dport; __be16 vport; u16 af; /* address family */ union nf_inet_addr caddr; /* client address */ union nf_inet_addr vaddr; /* virtual address */ union nf_inet_addr daddr; /* destination address */ volatile __u32 flags; /* status flags */ __u16 protocol; /* Which protocol (TCP/UDP) */ __u16 daf; /* Address family of the dest */ struct netns_ipvs *ipvs; /* counter and timer */ refcount_t refcnt; /* reference count */ struct timer_list timer; /* Expiration timer */ volatile unsigned long timeout; /* timeout */ /* Flags and state transition */ spinlock_t lock; /* lock for state transition */ volatile __u16 state; /* state info */ volatile __u16 old_state; /* old state, to be used for * state transition triggerd * synchronization */ __u32 fwmark; /* Fire wall mark from skb */ unsigned long sync_endtime; /* jiffies + sent_retries */ /* Control members */ struct ip_vs_conn *control; /* Master control connection */ atomic_t n_control; /* Number of controlled ones */ struct ip_vs_dest *dest; /* real server */ atomic_t in_pkts; /* incoming packet counter */ /* Packet transmitter for different forwarding methods. If it * mangles the packet, it must return NF_DROP or better NF_STOLEN, * otherwise this must be changed to a sk_buff **. * NF_ACCEPT can be returned when destination is local. */ int (*packet_xmit)(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); /* Note: we can group the following members into a structure, * in order to save more space, and the following members are * only used in VS/NAT anyway */ struct ip_vs_app *app; /* bound ip_vs_app object */ void *app_data; /* Application private data */ struct_group(sync_conn_opt, struct ip_vs_seq in_seq; /* incoming seq. struct */ struct ip_vs_seq out_seq; /* outgoing seq. struct */ ); const struct ip_vs_pe *pe; char *pe_data; __u8 pe_data_len; struct rcu_head rcu_head; }; /* Extended internal versions of struct ip_vs_service_user and ip_vs_dest_user * for IPv6 support. * * We need these to conveniently pass around service and destination * options, but unfortunately, we also need to keep the old definitions to * maintain userspace backwards compatibility for the setsockopt interface. 
*/ struct ip_vs_service_user_kern { /* virtual service addresses */ u16 af; u16 protocol; union nf_inet_addr addr; /* virtual ip address */ __be16 port; u32 fwmark; /* firwall mark of service */ /* virtual service options */ char *sched_name; char *pe_name; unsigned int flags; /* virtual service flags */ unsigned int timeout; /* persistent timeout in sec */ __be32 netmask; /* persistent netmask or plen */ }; struct ip_vs_dest_user_kern { /* destination server address */ union nf_inet_addr addr; __be16 port; /* real server options */ unsigned int conn_flags; /* connection flags */ int weight; /* destination weight */ /* thresholds for active connections */ u32 u_threshold; /* upper threshold */ u32 l_threshold; /* lower threshold */ /* Address family of addr */ u16 af; u16 tun_type; /* tunnel type */ __be16 tun_port; /* tunnel port */ u16 tun_flags; /* tunnel flags */ }; /* * The information about the virtual service offered to the net and the * forwarding entries. */ struct ip_vs_service { struct hlist_node s_list; /* for normal service table */ struct hlist_node f_list; /* for fwmark-based service table */ atomic_t refcnt; /* reference counter */ u16 af; /* address family */ __u16 protocol; /* which protocol (TCP/UDP) */ union nf_inet_addr addr; /* IP address for virtual service */ __be16 port; /* port number for the service */ __u32 fwmark; /* firewall mark of the service */ unsigned int flags; /* service status flags */ unsigned int timeout; /* persistent timeout in ticks */ __be32 netmask; /* grouping granularity, mask/plen */ struct netns_ipvs *ipvs; struct list_head destinations; /* real server d-linked list */ __u32 num_dests; /* number of servers */ struct ip_vs_stats stats; /* statistics for the service */ /* for scheduling */ struct ip_vs_scheduler __rcu *scheduler; /* bound scheduler object */ spinlock_t sched_lock; /* lock sched_data */ void *sched_data; /* scheduler application data */ /* alternate persistence engine */ struct ip_vs_pe __rcu *pe; int conntrack_afmask; struct rcu_head rcu_head; }; /* Information for cached dst */ struct ip_vs_dest_dst { struct dst_entry *dst_cache; /* destination cache entry */ u32 dst_cookie; union nf_inet_addr dst_saddr; struct rcu_head rcu_head; }; /* The real server destination forwarding entry with ip address, port number, * and so on. 
*/ struct ip_vs_dest { struct list_head n_list; /* for the dests in the service */ struct hlist_node d_list; /* for table with all the dests */ u16 af; /* address family */ __be16 port; /* port number of the server */ union nf_inet_addr addr; /* IP address of the server */ volatile unsigned int flags; /* dest status flags */ atomic_t conn_flags; /* flags to copy to conn */ atomic_t weight; /* server weight */ atomic_t last_weight; /* server latest weight */ __u16 tun_type; /* tunnel type */ __be16 tun_port; /* tunnel port */ __u16 tun_flags; /* tunnel flags */ refcount_t refcnt; /* reference counter */ struct ip_vs_stats stats; /* statistics */ unsigned long idle_start; /* start time, jiffies */ /* connection counters and thresholds */ atomic_t activeconns; /* active connections */ atomic_t inactconns; /* inactive connections */ atomic_t persistconns; /* persistent connections */ __u32 u_threshold; /* upper threshold */ __u32 l_threshold; /* lower threshold */ /* for destination cache */ spinlock_t dst_lock; /* lock of dst_cache */ struct ip_vs_dest_dst __rcu *dest_dst; /* cached dst info */ /* for virtual service */ struct ip_vs_service __rcu *svc; /* service it belongs to */ __u16 protocol; /* which protocol (TCP/UDP) */ __be16 vport; /* virtual port number */ union nf_inet_addr vaddr; /* virtual IP address */ __u32 vfwmark; /* firewall mark of service */ struct list_head t_list; /* in dest_trash */ unsigned int in_rs_table:1; /* we are in rs_table */ }; /* The scheduler object */ struct ip_vs_scheduler { struct list_head n_list; /* d-linked list head */ char *name; /* scheduler name */ atomic_t refcnt; /* reference counter */ struct module *module; /* THIS_MODULE/NULL */ /* scheduler initializing service */ int (*init_service)(struct ip_vs_service *svc); /* scheduling service finish */ void (*done_service)(struct ip_vs_service *svc); /* dest is linked */ int (*add_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest); /* dest is unlinked */ int (*del_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest); /* dest is updated */ int (*upd_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest); /* selecting a server from the given service */ struct ip_vs_dest* (*schedule)(struct ip_vs_service *svc, const struct sk_buff *skb, struct ip_vs_iphdr *iph); }; /* The persistence engine object */ struct ip_vs_pe { struct list_head n_list; /* d-linked list head */ char *name; /* scheduler name */ atomic_t refcnt; /* reference counter */ struct module *module; /* THIS_MODULE/NULL */ /* get the connection template, if any */ int (*fill_param)(struct ip_vs_conn_param *p, struct sk_buff *skb); bool (*ct_match)(const struct ip_vs_conn_param *p, struct ip_vs_conn *ct); u32 (*hashkey_raw)(const struct ip_vs_conn_param *p, u32 initval, bool inverse); int (*show_pe_data)(const struct ip_vs_conn *cp, char *buf); /* create connections for real-server outgoing packets */ struct ip_vs_conn* (*conn_out)(struct ip_vs_service *svc, struct ip_vs_dest *dest, struct sk_buff *skb, const struct ip_vs_iphdr *iph, __be16 dport, __be16 cport); }; /* The application module object (a.k.a. 
app incarnation) */ struct ip_vs_app { struct list_head a_list; /* member in app list */ int type; /* IP_VS_APP_TYPE_xxx */ char *name; /* application module name */ __u16 protocol; struct module *module; /* THIS_MODULE/NULL */ struct list_head incs_list; /* list of incarnations */ /* members for application incarnations */ struct list_head p_list; /* member in proto app list */ struct ip_vs_app *app; /* its real application */ __be16 port; /* port number in net order */ atomic_t usecnt; /* usage counter */ struct rcu_head rcu_head; /* output hook: Process packet in inout direction, diff set for TCP. * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok, * 2=Mangled but checksum was not updated */ int (*pkt_out)(struct ip_vs_app *, struct ip_vs_conn *, struct sk_buff *, int *diff, struct ip_vs_iphdr *ipvsh); /* input hook: Process packet in outin direction, diff set for TCP. * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok, * 2=Mangled but checksum was not updated */ int (*pkt_in)(struct ip_vs_app *, struct ip_vs_conn *, struct sk_buff *, int *diff, struct ip_vs_iphdr *ipvsh); /* ip_vs_app initializer */ int (*init_conn)(struct ip_vs_app *, struct ip_vs_conn *); /* ip_vs_app finish */ int (*done_conn)(struct ip_vs_app *, struct ip_vs_conn *); /* not used now */ int (*bind_conn)(struct ip_vs_app *, struct ip_vs_conn *, struct ip_vs_protocol *); void (*unbind_conn)(struct ip_vs_app *, struct ip_vs_conn *); int * timeout_table; int * timeouts; int timeouts_size; int (*conn_schedule)(struct sk_buff *skb, struct ip_vs_app *app, int *verdict, struct ip_vs_conn **cpp); struct ip_vs_conn * (*conn_in_get)(const struct sk_buff *skb, struct ip_vs_app *app, const struct iphdr *iph, int inverse); struct ip_vs_conn * (*conn_out_get)(const struct sk_buff *skb, struct ip_vs_app *app, const struct iphdr *iph, int inverse); int (*state_transition)(struct ip_vs_conn *cp, int direction, const struct sk_buff *skb, struct ip_vs_app *app); void (*timeout_change)(struct ip_vs_app *app, int flags); }; struct ipvs_master_sync_state { struct list_head sync_queue; struct ip_vs_sync_buff *sync_buff; unsigned long sync_queue_len; unsigned int sync_queue_delay; struct delayed_work master_wakeup_work; struct netns_ipvs *ipvs; }; struct ip_vs_sync_thread_data; /* How much time to keep dests in trash */ #define IP_VS_DEST_TRASH_PERIOD (120 * HZ) struct ipvs_sync_daemon_cfg { union nf_inet_addr mcast_group; int syncid; u16 sync_maxlen; u16 mcast_port; u8 mcast_af; u8 mcast_ttl; /* multicast interface name */ char mcast_ifn[IP_VS_IFNAME_MAXLEN]; }; /* IPVS in network namespace */ struct netns_ipvs { int gen; /* Generation */ int enable; /* enable like nf_hooks do */ /* Hash table: for real service lookups */ #define IP_VS_RTAB_BITS 4 #define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS) #define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1) struct hlist_head rs_table[IP_VS_RTAB_SIZE]; /* ip_vs_app */ struct list_head app_list; /* ip_vs_proto */ #define IP_VS_PROTO_TAB_SIZE 32 /* must be power of 2 */ struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE]; /* ip_vs_proto_tcp */ #ifdef CONFIG_IP_VS_PROTO_TCP #define TCP_APP_TAB_BITS 4 #define TCP_APP_TAB_SIZE (1 << TCP_APP_TAB_BITS) #define TCP_APP_TAB_MASK (TCP_APP_TAB_SIZE - 1) struct list_head tcp_apps[TCP_APP_TAB_SIZE]; #endif /* ip_vs_proto_udp */ #ifdef CONFIG_IP_VS_PROTO_UDP #define UDP_APP_TAB_BITS 4 #define UDP_APP_TAB_SIZE (1 << UDP_APP_TAB_BITS) #define UDP_APP_TAB_MASK (UDP_APP_TAB_SIZE - 1) struct list_head udp_apps[UDP_APP_TAB_SIZE]; 
#endif /* ip_vs_proto_sctp */ #ifdef CONFIG_IP_VS_PROTO_SCTP #define SCTP_APP_TAB_BITS 4 #define SCTP_APP_TAB_SIZE (1 << SCTP_APP_TAB_BITS) #define SCTP_APP_TAB_MASK (SCTP_APP_TAB_SIZE - 1) /* Hash table for SCTP application incarnations */ struct list_head sctp_apps[SCTP_APP_TAB_SIZE]; #endif /* ip_vs_conn */ atomic_t conn_count; /* connection counter */ /* ip_vs_ctl */ struct ip_vs_stats tot_stats; /* Statistics & est. */ int num_services; /* no of virtual services */ int num_services6; /* IPv6 virtual services */ /* Trash for destinations */ struct list_head dest_trash; spinlock_t dest_trash_lock; struct timer_list dest_trash_timer; /* expiration timer */ /* Service counters */ atomic_t ftpsvc_counter; atomic_t nullsvc_counter; atomic_t conn_out_counter; #ifdef CONFIG_SYSCTL /* delayed work for expiring no dest connections */ struct delayed_work expire_nodest_conn_work; /* 1/rate drop and drop-entry variables */ struct delayed_work defense_work; /* Work handler */ int drop_rate; int drop_counter; int old_secure_tcp; atomic_t dropentry; /* locks in ctl.c */ spinlock_t dropentry_lock; /* drop entry handling */ spinlock_t droppacket_lock; /* drop packet handling */ spinlock_t securetcp_lock; /* state and timeout tables */ /* sys-ctl struct */ struct ctl_table_header *sysctl_hdr; struct ctl_table *sysctl_tbl; #endif /* sysctl variables */ int sysctl_amemthresh; int sysctl_am_droprate; int sysctl_drop_entry; int sysctl_drop_packet; int sysctl_secure_tcp; #ifdef CONFIG_IP_VS_NFCT int sysctl_conntrack; #endif int sysctl_snat_reroute; int sysctl_sync_ver; int sysctl_sync_ports; int sysctl_sync_persist_mode; unsigned long sysctl_sync_qlen_max; int sysctl_sync_sock_size; int sysctl_cache_bypass; int sysctl_expire_nodest_conn; int sysctl_sloppy_tcp; int sysctl_sloppy_sctp; int sysctl_expire_quiescent_template; int sysctl_sync_threshold[2]; unsigned int sysctl_sync_refresh_period; int sysctl_sync_retries; int sysctl_nat_icmp_send; int sysctl_pmtu_disc; int sysctl_backup_only; int sysctl_conn_reuse_mode; int sysctl_schedule_icmp; int sysctl_ignore_tunneled; /* ip_vs_lblc */ int sysctl_lblc_expiration; struct ctl_table_header *lblc_ctl_header; struct ctl_table *lblc_ctl_table; /* ip_vs_lblcr */ int sysctl_lblcr_expiration; struct ctl_table_header *lblcr_ctl_header; struct ctl_table *lblcr_ctl_table; /* ip_vs_est */ struct list_head est_list; /* estimator list */ spinlock_t est_lock; struct timer_list est_timer; /* Estimation timer */ /* ip_vs_sync */ spinlock_t sync_lock; struct ipvs_master_sync_state *ms; spinlock_t sync_buff_lock; struct ip_vs_sync_thread_data *master_tinfo; struct ip_vs_sync_thread_data *backup_tinfo; int threads_mask; volatile int sync_state; struct mutex sync_mutex; struct ipvs_sync_daemon_cfg mcfg; /* Master Configuration */ struct ipvs_sync_daemon_cfg bcfg; /* Backup Configuration */ /* net name space ptr */ struct net *net; /* Needed by timer routines */ /* Number of heterogeneous destinations, needed becaus heterogeneous * are not supported when synchronization is enabled. 
*/ unsigned int mixed_address_family_dests; unsigned int hooks_afmask; /* &1=AF_INET, &2=AF_INET6 */ }; #define DEFAULT_SYNC_THRESHOLD 3 #define DEFAULT_SYNC_PERIOD 50 #define DEFAULT_SYNC_VER 1 #define DEFAULT_SLOPPY_TCP 0 #define DEFAULT_SLOPPY_SCTP 0 #define DEFAULT_SYNC_REFRESH_PERIOD (0U * HZ) #define DEFAULT_SYNC_RETRIES 0 #define IPVS_SYNC_WAKEUP_RATE 8 #define IPVS_SYNC_QLEN_MAX (IPVS_SYNC_WAKEUP_RATE * 4) #define IPVS_SYNC_SEND_DELAY (HZ / 50) #define IPVS_SYNC_CHECK_PERIOD HZ #define IPVS_SYNC_FLUSH_TIME (HZ * 2) #define IPVS_SYNC_PORTS_MAX (1 << 6) #ifdef CONFIG_SYSCTL static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs) { return ipvs->sysctl_sync_threshold[0]; } static inline int sysctl_sync_period(struct netns_ipvs *ipvs) { return READ_ONCE(ipvs->sysctl_sync_threshold[1]); } static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs) { return READ_ONCE(ipvs->sysctl_sync_refresh_period); } static inline int sysctl_sync_retries(struct netns_ipvs *ipvs) { return ipvs->sysctl_sync_retries; } static inline int sysctl_sync_ver(struct netns_ipvs *ipvs) { return ipvs->sysctl_sync_ver; } static inline int sysctl_sloppy_tcp(struct netns_ipvs *ipvs) { return ipvs->sysctl_sloppy_tcp; } static inline int sysctl_sloppy_sctp(struct netns_ipvs *ipvs) { return ipvs->sysctl_sloppy_sctp; } static inline int sysctl_sync_ports(struct netns_ipvs *ipvs) { return READ_ONCE(ipvs->sysctl_sync_ports); } static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs) { return ipvs->sysctl_sync_persist_mode; } static inline unsigned long sysctl_sync_qlen_max(struct netns_ipvs *ipvs) { return ipvs->sysctl_sync_qlen_max; } static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs) { return ipvs->sysctl_sync_sock_size; } static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs) { return ipvs->sysctl_pmtu_disc; } static inline int sysctl_backup_only(struct netns_ipvs *ipvs) { return ipvs->sync_state & IP_VS_STATE_BACKUP && ipvs->sysctl_backup_only; } static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs) { return ipvs->sysctl_conn_reuse_mode; } static inline int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs) { return ipvs->sysctl_expire_nodest_conn; } static inline int sysctl_schedule_icmp(struct netns_ipvs *ipvs) { return ipvs->sysctl_schedule_icmp; } static inline int sysctl_ignore_tunneled(struct netns_ipvs *ipvs) { return ipvs->sysctl_ignore_tunneled; } static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs) { return ipvs->sysctl_cache_bypass; } #else static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs) { return DEFAULT_SYNC_THRESHOLD; } static inline int sysctl_sync_period(struct netns_ipvs *ipvs) { return DEFAULT_SYNC_PERIOD; } static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs) { return DEFAULT_SYNC_REFRESH_PERIOD; } static inline int sysctl_sync_retries(struct netns_ipvs *ipvs) { return DEFAULT_SYNC_RETRIES & 3; } static inline int sysctl_sync_ver(struct netns_ipvs *ipvs) { return DEFAULT_SYNC_VER; } static inline int sysctl_sloppy_tcp(struct netns_ipvs *ipvs) { return DEFAULT_SLOPPY_TCP; } static inline int sysctl_sloppy_sctp(struct netns_ipvs *ipvs) { return DEFAULT_SLOPPY_SCTP; } static inline int sysctl_sync_ports(struct netns_ipvs *ipvs) { return 1; } static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs) { return 0; } static inline unsigned long sysctl_sync_qlen_max(struct netns_ipvs *ipvs) { return IPVS_SYNC_QLEN_MAX; } static inline int sysctl_sync_sock_size(struct netns_ipvs 
*ipvs) { return 0; } static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs) { return 1; } static inline int sysctl_backup_only(struct netns_ipvs *ipvs) { return 0; } static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs) { return 1; } static inline int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs) { return 0; } static inline int sysctl_schedule_icmp(struct netns_ipvs *ipvs) { return 0; } static inline int sysctl_ignore_tunneled(struct netns_ipvs *ipvs) { return 0; } static inline int sysctl_cache_bypass(struct netns_ipvs *ipvs) { return 0; } #endif /* IPVS core functions * (from ip_vs_core.c) */ const char *ip_vs_proto_name(unsigned int proto); void ip_vs_init_hash_table(struct list_head *table, int rows); struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc, struct ip_vs_dest *dest, struct sk_buff *skb, const struct ip_vs_iphdr *iph, __be16 dport, __be16 cport); #define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t))) #define IP_VS_APP_TYPE_FTP 1 /* ip_vs_conn handling functions * (from ip_vs_conn.c) */ enum { IP_VS_DIR_INPUT = 0, IP_VS_DIR_OUTPUT, IP_VS_DIR_INPUT_ONLY, IP_VS_DIR_LAST, }; static inline void ip_vs_conn_fill_param(struct netns_ipvs *ipvs, int af, int protocol, const union nf_inet_addr *caddr, __be16 cport, const union nf_inet_addr *vaddr, __be16 vport, struct ip_vs_conn_param *p) { p->ipvs = ipvs; p->af = af; p->protocol = protocol; p->caddr = caddr; p->cport = cport; p->vaddr = vaddr; p->vport = vport; p->pe = NULL; p->pe_data = NULL; } struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p); struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p); struct ip_vs_conn * ip_vs_conn_in_get_proto(struct netns_ipvs *ipvs, int af, const struct sk_buff *skb, const struct ip_vs_iphdr *iph); struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p); struct ip_vs_conn * ip_vs_conn_out_get_proto(struct netns_ipvs *ipvs, int af, const struct sk_buff *skb, const struct ip_vs_iphdr *iph); /* Get reference to gain full access to conn. * By default, RCU read-side critical sections have access only to * conn fields and its PE data, see ip_vs_conn_rcu_free() for reference. 
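 */

/*
 * Hedged sketch (not part of this header): the pattern the note above
 * describes -- look a conn up under RCU, then take a full reference with
 * __ip_vs_conn_get() (defined just below) before leaving the read-side
 * section. The helper name and the __rcu slot are purely illustrative.
 */
static inline struct ip_vs_conn *example_get_conn_ref(struct ip_vs_conn __rcu **slot)
{
	struct ip_vs_conn *cp;

	rcu_read_lock();
	cp = rcu_dereference(*slot);
	if (cp && !__ip_vs_conn_get(cp))	/* refcount already dropped to zero */
		cp = NULL;
	rcu_read_unlock();

	return cp;	/* caller drops the reference with ip_vs_conn_put() */
}

/* The reference helpers themselves: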
*/ static inline bool __ip_vs_conn_get(struct ip_vs_conn *cp) { return refcount_inc_not_zero(&cp->refcnt); } /* put back the conn without restarting its timer */ static inline void __ip_vs_conn_put(struct ip_vs_conn *cp) { smp_mb__before_atomic(); refcount_dec(&cp->refcnt); } void ip_vs_conn_put(struct ip_vs_conn *cp); void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport); struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af, const union nf_inet_addr *daddr, __be16 dport, unsigned int flags, struct ip_vs_dest *dest, __u32 fwmark); void ip_vs_conn_expire_now(struct ip_vs_conn *cp); const char *ip_vs_state_name(const struct ip_vs_conn *cp); void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp); int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest); void ip_vs_random_dropentry(struct netns_ipvs *ipvs); int ip_vs_conn_init(void); void ip_vs_conn_cleanup(void); static inline void ip_vs_control_del(struct ip_vs_conn *cp) { struct ip_vs_conn *ctl_cp = cp->control; if (!ctl_cp) { IP_VS_ERR_BUF("request control DEL for uncontrolled: " "%s:%d to %s:%d\n", IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport), IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport)); return; } IP_VS_DBG_BUF(7, "DELeting control for: " "cp.dst=%s:%d ctl_cp.dst=%s:%d\n", IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport), IP_VS_DBG_ADDR(cp->af, &ctl_cp->caddr), ntohs(ctl_cp->cport)); cp->control = NULL; if (atomic_read(&ctl_cp->n_control) == 0) { IP_VS_ERR_BUF("BUG control DEL with n=0 : " "%s:%d to %s:%d\n", IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport), IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport)); return; } atomic_dec(&ctl_cp->n_control); } static inline void ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp) { if (cp->control) { IP_VS_ERR_BUF("request control ADD for already controlled: " "%s:%d to %s:%d\n", IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport), IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport)); ip_vs_control_del(cp); } IP_VS_DBG_BUF(7, "ADDing control for: " "cp.dst=%s:%d ctl_cp.dst=%s:%d\n", IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport), IP_VS_DBG_ADDR(cp->af, &ctl_cp->caddr), ntohs(ctl_cp->cport)); cp->control = ctl_cp; atomic_inc(&ctl_cp->n_control); } /* Mark our template as assured */ static inline void ip_vs_control_assure_ct(struct ip_vs_conn *cp) { struct ip_vs_conn *ct = cp->control; if (ct && !(ct->state & IP_VS_CTPL_S_ASSURED) && (ct->flags & IP_VS_CONN_F_TEMPLATE)) ct->state |= IP_VS_CTPL_S_ASSURED; } /* IPVS netns init & cleanup functions */ int ip_vs_estimator_net_init(struct netns_ipvs *ipvs); int ip_vs_control_net_init(struct netns_ipvs *ipvs); int ip_vs_protocol_net_init(struct netns_ipvs *ipvs); int ip_vs_app_net_init(struct netns_ipvs *ipvs); int ip_vs_conn_net_init(struct netns_ipvs *ipvs); int ip_vs_sync_net_init(struct netns_ipvs *ipvs); void ip_vs_conn_net_cleanup(struct netns_ipvs *ipvs); void ip_vs_app_net_cleanup(struct netns_ipvs *ipvs); void ip_vs_protocol_net_cleanup(struct netns_ipvs *ipvs); void ip_vs_control_net_cleanup(struct netns_ipvs *ipvs); void ip_vs_estimator_net_cleanup(struct netns_ipvs *ipvs); void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs); void ip_vs_service_nets_cleanup(struct list_head *net_list); /* IPVS application functions * (from ip_vs_app.c) */ #define IP_VS_APP_MAX_PORTS 8 struct ip_vs_app *register_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app); void unregister_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *app); int 
ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp); void ip_vs_unbind_app(struct ip_vs_conn *cp); int register_ip_vs_app_inc(struct netns_ipvs *ipvs, struct ip_vs_app *app, __u16 proto, __u16 port); int ip_vs_app_inc_get(struct ip_vs_app *inc); void ip_vs_app_inc_put(struct ip_vs_app *inc); int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb, struct ip_vs_iphdr *ipvsh); int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb, struct ip_vs_iphdr *ipvsh); int register_ip_vs_pe(struct ip_vs_pe *pe); int unregister_ip_vs_pe(struct ip_vs_pe *pe); struct ip_vs_pe *ip_vs_pe_getbyname(const char *name); struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name); /* Use a #define to avoid all of module.h just for these trivial ops */ #define ip_vs_pe_get(pe) \ if (pe && pe->module) \ __module_get(pe->module); #define ip_vs_pe_put(pe) \ if (pe && pe->module) \ module_put(pe->module); /* IPVS protocol functions (from ip_vs_proto.c) */ int ip_vs_protocol_init(void); void ip_vs_protocol_cleanup(void); void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags); int *ip_vs_create_timeout_table(int *table, int size); void ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp, const struct sk_buff *skb, int offset, const char *msg); extern struct ip_vs_protocol ip_vs_protocol_tcp; extern struct ip_vs_protocol ip_vs_protocol_udp; extern struct ip_vs_protocol ip_vs_protocol_icmp; extern struct ip_vs_protocol ip_vs_protocol_esp; extern struct ip_vs_protocol ip_vs_protocol_ah; extern struct ip_vs_protocol ip_vs_protocol_sctp; /* Registering/unregistering scheduler functions * (from ip_vs_sched.c) */ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler); int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler); int ip_vs_bind_scheduler(struct ip_vs_service *svc, struct ip_vs_scheduler *scheduler); void ip_vs_unbind_scheduler(struct ip_vs_service *svc, struct ip_vs_scheduler *sched); struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name); void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler); struct ip_vs_conn * ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb, struct ip_vs_proto_data *pd, int *ignored, struct ip_vs_iphdr *iph); int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph); void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg); /* IPVS control data and functions (from ip_vs_ctl.c) */ extern struct ip_vs_stats ip_vs_stats; extern int sysctl_ip_vs_sync_ver; struct ip_vs_service * ip_vs_service_find(struct netns_ipvs *ipvs, int af, __u32 fwmark, __u16 protocol, const union nf_inet_addr *vaddr, __be16 vport); bool ip_vs_has_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol, const union nf_inet_addr *daddr, __be16 dport); struct ip_vs_dest * ip_vs_find_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol, const union nf_inet_addr *daddr, __be16 dport); struct ip_vs_dest *ip_vs_find_tunnel(struct netns_ipvs *ipvs, int af, const union nf_inet_addr *daddr, __be16 tun_port); int ip_vs_use_count_inc(void); void ip_vs_use_count_dec(void); int ip_vs_register_nl_ioctl(void); void ip_vs_unregister_nl_ioctl(void); int ip_vs_control_init(void); void ip_vs_control_cleanup(void); struct ip_vs_dest * ip_vs_find_dest(struct netns_ipvs *ipvs, int svc_af, int dest_af, const union nf_inet_addr *daddr, __be16 dport, const union nf_inet_addr *vaddr, __be16 vport, __u16 protocol, __u32 fwmark, __u32 flags); void 
ip_vs_try_bind_dest(struct ip_vs_conn *cp); static inline void ip_vs_dest_hold(struct ip_vs_dest *dest) { refcount_inc(&dest->refcnt); } static inline void ip_vs_dest_put(struct ip_vs_dest *dest) { smp_mb__before_atomic(); refcount_dec(&dest->refcnt); } static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest) { if (refcount_dec_and_test(&dest->refcnt)) kfree(dest); } /* IPVS sync daemon data and function prototypes * (from ip_vs_sync.c) */ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *cfg, int state); int stop_sync_thread(struct netns_ipvs *ipvs, int state); void ip_vs_sync_conn(struct netns_ipvs *ipvs, struct ip_vs_conn *cp, int pkts); /* IPVS rate estimator prototypes (from ip_vs_est.c) */ void ip_vs_start_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats); void ip_vs_stop_estimator(struct netns_ipvs *ipvs, struct ip_vs_stats *stats); void ip_vs_zero_estimator(struct ip_vs_stats *stats); void ip_vs_read_estimator(struct ip_vs_kstats *dst, struct ip_vs_stats *stats); /* Various IPVS packet transmitters (from ip_vs_xmit.c) */ int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); int ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); int ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); int ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); int ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, int offset, unsigned int hooknum, struct ip_vs_iphdr *iph); void ip_vs_dest_dst_rcu_free(struct rcu_head *head); #ifdef CONFIG_IP_VS_IPV6 int ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); int ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); int ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); int ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); int ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, int offset, unsigned int hooknum, struct ip_vs_iphdr *iph); #endif #ifdef CONFIG_SYSCTL /* This is a simple mechanism to ignore packets when * we are loaded. 
Just set ip_vs_drop_rate to 'n' and * we start to drop 1/rate of the packets */ static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { if (!ipvs->drop_rate) return 0; if (--ipvs->drop_counter > 0) return 0; ipvs->drop_counter = ipvs->drop_rate; return 1; } #else static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { return 0; } #endif #ifdef CONFIG_SYSCTL /* Enqueue delayed work for expiring no dest connections * Only run when sysctl_expire_nodest=1 */ static inline void ip_vs_enqueue_expire_nodest_conns(struct netns_ipvs *ipvs) { if (sysctl_expire_nodest_conn(ipvs)) queue_delayed_work(system_long_wq, &ipvs->expire_nodest_conn_work, 1); } void ip_vs_expire_nodest_conn_flush(struct netns_ipvs *ipvs); #else static inline void ip_vs_enqueue_expire_nodest_conns(struct netns_ipvs *ipvs) {} #endif #define IP_VS_DFWD_METHOD(dest) (atomic_read(&(dest)->conn_flags) & \ IP_VS_CONN_F_FWD_MASK) /* ip_vs_fwd_tag returns the forwarding tag of the connection */ #define IP_VS_FWD_METHOD(cp) (cp->flags & IP_VS_CONN_F_FWD_MASK) static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp) { char fwd; switch (IP_VS_FWD_METHOD(cp)) { case IP_VS_CONN_F_MASQ: fwd = 'M'; break; case IP_VS_CONN_F_LOCALNODE: fwd = 'L'; break; case IP_VS_CONN_F_TUNNEL: fwd = 'T'; break; case IP_VS_CONN_F_DROUTE: fwd = 'R'; break; case IP_VS_CONN_F_BYPASS: fwd = 'B'; break; default: fwd = '?'; break; } return fwd; } void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp, int dir); #ifdef CONFIG_IP_VS_IPV6 void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp, int dir); #endif __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset); static inline __wsum ip_vs_check_diff4(__be32 old, __be32 new, __wsum oldsum) { __be32 diff[2] = { ~old, new }; return csum_partial(diff, sizeof(diff), oldsum); } #ifdef CONFIG_IP_VS_IPV6 static inline __wsum ip_vs_check_diff16(const __be32 *old, const __be32 *new, __wsum oldsum) { __be32 diff[8] = { ~old[3], ~old[2], ~old[1], ~old[0], new[3], new[2], new[1], new[0] }; return csum_partial(diff, sizeof(diff), oldsum); } #endif static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum) { __be16 diff[2] = { ~old, new }; return csum_partial(diff, sizeof(diff), oldsum); } /* Forget current conntrack (unconfirmed) and attach notrack entry */ static inline void ip_vs_notrack(struct sk_buff *skb) { #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) enum ip_conntrack_info ctinfo; struct nf_conn *ct = nf_ct_get(skb, &ctinfo); if (ct) { nf_conntrack_put(&ct->ct_general); nf_ct_set(skb, NULL, IP_CT_UNTRACKED); } #endif } #ifdef CONFIG_IP_VS_NFCT /* Netfilter connection tracking * (from ip_vs_nfct.c) */ static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs) { #ifdef CONFIG_SYSCTL return ipvs->sysctl_conntrack; #else return 0; #endif } void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin); int ip_vs_confirm_conntrack(struct sk_buff *skb); void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct, struct ip_vs_conn *cp, u_int8_t proto, const __be16 port, int from_rs); void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp); #else static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs) { return 0; } static inline void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin) { } static inline int ip_vs_confirm_conntrack(struct sk_buff *skb) { return NF_ACCEPT; } static inline void ip_vs_conn_drop_conntrack(struct 
ip_vs_conn *cp) { } #endif /* CONFIG_IP_VS_NFCT */ /* Using old conntrack that can not be redirected to another real server? */ static inline bool ip_vs_conn_uses_old_conntrack(struct ip_vs_conn *cp, struct sk_buff *skb) { #ifdef CONFIG_IP_VS_NFCT enum ip_conntrack_info ctinfo; struct nf_conn *ct; ct = nf_ct_get(skb, &ctinfo); if (ct && nf_ct_is_confirmed(ct)) return true; #endif return false; } static inline int ip_vs_register_conntrack(struct ip_vs_service *svc) { #if IS_ENABLED(CONFIG_NF_CONNTRACK) int afmask = (svc->af == AF_INET6) ? 2 : 1; int ret = 0; if (!(svc->conntrack_afmask & afmask)) { ret = nf_ct_netns_get(svc->ipvs->net, svc->af); if (ret >= 0) svc->conntrack_afmask |= afmask; } return ret; #else return 0; #endif } static inline void ip_vs_unregister_conntrack(struct ip_vs_service *svc) { #if IS_ENABLED(CONFIG_NF_CONNTRACK) int afmask = (svc->af == AF_INET6) ? 2 : 1; if (svc->conntrack_afmask & afmask) { nf_ct_netns_put(svc->ipvs->net, svc->af); svc->conntrack_afmask &= ~afmask; } #endif } int ip_vs_register_hooks(struct netns_ipvs *ipvs, unsigned int af); void ip_vs_unregister_hooks(struct netns_ipvs *ipvs, unsigned int af); static inline int ip_vs_dest_conn_overhead(struct ip_vs_dest *dest) { /* We think the overhead of processing active connections is 256 * times higher than that of inactive connections in average. (This * 256 times might not be accurate, we will change it later) We * use the following formula to estimate the overhead now: * dest->activeconns*256 + dest->inactconns */ return (atomic_read(&dest->activeconns) << 8) + atomic_read(&dest->inactconns); } #ifdef CONFIG_IP_VS_PROTO_TCP INDIRECT_CALLABLE_DECLARE(int tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)); #endif #ifdef CONFIG_IP_VS_PROTO_UDP INDIRECT_CALLABLE_DECLARE(int udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)); #endif #endif /* _NET_IP_VS_H */ |
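/*
 * Illustrative user-space sketch (not part of the kernel sources): the
 * 1/rate drop counter that ip_vs_todrop() implements above.  With a
 * non-zero drop_rate of n, every n-th call reports "drop"; a rate of 0
 * disables dropping.  The local struct, the initial counter value and
 * the single-threaded caller are assumptions of this sketch; the kernel
 * version works on per-netns state under its own locking.
 */
#include <stdio.h>

struct drop_state {
	int drop_rate;		/* 0 disables dropping */
	int drop_counter;	/* counts down to the next drop */
};

static int todrop(struct drop_state *s)
{
	if (!s->drop_rate)
		return 0;
	if (--s->drop_counter > 0)
		return 0;
	s->drop_counter = s->drop_rate;	/* rearm for the next window */
	return 1;
}

int main(void)
{
	struct drop_state s = { .drop_rate = 4, .drop_counter = 4 };
	int i, dropped = 0;

	for (i = 0; i < 20; i++)
		dropped += todrop(&s);
	printf("dropped %d of 20 packets\n", dropped);	/* prints 5 of 20 */
	return 0;
}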
151 149 1 2 1 2 2 1 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_GRE_H #define __LINUX_GRE_H #include <linux/skbuff.h> #include <net/ip_tunnels.h> struct gre_base_hdr { __be16 flags; __be16 protocol; } __packed; struct gre_full_hdr { struct gre_base_hdr fixed_header; __be16 csum; __be16 reserved1; __be32 key; __be32 seq; } __packed; #define GRE_HEADER_SECTION 4 #define GREPROTO_CISCO 0 #define GREPROTO_PPTP 1 #define GREPROTO_MAX 2 #define GRE_IP_PROTO_MAX 2 struct gre_protocol { int (*handler)(struct sk_buff *skb); void (*err_handler)(struct sk_buff *skb, u32 info); }; int gre_add_protocol(const struct gre_protocol *proto, u8 version); int gre_del_protocol(const struct gre_protocol *proto, u8 version); struct net_device *gretap_fb_dev_create(struct net *net, const char *name, u8 name_assign_type); int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, bool *csum_err, __be16 proto, int nhs); static inline bool netif_is_gretap(const struct net_device *dev) { return dev->rtnl_link_ops && !strcmp(dev->rtnl_link_ops->kind, "gretap"); } static inline bool netif_is_ip6gretap(const struct net_device *dev) { return dev->rtnl_link_ops && !strcmp(dev->rtnl_link_ops->kind, "ip6gretap"); } static inline int gre_calc_hlen(__be16 o_flags) { int addend = 4; if (o_flags & TUNNEL_CSUM) addend += 4; if (o_flags & TUNNEL_KEY) addend += 4; if (o_flags & TUNNEL_SEQ) addend += 4; return addend; } static inline __be16 gre_flags_to_tnl_flags(__be16 flags) { __be16 tflags = 0; if (flags & GRE_CSUM) tflags |= TUNNEL_CSUM; if (flags & GRE_ROUTING) tflags |= TUNNEL_ROUTING; if (flags & GRE_KEY) tflags |= TUNNEL_KEY; if (flags & GRE_SEQ) tflags |= TUNNEL_SEQ; if (flags & GRE_STRICT) tflags |= TUNNEL_STRICT; if (flags & GRE_REC) tflags |= TUNNEL_REC; if (flags & GRE_VERSION) tflags |= TUNNEL_VERSION; return tflags; } static inline __be16 gre_tnl_flags_to_gre_flags(__be16 tflags) { __be16 flags = 0; if (tflags & TUNNEL_CSUM) flags |= GRE_CSUM; if (tflags & TUNNEL_ROUTING) flags |= GRE_ROUTING; if (tflags & TUNNEL_KEY) flags |= GRE_KEY; if (tflags & TUNNEL_SEQ) flags |= GRE_SEQ; if (tflags & TUNNEL_STRICT) flags |= GRE_STRICT; if (tflags & TUNNEL_REC) flags |= GRE_REC; if (tflags & TUNNEL_VERSION) flags |= GRE_VERSION; return flags; } static inline void gre_build_header(struct sk_buff *skb, int hdr_len, __be16 flags, __be16 proto, __be32 key, __be32 seq) { struct gre_base_hdr *greh; skb_push(skb, hdr_len); skb_set_inner_protocol(skb, proto); skb_reset_transport_header(skb); greh = (struct gre_base_hdr *)skb->data; greh->flags = gre_tnl_flags_to_gre_flags(flags); greh->protocol = proto; if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) { __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4); if (flags & TUNNEL_SEQ) { *ptr = seq; ptr--; } if (flags & TUNNEL_KEY) { *ptr = key; ptr--; } if (flags & TUNNEL_CSUM && !(skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) { *ptr = 0; if (skb->ip_summed == CHECKSUM_PARTIAL) { *(__sum16 *)ptr = csum_fold(lco_csum(skb)); } else { skb->ip_summed = CHECKSUM_PARTIAL; 
skb->csum_start = skb_transport_header(skb) - skb->head; skb->csum_offset = sizeof(*greh); } } } } #endif |
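/*
 * Illustrative user-space sketch (not part of the kernel sources): the
 * header-length rule behind gre_calc_hlen() above -- a 4-byte base
 * header (flags + protocol) plus 4 bytes for each optional field
 * (checksum + reserved, key, sequence).  gre_build_header() then fills
 * those optional words from the end of the reserved space backwards:
 * sequence first, then key, then checksum.  The DEMO_* bits below are
 * local stand-ins, not the kernel's TUNNEL_*/GRE_* flag encoding.
 */
#include <stdio.h>

#define DEMO_CSUM	0x1	/* checksum + reserved word present */
#define DEMO_KEY	0x2	/* key field present */
#define DEMO_SEQ	0x4	/* sequence number present */

static int demo_gre_hlen(unsigned int flags)
{
	int hlen = 4;		/* fixed flags + protocol words */

	if (flags & DEMO_CSUM)
		hlen += 4;
	if (flags & DEMO_KEY)
		hlen += 4;
	if (flags & DEMO_SEQ)
		hlen += 4;
	return hlen;
}

int main(void)
{
	/* Key + sequence, no checksum: 4 + 4 + 4 = 12 bytes. */
	printf("hlen = %d\n", demo_gre_hlen(DEMO_KEY | DEMO_SEQ));
	return 0;
}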
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 | // SPDX-License-Identifier: GPL-2.0-only /* * Context tracking: Probe on high level context boundaries such as kernel * and userspace. This includes syscalls and exceptions entry/exit. * * This is used by RCU to remove its dependency on the timer tick while a CPU * runs in userspace. * * Started by Frederic Weisbecker: * * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com> * * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton, * Steven Rostedt, Peter Zijlstra for suggestions and improvements. * */ #include <linux/context_tracking.h> #include <linux/rcupdate.h> #include <linux/sched.h> #include <linux/hardirq.h> #include <linux/export.h> #include <linux/kprobes.h> #define CREATE_TRACE_POINTS #include <trace/events/context_tracking.h> DEFINE_STATIC_KEY_FALSE(context_tracking_key); EXPORT_SYMBOL_GPL(context_tracking_key); DEFINE_PER_CPU(struct context_tracking, context_tracking); EXPORT_SYMBOL_GPL(context_tracking); static noinstr bool context_tracking_recursion_enter(void) { int recursion; recursion = __this_cpu_inc_return(context_tracking.recursion); if (recursion == 1) return true; WARN_ONCE((recursion < 1), "Invalid context tracking recursion value %d\n", recursion); __this_cpu_dec(context_tracking.recursion); return false; } static __always_inline void context_tracking_recursion_exit(void) { __this_cpu_dec(context_tracking.recursion); } /** * context_tracking_enter - Inform the context tracking that the CPU is going * enter user or guest space mode. * * This function must be called right before we switch from the kernel * to user or guest space, when it's guaranteed the remaining kernel * instructions to execute won't use any RCU read side critical section * because this function sets RCU in extended quiescent state. */ void noinstr __context_tracking_enter(enum ctx_state state) { /* Kernel threads aren't supposed to go to userspace */ WARN_ON_ONCE(!current->mm); if (!context_tracking_recursion_enter()) return; if ( __this_cpu_read(context_tracking.state) != state) { if (__this_cpu_read(context_tracking.active)) { /* * At this stage, only low level arch entry code remains and * then we'll run in userspace. We can assume there won't be * any RCU read-side critical section until the next call to * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency * on the tick. */ if (state == CONTEXT_USER) { instrumentation_begin(); trace_user_enter(0); vtime_user_enter(current); instrumentation_end(); } rcu_user_enter(); } /* * Even if context tracking is disabled on this CPU, because it's outside * the full dynticks mask for example, we still have to keep track of the * context transitions and states to prevent inconsistency on those of * other CPUs. 
* If a task triggers an exception in userspace, sleep on the exception * handler and then migrate to another CPU, that new CPU must know where * the exception returns by the time we call exception_exit(). * This information can only be provided by the previous CPU when it called * exception_enter(). * OTOH we can spare the calls to vtime and RCU when context_tracking.active * is false because we know that CPU is not tickless. */ __this_cpu_write(context_tracking.state, state); } context_tracking_recursion_exit(); } EXPORT_SYMBOL_GPL(__context_tracking_enter); void context_tracking_enter(enum ctx_state state) { unsigned long flags; /* * Some contexts may involve an exception occuring in an irq, * leading to that nesting: * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit() * This would mess up the dyntick_nesting count though. And rcu_irq_*() * helpers are enough to protect RCU uses inside the exception. So * just return immediately if we detect we are in an IRQ. */ if (in_interrupt()) return; local_irq_save(flags); __context_tracking_enter(state); local_irq_restore(flags); } NOKPROBE_SYMBOL(context_tracking_enter); EXPORT_SYMBOL_GPL(context_tracking_enter); void context_tracking_user_enter(void) { user_enter(); } NOKPROBE_SYMBOL(context_tracking_user_enter); /** * context_tracking_exit - Inform the context tracking that the CPU is * exiting user or guest mode and entering the kernel. * * This function must be called after we entered the kernel from user or * guest space before any use of RCU read side critical section. This * potentially include any high level kernel code like syscalls, exceptions, * signal handling, etc... * * This call supports re-entrancy. This way it can be called from any exception * handler without needing to know if we came from userspace or not. */ void noinstr __context_tracking_exit(enum ctx_state state) { if (!context_tracking_recursion_enter()) return; if (__this_cpu_read(context_tracking.state) == state) { if (__this_cpu_read(context_tracking.active)) { /* * We are going to run code that may use RCU. Inform * RCU core about that (ie: we may need the tick again). */ rcu_user_exit(); if (state == CONTEXT_USER) { instrumentation_begin(); vtime_user_exit(current); trace_user_exit(0); instrumentation_end(); } } __this_cpu_write(context_tracking.state, CONTEXT_KERNEL); } context_tracking_recursion_exit(); } EXPORT_SYMBOL_GPL(__context_tracking_exit); void context_tracking_exit(enum ctx_state state) { unsigned long flags; if (in_interrupt()) return; local_irq_save(flags); __context_tracking_exit(state); local_irq_restore(flags); } NOKPROBE_SYMBOL(context_tracking_exit); EXPORT_SYMBOL_GPL(context_tracking_exit); void context_tracking_user_exit(void) { user_exit(); } NOKPROBE_SYMBOL(context_tracking_user_exit); void __init context_tracking_cpu_set(int cpu) { static __initdata bool initialized = false; if (!per_cpu(context_tracking.active, cpu)) { per_cpu(context_tracking.active, cpu) = true; static_branch_inc(&context_tracking_key); } if (initialized) return; #ifdef CONFIG_HAVE_TIF_NOHZ /* * Set TIF_NOHZ to init/0 and let it propagate to all tasks through fork * This assumes that init is the only task at this early boot stage. */ set_tsk_thread_flag(&init_task, TIF_NOHZ); #endif WARN_ON_ONCE(!tasklist_empty()); initialized = true; } #ifdef CONFIG_CONTEXT_TRACKING_FORCE void __init context_tracking_init(void) { int cpu; for_each_possible_cpu(cpu) context_tracking_cpu_set(cpu); } #endif |
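/*
 * Illustrative user-space sketch (not part of the kernel sources): the
 * recursion guard that context_tracking_recursion_enter()/_exit()
 * build on a per-CPU counter above -- only the outermost caller is
 * allowed to do the real work, nested calls undo the increment and back
 * off.  A C11 thread-local counter stands in for the per-CPU variable
 * here, and the kernel's WARN on counter underflow is omitted.
 */
#include <stdbool.h>
#include <stdio.h>

static _Thread_local int recursion;

static bool recursion_enter(void)
{
	if (++recursion == 1)
		return true;	/* outermost entry: proceed */
	--recursion;		/* nested entry: undo and back off */
	return false;
}

static void recursion_exit(void)
{
	--recursion;
}

static void tracked_op(int depth)
{
	if (!recursion_enter()) {
		printf("depth %d: skipped (already inside)\n", depth);
		return;
	}
	printf("depth %d: doing the work\n", depth);
	if (depth < 2)
		tracked_op(depth + 1);	/* re-entry is rejected */
	recursion_exit();
}

int main(void)
{
	tracked_op(0);
	return 0;
}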
// SPDX-License-Identifier: GPL-2.0 /* * linux/ipc/msg.c * Copyright (C) 1992 Krishna Balasubramanian * * Removed all the remaining kerneld mess * Catch the -EFAULT stuff properly * Use GFP_KERNEL for messages as in 1.2 * Fixed up the unchecked user space derefs * Copyright (C) 1998 Alan Cox & Andi Kleen * * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com> * * mostly rewritten, threaded and wake-one semantics added * MSGMAX limit removed, sysctl's added * (c) 1999 Manfred Spraul <manfred@colorfullife.com> * * support for audit of ipc object properties and permission changes * Dustin Kirkland <dustin.kirkland@us.ibm.com> * * namespaces support * OpenVZ, SWsoft Inc.
* Pavel Emelianov <xemul@openvz.org> */ #include <linux/capability.h> #include <linux/msg.h> #include <linux/spinlock.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/proc_fs.h> #include <linux/list.h> #include <linux/security.h> #include <linux/sched/wake_q.h> #include <linux/syscalls.h> #include <linux/audit.h> #include <linux/seq_file.h> #include <linux/rwsem.h> #include <linux/nsproxy.h> #include <linux/ipc_namespace.h> #include <linux/rhashtable.h> #include <asm/current.h> #include <linux/uaccess.h> #include "util.h" /* one msq_queue structure for each present queue on the system */ struct msg_queue { struct kern_ipc_perm q_perm; time64_t q_stime; /* last msgsnd time */ time64_t q_rtime; /* last msgrcv time */ time64_t q_ctime; /* last change time */ unsigned long q_cbytes; /* current number of bytes on queue */ unsigned long q_qnum; /* number of messages in queue */ unsigned long q_qbytes; /* max number of bytes on queue */ struct pid *q_lspid; /* pid of last msgsnd */ struct pid *q_lrpid; /* last receive pid */ struct list_head q_messages; struct list_head q_receivers; struct list_head q_senders; } __randomize_layout; /* * MSG_BARRIER Locking: * * Similar to the optimization used in ipc/mqueue.c, one syscall return path * does not acquire any locks when it sees that a message exists in * msg_receiver.r_msg. Therefore r_msg is set using smp_store_release() * and accessed using READ_ONCE()+smp_acquire__after_ctrl_dep(). In addition, * wake_q_add_safe() is used. See ipc/mqueue.c for more details */ /* one msg_receiver structure for each sleeping receiver */ struct msg_receiver { struct list_head r_list; struct task_struct *r_tsk; int r_mode; long r_msgtype; long r_maxsize; struct msg_msg *r_msg; }; /* one msg_sender for each sleeping sender */ struct msg_sender { struct list_head list; struct task_struct *tsk; size_t msgsz; }; #define SEARCH_ANY 1 #define SEARCH_EQUAL 2 #define SEARCH_NOTEQUAL 3 #define SEARCH_LESSEQUAL 4 #define SEARCH_NUMBER 5 #define msg_ids(ns) ((ns)->ids[IPC_MSG_IDS]) static inline struct msg_queue *msq_obtain_object(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&msg_ids(ns), id); if (IS_ERR(ipcp)) return ERR_CAST(ipcp); return container_of(ipcp, struct msg_queue, q_perm); } static inline struct msg_queue *msq_obtain_object_check(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&msg_ids(ns), id); if (IS_ERR(ipcp)) return ERR_CAST(ipcp); return container_of(ipcp, struct msg_queue, q_perm); } static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s) { ipc_rmid(&msg_ids(ns), &s->q_perm); } static void msg_rcu_free(struct rcu_head *head) { struct kern_ipc_perm *p = container_of(head, struct kern_ipc_perm, rcu); struct msg_queue *msq = container_of(p, struct msg_queue, q_perm); security_msg_queue_free(&msq->q_perm); kfree(msq); } /** * newque - Create a new msg queue * @ns: namespace * @params: ptr to the structure that contains the key and msgflg * * Called with msg_ids.rwsem held (writer) */ static int newque(struct ipc_namespace *ns, struct ipc_params *params) { struct msg_queue *msq; int retval; key_t key = params->key; int msgflg = params->flg; msq = kmalloc(sizeof(*msq), GFP_KERNEL_ACCOUNT); if (unlikely(!msq)) return -ENOMEM; msq->q_perm.mode = msgflg & S_IRWXUGO; msq->q_perm.key = key; msq->q_perm.security = NULL; retval = security_msg_queue_alloc(&msq->q_perm); if (retval) { kfree(msq); return retval; } msq->q_stime = msq->q_rtime = 0; msq->q_ctime 
= ktime_get_real_seconds(); msq->q_cbytes = msq->q_qnum = 0; msq->q_qbytes = ns->msg_ctlmnb; msq->q_lspid = msq->q_lrpid = NULL; INIT_LIST_HEAD(&msq->q_messages); INIT_LIST_HEAD(&msq->q_receivers); INIT_LIST_HEAD(&msq->q_senders); /* ipc_addid() locks msq upon success. */ retval = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); if (retval < 0) { ipc_rcu_putref(&msq->q_perm, msg_rcu_free); return retval; } ipc_unlock_object(&msq->q_perm); rcu_read_unlock(); return msq->q_perm.id; } static inline bool msg_fits_inqueue(struct msg_queue *msq, size_t msgsz) { return msgsz + msq->q_cbytes <= msq->q_qbytes && 1 + msq->q_qnum <= msq->q_qbytes; } static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss, size_t msgsz) { mss->tsk = current; mss->msgsz = msgsz; /* * No memory barrier required: we did ipc_lock_object(), * and the waker obtains that lock before calling wake_q_add(). */ __set_current_state(TASK_INTERRUPTIBLE); list_add_tail(&mss->list, &msq->q_senders); } static inline void ss_del(struct msg_sender *mss) { if (mss->list.next) list_del(&mss->list); } static void ss_wakeup(struct msg_queue *msq, struct wake_q_head *wake_q, bool kill) { struct msg_sender *mss, *t; struct task_struct *stop_tsk = NULL; struct list_head *h = &msq->q_senders; list_for_each_entry_safe(mss, t, h, list) { if (kill) mss->list.next = NULL; /* * Stop at the first task we don't wakeup, * we've already iterated the original * sender queue. */ else if (stop_tsk == mss->tsk) break; /* * We are not in an EIDRM scenario here, therefore * verify that we really need to wakeup the task. * To maintain current semantics and wakeup order, * move the sender to the tail on behalf of the * blocked task. */ else if (!msg_fits_inqueue(msq, mss->msgsz)) { if (!stop_tsk) stop_tsk = mss->tsk; list_move_tail(&mss->list, &msq->q_senders); continue; } wake_q_add(wake_q, mss->tsk); } } static void expunge_all(struct msg_queue *msq, int res, struct wake_q_head *wake_q) { struct msg_receiver *msr, *t; list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) { struct task_struct *r_tsk; r_tsk = get_task_struct(msr->r_tsk); /* see MSG_BARRIER for purpose/pairing */ smp_store_release(&msr->r_msg, ERR_PTR(res)); wake_q_add_safe(wake_q, r_tsk); } } /* * freeque() wakes up waiters on the sender and receiver waiting queue, * removes the message queue from message queue ID IDR, and cleans up all the * messages associated with this queue. * * msg_ids.rwsem (writer) and the spinlock for this message queue are held * before freeque() is called. msg_ids.rwsem remains locked on exit. 
*/ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) __releases(RCU) __releases(&msq->q_perm) { struct msg_msg *msg, *t; struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm); DEFINE_WAKE_Q(wake_q); expunge_all(msq, -EIDRM, &wake_q); ss_wakeup(msq, &wake_q, true); msg_rmid(ns, msq); ipc_unlock_object(&msq->q_perm); wake_up_q(&wake_q); rcu_read_unlock(); list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) { atomic_dec(&ns->msg_hdrs); free_msg(msg); } atomic_sub(msq->q_cbytes, &ns->msg_bytes); ipc_update_pid(&msq->q_lspid, NULL); ipc_update_pid(&msq->q_lrpid, NULL); ipc_rcu_putref(&msq->q_perm, msg_rcu_free); } long ksys_msgget(key_t key, int msgflg) { struct ipc_namespace *ns; static const struct ipc_ops msg_ops = { .getnew = newque, .associate = security_msg_queue_associate, }; struct ipc_params msg_params; ns = current->nsproxy->ipc_ns; msg_params.key = key; msg_params.flg = msgflg; return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params); } SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg) { return ksys_msgget(key, msgflg); } static inline unsigned long copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version) { switch (version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: { struct msqid_ds out; memset(&out, 0, sizeof(out)); ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm); out.msg_stime = in->msg_stime; out.msg_rtime = in->msg_rtime; out.msg_ctime = in->msg_ctime; if (in->msg_cbytes > USHRT_MAX) out.msg_cbytes = USHRT_MAX; else out.msg_cbytes = in->msg_cbytes; out.msg_lcbytes = in->msg_cbytes; if (in->msg_qnum > USHRT_MAX) out.msg_qnum = USHRT_MAX; else out.msg_qnum = in->msg_qnum; if (in->msg_qbytes > USHRT_MAX) out.msg_qbytes = USHRT_MAX; else out.msg_qbytes = in->msg_qbytes; out.msg_lqbytes = in->msg_qbytes; out.msg_lspid = in->msg_lspid; out.msg_lrpid = in->msg_lrpid; return copy_to_user(buf, &out, sizeof(out)); } default: return -EINVAL; } } static inline unsigned long copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version) { switch (version) { case IPC_64: if (copy_from_user(out, buf, sizeof(*out))) return -EFAULT; return 0; case IPC_OLD: { struct msqid_ds tbuf_old; if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) return -EFAULT; out->msg_perm.uid = tbuf_old.msg_perm.uid; out->msg_perm.gid = tbuf_old.msg_perm.gid; out->msg_perm.mode = tbuf_old.msg_perm.mode; if (tbuf_old.msg_qbytes == 0) out->msg_qbytes = tbuf_old.msg_lqbytes; else out->msg_qbytes = tbuf_old.msg_qbytes; return 0; } default: return -EINVAL; } } /* * This function handles some msgctl commands which require the rwsem * to be held in write mode. * NOTE: no locks must be held, the rwsem is taken inside this function. 
*/ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd, struct ipc64_perm *perm, int msg_qbytes) { struct kern_ipc_perm *ipcp; struct msg_queue *msq; int err; down_write(&msg_ids(ns).rwsem); rcu_read_lock(); ipcp = ipcctl_obtain_check(ns, &msg_ids(ns), msqid, cmd, perm, msg_qbytes); if (IS_ERR(ipcp)) { err = PTR_ERR(ipcp); goto out_unlock1; } msq = container_of(ipcp, struct msg_queue, q_perm); err = security_msg_queue_msgctl(&msq->q_perm, cmd); if (err) goto out_unlock1; switch (cmd) { case IPC_RMID: ipc_lock_object(&msq->q_perm); /* freeque unlocks the ipc object and rcu */ freeque(ns, ipcp); goto out_up; case IPC_SET: { DEFINE_WAKE_Q(wake_q); if (msg_qbytes > ns->msg_ctlmnb && !capable(CAP_SYS_RESOURCE)) { err = -EPERM; goto out_unlock1; } ipc_lock_object(&msq->q_perm); err = ipc_update_perm(perm, ipcp); if (err) goto out_unlock0; msq->q_qbytes = msg_qbytes; msq->q_ctime = ktime_get_real_seconds(); /* * Sleeping receivers might be excluded by * stricter permissions. */ expunge_all(msq, -EAGAIN, &wake_q); /* * Sleeping senders might be able to send * due to a larger queue size. */ ss_wakeup(msq, &wake_q, false); ipc_unlock_object(&msq->q_perm); wake_up_q(&wake_q); goto out_unlock1; } default: err = -EINVAL; goto out_unlock1; } out_unlock0: ipc_unlock_object(&msq->q_perm); out_unlock1: rcu_read_unlock(); out_up: up_write(&msg_ids(ns).rwsem); return err; } static int msgctl_info(struct ipc_namespace *ns, int msqid, int cmd, struct msginfo *msginfo) { int err; int max_idx; /* * We must not return kernel stack data. * due to padding, it's not enough * to set all member fields. */ err = security_msg_queue_msgctl(NULL, cmd); if (err) return err; memset(msginfo, 0, sizeof(*msginfo)); msginfo->msgmni = ns->msg_ctlmni; msginfo->msgmax = ns->msg_ctlmax; msginfo->msgmnb = ns->msg_ctlmnb; msginfo->msgssz = MSGSSZ; msginfo->msgseg = MSGSEG; down_read(&msg_ids(ns).rwsem); if (cmd == MSG_INFO) { msginfo->msgpool = msg_ids(ns).in_use; msginfo->msgmap = atomic_read(&ns->msg_hdrs); msginfo->msgtql = atomic_read(&ns->msg_bytes); } else { msginfo->msgmap = MSGMAP; msginfo->msgpool = MSGPOOL; msginfo->msgtql = MSGTQL; } max_idx = ipc_get_maxidx(&msg_ids(ns)); up_read(&msg_ids(ns).rwsem); return (max_idx < 0) ? 
0 : max_idx; } static int msgctl_stat(struct ipc_namespace *ns, int msqid, int cmd, struct msqid64_ds *p) { struct msg_queue *msq; int err; memset(p, 0, sizeof(*p)); rcu_read_lock(); if (cmd == MSG_STAT || cmd == MSG_STAT_ANY) { msq = msq_obtain_object(ns, msqid); if (IS_ERR(msq)) { err = PTR_ERR(msq); goto out_unlock; } } else { /* IPC_STAT */ msq = msq_obtain_object_check(ns, msqid); if (IS_ERR(msq)) { err = PTR_ERR(msq); goto out_unlock; } } /* see comment for SHM_STAT_ANY */ if (cmd == MSG_STAT_ANY) audit_ipc_obj(&msq->q_perm); else { err = -EACCES; if (ipcperms(ns, &msq->q_perm, S_IRUGO)) goto out_unlock; } err = security_msg_queue_msgctl(&msq->q_perm, cmd); if (err) goto out_unlock; ipc_lock_object(&msq->q_perm); if (!ipc_valid_object(&msq->q_perm)) { ipc_unlock_object(&msq->q_perm); err = -EIDRM; goto out_unlock; } kernel_to_ipc64_perm(&msq->q_perm, &p->msg_perm); p->msg_stime = msq->q_stime; p->msg_rtime = msq->q_rtime; p->msg_ctime = msq->q_ctime; #ifndef CONFIG_64BIT p->msg_stime_high = msq->q_stime >> 32; p->msg_rtime_high = msq->q_rtime >> 32; p->msg_ctime_high = msq->q_ctime >> 32; #endif p->msg_cbytes = msq->q_cbytes; p->msg_qnum = msq->q_qnum; p->msg_qbytes = msq->q_qbytes; p->msg_lspid = pid_vnr(msq->q_lspid); p->msg_lrpid = pid_vnr(msq->q_lrpid); if (cmd == IPC_STAT) { /* * As defined in SUS: * Return 0 on success */ err = 0; } else { /* * MSG_STAT and MSG_STAT_ANY (both Linux specific) * Return the full id, including the sequence number */ err = msq->q_perm.id; } ipc_unlock_object(&msq->q_perm); out_unlock: rcu_read_unlock(); return err; } static long ksys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf, int version) { struct ipc_namespace *ns; struct msqid64_ds msqid64; int err; if (msqid < 0 || cmd < 0) return -EINVAL; ns = current->nsproxy->ipc_ns; switch (cmd) { case IPC_INFO: case MSG_INFO: { struct msginfo msginfo; err = msgctl_info(ns, msqid, cmd, &msginfo); if (err < 0) return err; if (copy_to_user(buf, &msginfo, sizeof(struct msginfo))) err = -EFAULT; return err; } case MSG_STAT: /* msqid is an index rather than a msg queue id */ case MSG_STAT_ANY: case IPC_STAT: err = msgctl_stat(ns, msqid, cmd, &msqid64); if (err < 0) return err; if (copy_msqid_to_user(buf, &msqid64, version)) err = -EFAULT; return err; case IPC_SET: if (copy_msqid_from_user(&msqid64, buf, version)) return -EFAULT; return msgctl_down(ns, msqid, cmd, &msqid64.msg_perm, msqid64.msg_qbytes); case IPC_RMID: return msgctl_down(ns, msqid, cmd, NULL, 0); default: return -EINVAL; } } SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf) { return ksys_msgctl(msqid, cmd, buf, IPC_64); } #ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION long ksys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf) { int version = ipc_parse_version(&cmd); return ksys_msgctl(msqid, cmd, buf, version); } SYSCALL_DEFINE3(old_msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf) { return ksys_old_msgctl(msqid, cmd, buf); } #endif #ifdef CONFIG_COMPAT struct compat_msqid_ds { struct compat_ipc_perm msg_perm; compat_uptr_t msg_first; compat_uptr_t msg_last; old_time32_t msg_stime; old_time32_t msg_rtime; old_time32_t msg_ctime; compat_ulong_t msg_lcbytes; compat_ulong_t msg_lqbytes; unsigned short msg_cbytes; unsigned short msg_qnum; unsigned short msg_qbytes; compat_ipc_pid_t msg_lspid; compat_ipc_pid_t msg_lrpid; }; static int copy_compat_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version) { memset(out, 0, sizeof(*out)); if (version == IPC_64) { struct compat_msqid64_ds 
__user *p = buf; if (get_compat_ipc64_perm(&out->msg_perm, &p->msg_perm)) return -EFAULT; if (get_user(out->msg_qbytes, &p->msg_qbytes)) return -EFAULT; } else { struct compat_msqid_ds __user *p = buf; if (get_compat_ipc_perm(&out->msg_perm, &p->msg_perm)) return -EFAULT; if (get_user(out->msg_qbytes, &p->msg_qbytes)) return -EFAULT; } return 0; } static int copy_compat_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version) { if (version == IPC_64) { struct compat_msqid64_ds v; memset(&v, 0, sizeof(v)); to_compat_ipc64_perm(&v.msg_perm, &in->msg_perm); v.msg_stime = lower_32_bits(in->msg_stime); v.msg_stime_high = upper_32_bits(in->msg_stime); v.msg_rtime = lower_32_bits(in->msg_rtime); v.msg_rtime_high = upper_32_bits(in->msg_rtime); v.msg_ctime = lower_32_bits(in->msg_ctime); v.msg_ctime_high = upper_32_bits(in->msg_ctime); v.msg_cbytes = in->msg_cbytes; v.msg_qnum = in->msg_qnum; v.msg_qbytes = in->msg_qbytes; v.msg_lspid = in->msg_lspid; v.msg_lrpid = in->msg_lrpid; return copy_to_user(buf, &v, sizeof(v)); } else { struct compat_msqid_ds v; memset(&v, 0, sizeof(v)); to_compat_ipc_perm(&v.msg_perm, &in->msg_perm); v.msg_stime = in->msg_stime; v.msg_rtime = in->msg_rtime; v.msg_ctime = in->msg_ctime; v.msg_cbytes = in->msg_cbytes; v.msg_qnum = in->msg_qnum; v.msg_qbytes = in->msg_qbytes; v.msg_lspid = in->msg_lspid; v.msg_lrpid = in->msg_lrpid; return copy_to_user(buf, &v, sizeof(v)); } } static long compat_ksys_msgctl(int msqid, int cmd, void __user *uptr, int version) { struct ipc_namespace *ns; int err; struct msqid64_ds msqid64; ns = current->nsproxy->ipc_ns; if (msqid < 0 || cmd < 0) return -EINVAL; switch (cmd & (~IPC_64)) { case IPC_INFO: case MSG_INFO: { struct msginfo msginfo; err = msgctl_info(ns, msqid, cmd, &msginfo); if (err < 0) return err; if (copy_to_user(uptr, &msginfo, sizeof(struct msginfo))) err = -EFAULT; return err; } case IPC_STAT: case MSG_STAT: case MSG_STAT_ANY: err = msgctl_stat(ns, msqid, cmd, &msqid64); if (err < 0) return err; if (copy_compat_msqid_to_user(uptr, &msqid64, version)) err = -EFAULT; return err; case IPC_SET: if (copy_compat_msqid_from_user(&msqid64, uptr, version)) return -EFAULT; return msgctl_down(ns, msqid, cmd, &msqid64.msg_perm, msqid64.msg_qbytes); case IPC_RMID: return msgctl_down(ns, msqid, cmd, NULL, 0); default: return -EINVAL; } } COMPAT_SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, void __user *, uptr) { return compat_ksys_msgctl(msqid, cmd, uptr, IPC_64); } #ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION long compat_ksys_old_msgctl(int msqid, int cmd, void __user *uptr) { int version = compat_ipc_parse_version(&cmd); return compat_ksys_msgctl(msqid, cmd, uptr, version); } COMPAT_SYSCALL_DEFINE3(old_msgctl, int, msqid, int, cmd, void __user *, uptr) { return compat_ksys_old_msgctl(msqid, cmd, uptr); } #endif #endif static int testmsg(struct msg_msg *msg, long type, int mode) { switch (mode) { case SEARCH_ANY: case SEARCH_NUMBER: return 1; case SEARCH_LESSEQUAL: if (msg->m_type <= type) return 1; break; case SEARCH_EQUAL: if (msg->m_type == type) return 1; break; case SEARCH_NOTEQUAL: if (msg->m_type != type) return 1; break; } return 0; } static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg, struct wake_q_head *wake_q) { struct msg_receiver *msr, *t; list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) { if (testmsg(msg, msr->r_msgtype, msr->r_mode) && !security_msg_queue_msgrcv(&msq->q_perm, msg, msr->r_tsk, msr->r_msgtype, msr->r_mode)) { list_del(&msr->r_list); if (msr->r_maxsize < 
msg->m_ts) { wake_q_add(wake_q, msr->r_tsk); /* See expunge_all regarding memory barrier */ smp_store_release(&msr->r_msg, ERR_PTR(-E2BIG)); } else { ipc_update_pid(&msq->q_lrpid, task_pid(msr->r_tsk)); msq->q_rtime = ktime_get_real_seconds(); wake_q_add(wake_q, msr->r_tsk); /* See expunge_all regarding memory barrier */ smp_store_release(&msr->r_msg, msg); return 1; } } } return 0; } static long do_msgsnd(int msqid, long mtype, void __user *mtext, size_t msgsz, int msgflg) { struct msg_queue *msq; struct msg_msg *msg; int err; struct ipc_namespace *ns; DEFINE_WAKE_Q(wake_q); ns = current->nsproxy->ipc_ns; if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0) return -EINVAL; if (mtype < 1) return -EINVAL; msg = load_msg(mtext, msgsz); if (IS_ERR(msg)) return PTR_ERR(msg); msg->m_type = mtype; msg->m_ts = msgsz; rcu_read_lock(); msq = msq_obtain_object_check(ns, msqid); if (IS_ERR(msq)) { err = PTR_ERR(msq); goto out_unlock1; } ipc_lock_object(&msq->q_perm); for (;;) { struct msg_sender s; err = -EACCES; if (ipcperms(ns, &msq->q_perm, S_IWUGO)) goto out_unlock0; /* raced with RMID? */ if (!ipc_valid_object(&msq->q_perm)) { err = -EIDRM; goto out_unlock0; } err = security_msg_queue_msgsnd(&msq->q_perm, msg, msgflg); if (err) goto out_unlock0; if (msg_fits_inqueue(msq, msgsz)) break; /* queue full, wait: */ if (msgflg & IPC_NOWAIT) { err = -EAGAIN; goto out_unlock0; } /* enqueue the sender and prepare to block */ ss_add(msq, &s, msgsz); if (!ipc_rcu_getref(&msq->q_perm)) { err = -EIDRM; goto out_unlock0; } ipc_unlock_object(&msq->q_perm); rcu_read_unlock(); schedule(); rcu_read_lock(); ipc_lock_object(&msq->q_perm); ipc_rcu_putref(&msq->q_perm, msg_rcu_free); /* raced with RMID? */ if (!ipc_valid_object(&msq->q_perm)) { err = -EIDRM; goto out_unlock0; } ss_del(&s); if (signal_pending(current)) { err = -ERESTARTNOHAND; goto out_unlock0; } } ipc_update_pid(&msq->q_lspid, task_tgid(current)); msq->q_stime = ktime_get_real_seconds(); if (!pipelined_send(msq, msg, &wake_q)) { /* no one is waiting for this message, enqueue it */ list_add_tail(&msg->m_list, &msq->q_messages); msq->q_cbytes += msgsz; msq->q_qnum++; atomic_add(msgsz, &ns->msg_bytes); atomic_inc(&ns->msg_hdrs); } err = 0; msg = NULL; out_unlock0: ipc_unlock_object(&msq->q_perm); wake_up_q(&wake_q); out_unlock1: rcu_read_unlock(); if (msg != NULL) free_msg(msg); return err; } long ksys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg) { long mtype; if (get_user(mtype, &msgp->mtype)) return -EFAULT; return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg); } SYSCALL_DEFINE4(msgsnd, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz, int, msgflg) { return ksys_msgsnd(msqid, msgp, msgsz, msgflg); } #ifdef CONFIG_COMPAT struct compat_msgbuf { compat_long_t mtype; char mtext[1]; }; long compat_ksys_msgsnd(int msqid, compat_uptr_t msgp, compat_ssize_t msgsz, int msgflg) { struct compat_msgbuf __user *up = compat_ptr(msgp); compat_long_t mtype; if (get_user(mtype, &up->mtype)) return -EFAULT; return do_msgsnd(msqid, mtype, up->mtext, (ssize_t)msgsz, msgflg); } COMPAT_SYSCALL_DEFINE4(msgsnd, int, msqid, compat_uptr_t, msgp, compat_ssize_t, msgsz, int, msgflg) { return compat_ksys_msgsnd(msqid, msgp, msgsz, msgflg); } #endif static inline int convert_mode(long *msgtyp, int msgflg) { if (msgflg & MSG_COPY) return SEARCH_NUMBER; /* * find message of correct type. * msgtyp = 0 => get first. * msgtyp > 0 => get first message of matching type. * msgtyp < 0 => get message with least type must be < abs(msgtype). 
*/ if (*msgtyp == 0) return SEARCH_ANY; if (*msgtyp < 0) { if (*msgtyp == LONG_MIN) /* -LONG_MIN is undefined */ *msgtyp = LONG_MAX; else *msgtyp = -*msgtyp; return SEARCH_LESSEQUAL; } if (msgflg & MSG_EXCEPT) return SEARCH_NOTEQUAL; return SEARCH_EQUAL; } static long do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz) { struct msgbuf __user *msgp = dest; size_t msgsz; if (put_user(msg->m_type, &msgp->mtype)) return -EFAULT; msgsz = (bufsz > msg->m_ts) ? msg->m_ts : bufsz; if (store_msg(msgp->mtext, msg, msgsz)) return -EFAULT; return msgsz; } #ifdef CONFIG_CHECKPOINT_RESTORE /* * This function creates new kernel message structure, large enough to store * bufsz message bytes. */ static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz) { struct msg_msg *copy; /* * Create dummy message to copy real message to. */ copy = load_msg(buf, bufsz); if (!IS_ERR(copy)) copy->m_ts = bufsz; return copy; } static inline void free_copy(struct msg_msg *copy) { if (copy) free_msg(copy); } #else static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz) { return ERR_PTR(-ENOSYS); } static inline void free_copy(struct msg_msg *copy) { } #endif static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode) { struct msg_msg *msg, *found = NULL; long count = 0; list_for_each_entry(msg, &msq->q_messages, m_list) { if (testmsg(msg, *msgtyp, mode) && !security_msg_queue_msgrcv(&msq->q_perm, msg, current, *msgtyp, mode)) { if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) { *msgtyp = msg->m_type - 1; found = msg; } else if (mode == SEARCH_NUMBER) { if (*msgtyp == count) return msg; } else return msg; count++; } } return found ?: ERR_PTR(-EAGAIN); } static long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgflg, long (*msg_handler)(void __user *, struct msg_msg *, size_t)) { int mode; struct msg_queue *msq; struct ipc_namespace *ns; struct msg_msg *msg, *copy = NULL; DEFINE_WAKE_Q(wake_q); ns = current->nsproxy->ipc_ns; if (msqid < 0 || (long) bufsz < 0) return -EINVAL; if (msgflg & MSG_COPY) { if ((msgflg & MSG_EXCEPT) || !(msgflg & IPC_NOWAIT)) return -EINVAL; copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax)); if (IS_ERR(copy)) return PTR_ERR(copy); } mode = convert_mode(&msgtyp, msgflg); rcu_read_lock(); msq = msq_obtain_object_check(ns, msqid); if (IS_ERR(msq)) { rcu_read_unlock(); free_copy(copy); return PTR_ERR(msq); } for (;;) { struct msg_receiver msr_d; msg = ERR_PTR(-EACCES); if (ipcperms(ns, &msq->q_perm, S_IRUGO)) goto out_unlock1; ipc_lock_object(&msq->q_perm); /* raced with RMID? */ if (!ipc_valid_object(&msq->q_perm)) { msg = ERR_PTR(-EIDRM); goto out_unlock0; } msg = find_msg(msq, &msgtyp, mode); if (!IS_ERR(msg)) { /* * Found a suitable message. * Unlink it from the queue. */ if ((bufsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) { msg = ERR_PTR(-E2BIG); goto out_unlock0; } /* * If we are copying, then do not unlink message and do * not update queue parameters. */ if (msgflg & MSG_COPY) { msg = copy_msg(msg, copy); goto out_unlock0; } list_del(&msg->m_list); msq->q_qnum--; msq->q_rtime = ktime_get_real_seconds(); ipc_update_pid(&msq->q_lrpid, task_tgid(current)); msq->q_cbytes -= msg->m_ts; atomic_sub(msg->m_ts, &ns->msg_bytes); atomic_dec(&ns->msg_hdrs); ss_wakeup(msq, &wake_q, false); goto out_unlock0; } /* No message waiting. 
Wait for a message */ if (msgflg & IPC_NOWAIT) { msg = ERR_PTR(-ENOMSG); goto out_unlock0; } list_add_tail(&msr_d.r_list, &msq->q_receivers); msr_d.r_tsk = current; msr_d.r_msgtype = msgtyp; msr_d.r_mode = mode; if (msgflg & MSG_NOERROR) msr_d.r_maxsize = INT_MAX; else msr_d.r_maxsize = bufsz; /* memory barrier not require due to ipc_lock_object() */ WRITE_ONCE(msr_d.r_msg, ERR_PTR(-EAGAIN)); /* memory barrier not required, we own ipc_lock_object() */ __set_current_state(TASK_INTERRUPTIBLE); ipc_unlock_object(&msq->q_perm); rcu_read_unlock(); schedule(); /* * Lockless receive, part 1: * We don't hold a reference to the queue and getting a * reference would defeat the idea of a lockless operation, * thus the code relies on rcu to guarantee the existence of * msq: * Prior to destruction, expunge_all(-EIRDM) changes r_msg. * Thus if r_msg is -EAGAIN, then the queue not yet destroyed. */ rcu_read_lock(); /* * Lockless receive, part 2: * The work in pipelined_send() and expunge_all(): * - Set pointer to message * - Queue the receiver task for later wakeup * - Wake up the process after the lock is dropped. * * Should the process wake up before this wakeup (due to a * signal) it will either see the message and continue ... */ msg = READ_ONCE(msr_d.r_msg); if (msg != ERR_PTR(-EAGAIN)) { /* see MSG_BARRIER for purpose/pairing */ smp_acquire__after_ctrl_dep(); goto out_unlock1; } /* * ... or see -EAGAIN, acquire the lock to check the message * again. */ ipc_lock_object(&msq->q_perm); msg = READ_ONCE(msr_d.r_msg); if (msg != ERR_PTR(-EAGAIN)) goto out_unlock0; list_del(&msr_d.r_list); if (signal_pending(current)) { msg = ERR_PTR(-ERESTARTNOHAND); goto out_unlock0; } ipc_unlock_object(&msq->q_perm); } out_unlock0: ipc_unlock_object(&msq->q_perm); wake_up_q(&wake_q); out_unlock1: rcu_read_unlock(); if (IS_ERR(msg)) { free_copy(copy); return PTR_ERR(msg); } bufsz = msg_handler(buf, msg, bufsz); free_msg(msg); return bufsz; } long ksys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz, long msgtyp, int msgflg) { return do_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg, do_msg_fill); } SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz, long, msgtyp, int, msgflg) { return ksys_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg); } #ifdef CONFIG_COMPAT static long compat_do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz) { struct compat_msgbuf __user *msgp = dest; size_t msgsz; if (put_user(msg->m_type, &msgp->mtype)) return -EFAULT; msgsz = (bufsz > msg->m_ts) ? 
msg->m_ts : bufsz; if (store_msg(msgp->mtext, msg, msgsz)) return -EFAULT; return msgsz; } long compat_ksys_msgrcv(int msqid, compat_uptr_t msgp, compat_ssize_t msgsz, compat_long_t msgtyp, int msgflg) { return do_msgrcv(msqid, compat_ptr(msgp), (ssize_t)msgsz, (long)msgtyp, msgflg, compat_do_msg_fill); } COMPAT_SYSCALL_DEFINE5(msgrcv, int, msqid, compat_uptr_t, msgp, compat_ssize_t, msgsz, compat_long_t, msgtyp, int, msgflg) { return compat_ksys_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg); } #endif void msg_init_ns(struct ipc_namespace *ns) { ns->msg_ctlmax = MSGMAX; ns->msg_ctlmnb = MSGMNB; ns->msg_ctlmni = MSGMNI; atomic_set(&ns->msg_bytes, 0); atomic_set(&ns->msg_hdrs, 0); ipc_init_ids(&ns->ids[IPC_MSG_IDS]); } #ifdef CONFIG_IPC_NS void msg_exit_ns(struct ipc_namespace *ns) { free_ipcs(ns, &msg_ids(ns), freeque); idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr); rhashtable_destroy(&ns->ids[IPC_MSG_IDS].key_ht); } #endif #ifdef CONFIG_PROC_FS static int sysvipc_msg_proc_show(struct seq_file *s, void *it) { struct pid_namespace *pid_ns = ipc_seq_pid_ns(s); struct user_namespace *user_ns = seq_user_ns(s); struct kern_ipc_perm *ipcp = it; struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm); seq_printf(s, "%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10llu %10llu %10llu\n", msq->q_perm.key, msq->q_perm.id, msq->q_perm.mode, msq->q_cbytes, msq->q_qnum, pid_nr_ns(msq->q_lspid, pid_ns), pid_nr_ns(msq->q_lrpid, pid_ns), from_kuid_munged(user_ns, msq->q_perm.uid), from_kgid_munged(user_ns, msq->q_perm.gid), from_kuid_munged(user_ns, msq->q_perm.cuid), from_kgid_munged(user_ns, msq->q_perm.cgid), msq->q_stime, msq->q_rtime, msq->q_ctime); return 0; } #endif void __init msg_init(void) { msg_init_ns(&init_ipc_ns); ipc_init_proc_interface("sysvipc/msg", " key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n", IPC_MSG_IDS, sysvipc_msg_proc_show); } |
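/*
 * Illustrative user-space sketch (not part of the kernel sources): a
 * minimal round trip through the syscalls implemented above, using the
 * standard <sys/msg.h> API.  msgget() reaches newque(), the negative
 * msgtyp passed to msgrcv() exercises the SEARCH_LESSEQUAL path of
 * convert_mode()/find_msg(), and IPC_RMID ends up in freeque().  Error
 * handling is trimmed to keep the sketch short.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/msg.h>

struct demo_msg {
	long mtype;		/* must be > 0; matches struct msgbuf */
	char mtext[64];
};

int main(void)
{
	struct demo_msg snd = { .mtype = 2 }, rcv;
	int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);

	if (id < 0)
		return 1;
	strcpy(snd.mtext, "hello");
	msgsnd(id, &snd, sizeof(snd.mtext), 0);
	/* msgtyp = -2: lowest type <= 2, i.e. SEARCH_LESSEQUAL. */
	if (msgrcv(id, &rcv, sizeof(rcv.mtext), -2, 0) >= 0)
		printf("got type %ld: %s\n", rcv.mtype, rcv.mtext);
	msgctl(id, IPC_RMID, NULL);
	return 0;
}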
// SPDX-License-Identifier: GPL-2.0+
/*
 * HID driver for quirky Macally devices
 *
 * Copyright (c) 2019 Alex Henrie <alexhenrie24@gmail.com>
 */

#include <linux/hid.h>
#include <linux/module.h>

#include "hid-ids.h"

MODULE_AUTHOR("Alex Henrie <alexhenrie24@gmail.com>");
MODULE_DESCRIPTION("Macally devices");
MODULE_LICENSE("GPL");

/*
 * The Macally ikey keyboard says that its logical and usage maximums are both
 * 101, but the power key is 102 and the equals key is 103
 */
static __u8 *macally_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                                  unsigned int *rsize)
{
        if (*rsize >= 60 && rdesc[53] == 0x65 && rdesc[59] == 0x65) {
                hid_info(hdev,
                         "fixing up Macally ikey keyboard report descriptor\n");
                rdesc[53] = rdesc[59] = 0x67;
        }
        return rdesc;
}

static const struct hid_device_id macally_id_table[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_SOLID_YEAR,
                         USB_DEVICE_ID_MACALLY_IKEY_KEYBOARD) },
        { }
};
MODULE_DEVICE_TABLE(hid, macally_id_table);

static struct hid_driver macally_driver = {
        .name = "macally",
        .id_table = macally_id_table,
        .report_fixup = macally_report_fixup,
};

module_hid_driver(macally_driver);
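macally_report_fixup() patches two bytes at fixed offsets (53 and 59), relying on the *rsize >= 60 check for bounds. As an illustration only (none of these names exist in the driver), the same kind of fix can be written as a scan for the one-byte Logical Maximum short item (prefix 0x25), which makes the bounds handling explicit.

/*
 * Illustrative sketch, not part of the driver: scan a report descriptor for
 * a one-byte "Logical Maximum" short item (prefix 0x25) and rewrite its data
 * byte.  Respects each item's declared data size and handles long items
 * (prefix 0xfe), so it never reads past rsize.
 */
#include <stdint.h>
#include <stddef.h>

static int patch_logical_max(uint8_t *rdesc, size_t rsize,
                             uint8_t old_val, uint8_t new_val)
{
        size_t i = 0;

        while (i < rsize) {
                uint8_t prefix = rdesc[i];
                size_t dsize = prefix & 0x03;   /* 0, 1, 2 or 4 data bytes */

                if (dsize == 3)
                        dsize = 4;
                if (prefix == 0xfe)             /* long item: next byte is its data size */
                        dsize = (i + 1 < rsize) ? 2 + rdesc[i + 1] : rsize;

                if (prefix == 0x25 && dsize == 1 &&
                    i + 1 < rsize && rdesc[i + 1] == old_val) {
                        rdesc[i + 1] = new_val;
                        return 0;
                }
                i += 1 + dsize;
        }
        return -1;
}

The in-tree driver hard-codes the offsets instead, which is simpler and just as safe for a descriptor whose layout is fixed by the quirky firmware.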
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  HID driver for various devices which are apparently based on the same chipset
 *  from certain vendor which produces chips that contain wrong LogicalMaximum
 *  value in their HID report descriptor. Currently supported devices are:
 *
 *    Ortek PKB-1700
 *    Ortek WKB-2000
 *    iHome IMAC-A210S
 *    Skycable wireless presenter
 *
 *  Copyright (c) 2010 Johnathon Harris <jmharris@gmail.com>
 *  Copyright (c) 2011 Jiri Kosina
 */

/*
 */

#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>

#include "hid-ids.h"

static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                                unsigned int *rsize)
{
        if (*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x01) {
                hid_info(hdev, "Fixing up logical maximum in report descriptor (Ortek)\n");
                rdesc[55] = 0x92;
        } else if (*rsize >= 54 && rdesc[52] == 0x25 && rdesc[53] == 0x01) {
                hid_info(hdev, "Fixing up logical maximum in report descriptor (Skycable)\n");
                rdesc[53] = 0x65;
        }
        return rdesc;
}

static const struct hid_device_id ortek_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
        { }
};
MODULE_DEVICE_TABLE(hid, ortek_devices);

static struct hid_driver ortek_driver = {
        .name = "ortek",
        .id_table = ortek_devices,
        .report_fixup = ortek_report_fixup
};
module_hid_driver(ortek_driver);

MODULE_LICENSE("GPL");
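Both branches above key on the prefix byte 0x25 before rewriting the byte that follows it. The short standalone sketch below (illustrative names, not driver code) decodes a HID short-item prefix into its size/type/tag fields, which is why 0x25 means "Logical Maximum, Global, one data byte" and the next descriptor byte is the value being corrected (0x01 becomes 0x92 for the Ortek/iHome devices and 0x65 for the Skycable presenter).

/*
 * Standalone sketch: decode a HID short-item prefix byte.
 * bits 1..0 = bSize (3 means 4 bytes), bits 3..2 = bType, bits 7..4 = bTag.
 * 0x25 -> size 1, type 1 (Global), tag 0x2 => "Logical Maximum", so the
 * following descriptor byte holds the maximum that the fixups rewrite.
 */
#include <stdio.h>
#include <stdint.h>

static void decode_prefix(uint8_t prefix)
{
        static const char *types[] = { "Main", "Global", "Local", "Reserved" };
        unsigned int size = prefix & 0x03;
        unsigned int type = (prefix >> 2) & 0x03;
        unsigned int tag  = (prefix >> 4) & 0x0f;

        if (size == 3)
                size = 4;
        printf("0x%02x: tag 0x%x, %s item, %u data byte(s)\n",
               prefix, tag, types[type], size);
}

int main(void)
{
        decode_prefix(0x25);    /* Logical Maximum, 1 byte (patched above) */
        decode_prefix(0x29);    /* Usage Maximum, 1 byte (Local item)      */
        decode_prefix(0x95);    /* Report Count, 1 byte (Global item)      */
        return 0;
}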
4 4 4 4 11 4 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 | /* SPDX-License-Identifier: GPL-2.0 */ /* XDP user-space ring structure * Copyright(c) 2018 Intel Corporation. */ #ifndef _LINUX_XSK_QUEUE_H #define _LINUX_XSK_QUEUE_H #include <linux/types.h> #include <linux/if_xdp.h> #include <net/xdp_sock.h> #include <net/xsk_buff_pool.h> #include "xsk.h" struct xdp_ring { u32 producer ____cacheline_aligned_in_smp; /* Hinder the adjacent cache prefetcher to prefetch the consumer * pointer if the producer pointer is touched and vice versa. */ u32 pad1 ____cacheline_aligned_in_smp; u32 consumer ____cacheline_aligned_in_smp; u32 pad2 ____cacheline_aligned_in_smp; u32 flags; u32 pad3 ____cacheline_aligned_in_smp; }; /* Used for the RX and TX queues for packets */ struct xdp_rxtx_ring { struct xdp_ring ptrs; struct xdp_desc desc[] ____cacheline_aligned_in_smp; }; /* Used for the fill and completion queues for buffers */ struct xdp_umem_ring { struct xdp_ring ptrs; u64 desc[] ____cacheline_aligned_in_smp; }; struct xsk_queue { u32 ring_mask; u32 nentries; u32 cached_prod; u32 cached_cons; struct xdp_ring *ring; u64 invalid_descs; u64 queue_empty_descs; }; /* The structure of the shared state of the rings are a simple * circular buffer, as outlined in * Documentation/core-api/circular-buffers.rst. For the Rx and * completion ring, the kernel is the producer and user space is the * consumer. For the Tx and fill rings, the kernel is the consumer and * user space is the producer. * * producer consumer * * if (LOAD ->consumer) { (A) LOAD.acq ->producer (C) * STORE $data LOAD $data * STORE.rel ->producer (B) STORE.rel ->consumer (D) * } * * (A) pairs with (D), and (B) pairs with (C). * * Starting with (B), it protects the data from being written after * the producer pointer. If this barrier was missing, the consumer * could observe the producer pointer being set and thus load the data * before the producer has written the new data. 
The consumer would in * this case load the old data. * * (C) protects the consumer from speculatively loading the data before * the producer pointer actually has been read. If we do not have this * barrier, some architectures could load old data as speculative loads * are not discarded as the CPU does not know there is a dependency * between ->producer and data. * * (A) is a control dependency that separates the load of ->consumer * from the stores of $data. In case ->consumer indicates there is no * room in the buffer to store $data we do not. The dependency will * order both of the stores after the loads. So no barrier is needed. * * (D) protects the load of the data to be observed to happen after the * store of the consumer pointer. If we did not have this memory * barrier, the producer could observe the consumer pointer being set * and overwrite the data with a new value before the consumer got the * chance to read the old value. The consumer would thus miss reading * the old entry and very likely read the new entry twice, once right * now and again after circling through the ring. */ /* The operations on the rings are the following: * * producer consumer * * RESERVE entries PEEK in the ring for entries * WRITE data into the ring READ data from the ring * SUBMIT entries RELEASE entries * * The producer reserves one or more entries in the ring. It can then * fill in these entries and finally submit them so that they can be * seen and read by the consumer. * * The consumer peeks into the ring to see if the producer has written * any new entries. If so, the consumer can then read these entries * and when it is done reading them release them back to the producer * so that the producer can use these slots to fill in new entries. * * The function names below reflect these operations. */ /* Functions that read and validate content from consumer rings. */ static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr) { struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; if (q->cached_cons != q->cached_prod) { u32 idx = q->cached_cons & q->ring_mask; *addr = ring->desc[idx]; return true; } return false; } static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc) { u64 chunk, chunk_end; chunk = xp_aligned_extract_addr(pool, desc->addr); if (likely(desc->len)) { chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len - 1); if (chunk != chunk_end) return false; } if (chunk >= pool->addrs_cnt) return false; if (desc->options) return false; return true; } static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc) { u64 addr, base_addr; base_addr = xp_unaligned_extract_addr(desc->addr); addr = xp_unaligned_add_offset_to_addr(desc->addr); if (desc->len > pool->chunk_size) return false; if (base_addr >= pool->addrs_cnt || addr >= pool->addrs_cnt || addr + desc->len > pool->addrs_cnt || xp_desc_crosses_non_contig_pg(pool, addr, desc->len)) return false; if (desc->options) return false; return true; } static inline bool xp_validate_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc) { return pool->unaligned ? 
xp_unaligned_validate_desc(pool, desc) : xp_aligned_validate_desc(pool, desc); } static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d, struct xsk_buff_pool *pool) { if (!xp_validate_desc(pool, d)) { q->invalid_descs++; return false; } return true; } static inline bool xskq_cons_read_desc(struct xsk_queue *q, struct xdp_desc *desc, struct xsk_buff_pool *pool) { while (q->cached_cons != q->cached_prod) { struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring; u32 idx = q->cached_cons & q->ring_mask; *desc = ring->desc[idx]; if (xskq_cons_is_valid_desc(q, desc, pool)) return true; q->cached_cons++; } return false; } static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt) { q->cached_cons += cnt; } static inline u32 xskq_cons_read_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool, u32 max) { u32 cached_cons = q->cached_cons, nb_entries = 0; struct xdp_desc *descs = pool->tx_descs; while (cached_cons != q->cached_prod && nb_entries < max) { struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring; u32 idx = cached_cons & q->ring_mask; descs[nb_entries] = ring->desc[idx]; if (unlikely(!xskq_cons_is_valid_desc(q, &descs[nb_entries], pool))) { /* Skip the entry */ cached_cons++; continue; } nb_entries++; cached_cons++; } /* Release valid plus any invalid entries */ xskq_cons_release_n(q, cached_cons - q->cached_cons); return nb_entries; } /* Functions for consumers */ static inline void __xskq_cons_release(struct xsk_queue *q) { smp_store_release(&q->ring->consumer, q->cached_cons); /* D, matchees A */ } static inline void __xskq_cons_peek(struct xsk_queue *q) { /* Refresh the local pointer */ q->cached_prod = smp_load_acquire(&q->ring->producer); /* C, matches B */ } static inline void xskq_cons_get_entries(struct xsk_queue *q) { __xskq_cons_release(q); __xskq_cons_peek(q); } static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max) { u32 entries = q->cached_prod - q->cached_cons; if (entries >= max) return max; __xskq_cons_peek(q); entries = q->cached_prod - q->cached_cons; return entries >= max ? max : entries; } static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt) { return xskq_cons_nb_entries(q, cnt) >= cnt ? true : false; } static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr) { if (q->cached_prod == q->cached_cons) xskq_cons_get_entries(q); return xskq_cons_read_addr_unchecked(q, addr); } static inline bool xskq_cons_peek_desc(struct xsk_queue *q, struct xdp_desc *desc, struct xsk_buff_pool *pool) { if (q->cached_prod == q->cached_cons) xskq_cons_get_entries(q); return xskq_cons_read_desc(q, desc, pool); } /* To improve performance in the xskq_cons_release functions, only update local state here. * Reflect this to global state when we get new entries from the ring in * xskq_cons_get_entries() and whenever Rx or Tx processing are completed in the NAPI loop. 
*/ static inline void xskq_cons_release(struct xsk_queue *q) { q->cached_cons++; } static inline bool xskq_cons_is_full(struct xsk_queue *q) { /* No barriers needed since data is not accessed */ return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer) == q->nentries; } static inline u32 xskq_cons_present_entries(struct xsk_queue *q) { /* No barriers needed since data is not accessed */ return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer); } /* Functions for producers */ static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max) { u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons); if (free_entries >= max) return max; /* Refresh the local tail pointer */ q->cached_cons = READ_ONCE(q->ring->consumer); free_entries = q->nentries - (q->cached_prod - q->cached_cons); return free_entries >= max ? max : free_entries; } static inline bool xskq_prod_is_full(struct xsk_queue *q) { return xskq_prod_nb_free(q, 1) ? false : true; } static inline void xskq_prod_cancel(struct xsk_queue *q) { q->cached_prod--; } static inline int xskq_prod_reserve(struct xsk_queue *q) { if (xskq_prod_is_full(q)) return -ENOSPC; /* A, matches D */ q->cached_prod++; return 0; } static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr) { struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; if (xskq_prod_is_full(q)) return -ENOSPC; /* A, matches D */ ring->desc[q->cached_prod++ & q->ring_mask] = addr; return 0; } static inline void xskq_prod_write_addr_batch(struct xsk_queue *q, struct xdp_desc *descs, u32 nb_entries) { struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; u32 i, cached_prod; /* A, matches D */ cached_prod = q->cached_prod; for (i = 0; i < nb_entries; i++) ring->desc[cached_prod++ & q->ring_mask] = descs[i].addr; q->cached_prod = cached_prod; } static inline int xskq_prod_reserve_desc(struct xsk_queue *q, u64 addr, u32 len) { struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring; u32 idx; if (xskq_prod_is_full(q)) return -ENOSPC; /* A, matches D */ idx = q->cached_prod++ & q->ring_mask; ring->desc[idx].addr = addr; ring->desc[idx].len = len; return 0; } static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx) { smp_store_release(&q->ring->producer, idx); /* B, matches C */ } static inline void xskq_prod_submit(struct xsk_queue *q) { __xskq_prod_submit(q, q->cached_prod); } static inline void xskq_prod_submit_addr(struct xsk_queue *q, u64 addr) { struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; u32 idx = q->ring->producer; ring->desc[idx++ & q->ring_mask] = addr; __xskq_prod_submit(q, idx); } static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries) { __xskq_prod_submit(q, q->ring->producer + nb_entries); } static inline bool xskq_prod_is_empty(struct xsk_queue *q) { /* No barriers needed since data is not accessed */ return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer); } /* For both producers and consumers */ static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q) { return q ? q->invalid_descs : 0; } static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q) { return q ? q->queue_empty_descs : 0; } struct xsk_queue *xskq_create(u32 nentries, bool umem_queue); void xskq_destroy(struct xsk_queue *q_ops); #endif /* _LINUX_XSK_QUEUE_H */ |
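The (A)-(D) pairing described in the comment block maps onto smp_load_acquire()/smp_store_release() in __xskq_cons_peek(), __xskq_cons_release() and __xskq_prod_submit(). The following is a minimal single-producer/single-consumer sketch in portable C11 atomics; the ring size, field names and descriptor type are illustrative and not the AF_XDP ring ABI, and the producer-side acquire stands in for the plain load plus control dependency the kernel relies on at (A).

/*
 * Minimal SPSC ring sketch mirroring the (A)-(D) pairing above: release on
 * the index that publishes data, acquire on the index read by the other side.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define RING_SIZE 64u                   /* power of two, like q->nentries */

struct ring {
        _Atomic uint32_t producer;
        _Atomic uint32_t consumer;
        uint64_t desc[RING_SIZE];
};

/* producer side */
static bool ring_push(struct ring *r, uint64_t val)
{
        uint32_t prod = atomic_load_explicit(&r->producer, memory_order_relaxed);
        uint32_t cons = atomic_load_explicit(&r->consumer, memory_order_acquire);

        if (prod - cons == RING_SIZE)   /* full: (A) check of ->consumer */
                return false;

        r->desc[prod & (RING_SIZE - 1)] = val;  /* STORE $data */
        /* (B): publish the data before the new producer index is visible */
        atomic_store_explicit(&r->producer, prod + 1, memory_order_release);
        return true;
}

/* consumer side */
static bool ring_pop(struct ring *r, uint64_t *val)
{
        uint32_t cons = atomic_load_explicit(&r->consumer, memory_order_relaxed);
        /* (C): acquire pairs with (B), so the data load below sees the entry */
        uint32_t prod = atomic_load_explicit(&r->producer, memory_order_acquire);

        if (cons == prod)               /* empty */
                return false;

        *val = r->desc[cons & (RING_SIZE - 1)]; /* LOAD $data */
        /* (D): release pairs with (A); the slot may be reused only after this */
        atomic_store_explicit(&r->consumer, cons + 1, memory_order_release);
        return true;
}

int main(void)
{
        static struct ring r;           /* zero-initialized */
        uint64_t v;

        ring_push(&r, 42);
        return (ring_pop(&r, &v) && v == 42) ? 0 : 1;
}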
4 1 1 1 2 2 1 1 1 15 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 | /* SPDX-License-Identifier: GPL-2.0-only */ /* * KVM dirty ring implementation * * Copyright 2019 Red Hat, Inc. */ #include <linux/kvm_host.h> #include <linux/kvm.h> #include <linux/vmalloc.h> #include <linux/kvm_dirty_ring.h> #include <trace/events/kvm.h> #include "mmu_lock.h" int __weak kvm_cpu_dirty_log_size(void) { return 0; } u32 kvm_dirty_ring_get_rsvd_entries(void) { return KVM_DIRTY_RING_RSVD_ENTRIES + kvm_cpu_dirty_log_size(); } static u32 kvm_dirty_ring_used(struct kvm_dirty_ring *ring) { return READ_ONCE(ring->dirty_index) - READ_ONCE(ring->reset_index); } bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring) { return kvm_dirty_ring_used(ring) >= ring->soft_limit; } static bool kvm_dirty_ring_full(struct kvm_dirty_ring *ring) { return kvm_dirty_ring_used(ring) >= ring->size; } struct kvm_dirty_ring *kvm_dirty_ring_get(struct kvm *kvm) { struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); WARN_ON_ONCE(vcpu->kvm != kvm); return &vcpu->dirty_ring; } static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask) { struct kvm_memory_slot *memslot; int as_id, id; as_id = slot >> 16; id = (u16)slot; if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS) return; memslot = id_to_memslot(__kvm_memslots(kvm, as_id), id); if (!memslot || (offset + __fls(mask)) >= memslot->npages) return; KVM_MMU_LOCK(kvm); kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset, mask); KVM_MMU_UNLOCK(kvm); } int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size) { ring->dirty_gfns = vzalloc(size); if (!ring->dirty_gfns) return -ENOMEM; ring->size = size / sizeof(struct kvm_dirty_gfn); ring->soft_limit = ring->size - kvm_dirty_ring_get_rsvd_entries(); ring->dirty_index = 0; ring->reset_index = 0; ring->index = index; return 0; } static inline void kvm_dirty_gfn_set_invalid(struct kvm_dirty_gfn *gfn) { gfn->flags = 0; } static inline void kvm_dirty_gfn_set_dirtied(struct kvm_dirty_gfn *gfn) { gfn->flags = KVM_DIRTY_GFN_F_DIRTY; } static inline bool kvm_dirty_gfn_harvested(struct kvm_dirty_gfn *gfn) { return gfn->flags & KVM_DIRTY_GFN_F_RESET; } int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring) { u32 cur_slot, next_slot; u64 cur_offset, next_offset; unsigned long mask; int count = 0; struct kvm_dirty_gfn *entry; bool first_round = true; /* This is only needed to make compilers happy */ cur_slot = cur_offset = mask = 0; while (true) { entry = &ring->dirty_gfns[ring->reset_index & (ring->size - 1)]; if (!kvm_dirty_gfn_harvested(entry)) break; next_slot = READ_ONCE(entry->slot); next_offset = READ_ONCE(entry->offset); /* Update the flags to reflect that this GFN is reset */ kvm_dirty_gfn_set_invalid(entry); ring->reset_index++; count++; /* * Try to coalesce the reset operations when the guest is * scanning pages in the same slot. 
*/ if (!first_round && next_slot == cur_slot) { s64 delta = next_offset - cur_offset; if (delta >= 0 && delta < BITS_PER_LONG) { mask |= 1ull << delta; continue; } /* Backwards visit, careful about overflows! */ if (delta > -BITS_PER_LONG && delta < 0 && (mask << -delta >> -delta) == mask) { cur_offset = next_offset; mask = (mask << -delta) | 1; continue; } } kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask); cur_slot = next_slot; cur_offset = next_offset; mask = 1; first_round = false; } kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask); trace_kvm_dirty_ring_reset(ring); return count; } void kvm_dirty_ring_push(struct kvm_dirty_ring *ring, u32 slot, u64 offset) { struct kvm_dirty_gfn *entry; /* It should never get full */ WARN_ON_ONCE(kvm_dirty_ring_full(ring)); entry = &ring->dirty_gfns[ring->dirty_index & (ring->size - 1)]; entry->slot = slot; entry->offset = offset; /* * Make sure the data is filled in before we publish this to * the userspace program. There's no paired kernel-side reader. */ smp_wmb(); kvm_dirty_gfn_set_dirtied(entry); ring->dirty_index++; trace_kvm_dirty_ring_push(ring, slot, offset); } struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset) { return vmalloc_to_page((void *)ring->dirty_gfns + offset * PAGE_SIZE); } void kvm_dirty_ring_free(struct kvm_dirty_ring *ring) { vfree(ring->dirty_gfns); ring->dirty_gfns = NULL; } |
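kvm_dirty_ring_reset() coalesces consecutive harvested entries from the same slot into a (cur_offset, mask) pair as long as the offsets stay within BITS_PER_LONG of each other, including a limited backwards step where the existing mask is shifted up. The sketch below reproduces just that delta/mask arithmetic on a plain array of offsets so it is easier to follow; flush() stands in for kvm_reset_dirty_gfn(), the mask==0 sentinel replaces first_round, and none of these names are kernel API.

/*
 * Standalone sketch of the coalescing step above: gather offsets into a
 * base + 64-bit mask, flushing whenever the next offset does not fit.
 */
#include <stdio.h>
#include <stdint.h>

#define BITS 64

static void flush(uint64_t base, uint64_t mask)
{
        if (mask)
                printf("reset base %llu mask %#llx\n",
                       (unsigned long long)base, (unsigned long long)mask);
}

static void coalesce(const uint64_t *offs, int n)
{
        uint64_t base = 0, mask = 0;
        int i;

        for (i = 0; i < n; i++) {
                int64_t delta = (int64_t)(offs[i] - base);

                if (mask && delta >= 0 && delta < BITS) {
                        mask |= 1ull << delta;          /* forward, fits in mask */
                        continue;
                }
                if (mask && delta < 0 && delta > -BITS &&
                    (mask << -delta >> -delta) == mask) {
                        base = offs[i];                 /* backwards, shift mask up */
                        mask = (mask << -delta) | 1;
                        continue;
                }
                flush(base, mask);                      /* cannot coalesce: start over */
                base = offs[i];
                mask = 1;
        }
        flush(base, mask);
}

int main(void)
{
        const uint64_t offs[] = { 10, 11, 12, 8, 200, 201 };

        /* 10,11,12,8 collapse into one flush (base 8, mask 0x1d); 200,201 another */
        coalesce(offs, sizeof(offs) / sizeof(offs[0]));
        return 0;
}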
41 41 41 2 2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 | /* -*- linux-c -*- */ /* fs/reiserfs/procfs.c */ /* * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README */ /* proc info support a la one created by Sizif@Botik.RU for PGC */ #include <linux/module.h> #include <linux/time.h> #include <linux/seq_file.h> #include <linux/uaccess.h> #include "reiserfs.h" #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/blkdev.h> /* * LOCKING: * * These guys are evicted from procfs as the very first step in ->kill_sb(). * */ static int show_version(struct seq_file *m, void *unused) { struct super_block *sb = m->private; char *format; if (REISERFS_SB(sb)->s_properties & (1 << REISERFS_3_6)) { format = "3.6"; } else if (REISERFS_SB(sb)->s_properties & (1 << REISERFS_3_5)) { format = "3.5"; } else { format = "unknown"; } seq_printf(m, "%s format\twith checks %s\n", format, #if defined( CONFIG_REISERFS_CHECK ) "on" #else "off" #endif ); return 0; } #define SF( x ) ( r -> x ) #define SFP( x ) SF( s_proc_info_data.x ) #define SFPL( x ) SFP( x[ level ] ) #define SFPF( x ) SFP( scan_bitmap.x ) #define SFPJ( x ) SFP( journal.x ) #define D2C( x ) le16_to_cpu( x ) #define D4C( x ) le32_to_cpu( x ) #define DF( x ) D2C( rs -> s_v1.x ) #define DFL( x ) D4C( rs -> s_v1.x ) #define objectid_map( s, rs ) (old_format_only (s) ? 
\ (__le32 *)((struct reiserfs_super_block_v1 *)rs + 1) : \ (__le32 *)(rs + 1)) #define MAP( i ) D4C( objectid_map( sb, rs )[ i ] ) #define DJF( x ) le32_to_cpu( rs -> x ) #define DJP( x ) le32_to_cpu( jp -> x ) #define JF( x ) ( r -> s_journal -> x ) static int show_super(struct seq_file *m, void *unused) { struct super_block *sb = m->private; struct reiserfs_sb_info *r = REISERFS_SB(sb); seq_printf(m, "state: \t%s\n" "mount options: \t%s%s%s%s%s%s%s%s%s%s%s\n" "gen. counter: \t%i\n" "s_disk_reads: \t%i\n" "s_disk_writes: \t%i\n" "s_fix_nodes: \t%i\n" "s_do_balance: \t%i\n" "s_unneeded_left_neighbor: \t%i\n" "s_good_search_by_key_reada: \t%i\n" "s_bmaps: \t%i\n" "s_bmaps_without_search: \t%i\n" "s_direct2indirect: \t%i\n" "s_indirect2direct: \t%i\n" "\n" "max_hash_collisions: \t%i\n" "breads: \t%lu\n" "bread_misses: \t%lu\n" "search_by_key: \t%lu\n" "search_by_key_fs_changed: \t%lu\n" "search_by_key_restarted: \t%lu\n" "insert_item_restarted: \t%lu\n" "paste_into_item_restarted: \t%lu\n" "cut_from_item_restarted: \t%lu\n" "delete_solid_item_restarted: \t%lu\n" "delete_item_restarted: \t%lu\n" "leaked_oid: \t%lu\n" "leaves_removable: \t%lu\n", SF(s_mount_state) == REISERFS_VALID_FS ? "REISERFS_VALID_FS" : "REISERFS_ERROR_FS", reiserfs_r5_hash(sb) ? "FORCE_R5 " : "", reiserfs_rupasov_hash(sb) ? "FORCE_RUPASOV " : "", reiserfs_tea_hash(sb) ? "FORCE_TEA " : "", reiserfs_hash_detect(sb) ? "DETECT_HASH " : "", reiserfs_no_border(sb) ? "NO_BORDER " : "BORDER ", reiserfs_no_unhashed_relocation(sb) ? "NO_UNHASHED_RELOCATION " : "", reiserfs_hashed_relocation(sb) ? "UNHASHED_RELOCATION " : "", reiserfs_test4(sb) ? "TEST4 " : "", have_large_tails(sb) ? "TAILS " : have_small_tails(sb) ? "SMALL_TAILS " : "NO_TAILS ", replay_only(sb) ? "REPLAY_ONLY " : "", convert_reiserfs(sb) ? 
"CONV " : "", atomic_read(&r->s_generation_counter), SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes), SF(s_do_balance), SF(s_unneeded_left_neighbor), SF(s_good_search_by_key_reada), SF(s_bmaps), SF(s_bmaps_without_search), SF(s_direct2indirect), SF(s_indirect2direct), SFP(max_hash_collisions), SFP(breads), SFP(bread_miss), SFP(search_by_key), SFP(search_by_key_fs_changed), SFP(search_by_key_restarted), SFP(insert_item_restarted), SFP(paste_into_item_restarted), SFP(cut_from_item_restarted), SFP(delete_solid_item_restarted), SFP(delete_item_restarted), SFP(leaked_oid), SFP(leaves_removable)); return 0; } static int show_per_level(struct seq_file *m, void *unused) { struct super_block *sb = m->private; struct reiserfs_sb_info *r = REISERFS_SB(sb); int level; seq_printf(m, "level\t" " balances" " [sbk: reads" " fs_changed" " restarted]" " free space" " items" " can_remove" " lnum" " rnum" " lbytes" " rbytes" " get_neig" " get_neig_res" " need_l_neig" " need_r_neig" "\n"); for (level = 0; level < MAX_HEIGHT; ++level) { seq_printf(m, "%i\t" " %12lu" " %12lu" " %12lu" " %12lu" " %12lu" " %12lu" " %12lu" " %12li" " %12li" " %12li" " %12li" " %12lu" " %12lu" " %12lu" " %12lu" "\n", level, SFPL(balance_at), SFPL(sbk_read_at), SFPL(sbk_fs_changed), SFPL(sbk_restarted), SFPL(free_at), SFPL(items_at), SFPL(can_node_be_removed), SFPL(lnum), SFPL(rnum), SFPL(lbytes), SFPL(rbytes), SFPL(get_neighbors), SFPL(get_neighbors_restart), SFPL(need_l_neighbor), SFPL(need_r_neighbor) ); } return 0; } static int show_bitmap(struct seq_file *m, void *unused) { struct super_block *sb = m->private; struct reiserfs_sb_info *r = REISERFS_SB(sb); seq_printf(m, "free_block: %lu\n" " scan_bitmap:" " wait" " bmap" " retry" " stolen" " journal_hint" "journal_nohint" "\n" " %14lu" " %14lu" " %14lu" " %14lu" " %14lu" " %14lu" " %14lu" "\n", SFP(free_block), SFPF(call), SFPF(wait), SFPF(bmap), SFPF(retry), SFPF(stolen), SFPF(in_journal_hint), SFPF(in_journal_nohint)); return 0; } static int show_on_disk_super(struct seq_file *m, void *unused) { struct super_block *sb = m->private; struct reiserfs_sb_info *sb_info = REISERFS_SB(sb); struct reiserfs_super_block *rs = sb_info->s_rs; int hash_code = DFL(s_hash_function_code); __u32 flags = DJF(s_flags); seq_printf(m, "block_count: \t%i\n" "free_blocks: \t%i\n" "root_block: \t%i\n" "blocksize: \t%i\n" "oid_maxsize: \t%i\n" "oid_cursize: \t%i\n" "umount_state: \t%i\n" "magic: \t%10.10s\n" "fs_state: \t%i\n" "hash: \t%s\n" "tree_height: \t%i\n" "bmap_nr: \t%i\n" "version: \t%i\n" "flags: \t%x[%s]\n" "reserved_for_journal: \t%i\n", DFL(s_block_count), DFL(s_free_blocks), DFL(s_root_block), DF(s_blocksize), DF(s_oid_maxsize), DF(s_oid_cursize), DF(s_umount_state), rs->s_v1.s_magic, DF(s_fs_state), hash_code == TEA_HASH ? "tea" : (hash_code == YURA_HASH) ? "rupasov" : (hash_code == R5_HASH) ? "r5" : (hash_code == UNSET_HASH) ? "unset" : "unknown", DF(s_tree_height), DF(s_bmap_nr), DF(s_version), flags, (flags & reiserfs_attrs_cleared) ? "attrs_cleared" : "", DF(s_reserved_for_journal)); return 0; } static int show_oidmap(struct seq_file *m, void *unused) { struct super_block *sb = m->private; struct reiserfs_sb_info *sb_info = REISERFS_SB(sb); struct reiserfs_super_block *rs = sb_info->s_rs; unsigned int mapsize = le16_to_cpu(rs->s_v1.s_oid_cursize); unsigned long total_used = 0; int i; for (i = 0; i < mapsize; ++i) { __u32 right; right = (i == mapsize - 1) ? MAX_KEY_OBJECTID : MAP(i + 1); seq_printf(m, "%s: [ %x .. %x )\n", (i & 1) ? 
"free" : "used", MAP(i), right); if (!(i & 1)) { total_used += right - MAP(i); } } #if defined( REISERFS_USE_OIDMAPF ) if (sb_info->oidmap.use_file && (sb_info->oidmap.mapf != NULL)) { loff_t size = file_inode(sb_info->oidmap.mapf)->i_size; total_used += size / sizeof(reiserfs_oidinterval_d_t); } #endif seq_printf(m, "total: \t%i [%i/%i] used: %lu [exact]\n", mapsize, mapsize, le16_to_cpu(rs->s_v1.s_oid_maxsize), total_used); return 0; } static time64_t ktime_mono_to_real_seconds(time64_t mono) { ktime_t kt = ktime_set(mono, NSEC_PER_SEC/2); return ktime_divns(ktime_mono_to_real(kt), NSEC_PER_SEC); } static int show_journal(struct seq_file *m, void *unused) { struct super_block *sb = m->private; struct reiserfs_sb_info *r = REISERFS_SB(sb); struct reiserfs_super_block *rs = r->s_rs; struct journal_params *jp = &rs->s_v1.s_journal; seq_printf(m, /* on-disk fields */ "jp_journal_1st_block: \t%i\n" "jp_journal_dev: \t%pg[%x]\n" "jp_journal_size: \t%i\n" "jp_journal_trans_max: \t%i\n" "jp_journal_magic: \t%i\n" "jp_journal_max_batch: \t%i\n" "jp_journal_max_commit_age: \t%i\n" "jp_journal_max_trans_age: \t%i\n" /* incore fields */ "j_1st_reserved_block: \t%i\n" "j_state: \t%li\n" "j_trans_id: \t%u\n" "j_mount_id: \t%lu\n" "j_start: \t%lu\n" "j_len: \t%lu\n" "j_len_alloc: \t%lu\n" "j_wcount: \t%i\n" "j_bcount: \t%lu\n" "j_first_unflushed_offset: \t%lu\n" "j_last_flush_trans_id: \t%u\n" "j_trans_start_time: \t%lli\n" "j_list_bitmap_index: \t%i\n" "j_must_wait: \t%i\n" "j_next_full_flush: \t%i\n" "j_next_async_flush: \t%i\n" "j_cnode_used: \t%i\n" "j_cnode_free: \t%i\n" "\n" /* reiserfs_proc_info_data_t.journal fields */ "in_journal: \t%12lu\n" "in_journal_bitmap: \t%12lu\n" "in_journal_reusable: \t%12lu\n" "lock_journal: \t%12lu\n" "lock_journal_wait: \t%12lu\n" "journal_begin: \t%12lu\n" "journal_relock_writers: \t%12lu\n" "journal_relock_wcount: \t%12lu\n" "mark_dirty: \t%12lu\n" "mark_dirty_already: \t%12lu\n" "mark_dirty_notjournal: \t%12lu\n" "restore_prepared: \t%12lu\n" "prepare: \t%12lu\n" "prepare_retry: \t%12lu\n", DJP(jp_journal_1st_block), SB_JOURNAL(sb)->j_dev_bd, DJP(jp_journal_dev), DJP(jp_journal_size), DJP(jp_journal_trans_max), DJP(jp_journal_magic), DJP(jp_journal_max_batch), SB_JOURNAL(sb)->j_max_commit_age, DJP(jp_journal_max_trans_age), JF(j_1st_reserved_block), JF(j_state), JF(j_trans_id), JF(j_mount_id), JF(j_start), JF(j_len), JF(j_len_alloc), atomic_read(&r->s_journal->j_wcount), JF(j_bcount), JF(j_first_unflushed_offset), JF(j_last_flush_trans_id), ktime_mono_to_real_seconds(JF(j_trans_start_time)), JF(j_list_bitmap_index), JF(j_must_wait), JF(j_next_full_flush), JF(j_next_async_flush), JF(j_cnode_used), JF(j_cnode_free), SFPJ(in_journal), SFPJ(in_journal_bitmap), SFPJ(in_journal_reusable), SFPJ(lock_journal), SFPJ(lock_journal_wait), SFPJ(journal_being), SFPJ(journal_relock_writers), SFPJ(journal_relock_wcount), SFPJ(mark_dirty), SFPJ(mark_dirty_already), SFPJ(mark_dirty_notjournal), SFPJ(restore_prepared), SFPJ(prepare), SFPJ(prepare_retry) ); return 0; } static struct proc_dir_entry *proc_info_root = NULL; static const char proc_info_root_name[] = "fs/reiserfs"; static void add_file(struct super_block *sb, char *name, int (*func) (struct seq_file *, void *)) { proc_create_single_data(name, 0, REISERFS_SB(sb)->procdir, func, sb); } int reiserfs_proc_info_init(struct super_block *sb) { char b[BDEVNAME_SIZE]; char *s; /* Some block devices use /'s */ strlcpy(b, sb->s_id, BDEVNAME_SIZE); s = strchr(b, '/'); if (s) *s = '!'; spin_lock_init(&__PINFO(sb).lock); 
REISERFS_SB(sb)->procdir = proc_mkdir_data(b, 0, proc_info_root, sb); if (REISERFS_SB(sb)->procdir) { add_file(sb, "version", show_version); add_file(sb, "super", show_super); add_file(sb, "per-level", show_per_level); add_file(sb, "bitmap", show_bitmap); add_file(sb, "on-disk-super", show_on_disk_super); add_file(sb, "oidmap", show_oidmap); add_file(sb, "journal", show_journal); return 0; } reiserfs_warning(sb, "cannot create /proc/%s/%s", proc_info_root_name, b); return 1; } int reiserfs_proc_info_done(struct super_block *sb) { struct proc_dir_entry *de = REISERFS_SB(sb)->procdir; if (de) { char b[BDEVNAME_SIZE]; char *s; /* Some block devices use /'s */ strlcpy(b, sb->s_id, BDEVNAME_SIZE); s = strchr(b, '/'); if (s) *s = '!'; remove_proc_subtree(b, proc_info_root); REISERFS_SB(sb)->procdir = NULL; } return 0; } int reiserfs_proc_info_global_init(void) { if (proc_info_root == NULL) { proc_info_root = proc_mkdir(proc_info_root_name, NULL); if (!proc_info_root) { reiserfs_warning(NULL, "cannot create /proc/%s", proc_info_root_name); return 1; } } return 0; } int reiserfs_proc_info_global_done(void) { if (proc_info_root != NULL) { proc_info_root = NULL; remove_proc_entry(proc_info_root_name, NULL); } return 0; } /* * Revision 1.1.8.2 2001/07/15 17:08:42 god * . use get_super() in procfs.c * . remove remove_save_link() from reiserfs_do_truncate() * * I accept terms and conditions stated in the Legal Agreement * (available at http://www.namesys.com/legalese.html) * * Revision 1.1.8.1 2001/07/11 16:48:50 god * proc info support * * I accept terms and conditions stated in the Legal Agreement * (available at http://www.namesys.com/legalese.html) * */ |
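add_file() above boils down to proc_create_single_data(), which hands the super_block through to each show function as m->private. The following is a minimal out-of-tree sketch of the same pattern with purely illustrative names ("procfs-demo", demo_show); it registers one read-only file under a private /proc directory.

/*
 * Sketch of the add_file()/show_*() pattern: a seq_file "show" callback that
 * receives its private pointer via m->private, registered with
 * proc_create_single_data().  Names are illustrative only.
 */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static struct proc_dir_entry *demo_dir;
static char demo_tag[] = "example";

static int demo_show(struct seq_file *m, void *unused)
{
        /* m->private is the pointer passed as the last argument below */
        seq_printf(m, "tag: \t%s\n", (char *)m->private);
        return 0;
}

static int __init demo_init(void)
{
        demo_dir = proc_mkdir("procfs-demo", NULL);
        if (!demo_dir)
                return -ENOMEM;
        proc_create_single_data("info", 0, demo_dir, demo_show, demo_tag);
        return 0;
}

static void __exit demo_exit(void)
{
        remove_proc_subtree("procfs-demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");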
781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 | // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE * Copyright (C) 2016 - 2020 Christoph Hellwig */ #include <linux/init.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/kmod.h> #include <linux/major.h> #include <linux/device_cgroup.h> #include <linux/blkdev.h> #include <linux/backing-dev.h> #include <linux/module.h> #include <linux/blkpg.h> #include <linux/magic.h> #include <linux/buffer_head.h> #include <linux/swap.h> #include <linux/writeback.h> #include <linux/mount.h> #include <linux/pseudo_fs.h> #include <linux/uio.h> #include <linux/namei.h> #include <linux/cleancache.h> #include <linux/part_stat.h> #include <linux/uaccess.h> #include "../fs/internal.h" #include "blk.h" struct bdev_inode { struct block_device bdev; struct inode vfs_inode; }; static inline struct bdev_inode *BDEV_I(struct inode *inode) { return container_of(inode, struct bdev_inode, vfs_inode); } struct block_device *I_BDEV(struct inode *inode) { return &BDEV_I(inode)->bdev; } EXPORT_SYMBOL(I_BDEV); static void bdev_write_inode(struct block_device *bdev) { struct inode *inode = bdev->bd_inode; int ret; spin_lock(&inode->i_lock); while (inode->i_state & I_DIRTY) { spin_unlock(&inode->i_lock); ret = write_inode_now(inode, true); if (ret) { char name[BDEVNAME_SIZE]; pr_warn_ratelimited("VFS: Dirty inode writeback failed " "for block device %s (err=%d).\n", bdevname(bdev, name), ret); } spin_lock(&inode->i_lock); } spin_unlock(&inode->i_lock); } /* Kill _all_ buffers and pagecache , dirty or not.. */ static void kill_bdev(struct block_device *bdev) { struct address_space *mapping = bdev->bd_inode->i_mapping; if (mapping_empty(mapping)) return; invalidate_bh_lrus(); truncate_inode_pages(mapping, 0); } /* Invalidate clean unused buffers and pagecache. */ void invalidate_bdev(struct block_device *bdev) { struct address_space *mapping = bdev->bd_inode->i_mapping; if (mapping->nrpages) { invalidate_bh_lrus(); lru_add_drain_all(); /* make sure all lru add caches are flushed */ invalidate_mapping_pages(mapping, 0, -1); } /* 99% of the time, we don't need to flush the cleancache on the bdev. 
* But, for the strange corners, lets be cautious */ cleancache_invalidate_inode(mapping); } EXPORT_SYMBOL(invalidate_bdev); /* * Drop all buffers & page cache for given bdev range. This function bails * with error if bdev has other exclusive owner (such as filesystem). */ int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart, loff_t lend) { /* * If we don't hold exclusive handle for the device, upgrade to it * while we discard the buffer cache to avoid discarding buffers * under live filesystem. */ if (!(mode & FMODE_EXCL)) { int err = bd_prepare_to_claim(bdev, truncate_bdev_range); if (err) goto invalidate; } truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend); if (!(mode & FMODE_EXCL)) bd_abort_claiming(bdev, truncate_bdev_range); return 0; invalidate: /* * Someone else has handle exclusively open. Try invalidating instead. * The 'end' argument is inclusive so the rounding is safe. */ return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping, lstart >> PAGE_SHIFT, lend >> PAGE_SHIFT); } static void set_init_blocksize(struct block_device *bdev) { unsigned int bsize = bdev_logical_block_size(bdev); loff_t size = i_size_read(bdev->bd_inode); while (bsize < PAGE_SIZE) { if (size & bsize) break; bsize <<= 1; } bdev->bd_inode->i_blkbits = blksize_bits(bsize); } int set_blocksize(struct block_device *bdev, int size) { /* Size must be a power of two, and between 512 and PAGE_SIZE */ if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size)) return -EINVAL; /* Size cannot be smaller than the size supported by the device */ if (size < bdev_logical_block_size(bdev)) return -EINVAL; /* Don't change the size if it is same as current */ if (bdev->bd_inode->i_blkbits != blksize_bits(size)) { sync_blockdev(bdev); bdev->bd_inode->i_blkbits = blksize_bits(size); kill_bdev(bdev); } return 0; } EXPORT_SYMBOL(set_blocksize); int sb_set_blocksize(struct super_block *sb, int size) { if (set_blocksize(sb->s_bdev, size)) return 0; /* If we get here, we know size is power of two * and it's value is between 512 and PAGE_SIZE */ sb->s_blocksize = size; sb->s_blocksize_bits = blksize_bits(size); return sb->s_blocksize; } EXPORT_SYMBOL(sb_set_blocksize); int sb_min_blocksize(struct super_block *sb, int size) { int minsize = bdev_logical_block_size(sb->s_bdev); if (size < minsize) size = minsize; return sb_set_blocksize(sb, size); } EXPORT_SYMBOL(sb_min_blocksize); int sync_blockdev_nowait(struct block_device *bdev) { if (!bdev) return 0; return filemap_flush(bdev->bd_inode->i_mapping); } EXPORT_SYMBOL_GPL(sync_blockdev_nowait); /* * Write out and wait upon all the dirty data associated with a block * device via its mapping. Does not take the superblock lock. */ int sync_blockdev(struct block_device *bdev) { if (!bdev) return 0; return filemap_write_and_wait(bdev->bd_inode->i_mapping); } EXPORT_SYMBOL(sync_blockdev); /* * Write out and wait upon all dirty data associated with this * device. Filesystem data as well as the underlying block * device. Takes the superblock lock. */ int fsync_bdev(struct block_device *bdev) { struct super_block *sb = get_super(bdev); if (sb) { int res = sync_filesystem(sb); drop_super(sb); return res; } return sync_blockdev(bdev); } EXPORT_SYMBOL(fsync_bdev); /** * freeze_bdev -- lock a filesystem and force it into a consistent state * @bdev: blockdevice to lock * * If a superblock is found on this device, we take the s_umount semaphore * on it to make sure nobody unmounts until the snapshot creation is done. 
* The reference counter (bd_fsfreeze_count) guarantees that only the last * unfreeze process can unfreeze the frozen filesystem actually when multiple * freeze requests arrive simultaneously. It counts up in freeze_bdev() and * count down in thaw_bdev(). When it becomes 0, thaw_bdev() will unfreeze * actually. */ int freeze_bdev(struct block_device *bdev) { struct super_block *sb; int error = 0; mutex_lock(&bdev->bd_fsfreeze_mutex); if (++bdev->bd_fsfreeze_count > 1) goto done; sb = get_active_super(bdev); if (!sb) goto sync; if (sb->s_op->freeze_super) error = sb->s_op->freeze_super(sb); else error = freeze_super(sb); deactivate_super(sb); if (error) { bdev->bd_fsfreeze_count--; goto done; } bdev->bd_fsfreeze_sb = sb; sync: sync_blockdev(bdev); done: mutex_unlock(&bdev->bd_fsfreeze_mutex); return error; } EXPORT_SYMBOL(freeze_bdev); /** * thaw_bdev -- unlock filesystem * @bdev: blockdevice to unlock * * Unlocks the filesystem and marks it writeable again after freeze_bdev(). */ int thaw_bdev(struct block_device *bdev) { struct super_block *sb; int error = -EINVAL; mutex_lock(&bdev->bd_fsfreeze_mutex); if (!bdev->bd_fsfreeze_count) goto out; error = 0; if (--bdev->bd_fsfreeze_count > 0) goto out; sb = bdev->bd_fsfreeze_sb; if (!sb) goto out; if (sb->s_op->thaw_super) error = sb->s_op->thaw_super(sb); else error = thaw_super(sb); if (error) bdev->bd_fsfreeze_count++; else bdev->bd_fsfreeze_sb = NULL; out: mutex_unlock(&bdev->bd_fsfreeze_mutex); return error; } EXPORT_SYMBOL(thaw_bdev); /** * bdev_read_page() - Start reading a page from a block device * @bdev: The device to read the page from * @sector: The offset on the device to read the page to (need not be aligned) * @page: The page to read * * On entry, the page should be locked. It will be unlocked when the page * has been read. If the block driver implements rw_page synchronously, * that will be true on exit from this function, but it need not be. * * Errors returned by this function are usually "soft", eg out of memory, or * queue full; callers should try a different route to read this page rather * than propagate an error back up the stack. * * Return: negative errno if an error occurs, 0 if submission was successful. */ int bdev_read_page(struct block_device *bdev, sector_t sector, struct page *page) { const struct block_device_operations *ops = bdev->bd_disk->fops; int result = -EOPNOTSUPP; if (!ops->rw_page || bdev_get_integrity(bdev)) return result; result = blk_queue_enter(bdev->bd_disk->queue, 0); if (result) return result; result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, REQ_OP_READ); blk_queue_exit(bdev->bd_disk->queue); return result; } /** * bdev_write_page() - Start writing a page to a block device * @bdev: The device to write the page to * @sector: The offset on the device to write the page to (need not be aligned) * @page: The page to write * @wbc: The writeback_control for the write * * On entry, the page should be locked and not currently under writeback. * On exit, if the write started successfully, the page will be unlocked and * under writeback. If the write failed already (eg the driver failed to * queue the page to the device), the page will still be locked. If the * caller is a ->writepage implementation, it will need to unlock the page. * * Errors returned by this function are usually "soft", eg out of memory, or * queue full; callers should try a different route to write this page rather * than propagate an error back up the stack. 
* * Return: negative errno if an error occurs, 0 if submission was successful. */ int bdev_write_page(struct block_device *bdev, sector_t sector, struct page *page, struct writeback_control *wbc) { int result; const struct block_device_operations *ops = bdev->bd_disk->fops; if (!ops->rw_page || bdev_get_integrity(bdev)) return -EOPNOTSUPP; result = blk_queue_enter(bdev->bd_disk->queue, 0); if (result) return result; set_page_writeback(page); result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, REQ_OP_WRITE); if (result) { end_page_writeback(page); } else { clean_page_buffers(page); unlock_page(page); } blk_queue_exit(bdev->bd_disk->queue); return result; } /* * pseudo-fs */ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock); static struct kmem_cache * bdev_cachep __read_mostly; static struct inode *bdev_alloc_inode(struct super_block *sb) { struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL); if (!ei) return NULL; memset(&ei->bdev, 0, sizeof(ei->bdev)); return &ei->vfs_inode; } static void bdev_free_inode(struct inode *inode) { struct block_device *bdev = I_BDEV(inode); free_percpu(bdev->bd_stats); kfree(bdev->bd_meta_info); if (!bdev_is_partition(bdev)) { if (bdev->bd_disk && bdev->bd_disk->bdi) bdi_put(bdev->bd_disk->bdi); kfree(bdev->bd_disk); } if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR) blk_free_ext_minor(MINOR(bdev->bd_dev)); kmem_cache_free(bdev_cachep, BDEV_I(inode)); } static void init_once(void *data) { struct bdev_inode *ei = data; inode_init_once(&ei->vfs_inode); } static void bdev_evict_inode(struct inode *inode) { truncate_inode_pages_final(&inode->i_data); invalidate_inode_buffers(inode); /* is it needed here? */ clear_inode(inode); } static const struct super_operations bdev_sops = { .statfs = simple_statfs, .alloc_inode = bdev_alloc_inode, .free_inode = bdev_free_inode, .drop_inode = generic_delete_inode, .evict_inode = bdev_evict_inode, }; static int bd_init_fs_context(struct fs_context *fc) { struct pseudo_fs_context *ctx = init_pseudo(fc, BDEVFS_MAGIC); if (!ctx) return -ENOMEM; fc->s_iflags |= SB_I_CGROUPWB; ctx->ops = &bdev_sops; return 0; } static struct file_system_type bd_type = { .name = "bdev", .init_fs_context = bd_init_fs_context, .kill_sb = kill_anon_super, }; struct super_block *blockdev_superblock __read_mostly; EXPORT_SYMBOL_GPL(blockdev_superblock); void __init bdev_cache_init(void) { int err; static struct vfsmount *bd_mnt; bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode), 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD|SLAB_ACCOUNT|SLAB_PANIC), init_once); err = register_filesystem(&bd_type); if (err) panic("Cannot register bdev pseudo-fs"); bd_mnt = kern_mount(&bd_type); if (IS_ERR(bd_mnt)) panic("Cannot create bdev pseudo-fs"); blockdev_superblock = bd_mnt->mnt_sb; /* For writeback */ } struct block_device *bdev_alloc(struct gendisk *disk, u8 partno) { struct block_device *bdev; struct inode *inode; inode = new_inode(blockdev_superblock); if (!inode) return NULL; inode->i_mode = S_IFBLK; inode->i_rdev = 0; inode->i_data.a_ops = &def_blk_aops; mapping_set_gfp_mask(&inode->i_data, GFP_USER); bdev = I_BDEV(inode); mutex_init(&bdev->bd_fsfreeze_mutex); spin_lock_init(&bdev->bd_size_lock); bdev->bd_partno = partno; bdev->bd_inode = inode; bdev->bd_stats = alloc_percpu(struct disk_stats); if (!bdev->bd_stats) { iput(inode); return NULL; } bdev->bd_disk = disk; return bdev; } void bdev_add(struct block_device *bdev, dev_t dev) { bdev->bd_dev = dev; bdev->bd_inode->i_rdev = dev; 
bdev->bd_inode->i_ino = dev; insert_inode_hash(bdev->bd_inode); } long nr_blockdev_pages(void) { struct inode *inode; long ret = 0; spin_lock(&blockdev_superblock->s_inode_list_lock); list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) ret += inode->i_mapping->nrpages; spin_unlock(&blockdev_superblock->s_inode_list_lock); return ret; } /** * bd_may_claim - test whether a block device can be claimed * @bdev: block device of interest * @whole: whole block device containing @bdev, may equal @bdev * @holder: holder trying to claim @bdev * * Test whether @bdev can be claimed by @holder. * * CONTEXT: * spin_lock(&bdev_lock). * * RETURNS: * %true if @bdev can be claimed, %false otherwise. */ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole, void *holder) { if (bdev->bd_holder == holder) return true; /* already a holder */ else if (bdev->bd_holder != NULL) return false; /* held by someone else */ else if (whole == bdev) return true; /* is a whole device which isn't held */ else if (whole->bd_holder == bd_may_claim) return true; /* is a partition of a device that is being partitioned */ else if (whole->bd_holder != NULL) return false; /* is a partition of a held device */ else return true; /* is a partition of an un-held device */ } /** * bd_prepare_to_claim - claim a block device * @bdev: block device of interest * @holder: holder trying to claim @bdev * * Claim @bdev. This function fails if @bdev is already claimed by another * holder and waits if another claiming is in progress. return, the caller * has ownership of bd_claiming and bd_holder[s]. * * RETURNS: * 0 if @bdev can be claimed, -EBUSY otherwise. */ int bd_prepare_to_claim(struct block_device *bdev, void *holder) { struct block_device *whole = bdev_whole(bdev); if (WARN_ON_ONCE(!holder)) return -EINVAL; retry: spin_lock(&bdev_lock); /* if someone else claimed, fail */ if (!bd_may_claim(bdev, whole, holder)) { spin_unlock(&bdev_lock); return -EBUSY; } /* if claiming is already in progress, wait for it to finish */ if (whole->bd_claiming) { wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0); DEFINE_WAIT(wait); prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE); spin_unlock(&bdev_lock); schedule(); finish_wait(wq, &wait); goto retry; } /* yay, all mine */ whole->bd_claiming = holder; spin_unlock(&bdev_lock); return 0; } EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */ static void bd_clear_claiming(struct block_device *whole, void *holder) { lockdep_assert_held(&bdev_lock); /* tell others that we're done */ BUG_ON(whole->bd_claiming != holder); whole->bd_claiming = NULL; wake_up_bit(&whole->bd_claiming, 0); } /** * bd_finish_claiming - finish claiming of a block device * @bdev: block device of interest * @holder: holder that has claimed @bdev * * Finish exclusive open of a block device. Mark the device as exlusively * open by the holder and wake up all waiters for exclusive open to finish. 
*/ static void bd_finish_claiming(struct block_device *bdev, void *holder) { struct block_device *whole = bdev_whole(bdev); spin_lock(&bdev_lock); BUG_ON(!bd_may_claim(bdev, whole, holder)); /* * Note that for a whole device bd_holders will be incremented twice, * and bd_holder will be set to bd_may_claim before being set to holder */ whole->bd_holders++; whole->bd_holder = bd_may_claim; bdev->bd_holders++; bdev->bd_holder = holder; bd_clear_claiming(whole, holder); spin_unlock(&bdev_lock); } /** * bd_abort_claiming - abort claiming of a block device * @bdev: block device of interest * @holder: holder that has claimed @bdev * * Abort claiming of a block device when the exclusive open failed. This can be * also used when exclusive open is not actually desired and we just needed * to block other exclusive openers for a while. */ void bd_abort_claiming(struct block_device *bdev, void *holder) { spin_lock(&bdev_lock); bd_clear_claiming(bdev_whole(bdev), holder); spin_unlock(&bdev_lock); } EXPORT_SYMBOL(bd_abort_claiming); static void blkdev_flush_mapping(struct block_device *bdev) { WARN_ON_ONCE(bdev->bd_holders); sync_blockdev(bdev); kill_bdev(bdev); bdev_write_inode(bdev); } static int blkdev_get_whole(struct block_device *bdev, fmode_t mode) { struct gendisk *disk = bdev->bd_disk; int ret = 0; if (disk->fops->open) { ret = disk->fops->open(bdev, mode); if (ret) { /* avoid ghost partitions on a removed medium */ if (ret == -ENOMEDIUM && test_bit(GD_NEED_PART_SCAN, &disk->state)) bdev_disk_changed(disk, true); return ret; } } if (!bdev->bd_openers) set_init_blocksize(bdev); if (test_bit(GD_NEED_PART_SCAN, &disk->state)) bdev_disk_changed(disk, false); bdev->bd_openers++; return 0;; } static void blkdev_put_whole(struct block_device *bdev, fmode_t mode) { if (!--bdev->bd_openers) blkdev_flush_mapping(bdev); if (bdev->bd_disk->fops->release) bdev->bd_disk->fops->release(bdev->bd_disk, mode); } static int blkdev_get_part(struct block_device *part, fmode_t mode) { struct gendisk *disk = part->bd_disk; int ret; if (part->bd_openers) goto done; ret = blkdev_get_whole(bdev_whole(part), mode); if (ret) return ret; ret = -ENXIO; if (!bdev_nr_sectors(part)) goto out_blkdev_put; disk->open_partitions++; set_init_blocksize(part); done: part->bd_openers++; return 0; out_blkdev_put: blkdev_put_whole(bdev_whole(part), mode); return ret; } static void blkdev_put_part(struct block_device *part, fmode_t mode) { struct block_device *whole = bdev_whole(part); if (--part->bd_openers) return; blkdev_flush_mapping(part); whole->bd_disk->open_partitions--; blkdev_put_whole(whole, mode); } struct block_device *blkdev_get_no_open(dev_t dev) { struct block_device *bdev; struct inode *inode; inode = ilookup(blockdev_superblock, dev); if (!inode) { blk_request_module(dev); inode = ilookup(blockdev_superblock, dev); if (!inode) return NULL; } /* switch from the inode reference to a device mode one: */ bdev = &BDEV_I(inode)->bdev; if (!kobject_get_unless_zero(&bdev->bd_device.kobj)) bdev = NULL; iput(inode); if (!bdev) return NULL; if ((bdev->bd_disk->flags & GENHD_FL_HIDDEN) || !try_module_get(bdev->bd_disk->fops->owner)) { put_device(&bdev->bd_device); return NULL; } return bdev; } void blkdev_put_no_open(struct block_device *bdev) { module_put(bdev->bd_disk->fops->owner); put_device(&bdev->bd_device); } /** * blkdev_get_by_dev - open a block device by device number * @dev: device number of block device to open * @mode: FMODE_* mask * @holder: exclusive holder identifier * * Open the block device described by device 
number @dev. If @mode includes * %FMODE_EXCL, the block device is opened with exclusive access. Specifying * %FMODE_EXCL with a %NULL @holder is invalid. Exclusive opens may nest for * the same @holder. * * Use this interface ONLY if you really do not have anything better - i.e. when * you are behind a truly sucky interface and all you are given is a device * number. Everything else should use blkdev_get_by_path(). * * CONTEXT: * Might sleep. * * RETURNS: * Reference to the block_device on success, ERR_PTR(-errno) on failure. */ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder) { bool unblock_events = true; struct block_device *bdev; struct gendisk *disk; int ret; ret = devcgroup_check_permission(DEVCG_DEV_BLOCK, MAJOR(dev), MINOR(dev), ((mode & FMODE_READ) ? DEVCG_ACC_READ : 0) | ((mode & FMODE_WRITE) ? DEVCG_ACC_WRITE : 0)); if (ret) return ERR_PTR(ret); bdev = blkdev_get_no_open(dev); if (!bdev) return ERR_PTR(-ENXIO); disk = bdev->bd_disk; if (mode & FMODE_EXCL) { ret = bd_prepare_to_claim(bdev, holder); if (ret) goto put_blkdev; } disk_block_events(disk); mutex_lock(&disk->open_mutex); ret = -ENXIO; if (!disk_live(disk)) goto abort_claiming; if (bdev_is_partition(bdev)) ret = blkdev_get_part(bdev, mode); else ret = blkdev_get_whole(bdev, mode); if (ret) goto abort_claiming; if (mode & FMODE_EXCL) { bd_finish_claiming(bdev, holder); /* * Block event polling for write claims if requested. Any write * holder makes the write_holder state stick until all are * released. This is good enough and tracking individual * writeable reference is too fragile given the way @mode is * used in blkdev_get/put(). */ if ((mode & FMODE_WRITE) && !bdev->bd_write_holder && (disk->event_flags & DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE)) { bdev->bd_write_holder = true; unblock_events = false; } } mutex_unlock(&disk->open_mutex); if (unblock_events) disk_unblock_events(disk); return bdev; abort_claiming: if (mode & FMODE_EXCL) bd_abort_claiming(bdev, holder); mutex_unlock(&disk->open_mutex); disk_unblock_events(disk); put_blkdev: blkdev_put_no_open(bdev); return ERR_PTR(ret); } EXPORT_SYMBOL(blkdev_get_by_dev); /** * blkdev_get_by_path - open a block device by name * @path: path to the block device to open * @mode: FMODE_* mask * @holder: exclusive holder identifier * * Open the block device described by the device file at @path. If @mode * includes %FMODE_EXCL, the block device is opened with exclusive access. * Specifying %FMODE_EXCL with a %NULL @holder is invalid. Exclusive opens may * nest for the same @holder. * * CONTEXT: * Might sleep. * * RETURNS: * Reference to the block_device on success, ERR_PTR(-errno) on failure. */ struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, void *holder) { struct block_device *bdev; dev_t dev; int error; error = lookup_bdev(path, &dev); if (error) return ERR_PTR(error); bdev = blkdev_get_by_dev(dev, mode, holder); if (!IS_ERR(bdev) && (mode & FMODE_WRITE) && bdev_read_only(bdev)) { blkdev_put(bdev, mode); return ERR_PTR(-EACCES); } return bdev; } EXPORT_SYMBOL(blkdev_get_by_path); void blkdev_put(struct block_device *bdev, fmode_t mode) { struct gendisk *disk = bdev->bd_disk; /* * Sync early if it looks like we're the last one. If someone else * opens the block device between now and the decrement of bd_openers * then we did a sync that we didn't need to, but that's not the end * of the world and we want to avoid long (could be several minute) * syncs while holding the mutex. 
*/ if (bdev->bd_openers == 1) sync_blockdev(bdev); mutex_lock(&disk->open_mutex); if (mode & FMODE_EXCL) { struct block_device *whole = bdev_whole(bdev); bool bdev_free; /* * Release a claim on the device. The holder fields * are protected with bdev_lock. open_mutex is to * synchronize disk_holder unlinking. */ spin_lock(&bdev_lock); WARN_ON_ONCE(--bdev->bd_holders < 0); WARN_ON_ONCE(--whole->bd_holders < 0); if ((bdev_free = !bdev->bd_holders)) bdev->bd_holder = NULL; if (!whole->bd_holders) whole->bd_holder = NULL; spin_unlock(&bdev_lock); /* * If this was the last claim, remove holder link and * unblock event polling if it was a write holder. */ if (bdev_free && bdev->bd_write_holder) { disk_unblock_events(disk); bdev->bd_write_holder = false; } } /* * Trigger event checking and tell drivers to flush MEDIA_CHANGE * event. This is to ensure detection of media removal commanded * from userland - e.g. eject(1). */ disk_flush_events(disk, DISK_EVENT_MEDIA_CHANGE); if (bdev_is_partition(bdev)) blkdev_put_part(bdev, mode); else blkdev_put_whole(bdev, mode); mutex_unlock(&disk->open_mutex); blkdev_put_no_open(bdev); } EXPORT_SYMBOL(blkdev_put); /** * lookup_bdev - lookup a struct block_device by name * @pathname: special file representing the block device * @dev: return value of the block device's dev_t * * Look up the block device at @pathname in the current namespace if possible * and store its device number in *@dev. Returns 0 on success or a negative * errno otherwise. */ int lookup_bdev(const char *pathname, dev_t *dev) { struct inode *inode; struct path path; int error; if (!pathname || !*pathname) return -EINVAL; error = kern_path(pathname, LOOKUP_FOLLOW, &path); if (error) return error; inode = d_backing_inode(path.dentry); error = -ENOTBLK; if (!S_ISBLK(inode->i_mode)) goto out_path_put; error = -EACCES; if (!may_open_dev(&path)) goto out_path_put; *dev = inode->i_rdev; error = 0; out_path_put: path_put(&path); return error; } EXPORT_SYMBOL(lookup_bdev); int __invalidate_device(struct block_device *bdev, bool kill_dirty) { struct super_block *sb = get_super(bdev); int res = 0; if (sb) { /* * no need to lock the super, get_super holds the * read mutex so the filesystem cannot go away * under us (->put_super runs with the write lock * held). */ shrink_dcache_sb(sb); res = invalidate_inodes(sb, kill_dirty); drop_super(sb); } invalidate_bdev(bdev); return res; } EXPORT_SYMBOL(__invalidate_device); void sync_bdevs(bool wait) { struct inode *inode, *old_inode = NULL; spin_lock(&blockdev_superblock->s_inode_list_lock); list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) { struct address_space *mapping = inode->i_mapping; struct block_device *bdev; spin_lock(&inode->i_lock); if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) || mapping->nrpages == 0) { spin_unlock(&inode->i_lock); continue; } __iget(inode); spin_unlock(&inode->i_lock); spin_unlock(&blockdev_superblock->s_inode_list_lock); /* * We hold a reference to 'inode' so it couldn't have been * removed from the s_inodes list while we dropped the * s_inode_list_lock. We cannot iput the inode now as we can * be holding the last reference and we cannot iput it under * s_inode_list_lock. So we keep the reference and iput it * later. */ iput(old_inode); old_inode = inode; bdev = I_BDEV(inode); mutex_lock(&bdev->bd_disk->open_mutex); if (!bdev->bd_openers) { ; /* skip */ } else if (wait) { /* * We keep the error status of individual mapping so * that applications can catch the writeback error using * fsync(2). See filemap_fdatawait_keep_errors() for * details.
*/ filemap_fdatawait_keep_errors(inode->i_mapping); } else { filemap_fdatawrite(inode->i_mapping); } mutex_unlock(&bdev->bd_disk->open_mutex); spin_lock(&blockdev_superblock->s_inode_list_lock); } spin_unlock(&blockdev_superblock->s_inode_list_lock); iput(old_inode); }
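/*
 * Illustrative sketch, not part of bdev.c above: how a hypothetical in-kernel
 * user might combine the exclusive-open helpers documented in this file.  The
 * function names are assumptions for the example; the "holder" cookie can be
 * any stable pointer owned by the caller (commonly the caller's private
 * context structure), and the same cookie may be reused for nested exclusive
 * opens.
 */
#include <linux/blkdev.h>	/* blkdev_get_by_path(), blkdev_put() */
#include <linux/err.h>		/* IS_ERR() */

static struct block_device *example_open_excl(const char *path, void *holder)
{
	/*
	 * FMODE_EXCL with a NULL holder is invalid; on failure an ERR_PTR
	 * such as -ENXIO, -EACCES or -EBUSY is returned and the caller is
	 * expected to check it with IS_ERR().
	 */
	return blkdev_get_by_path(path, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  holder);
}

static void example_close_excl(struct block_device *bdev)
{
	/* The mode must again include FMODE_EXCL so the claim is released. */
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
}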
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __IEEE802154_CORE_H #define __IEEE802154_CORE_H #include <net/cfg802154.h> struct cfg802154_registered_device { const struct cfg802154_ops *ops; struct list_head list; /* wpan_phy index, internal only */ int wpan_phy_idx; /* also protected by devlist_mtx */ int opencount; wait_queue_head_t dev_wait; /* protected by RTNL only */ int num_running_ifaces; /* associated wpan interfaces, protected by rtnl or RCU */ struct list_head wpan_dev_list; int devlist_generation, wpan_dev_id; /* must be last because of the way we do wpan_phy_priv(), * and it should at least be aligned to NETDEV_ALIGN */ struct wpan_phy wpan_phy __aligned(NETDEV_ALIGN); }; static inline struct cfg802154_registered_device * wpan_phy_to_rdev(struct wpan_phy *wpan_phy) { BUG_ON(!wpan_phy); return container_of(wpan_phy, struct cfg802154_registered_device, wpan_phy); } extern struct list_head cfg802154_rdev_list; extern int cfg802154_rdev_list_generation; int cfg802154_switch_netns(struct cfg802154_registered_device *rdev, struct net *net); /* free object */ void cfg802154_dev_free(struct cfg802154_registered_device *rdev); struct cfg802154_registered_device * cfg802154_rdev_by_wpan_phy_idx(int wpan_phy_idx); struct wpan_phy *wpan_phy_idx_to_wpan_phy(int wpan_phy_idx); #endif /* __IEEE802154_CORE_H */
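/*
 * Illustrative sketch, not part of core.h: because struct wpan_phy is the
 * last member of struct cfg802154_registered_device, wpan_phy_to_rdev() is a
 * constant-time container_of() and needs no lookup table.  The helper below
 * is hypothetical and simply mirrors what cfg802154_rdev_by_wpan_phy_idx()
 * is declared to do, assuming cfg802154_rdev_list is walked under RTNL.
 */
#include <linux/list.h>
#include <linux/rtnetlink.h>	/* ASSERT_RTNL() */

static struct cfg802154_registered_device *
example_rdev_by_idx(int wpan_phy_idx)
{
	struct cfg802154_registered_device *rdev;

	ASSERT_RTNL();
	list_for_each_entry(rdev, &cfg802154_rdev_list, list) {
		if (rdev->wpan_phy_idx == wpan_phy_idx)
			return rdev;
	}
	return NULL;
}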
/* * Copyright 
(c) 2016 Intel Corporation * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that copyright * notice and this permission notice appear in supporting documentation, and * that the name of the copyright holders not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. The copyright holders make no representations * about the suitability of this software for any purpose. It is provided "as * is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. */ #ifndef __DRM_PLANE_H__ #define __DRM_PLANE_H__ #include <linux/list.h> #include <linux/ctype.h> #include <drm/drm_mode_object.h> #include <drm/drm_color_mgmt.h> #include <drm/drm_rect.h> #include <drm/drm_modeset_lock.h> #include <drm/drm_util.h> struct drm_crtc; struct drm_printer; struct drm_modeset_acquire_ctx; enum drm_scaling_filter { DRM_SCALING_FILTER_DEFAULT, DRM_SCALING_FILTER_NEAREST_NEIGHBOR, }; /** * struct drm_plane_state - mutable plane state * * Please not that the destination coordinates @crtc_x, @crtc_y, @crtc_h and * @crtc_w and the source coordinates @src_x, @src_y, @src_h and @src_w are the * raw coordinates provided by userspace. Drivers should use * drm_atomic_helper_check_plane_state() and only use the derived rectangles in * @src and @dst to program the hardware. */ struct drm_plane_state { /** @plane: backpointer to the plane */ struct drm_plane *plane; /** * @crtc: * * Currently bound CRTC, NULL if disabled. Do not this write directly, * use drm_atomic_set_crtc_for_plane() */ struct drm_crtc *crtc; /** * @fb: * * Currently bound framebuffer. Do not write this directly, use * drm_atomic_set_fb_for_plane() */ struct drm_framebuffer *fb; /** * @fence: * * Optional fence to wait for before scanning out @fb. The core atomic * code will set this when userspace is using explicit fencing. Do not * write this field directly for a driver's implicit fence, use * drm_atomic_set_fence_for_plane() to ensure that an explicit fence is * preserved. * * Drivers should store any implicit fence in this from their * &drm_plane_helper_funcs.prepare_fb callback. See drm_gem_plane_helper_prepare_fb() * and drm_gem_simple_display_pipe_prepare_fb() for suitable helpers. */ struct dma_fence *fence; /** * @crtc_x: * * Left position of visible portion of plane on crtc, signed dest * location allows it to be partially off screen. */ int32_t crtc_x; /** * @crtc_y: * * Upper position of visible portion of plane on crtc, signed dest * location allows it to be partially off screen. */ int32_t crtc_y; /** @crtc_w: width of visible portion of plane on crtc */ /** @crtc_h: height of visible portion of plane on crtc */ uint32_t crtc_w, crtc_h; /** * @src_x: left position of visible portion of plane within plane (in * 16.16 fixed point). */ uint32_t src_x; /** * @src_y: upper position of visible portion of plane within plane (in * 16.16 fixed point). 
*/ uint32_t src_y; /** @src_w: width of visible portion of plane (in 16.16) */ /** @src_h: height of visible portion of plane (in 16.16) */ uint32_t src_h, src_w; /** * @alpha: * Opacity of the plane with 0 as completely transparent and 0xffff as * completely opaque. See drm_plane_create_alpha_property() for more * details. */ u16 alpha; /** * @pixel_blend_mode: * The alpha blending equation selection, describing how the pixels from * the current plane are composited with the background. Value can be * one of DRM_MODE_BLEND_* */ uint16_t pixel_blend_mode; /** * @rotation: * Rotation of the plane. See drm_plane_create_rotation_property() for * more details. */ unsigned int rotation; /** * @zpos: * Priority of the given plane on crtc (optional). * * User-space may set mutable zpos properties so that multiple active * planes on the same CRTC have identical zpos values. This is a * user-space bug, but drivers can solve the conflict by comparing the * plane object IDs; the plane with a higher ID is stacked on top of a * plane with a lower ID. * * See drm_plane_create_zpos_property() and * drm_plane_create_zpos_immutable_property() for more details. */ unsigned int zpos; /** * @normalized_zpos: * Normalized value of zpos: unique, range from 0 to N-1 where N is the * number of active planes for given crtc. Note that the driver must set * &drm_mode_config.normalize_zpos or call drm_atomic_normalize_zpos() to * update this before it can be trusted. */ unsigned int normalized_zpos; /** * @color_encoding: * * Color encoding for non RGB formats */ enum drm_color_encoding color_encoding; /** * @color_range: * * Color range for non RGB formats */ enum drm_color_range color_range; /** * @fb_damage_clips: * * Blob representing damage (area in plane framebuffer that changed * since last plane update) as an array of &drm_mode_rect in framebuffer * coodinates of the attached framebuffer. Note that unlike plane src, * damage clips are not in 16.16 fixed point. * * See drm_plane_get_damage_clips() and * drm_plane_get_damage_clips_count() for accessing these. */ struct drm_property_blob *fb_damage_clips; /** * @src: * * source coordinates of the plane (in 16.16). * * When using drm_atomic_helper_check_plane_state(), * the coordinates are clipped, but the driver may choose * to use unclipped coordinates instead when the hardware * performs the clipping automatically. */ /** * @dst: * * clipped destination coordinates of the plane. * * When using drm_atomic_helper_check_plane_state(), * the coordinates are clipped, but the driver may choose * to use unclipped coordinates instead when the hardware * performs the clipping automatically. */ struct drm_rect src, dst; /** * @visible: * * Visibility of the plane. This can be false even if fb!=NULL and * crtc!=NULL, due to clipping. */ bool visible; /** * @scaling_filter: * * Scaling filter to be applied */ enum drm_scaling_filter scaling_filter; /** * @commit: Tracks the pending commit to prevent use-after-free conditions, * and for async plane updates. * * May be NULL. 
*/ struct drm_crtc_commit *commit; /** @state: backpointer to global drm_atomic_state */ struct drm_atomic_state *state; }; static inline struct drm_rect drm_plane_state_src(const struct drm_plane_state *state) { struct drm_rect src = { .x1 = state->src_x, .y1 = state->src_y, .x2 = state->src_x + state->src_w, .y2 = state->src_y + state->src_h, }; return src; } static inline struct drm_rect drm_plane_state_dest(const struct drm_plane_state *state) { struct drm_rect dest = { .x1 = state->crtc_x, .y1 = state->crtc_y, .x2 = state->crtc_x + state->crtc_w, .y2 = state->crtc_y + state->crtc_h, }; return dest; } /** * struct drm_plane_funcs - driver plane control functions */ struct drm_plane_funcs { /** * @update_plane: * * This is the legacy entry point to enable and configure the plane for * the given CRTC and framebuffer. It is never called to disable the * plane, i.e. the passed-in crtc and fb paramters are never NULL. * * The source rectangle in frame buffer memory coordinates is given by * the src_x, src_y, src_w and src_h parameters (as 16.16 fixed point * values). Devices that don't support subpixel plane coordinates can * ignore the fractional part. * * The destination rectangle in CRTC coordinates is given by the * crtc_x, crtc_y, crtc_w and crtc_h parameters (as integer values). * Devices scale the source rectangle to the destination rectangle. If * scaling is not supported, and the source rectangle size doesn't match * the destination rectangle size, the driver must return a * -<errorname>EINVAL</errorname> error. * * Drivers implementing atomic modeset should use * drm_atomic_helper_update_plane() to implement this hook. * * RETURNS: * * 0 on success or a negative error code on failure. */ int (*update_plane)(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h, struct drm_modeset_acquire_ctx *ctx); /** * @disable_plane: * * This is the legacy entry point to disable the plane. The DRM core * calls this method in response to a DRM_IOCTL_MODE_SETPLANE IOCTL call * with the frame buffer ID set to 0. Disabled planes must not be * processed by the CRTC. * * Drivers implementing atomic modeset should use * drm_atomic_helper_disable_plane() to implement this hook. * * RETURNS: * * 0 on success or a negative error code on failure. */ int (*disable_plane)(struct drm_plane *plane, struct drm_modeset_acquire_ctx *ctx); /** * @destroy: * * Clean up plane resources. This is only called at driver unload time * through drm_mode_config_cleanup() since a plane cannot be hotplugged * in DRM. */ void (*destroy)(struct drm_plane *plane); /** * @reset: * * Reset plane hardware and software state to off. This function isn't * called by the core directly, only through drm_mode_config_reset(). * It's not a helper hook only for historical reasons. * * Atomic drivers can use drm_atomic_helper_plane_reset() to reset * atomic state using this hook. */ void (*reset)(struct drm_plane *plane); /** * @set_property: * * This is the legacy entry point to update a property attached to the * plane. * * This callback is optional if the driver does not support any legacy * driver-private properties. For atomic drivers it is not used because * property handling is done entirely in the DRM core. * * RETURNS: * * 0 on success or a negative error code on failure. 
*/ int (*set_property)(struct drm_plane *plane, struct drm_property *property, uint64_t val); /** * @atomic_duplicate_state: * * Duplicate the current atomic state for this plane and return it. * The core and helpers guarantee that any atomic state duplicated with * this hook and still owned by the caller (i.e. not transferred to the * driver by calling &drm_mode_config_funcs.atomic_commit) will be * cleaned up by calling the @atomic_destroy_state hook in this * structure. * * This callback is mandatory for atomic drivers. * * Atomic drivers which don't subclass &struct drm_plane_state should use * drm_atomic_helper_plane_duplicate_state(). Drivers that subclass the * state structure to extend it with driver-private state should use * __drm_atomic_helper_plane_duplicate_state() to make sure shared state is * duplicated in a consistent fashion across drivers. * * It is an error to call this hook before &drm_plane.state has been * initialized correctly. * * NOTE: * * If the duplicate state references refcounted resources this hook must * acquire a reference for each of them. The driver must release these * references again in @atomic_destroy_state. * * RETURNS: * * Duplicated atomic state or NULL when the allocation failed. */ struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane); /** * @atomic_destroy_state: * * Destroy a state duplicated with @atomic_duplicate_state and release * or unreference all resources it references * * This callback is mandatory for atomic drivers. */ void (*atomic_destroy_state)(struct drm_plane *plane, struct drm_plane_state *state); /** * @atomic_set_property: * * Decode a driver-private property value and store the decoded value * into the passed-in state structure. Since the atomic core decodes all * standardized properties (even for extensions beyond the core set of * properties which might not be implemented by all drivers) this * requires drivers to subclass the state structure. * * Such driver-private properties should really only be implemented for * truly hardware/vendor specific state. Instead it is preferred to * standardize atomic extension and decode the properties used to expose * such an extension in the core. * * Do not call this function directly, use * drm_atomic_plane_set_property() instead. * * This callback is optional if the driver does not support any * driver-private atomic properties. * * NOTE: * * This function is called in the state assembly phase of atomic * modesets, which can be aborted for any reason (including on * userspace's request to just check whether a configuration would be * possible). Drivers MUST NOT touch any persistent state (hardware or * software) or data structures except the passed in @state parameter. * * Also since userspace controls in which order properties are set this * function must not do any input validation (since the state update is * incomplete and hence likely inconsistent). Instead any such input * validation must be done in the various atomic_check callbacks. * * RETURNS: * * 0 if the property has been found, -EINVAL if the property isn't * implemented by the driver (which shouldn't ever happen, the core only * asks for properties attached to this plane). No other validation is * allowed by the driver. The core already checks that the property * value is within the range (integer, valid enum value, ...) the driver * set when registering the property. 
*/ int (*atomic_set_property)(struct drm_plane *plane, struct drm_plane_state *state, struct drm_property *property, uint64_t val); /** * @atomic_get_property: * * Reads out the decoded driver-private property. This is used to * implement the GETPLANE IOCTL. * * Do not call this function directly, use * drm_atomic_plane_get_property() instead. * * This callback is optional if the driver does not support any * driver-private atomic properties. * * RETURNS: * * 0 on success, -EINVAL if the property isn't implemented by the * driver (which should never happen, the core only asks for * properties attached to this plane). */ int (*atomic_get_property)(struct drm_plane *plane, const struct drm_plane_state *state, struct drm_property *property, uint64_t *val); /** * @late_register: * * This optional hook can be used to register additional userspace * interfaces attached to the plane like debugfs interfaces. * It is called late in the driver load sequence from drm_dev_register(). * Everything added from this callback should be unregistered in * the early_unregister callback. * * Returns: * * 0 on success, or a negative error code on failure. */ int (*late_register)(struct drm_plane *plane); /** * @early_unregister: * * This optional hook should be used to unregister the additional * userspace interfaces attached to the plane from * @late_register. It is called from drm_dev_unregister(), * early in the driver unload sequence to disable userspace access * before data structures are torndown. */ void (*early_unregister)(struct drm_plane *plane); /** * @atomic_print_state: * * If driver subclasses &struct drm_plane_state, it should implement * this optional hook for printing additional driver specific state. * * Do not call this directly, use drm_atomic_plane_print_state() * instead. */ void (*atomic_print_state)(struct drm_printer *p, const struct drm_plane_state *state); /** * @format_mod_supported: * * This optional hook is used for the DRM to determine if the given * format/modifier combination is valid for the plane. This allows the * DRM to generate the correct format bitmask (which formats apply to * which modifier), and to valdiate modifiers at atomic_check time. * * If not present, then any modifier in the plane's modifier * list is allowed with any of the plane's formats. * * Returns: * * True if the given modifier is valid for that format on the plane. * False otherwise. */ bool (*format_mod_supported)(struct drm_plane *plane, uint32_t format, uint64_t modifier); }; /** * enum drm_plane_type - uapi plane type enumeration * * For historical reasons not all planes are made the same. This enumeration is * used to tell the different types of planes apart to implement the different * uapi semantics for them. For userspace which is universal plane aware and * which is using that atomic IOCTL there's no difference between these planes * (beyong what the driver and hardware can support of course). * * For compatibility with legacy userspace, only overlay planes are made * available to userspace by default. Userspace clients may set the * &DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate that they * wish to receive a universal plane list containing all plane types. See also * drm_for_each_legacy_plane(). * * In addition to setting each plane's type, drivers need to setup the * &drm_crtc.primary and optionally &drm_crtc.cursor pointers for legacy * IOCTLs. See drm_crtc_init_with_planes(). * * WARNING: The values of this enum is UABI since they're exposed in the "type" * property. 
*/ enum drm_plane_type { /** * @DRM_PLANE_TYPE_OVERLAY: * * Overlay planes represent all non-primary, non-cursor planes. Some * drivers refer to these types of planes as "sprites" internally. */ DRM_PLANE_TYPE_OVERLAY, /** * @DRM_PLANE_TYPE_PRIMARY: * * A primary plane attached to a CRTC is the most likely to be able to * light up the CRTC when no scaling/cropping is used and the plane * covers the whole CRTC. */ DRM_PLANE_TYPE_PRIMARY, /** * @DRM_PLANE_TYPE_CURSOR: * * A cursor plane attached to a CRTC is more likely to be able to be * enabled when no scaling/cropping is used and the framebuffer has the * size indicated by &drm_mode_config.cursor_width and * &drm_mode_config.cursor_height. Additionally, if the driver doesn't * support modifiers, the framebuffer should have a linear layout. */ DRM_PLANE_TYPE_CURSOR, }; /** * struct drm_plane - central DRM plane control structure * * Planes represent the scanout hardware of a display block. They receive their * input data from a &drm_framebuffer and feed it to a &drm_crtc. Planes control * the color conversion, see `Plane Composition Properties`_ for more details, * and are also involved in the color conversion of input pixels, see `Color * Management Properties`_ for details on that. */ struct drm_plane { /** @dev: DRM device this plane belongs to */ struct drm_device *dev; /** * @head: * * List of all planes on @dev, linked from &drm_mode_config.plane_list. * Invariant over the lifetime of @dev and therefore does not need * locking. */ struct list_head head; /** @name: human readable name, can be overwritten by the driver */ char *name; /** * @mutex: * * Protects modeset plane state, together with the &drm_crtc.mutex of * CRTC this plane is linked to (when active, getting activated or * getting disabled). * * For atomic drivers specifically this protects @state. */ struct drm_modeset_lock mutex; /** @base: base mode object */ struct drm_mode_object base; /** * @possible_crtcs: pipes this plane can be bound to constructed from * drm_crtc_mask() */ uint32_t possible_crtcs; /** @format_types: array of formats supported by this plane */ uint32_t *format_types; /** @format_count: Size of the array pointed at by @format_types. */ unsigned int format_count; /** * @format_default: driver hasn't supplied supported formats for the * plane. Used by the drm_plane_init compatibility wrapper only. */ bool format_default; /** @modifiers: array of modifiers supported by this plane */ uint64_t *modifiers; /** @modifier_count: Size of the array pointed at by @modifier_count. */ unsigned int modifier_count; /** * @crtc: * * Currently bound CRTC, only meaningful for non-atomic drivers. For * atomic drivers this is forced to be NULL, atomic drivers should * instead check &drm_plane_state.crtc. */ struct drm_crtc *crtc; /** * @fb: * * Currently bound framebuffer, only meaningful for non-atomic drivers. * For atomic drivers this is forced to be NULL, atomic drivers should * instead check &drm_plane_state.fb. */ struct drm_framebuffer *fb; /** * @old_fb: * * Temporary tracking of the old fb while a modeset is ongoing. Only * used by non-atomic drivers, forced to be NULL for atomic drivers. */ struct drm_framebuffer *old_fb; /** @funcs: plane control functions */ const struct drm_plane_funcs *funcs; /** @properties: property tracking for this plane */ struct drm_object_properties properties; /** @type: Type of plane, see &enum drm_plane_type for details. 
*/ enum drm_plane_type type; /** * @index: Position inside the mode_config.list, can be used as an array * index. It is invariant over the lifetime of the plane. */ unsigned index; /** @helper_private: mid-layer private data */ const struct drm_plane_helper_funcs *helper_private; /** * @state: * * Current atomic state for this plane. * * This is protected by @mutex. Note that nonblocking atomic commits * access the current plane state without taking locks. Either by going * through the &struct drm_atomic_state pointers, see * for_each_oldnew_plane_in_state(), for_each_old_plane_in_state() and * for_each_new_plane_in_state(). Or through careful ordering of atomic * commit operations as implemented in the atomic helpers, see * &struct drm_crtc_commit. */ struct drm_plane_state *state; /** * @alpha_property: * Optional alpha property for this plane. See * drm_plane_create_alpha_property(). */ struct drm_property *alpha_property; /** * @zpos_property: * Optional zpos property for this plane. See * drm_plane_create_zpos_property(). */ struct drm_property *zpos_property; /** * @rotation_property: * Optional rotation property for this plane. See * drm_plane_create_rotation_property(). */ struct drm_property *rotation_property; /** * @blend_mode_property: * Optional "pixel blend mode" enum property for this plane. * Blend mode property represents the alpha blending equation selection, * describing how the pixels from the current plane are composited with * the background. */ struct drm_property *blend_mode_property; /** * @color_encoding_property: * * Optional "COLOR_ENCODING" enum property for specifying * color encoding for non RGB formats. * See drm_plane_create_color_properties(). */ struct drm_property *color_encoding_property; /** * @color_range_property: * * Optional "COLOR_RANGE" enum property for specifying * color range for non RGB formats. * See drm_plane_create_color_properties(). */ struct drm_property *color_range_property; /** * @scaling_filter_property: property to apply a particular filter while * scaling. 
*/ struct drm_property *scaling_filter_property; }; #define obj_to_plane(x) container_of(x, struct drm_plane, base) __printf(9, 10) int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, uint32_t possible_crtcs, const struct drm_plane_funcs *funcs, const uint32_t *formats, unsigned int format_count, const uint64_t *format_modifiers, enum drm_plane_type type, const char *name, ...); int drm_plane_init(struct drm_device *dev, struct drm_plane *plane, uint32_t possible_crtcs, const struct drm_plane_funcs *funcs, const uint32_t *formats, unsigned int format_count, bool is_primary); void drm_plane_cleanup(struct drm_plane *plane); __printf(10, 11) void *__drmm_universal_plane_alloc(struct drm_device *dev, size_t size, size_t offset, uint32_t possible_crtcs, const struct drm_plane_funcs *funcs, const uint32_t *formats, unsigned int format_count, const uint64_t *format_modifiers, enum drm_plane_type plane_type, const char *name, ...); /** * drmm_universal_plane_alloc - Allocate and initialize an universal plane object * @dev: DRM device * @type: the type of the struct which contains struct &drm_plane * @member: the name of the &drm_plane within @type * @possible_crtcs: bitmask of possible CRTCs * @funcs: callbacks for the new plane * @formats: array of supported formats (DRM_FORMAT\_\*) * @format_count: number of elements in @formats * @format_modifiers: array of struct drm_format modifiers terminated by * DRM_FORMAT_MOD_INVALID * @plane_type: type of plane (overlay, primary, cursor) * @name: printf style format string for the plane name, or NULL for default name * * Allocates and initializes a plane object of type @type. Cleanup is * automatically handled through registering drm_plane_cleanup() with * drmm_add_action(). * * The @drm_plane_funcs.destroy hook must be NULL. * * Returns: * Pointer to new plane, or ERR_PTR on failure. */ #define drmm_universal_plane_alloc(dev, type, member, possible_crtcs, funcs, formats, \ format_count, format_modifiers, plane_type, name, ...) \ ((type *)__drmm_universal_plane_alloc(dev, sizeof(type), \ offsetof(type, member), \ possible_crtcs, funcs, formats, \ format_count, format_modifiers, \ plane_type, name, ##__VA_ARGS__)) /** * drm_plane_index - find the index of a registered plane * @plane: plane to find index for * * Given a registered plane, return the index of that plane within a DRM * device's list of planes. */ static inline unsigned int drm_plane_index(const struct drm_plane *plane) { return plane->index; } /** * drm_plane_mask - find the mask of a registered plane * @plane: plane to find mask for */ static inline u32 drm_plane_mask(const struct drm_plane *plane) { return 1 << drm_plane_index(plane); } struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx); void drm_plane_force_disable(struct drm_plane *plane); int drm_mode_plane_set_obj_prop(struct drm_plane *plane, struct drm_property *property, uint64_t value); /** * drm_plane_find - find a &drm_plane * @dev: DRM device * @file_priv: drm file to check for lease against. * @id: plane id * * Returns the plane with @id, NULL if it doesn't exist. Simple wrapper around * drm_mode_object_find(). */ static inline struct drm_plane *drm_plane_find(struct drm_device *dev, struct drm_file *file_priv, uint32_t id) { struct drm_mode_object *mo; mo = drm_mode_object_find(dev, file_priv, id, DRM_MODE_OBJECT_PLANE); return mo ? 
obj_to_plane(mo) : NULL; } /** * drm_for_each_plane_mask - iterate over planes specified by bitmask * @plane: the loop cursor * @dev: the DRM device * @plane_mask: bitmask of plane indices * * Iterate over all planes specified by bitmask. */ #define drm_for_each_plane_mask(plane, dev, plane_mask) \ list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \ for_each_if ((plane_mask) & drm_plane_mask(plane)) /** * drm_for_each_legacy_plane - iterate over all planes for legacy userspace * @plane: the loop cursor * @dev: the DRM device * * Iterate over all legacy planes of @dev, excluding primary and cursor planes. * This is useful for implementing userspace apis when userspace is not * universal plane aware. See also &enum drm_plane_type. */ #define drm_for_each_legacy_plane(plane, dev) \ list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) \ for_each_if (plane->type == DRM_PLANE_TYPE_OVERLAY) /** * drm_for_each_plane - iterate over all planes * @plane: the loop cursor * @dev: the DRM device * * Iterate over all planes of @dev, including primary and cursor planes. */ #define drm_for_each_plane(plane, dev) \ list_for_each_entry(plane, &(dev)->mode_config.plane_list, head) bool drm_any_plane_has_format(struct drm_device *dev, u32 format, u64 modifier); void drm_plane_enable_fb_damage_clips(struct drm_plane *plane); unsigned int drm_plane_get_damage_clips_count(const struct drm_plane_state *state); struct drm_mode_rect * drm_plane_get_damage_clips(const struct drm_plane_state *state); int drm_plane_create_scaling_filter_property(struct drm_plane *plane, unsigned int supported_filters); #endif
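/*
 * Illustrative sketch, not part of drm_plane.h: how driver code might use the
 * drm_plane_state_src()/drm_plane_state_dest() helpers above.  The source
 * rectangle is in 16.16 fixed point, hence the >> 16 conversions below; the
 * destination rectangle is already in integer CRTC coordinates.  The function
 * name is hypothetical and drm_printf() comes from <drm/drm_print.h>.
 */
#include <drm/drm_print.h>
#include <drm/drm_rect.h>

static void example_print_plane_rects(struct drm_printer *p,
				      const struct drm_plane_state *state)
{
	struct drm_rect src = drm_plane_state_src(state);
	struct drm_rect dst = drm_plane_state_dest(state);

	drm_printf(p, "src (pixels): %dx%d+%d+%d\n",
		   drm_rect_width(&src) >> 16, drm_rect_height(&src) >> 16,
		   src.x1 >> 16, src.y1 >> 16);
	drm_printf(p, "dst (crtc):   %dx%d+%d+%d\n",
		   drm_rect_width(&dst), drm_rect_height(&dst),
		   dst.x1, dst.y1);
}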
4359 4360 4361 4362 4363 4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4378 4379 4380 4381 4382 4383 4384 4385 4386 4387 4388 4389 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420 4421 4422 4423 4424 4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449 4450 4451 4452 4453 4454 4455 4456 4457 4458 4459 4460 4461 4462 4463 4464 4465 4466 4467 4468 4469 4470 4471 4472 4473 4474 4475 4476 4477 4478 4479 4480 4481 4482 4483 4484 4485 4486 4487 4488 4489 4490 4491 4492 4493 4494 4495 4496 4497 4498 4499 4500 4501 4502 4503 4504 4505 4506 4507 4508 4509 4510 4511 4512 4513 4514 4515 4516 4517 4518 4519 4520 4521 4522 4523 4524 4525 4526 4527 4528 4529 4530 4531 4532 4533 4534 4535 4536 4537 4538 4539 4540 4541 4542 4543 4544 4545 4546 4547 4548 4549 4550 4551 4552 4553 4554 4555 4556 4557 4558 4559 4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571 4572 4573 4574 4575 4576 4577 4578 4579 4580 4581 4582 4583 4584 4585 4586 4587 4588 4589 4590 4591 4592 4593 4594 4595 4596 4597 4598 4599 4600 4601 4602 4603 4604 4605 4606 4607 4608 4609 4610 4611 4612 4613 4614 4615 4616 4617 4618 4619 4620 4621 4622 4623 4624 4625 4626 4627 4628 4629 4630 4631 4632 4633 4634 4635 4636 4637 4638 4639 4640 4641 4642 4643 4644 4645 4646 4647 4648 4649 4650 4651 4652 4653 4654 4655 4656 4657 4658 4659 4660 4661 4662 4663 4664 4665 4666 4667 4668 4669 4670 4671 4672 4673 4674 4675 4676 4677 4678 4679 4680 4681 4682 4683 4684 4685 4686 4687 4688 4689 4690 4691 4692 4693 4694 4695 4696 4697 4698 4699 4700 4701 4702 4703 4704 4705 4706 4707 4708 4709 4710 4711 4712 4713 4714 4715 4716 4717 4718 4719 4720 4721 4722 4723 4724 4725 4726 4727 4728 4729 4730 4731 4732 4733 4734 4735 4736 4737 4738 4739 4740 4741 4742 4743 4744 4745 4746 4747 4748 4749 4750 4751 4752 4753 4754 4755 4756 4757 4758 4759 4760 4761 4762 4763 4764 4765 4766 4767 4768 4769 4770 4771 4772 4773 4774 4775 4776 4777 4778 4779 4780 4781 4782 4783 4784 4785 4786 4787 4788 4789 4790 4791 4792 4793 4794 4795 4796 4797 4798 4799 4800 4801 4802 4803 4804 4805 4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 4816 4817 4818 4819 4820 4821 4822 4823 4824 4825 4826 4827 4828 4829 4830 4831 4832 4833 4834 4835 4836 4837 4838 4839 4840 4841 4842 4843 4844 4845 4846 4847 4848 4849 4850 4851 4852 4853 4854 4855 4856 4857 4858 4859 4860 4861 4862 4863 4864 4865 4866 4867 4868 4869 4870 4871 4872 4873 4874 4875 4876 4877 4878 4879 4880 4881 4882 4883 4884 4885 4886 4887 4888 4889 4890 4891 4892 4893 4894 4895 4896 4897 4898 4899 4900 4901 4902 4903 4904 4905 4906 4907 4908 4909 4910 4911 4912 4913 4914 4915 4916 4917 4918 4919 4920 4921 4922 4923 4924 4925 4926 4927 4928 4929 4930 4931 4932 4933 4934 4935 4936 4937 4938 4939 4940 4941 4942 4943 4944 4945 4946 4947 4948 4949 4950 4951 4952 4953 4954 4955 4956 4957 4958 4959 4960 4961 4962 4963 4964 4965 4966 4967 4968 4969 4970 4971 4972 4973 4974 4975 4976 4977 4978 4979 4980 4981 4982 4983 4984 4985 4986 4987 4988 4989 4990 4991 4992 4993 4994 4995 4996 4997 4998 4999 5000 5001 5002 5003 5004 5005 5006 5007 5008 5009 5010 5011 5012 5013 5014 5015 5016 5017 5018 5019 5020 5021 5022 5023 5024 5025 5026 5027 5028 5029 5030 5031 5032 5033 5034 5035 5036 5037 5038 5039 5040 5041 5042 5043 5044 5045 5046 5047 5048 5049 5050 5051 5052 5053 5054 5055 5056 5057 5058 5059 5060 5061 5062 5063 5064 5065 5066 5067 5068 5069 
5070 5071 5072 5073 5074 5075 5076 5077 5078 5079 5080 5081 5082 5083 5084 5085 5086 5087 5088 5089 5090 5091 5092 5093 5094 5095 5096 5097 5098 5099 5100 5101 5102 5103 5104 5105 5106 5107 5108 5109 5110 5111 5112 5113 5114 5115 5116 5117 5118 5119 5120 5121 5122 5123 5124 5125 5126 5127 5128 5129 5130 5131 5132 5133 5134 5135 5136 5137 5138 5139 5140 5141 5142 5143 5144 5145 5146 5147 5148 5149 5150 5151 5152 5153 5154 5155 5156 5157 5158 5159 5160 5161 5162 5163 5164 5165 5166 5167 5168 5169 5170 5171 5172 5173 5174 5175 5176 5177 5178 5179 5180 5181 5182 5183 5184 5185 5186 5187 5188 5189 5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 5213 5214 5215 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227 5228 5229 5230 5231 5232 5233 5234 5235 5236 5237 5238 5239 5240 5241 5242 5243 5244 5245 5246 5247 5248 5249 5250 5251 5252 5253 5254 5255 5256 5257 5258 5259 5260 5261 5262 5263 5264 5265 5266 5267 5268 5269 5270 5271 5272 5273 5274 5275 5276 5277 5278 5279 5280 5281 5282 5283 5284 5285 5286 5287 5288 5289 5290 5291 5292 5293 5294 5295 5296 5297 5298 5299 5300 5301 5302 5303 5304 5305 5306 5307 5308 5309 5310 5311 5312 5313 5314 5315 5316 5317 5318 5319 5320 5321 5322 5323 5324 5325 5326 5327 5328 5329 5330 5331 5332 5333 5334 5335 5336 5337 5338 5339 5340 5341 5342 5343 5344 5345 5346 5347 5348 5349 5350 5351 5352 5353 5354 5355 5356 5357 5358 5359 5360 5361 5362 5363 5364 5365 5366 5367 5368 5369 5370 5371 5372 5373 5374 5375 5376 5377 5378 5379 5380 5381 5382 5383 5384 5385 5386 5387 5388 5389 5390 5391 5392 5393 5394 5395 5396 5397 5398 5399 5400 5401 5402 5403 5404 5405 5406 5407 5408 5409 5410 5411 5412 5413 5414 5415 5416 5417 5418 5419 5420 5421 5422 5423 5424 5425 5426 5427 5428 5429 5430 5431 5432 5433 5434 5435 5436 5437 5438 5439 5440 5441 5442 5443 5444 5445 5446 5447 5448 5449 5450 5451 5452 5453 5454 5455 5456 5457 5458 5459 5460 5461 5462 5463 5464 5465 5466 5467 5468 5469 5470 5471 5472 5473 5474 5475 5476 5477 5478 5479 5480 5481 5482 5483 5484 5485 5486 5487 5488 5489 5490 5491 5492 5493 5494 5495 5496 5497 5498 5499 5500 5501 5502 5503 5504 5505 5506 5507 5508 5509 5510 5511 5512 5513 5514 5515 5516 5517 5518 5519 5520 5521 5522 5523 5524 5525 5526 5527 5528 5529 5530 5531 5532 5533 5534 5535 5536 5537 5538 5539 5540 5541 5542 5543 5544 5545 5546 5547 5548 5549 5550 5551 5552 5553 5554 5555 5556 5557 5558 5559 5560 5561 5562 5563 5564 5565 5566 5567 5568 5569 5570 5571 5572 5573 5574 5575 5576 5577 5578 5579 5580 5581 5582 5583 5584 5585 5586 5587 5588 5589 5590 5591 5592 5593 5594 5595 5596 5597 5598 5599 5600 5601 5602 5603 5604 5605 5606 5607 5608 5609 5610 5611 5612 5613 5614 5615 5616 5617 5618 5619 5620 5621 5622 5623 5624 5625 5626 5627 5628 5629 5630 5631 5632 5633 5634 5635 5636 5637 5638 5639 5640 5641 5642 5643 5644 5645 5646 5647 5648 5649 5650 5651 5652 5653 5654 5655 5656 5657 5658 5659 5660 5661 5662 5663 5664 5665 5666 5667 5668 5669 5670 5671 5672 5673 5674 5675 5676 5677 5678 5679 5680 5681 5682 5683 5684 5685 5686 5687 5688 5689 5690 5691 5692 5693 5694 5695 5696 5697 5698 5699 5700 5701 5702 5703 5704 5705 5706 5707 5708 5709 5710 5711 5712 5713 5714 5715 5716 5717 5718 5719 5720 5721 5722 5723 5724 5725 5726 5727 5728 5729 5730 5731 5732 5733 5734 5735 5736 5737 5738 5739 5740 5741 5742 5743 5744 5745 5746 5747 5748 5749 5750 5751 5752 5753 5754 5755 5756 5757 5758 5759 5760 5761 5762 5763 5764 5765 5766 5767 5768 5769 5770 5771 5772 5773 5774 5775 5776 5777 5778 5779 5780 
5781 5782 5783 5784 5785 5786 5787 5788 5789 5790 5791 5792 5793 5794 5795 5796 5797 5798 5799 5800 5801 5802 5803 5804 5805 5806 5807 5808 5809 5810 5811 5812 5813 5814 5815 5816 5817 5818 5819 5820 5821 5822 5823 5824 5825 5826 5827 5828 5829 5830 5831 5832 5833 5834 5835 5836 5837 5838 5839 5840 5841 5842 5843 5844 5845 5846 5847 5848 5849 5850 5851 5852 5853 5854 5855 5856 5857 5858 5859 5860 5861 5862 5863 5864 5865 5866 5867 5868 5869 5870 5871 5872 5873 5874 5875 5876 5877 5878 5879 5880 5881 5882 5883 5884 5885 5886 5887 5888 5889 5890 5891 5892 5893 5894 5895 5896 5897 5898 5899 5900 5901 5902 5903 5904 5905 5906 5907 5908 5909 5910 5911 5912 5913 5914 5915 5916 5917 5918 5919 5920 5921 5922 5923 5924 5925 5926 5927 5928 5929 5930 5931 5932 5933 5934 5935 5936 5937 5938 5939 5940 5941 5942 5943 5944 5945 5946 5947 5948 5949 5950 5951 5952 5953 5954 5955 5956 5957 5958 5959 5960 5961 5962 5963 5964 5965 5966 5967 5968 5969 5970 5971 5972 5973 5974 5975 5976 5977 5978 5979 5980 5981 5982 5983 5984 5985 5986 5987 5988 5989 5990 5991 5992 5993 5994 5995 5996 5997 5998 5999 6000 6001 6002 6003 6004 6005 6006 6007 6008 6009 6010 6011 6012 6013 6014 6015 6016 6017 6018 6019 6020 6021 6022 6023 6024 6025 6026 6027 6028 6029 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 6043 6044 6045 6046 6047 6048 6049 6050 6051 6052 6053 6054 6055 6056 6057 6058 6059 6060 6061 6062 6063 6064 6065 6066 6067 6068 6069 6070 6071 6072 6073 6074 6075 6076 6077 6078 6079 6080 6081 6082 6083 6084 6085 6086 6087 6088 6089 6090 6091 6092 6093 6094 6095 6096 6097 6098 6099 6100 6101 6102 6103 6104 6105 6106 6107 6108 6109 6110 6111 6112 6113 6114 6115 6116 6117 6118 6119 6120 6121 6122 6123 6124 6125 6126 6127 6128 6129 6130 6131 6132 6133 6134 6135 6136 6137 6138 6139 6140 6141 6142 6143 6144 6145 6146 6147 6148 6149 6150 6151 6152 6153 6154 6155 6156 6157 6158 6159 6160 6161 6162 6163 6164 6165 6166 6167 6168 6169 6170 6171 6172 6173 6174 6175 6176 6177 6178 6179 6180 6181 6182 6183 6184 6185 6186 6187 6188 6189 6190 6191 6192 6193 6194 6195 6196 6197 6198 6199 6200 6201 6202 6203 6204 6205 6206 6207 6208 6209 6210 6211 6212 6213 6214 6215 6216 6217 6218 6219 6220 6221 6222 6223 6224 6225 6226 6227 6228 6229 6230 6231 6232 6233 6234 6235 6236 6237 6238 6239 6240 6241 6242 6243 6244 6245 6246 6247 6248 6249 6250 6251 6252 6253 6254 6255 6256 6257 6258 6259 6260 6261 6262 6263 6264 6265 6266 6267 6268 6269 6270 6271 6272 6273 6274 6275 6276 6277 6278 6279 6280 6281 6282 6283 6284 6285 6286 6287 6288 6289 6290 6291 6292 6293 6294 6295 6296 6297 6298 6299 6300 6301 6302 6303 6304 6305 6306 6307 6308 6309 6310 6311 6312 6313 6314 6315 6316 6317 6318 6319 6320 6321 6322 6323 6324 6325 6326 6327 6328 6329 6330 6331 6332 6333 6334 6335 6336 6337 6338 6339 6340 6341 6342 6343 6344 6345 6346 6347 6348 6349 6350 6351 6352 6353 6354 6355 6356 6357 6358 6359 6360 6361 6362 6363 6364 6365 6366 6367 6368 6369 6370 6371 6372 6373 6374 6375 6376 6377 6378 6379 6380 6381 6382 6383 6384 6385 6386 6387 6388 6389 6390 6391 6392 6393 6394 6395 6396 6397 6398 6399 6400 6401 6402 6403 6404 6405 6406 6407 6408 6409 6410 6411 6412 6413 6414 6415 6416 6417 6418 6419 6420 6421 6422 6423 6424 6425 6426 6427 6428 6429 6430 6431 6432 6433 6434 6435 6436 6437 6438 6439 6440 6441 6442 6443 6444 6445 6446 6447 6448 6449 6450 6451 6452 6453 6454 6455 6456 6457 6458 6459 6460 6461 6462 6463 6464 6465 6466 6467 6468 6469 6470 6471 6472 6473 6474 6475 6476 6477 6478 6479 6480 6481 6482 6483 6484 6485 6486 6487 6488 6489 6490 6491 
6492 6493 6494 6495 6496 6497 6498 6499 6500 6501 6502 6503 6504 6505 6506 6507 6508 6509 6510 6511 6512 6513 6514 6515 6516 6517 6518 6519 6520 6521 6522 6523 6524 6525 6526 6527 6528 6529 6530 6531 6532 6533 6534 6535 6536 6537 6538 6539 6540 6541 6542 6543 6544 6545 6546 6547 6548 6549 6550 6551 6552 6553 6554 6555 6556 6557 6558 6559 6560 6561 6562 6563 6564 6565 6566 6567 6568 6569 6570 6571 6572 6573 6574 6575 6576 6577 6578 6579 6580 6581 6582 6583 6584 6585 6586 6587 6588 6589 6590 6591 6592 6593 6594 6595 6596 6597 6598 6599 6600 6601 6602 6603 6604 6605 6606 6607 6608 6609 6610 6611 6612 6613 6614 6615 6616 6617 6618 6619 6620 6621 6622 6623 6624 6625 6626 6627 6628 6629 6630 6631 6632 6633 6634 6635 6636 6637 6638 6639 6640 6641 6642 6643 6644 6645 6646 6647 6648 6649 6650 6651 6652 6653 6654 6655 6656 6657 6658 6659 6660 6661 6662 6663 6664 6665 6666 6667 6668 6669 6670 6671 6672 6673 6674 6675 6676 6677 6678 6679 6680 6681 6682 6683 6684 6685 6686 6687 6688 6689 6690 6691 6692 6693 6694 6695 6696 6697 6698 6699 6700 6701 6702 6703 6704 6705 6706 6707 6708 6709 6710 6711 6712 6713 6714 6715 6716 6717 6718 6719 6720 6721 6722 6723 6724 6725 6726 6727 6728 6729 6730 6731 6732 6733 6734 6735 6736 6737 6738 6739 6740 6741 6742 6743 6744 6745 6746 6747 6748 6749 6750 6751 6752 6753 6754 6755 6756 6757 6758 6759 6760 6761 6762 6763 6764 6765 6766 6767 6768 6769 6770 6771 6772 6773 6774 6775 6776 6777 6778 6779 6780 6781 6782 6783 6784 6785 6786 6787 6788 6789 6790 6791 6792 6793 6794 6795 6796 6797 6798 6799 6800 6801 6802 6803 6804 6805 6806 6807 6808 6809 6810 6811 6812 6813 6814 6815 6816 6817 6818 6819 6820 6821 6822 6823 6824 6825 6826 6827 6828 6829 6830 6831 6832 6833 6834 6835 6836 6837 6838 6839 6840 6841 6842 6843 6844 6845 6846 6847 6848 6849 6850 6851 6852 6853 6854 6855 6856 6857 6858 6859 6860 6861 6862 6863 6864 6865 6866 6867 6868 6869 6870 6871 6872 6873 6874 6875 6876 6877 6878 6879 6880 6881 6882 6883 6884 6885 6886 6887 6888 6889 6890 6891 6892 6893 6894 6895 6896 6897 6898 6899 6900 6901 6902 6903 6904 6905 6906 6907 6908 6909 6910 6911 6912 6913 6914 6915 6916 6917 6918 6919 6920 6921 6922 6923 6924 6925 6926 6927 6928 6929 6930 6931 6932 6933 6934 6935 6936 6937 6938 6939 6940 6941 6942 6943 6944 6945 6946 6947 6948 6949 6950 6951 6952 6953 6954 6955 6956 6957 6958 6959 6960 6961 6962 6963 6964 6965 6966 6967 6968 6969 6970 6971 6972 6973 6974 6975 6976 6977 6978 6979 6980 6981 6982 6983 6984 6985 6986 6987 6988 6989 6990 6991 6992 6993 6994 6995 6996 6997 6998 6999 7000 7001 7002 7003 7004 7005 7006 7007 7008 7009 7010 7011 7012 7013 7014 7015 7016 7017 7018 7019 7020 7021 7022 7023 7024 7025 7026 7027 7028 7029 7030 7031 7032 7033 7034 7035 7036 7037 7038 7039 7040 7041 7042 7043 7044 7045 7046 7047 7048 7049 7050 7051 7052 7053 7054 7055 7056 7057 7058 7059 7060 7061 7062 7063 7064 7065 7066 7067 7068 7069 7070 7071 7072 7073 7074 7075 7076 7077 7078 7079 7080 7081 7082 7083 7084 7085 7086 7087 7088 7089 7090 7091 7092 7093 7094 7095 7096 7097 7098 7099 7100 7101 7102 7103 7104 7105 7106 7107 7108 7109 7110 7111 7112 7113 7114 7115 7116 7117 7118 7119 7120 7121 7122 7123 7124 7125 7126 7127 7128 7129 7130 7131 7132 7133 7134 7135 7136 7137 7138 7139 7140 7141 7142 7143 7144 7145 7146 7147 7148 7149 7150 7151 7152 7153 7154 7155 7156 7157 7158 7159 7160 7161 7162 7163 7164 7165 7166 7167 7168 7169 7170 7171 7172 7173 7174 7175 7176 7177 7178 7179 7180 7181 7182 7183 7184 7185 7186 7187 7188 7189 7190 7191 7192 7193 7194 7195 7196 7197 7198 7199 7200 7201 7202 
7203 7204 7205 7206 7207 7208 7209 7210 7211 7212 7213 7214 7215 7216 7217 7218 7219 7220 7221 7222 7223 7224 7225 7226 7227 7228 7229 7230 7231 7232 7233 7234 7235 7236 7237 7238 7239 7240 7241 7242 7243 7244 7245 7246 7247 7248 7249 7250 7251 7252 7253 7254 7255 7256 7257 7258 7259 7260 7261 7262 7263 7264 7265 7266 7267 7268 7269 7270 7271 7272 7273 7274 7275 7276 7277 7278 7279 7280 7281 7282 7283 7284 7285 7286 7287 7288 7289 7290 7291 7292 7293 7294 7295 7296 7297 7298 7299 7300 7301 7302 7303 7304 7305 7306 7307 7308 7309 7310 7311 7312 7313 7314 7315 7316 7317 7318 7319 7320 7321 7322 7323 7324 7325 7326 7327 7328 7329 7330 7331 7332 7333 7334 7335 7336 7337 7338 7339 7340 7341 7342 7343 7344 7345 7346 7347 7348 7349 7350 7351 7352 7353 7354 7355 7356 7357 7358 7359 7360 7361 7362 7363 7364 7365 7366 7367 7368 7369 7370 7371 7372 7373 7374 7375 7376 7377 7378 7379 7380 7381 7382 7383 7384 7385 7386 7387 7388 7389 7390 7391 7392 7393 7394 7395 7396 7397 7398 7399 7400 7401 7402 7403 7404 7405 7406 7407 7408 7409 7410 7411 7412 7413 7414 7415 7416 7417 7418 7419 7420 7421 7422 7423 7424 7425 7426 7427 7428 7429 7430 7431 7432 7433 7434 7435 7436 7437 7438 7439 7440 7441 7442 7443 7444 7445 7446 7447 7448 7449 7450 7451 7452 7453 7454 7455 7456 7457 7458 7459 7460 7461 7462 7463 7464 7465 7466 7467 7468 7469 7470 7471 7472 7473 7474 7475 7476 7477 7478 7479 7480 7481 7482 7483 7484 7485 7486 7487 7488 7489 7490 7491 7492 7493 7494 7495 7496 7497 7498 7499 7500 7501 7502 7503 7504 7505 7506 7507 7508 7509 7510 7511 7512 7513 7514 7515 7516 7517 7518 7519 7520 7521 7522 7523 7524 7525 7526 7527 7528 7529 7530 7531 7532 7533 7534 7535 7536 7537 7538 7539 7540 7541 7542 7543 7544 7545 7546 7547 7548 7549 7550 7551 7552 7553 7554 7555 7556 7557 7558 7559 7560 7561 7562 7563 7564 7565 7566 7567 7568 7569 7570 7571 7572 7573 7574 7575 7576 7577 7578 7579 7580 7581 7582 7583 7584 7585 7586 7587 7588 7589 7590 7591 7592 7593 7594 7595 7596 7597 7598 7599 7600 7601 7602 7603 7604 7605 7606 7607 7608 7609 7610 7611 7612 7613 7614 7615 7616 7617 7618 7619 7620 7621 7622 7623 7624 7625 7626 7627 7628 7629 7630 7631 7632 7633 7634 7635 7636 7637 7638 7639 7640 7641 7642 7643 7644 7645 7646 7647 7648 7649 7650 7651 7652 7653 7654 7655 7656 7657 7658 7659 7660 7661 7662 7663 7664 7665 7666 7667 7668 7669 7670 7671 7672 7673 7674 7675 7676 7677 7678 7679 7680 7681 7682 7683 7684 7685 7686 7687 7688 7689 7690 7691 7692 7693 7694 7695 7696 7697 7698 7699 7700 7701 7702 7703 7704 7705 7706 7707 7708 7709 7710 7711 7712 7713 7714 7715 7716 7717 7718 7719 7720 7721 7722 7723 7724 7725 7726 7727 7728 7729 7730 7731 7732 7733 7734 7735 7736 7737 7738 7739 7740 7741 7742 7743 7744 7745 7746 7747 7748 7749 7750 7751 7752 7753 7754 7755 7756 7757 7758 7759 7760 7761 7762 7763 7764 7765 7766 7767 7768 7769 7770 7771 7772 7773 7774 7775 7776 7777 7778 7779 7780 7781 7782 7783 7784 7785 7786 7787 7788 7789 7790 7791 7792 7793 7794 7795 7796 7797 7798 7799 7800 7801 7802 7803 7804 7805 7806 7807 7808 7809 7810 7811 7812 7813 7814 7815 7816 7817 7818 7819 7820 7821 7822 7823 7824 7825 7826 7827 7828 7829 7830 7831 7832 7833 7834 7835 7836 7837 7838 7839 7840 7841 7842 7843 7844 7845 7846 7847 7848 7849 7850 7851 7852 7853 7854 7855 7856 7857 7858 7859 7860 7861 7862 7863 7864 7865 7866 7867 7868 7869 7870 7871 7872 7873 7874 7875 7876 7877 7878 7879 7880 7881 7882 7883 7884 7885 7886 7887 7888 7889 7890 7891 7892 7893 7894 7895 7896 7897 7898 7899 7900 7901 7902 7903 7904 7905 7906 7907 7908 7909 7910 7911 7912 7913 
7914 7915 7916 7917 7918 7919 7920 7921 7922 7923 7924 7925 7926 7927 7928 7929 7930 7931 7932 7933 7934 7935 7936 7937 7938 7939 7940 7941 7942 7943 7944 7945 7946 7947 7948 7949 7950 7951 7952 7953 7954 7955 7956 7957 7958 7959 7960 7961 7962 7963 7964 7965 7966 7967 7968 7969 7970 7971 7972 7973 7974 7975 7976 7977 7978 7979 7980 7981 7982 7983 7984 7985 7986 7987 7988 7989 7990 7991 7992 7993 7994 7995 7996 7997 7998 7999 8000 8001 8002 8003 8004 8005 8006 8007 8008 8009 8010 8011 8012 8013 8014 8015 8016 8017 8018 8019 8020 8021 8022 8023 8024 8025 8026 8027 8028 8029 8030 8031 8032 8033 8034 8035 8036 8037 8038 8039 8040 8041 8042 8043 8044 8045 8046 8047 8048 8049 8050 8051 8052 8053 8054 8055 8056 8057 8058 8059 8060 8061 8062 8063 8064 8065 8066 8067 8068 8069 8070 8071 8072 8073 8074 8075 8076 8077 8078 8079 8080 8081 8082 8083 8084 8085 8086 8087 8088 8089 8090 8091 8092 8093 8094 8095 8096 8097 8098 8099 8100 8101 8102 8103 8104 8105 8106 8107 8108 8109 8110 8111 8112 8113 8114 8115 8116 8117 8118 8119 8120 8121 8122 8123 8124 8125 8126 8127 8128 8129 8130 8131 8132 8133 8134 8135 8136 8137 8138 8139 8140 8141 8142 8143 8144 8145 8146 8147 8148 8149 8150 8151 8152 8153 8154 8155 8156 8157 8158 8159 8160 8161 8162 8163 8164 8165 8166 8167 8168 8169 8170 8171 8172 8173 8174 8175 8176 8177 8178 8179 8180 8181 8182 8183 8184 8185 8186 8187 8188 8189 8190 8191 8192 8193 8194 8195 8196 8197 8198 8199 8200 8201 8202 8203 8204 8205 8206 8207 8208 8209 8210 8211 8212 8213 8214 8215 8216 8217 8218 8219 8220 8221 8222 8223 8224 8225 8226 8227 8228 8229 8230 8231 8232 8233 8234 8235 8236 8237 8238 8239 8240 8241 8242 8243 8244 8245 8246 8247 8248 8249 8250 8251 8252 8253 8254 8255 8256 8257 8258 8259 8260 8261 8262 8263 8264 8265 8266 8267 8268 8269 8270 8271 8272 8273 8274 8275 8276 8277 8278 8279 8280 8281 8282 8283 8284 8285 8286 8287 8288 8289 8290 8291 8292 8293 8294 8295 8296 8297 8298 8299 8300 8301 8302 8303 8304 8305 8306 8307 8308 8309 8310 8311 8312 8313 8314 8315 8316 8317 8318 8319 8320 8321 8322 8323 8324 8325 8326 8327 8328 8329 8330 8331 8332 8333 8334 8335 8336 8337 8338 8339 8340 8341 8342 8343 8344 8345 8346 8347 8348 8349 8350 8351 8352 8353 8354 8355 8356 8357 8358 8359 8360 8361 8362 8363 8364 8365 8366 8367 8368 8369 8370 8371 8372 8373 8374 8375 8376 8377 8378 8379 8380 8381 8382 8383 8384 8385 8386 8387 8388 8389 8390 8391 8392 8393 8394 8395 8396 8397 8398 8399 8400 8401 8402 8403 8404 8405 8406 8407 8408 8409 8410 8411 8412 8413 8414 8415 8416 8417 8418 8419 8420 8421 8422 8423 8424 8425 8426 8427 8428 8429 8430 8431 8432 8433 8434 8435 8436 8437 8438 8439 8440 8441 8442 8443 8444 8445 8446 8447 8448 8449 8450 8451 8452 8453 8454 8455 8456 8457 8458 8459 8460 8461 8462 8463 8464 8465 8466 8467 8468 8469 8470 8471 8472 8473 8474 8475 8476 8477 8478 8479 8480 8481 8482 8483 8484 8485 8486 8487 8488 8489 8490 8491 8492 8493 8494 8495 8496 8497 8498 8499 8500 8501 8502 8503 8504 8505 8506 8507 8508 8509 8510 8511 8512 8513 8514 8515 8516 8517 8518 8519 8520 8521 8522 8523 8524 8525 8526 8527 8528 8529 8530 8531 8532 8533 8534 8535 8536 8537 8538 8539 8540 8541 8542 8543 8544 8545 8546 8547 8548 8549 8550 8551 8552 8553 8554 8555 8556 8557 8558 8559 8560 8561 8562 8563 8564 8565 8566 8567 8568 8569 8570 8571 8572 8573 8574 8575 8576 8577 8578 8579 8580 8581 8582 8583 8584 8585 8586 8587 8588 8589 8590 8591 8592 8593 8594 8595 8596 8597 8598 8599 8600 8601 8602 8603 8604 8605 8606 8607 8608 8609 8610 8611 8612 8613 8614 8615 8616 8617 8618 8619 8620 8621 8622 8623 8624 
8625 8626 8627 8628 8629 8630 8631 8632 8633 8634 8635 8636 8637 8638 8639 8640 8641 8642 8643 8644 8645 8646 8647 8648 8649 8650 8651 8652 8653 8654 8655 8656 8657 8658 8659 8660 8661 8662 8663 8664 8665 8666 8667 8668 8669 8670 8671 8672 8673 8674 8675 8676 8677 8678 8679 8680 8681 8682 8683 8684 8685 8686 8687 8688 8689 8690 8691 8692 8693 8694 8695 8696 8697 8698 8699 8700 8701 8702 8703 8704 8705 8706 8707 8708 8709 8710 8711 8712 8713 8714 8715 8716 8717 8718 8719 8720 8721 8722 8723 8724 8725 8726 8727 8728 8729 8730 8731 8732 8733 8734 8735 8736 8737 8738 8739 8740 8741 8742 8743 8744 8745 8746 8747 8748 8749 8750 8751 8752 8753 8754 8755 8756 8757 8758 8759 8760 8761 8762 8763 8764 8765 8766 8767 8768 8769 8770 8771 8772 8773 8774 8775 8776 8777 8778 8779 8780 8781 8782 8783 8784 8785 8786 8787 8788 8789 8790 8791 8792 8793 8794 8795 8796 8797 8798 8799 8800 8801 8802 8803 8804 8805 8806 8807 8808 8809 8810 8811 8812 8813 8814 8815 8816 8817 8818 8819 8820 8821 8822 8823 8824 8825 8826 8827 8828 8829 8830 8831 8832 8833 8834 8835 8836 8837 8838 8839 8840 8841 8842 8843 8844 8845 8846 8847 8848 8849 8850 8851 8852 8853 8854 8855 8856 8857 8858 8859 8860 8861 8862 8863 8864 8865 8866 8867 8868 8869 8870 8871 8872 8873 8874 8875 8876 8877 8878 8879 8880 8881 8882 8883 8884 8885 8886 8887 8888 8889 8890 8891 8892 8893 8894 8895 8896 8897 8898 8899 8900 8901 8902 8903 8904 8905 8906 8907 8908 8909 8910 8911 8912 8913 8914 8915 8916 8917 8918 8919 8920 8921 8922 8923 8924 8925 8926 8927 8928 8929 8930 8931 8932 8933 8934 8935 8936 8937 8938 8939 8940 8941 8942 8943 8944 8945 8946 8947 8948 8949 8950 8951 8952 8953 8954 8955 8956 8957 8958 8959 8960 8961 8962 8963 8964 8965 8966 8967 8968 8969 8970 8971 8972 8973 8974 8975 8976 8977 8978 8979 8980 8981 8982 8983 8984 8985 8986 8987 8988 8989 8990 8991 8992 8993 8994 8995 8996 8997 8998 8999 9000 9001 9002 9003 9004 9005 9006 9007 9008 9009 9010 9011 9012 9013 9014 9015 9016 9017 9018 9019 9020 9021 9022 9023 9024 9025 9026 9027 9028 9029 9030 9031 9032 9033 9034 9035 9036 9037 9038 9039 9040 9041 9042 9043 9044 9045 9046 9047 9048 9049 9050 9051 9052 9053 9054 9055 9056 9057 9058 9059 9060 9061 9062 9063 9064 9065 9066 9067 9068 9069 9070 9071 9072 9073 9074 9075 9076 9077 9078 9079 9080 9081 9082 9083 9084 9085 9086 9087 9088 9089 9090 9091 9092 9093 9094 9095 9096 9097 9098 9099 9100 9101 9102 9103 9104 9105 9106 9107 9108 9109 9110 9111 9112 9113 9114 9115 9116 9117 9118 9119 9120 9121 9122 9123 9124 9125 9126 9127 9128 9129 9130 9131 9132 9133 9134 9135 9136 9137 9138 9139 9140 9141 9142 9143 9144 9145 9146 9147 9148 9149 9150 9151 9152 9153 9154 9155 9156 9157 9158 9159 9160 9161 9162 9163 9164 9165 9166 9167 9168 9169 9170 9171 9172 9173 9174 9175 9176 9177 9178 9179 9180 9181 9182 9183 9184 9185 9186 9187 9188 9189 9190 9191 9192 9193 9194 9195 9196 9197 9198 9199 9200 9201 9202 9203 9204 9205 9206 9207 9208 9209 9210 9211 9212 9213 9214 9215 9216 9217 9218 9219 9220 9221 9222 9223 9224 9225 9226 9227 9228 9229 9230 9231 9232 9233 9234 9235 9236 9237 9238 9239 9240 9241 9242 9243 9244 9245 9246 9247 9248 9249 9250 9251 9252 9253 9254 9255 9256 9257 9258 9259 9260 9261 9262 9263 9264 9265 9266 9267 9268 9269 9270 9271 9272 9273 9274 9275 9276 9277 9278 9279 9280 9281 9282 9283 9284 9285 9286 9287 9288 9289 9290 9291 9292 9293 9294 9295 9296 9297 9298 9299 9300 9301 9302 9303 9304 9305 9306 9307 9308 9309 9310 9311 9312 9313 9314 9315 9316 9317 9318 9319 9320 9321 9322 9323 9324 9325 9326 9327 9328 9329 9330 9331 9332 9333 9334 9335 
/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE
   IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	21

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
};

static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};

#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

static u8 mgmt_status(u8 hci_status)
{
	if (hci_status < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[hci_status];

	return MGMT_STATUS_FAILED;
}

static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;
	else
		return ADDR_LE_DEV_RANDOM;
}

void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}

static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}

static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}

static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS,
				  skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}

static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |=
						MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}

static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}

static u32 get_configurable_phys(struct hci_dev *hdev)
{
	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}

static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;

		/* When the experimental feature for LL Privacy support is
		 * enabled, then advertising is no longer supported.
		 */
		if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}

static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}

u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}

bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}

static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	__hci_req_update_eir(&req);
	__hci_req_update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}

static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	hci_req_init(&req, hdev);
	if (ext_adv_capable(hdev))
		__hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
	else
		__hci_req_enable_advertising(&req);
	hci_req_run(&req, NULL);
}

static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}

static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}

static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
once, then the events * for class of device and local name changes are disabled * and only the new extended controller information event * is used. */ hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS); hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS); hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS); return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp, sizeof(*rp) + eir_len); } static int ext_info_changed(struct hci_dev *hdev, struct sock *skip) { char buf[512]; struct mgmt_ev_ext_info_changed *ev = (void *)buf; u16 eir_len; memset(buf, 0, sizeof(buf)); eir_len = append_eir_data_to_buf(hdev, ev->eir); ev->eir_len = cpu_to_le16(eir_len); return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev, sizeof(*ev) + eir_len, HCI_MGMT_EXT_INFO_EVENTS, skip); } static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev) { __le32 settings = cpu_to_le32(get_current_settings(hdev)); return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings, sizeof(settings)); } static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode) { bt_dev_dbg(hdev, "status 0x%02x", status); if (hci_conn_count(hdev) == 0) { cancel_delayed_work(&hdev->power_off); queue_work(hdev->req_workqueue, &hdev->power_off.work); } } void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance) { struct mgmt_ev_advertising_added ev; ev.instance = instance; mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk); } void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev, u8 instance) { struct mgmt_ev_advertising_removed ev; ev.instance = instance; mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk); } static void cancel_adv_timeout(struct hci_dev *hdev) { if (hdev->adv_instance_timeout) { hdev->adv_instance_timeout = 0; cancel_delayed_work(&hdev->adv_instance_expire); } } static int clean_up_hci_state(struct hci_dev *hdev) { struct hci_request req; struct hci_conn *conn; bool discov_stopped; int err; hci_req_init(&req, hdev); if (test_bit(HCI_ISCAN, &hdev->flags) || test_bit(HCI_PSCAN, &hdev->flags)) { u8 scan = 0x00; hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); } hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false); if (hci_dev_test_flag(hdev, HCI_LE_ADV)) __hci_req_disable_advertising(&req); discov_stopped = hci_req_stop_discovery(&req); list_for_each_entry(conn, &hdev->conn_hash.list, list) { /* 0x15 == Terminated due to Power Off */ __hci_abort_conn(&req, conn, 0x15); } err = hci_req_run(&req, clean_up_hci_complete); if (!err && discov_stopped) hci_discovery_set_state(hdev, DISCOVERY_STOPPING); return err; } static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; struct mgmt_pending_cmd *cmd; int err; bt_dev_dbg(hdev, "sock %p", sk); if (cp->val != 0x00 && cp->val != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (pending_find(MGMT_OP_SET_POWERED, hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED, MGMT_STATUS_BUSY); goto failed; } if (!!cp->val == hdev_is_powered(hdev)) { err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev); goto failed; } cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; } if (cp->val) { queue_work(hdev->req_workqueue, &hdev->power_on); err = 0; } else { /* Disconnect connections, stop scans, etc */ err = clean_up_hci_state(hdev); if (!err) queue_delayed_work(hdev->req_workqueue, 
&hdev->power_off, HCI_POWER_OFF_TIMEOUT); /* ENODATA means there were no HCI commands queued */ if (err == -ENODATA) { cancel_delayed_work(&hdev->power_off); queue_work(hdev->req_workqueue, &hdev->power_off.work); err = 0; } } failed: hci_dev_unlock(hdev); return err; } static int new_settings(struct hci_dev *hdev, struct sock *skip) { __le32 ev = cpu_to_le32(get_current_settings(hdev)); return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip); } int mgmt_new_settings(struct hci_dev *hdev) { return new_settings(hdev, NULL); } struct cmd_lookup { struct sock *sk; struct hci_dev *hdev; u8 mgmt_status; }; static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data) { struct cmd_lookup *match = data; send_settings_rsp(cmd->sk, cmd->opcode, match->hdev); list_del(&cmd->list); if (match->sk == NULL) { match->sk = cmd->sk; sock_hold(match->sk); } mgmt_pending_free(cmd); } static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data) { u8 *status = data; mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status); mgmt_pending_remove(cmd); } static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data) { if (cmd->cmd_complete) { u8 *status = data; cmd->cmd_complete(cmd, *status); mgmt_pending_remove(cmd); return; } cmd_status_rsp(cmd, data); } static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status) { return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param, cmd->param_len); } static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status) { return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param, sizeof(struct mgmt_addr_info)); } static u8 mgmt_bredr_support(struct hci_dev *hdev) { if (!lmp_bredr_capable(hdev)) return MGMT_STATUS_NOT_SUPPORTED; else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return MGMT_STATUS_REJECTED; else return MGMT_STATUS_SUCCESS; } static u8 mgmt_le_support(struct hci_dev *hdev) { if (!lmp_le_capable(hdev)) return MGMT_STATUS_NOT_SUPPORTED; else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return MGMT_STATUS_REJECTED; else return MGMT_STATUS_SUCCESS; } void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status) { struct mgmt_pending_cmd *cmd; bt_dev_dbg(hdev, "status 0x%02x", status); hci_dev_lock(hdev); cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev); if (!cmd) goto unlock; if (status) { u8 mgmt_err = mgmt_status(status); mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); goto remove_cmd; } if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) && hdev->discov_timeout > 0) { int to = msecs_to_jiffies(hdev->discov_timeout * 1000); queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to); } send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev); new_settings(hdev, cmd->sk); remove_cmd: mgmt_pending_remove(cmd); unlock: hci_dev_unlock(hdev); } static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_discoverable *cp = data; struct mgmt_pending_cmd *cmd; u16 timeout; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) && !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, MGMT_STATUS_REJECTED); if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, MGMT_STATUS_INVALID_PARAMS); timeout = __le16_to_cpu(cp->timeout); /* Disabling discoverable requires that no 
timeout is set, * and enabling limited discoverable requires a timeout. */ if ((cp->val == 0x00 && timeout > 0) || (cp->val == 0x02 && timeout == 0)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (!hdev_is_powered(hdev) && timeout > 0) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, MGMT_STATUS_NOT_POWERED); goto failed; } if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, MGMT_STATUS_BUSY); goto failed; } if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, MGMT_STATUS_REJECTED); goto failed; } if (hdev->advertising_paused) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, MGMT_STATUS_BUSY); goto failed; } if (!hdev_is_powered(hdev)) { bool changed = false; /* Setting limited discoverable when powered off is * not a valid operation since it requires a timeout * and so no need to check HCI_LIMITED_DISCOVERABLE. */ if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) { hci_dev_change_flag(hdev, HCI_DISCOVERABLE); changed = true; } err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev); if (err < 0) goto failed; if (changed) err = new_settings(hdev, sk); goto failed; } /* If the current mode is the same, then just update the timeout * value with the new value. And if only the timeout gets updated, * then no need for any HCI transactions. */ if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) && (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { cancel_delayed_work(&hdev->discov_off); hdev->discov_timeout = timeout; if (cp->val && hdev->discov_timeout > 0) { int to = msecs_to_jiffies(hdev->discov_timeout * 1000); queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to); } err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev); goto failed; } cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; } /* Cancel any potential discoverable timeout that might be * still active and store new timeout value. The arming of * the timeout happens in the complete handler. 
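 * (mgmt_set_discoverable_complete() queues the discov_off delayed work * once the controller has confirmed the new mode).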
*/ cancel_delayed_work(&hdev->discov_off); hdev->discov_timeout = timeout; if (cp->val) hci_dev_set_flag(hdev, HCI_DISCOVERABLE); else hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); /* Limited discoverable mode */ if (cp->val == 0x02) hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE); else hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); queue_work(hdev->req_workqueue, &hdev->discoverable_update); err = 0; failed: hci_dev_unlock(hdev); return err; } void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status) { struct mgmt_pending_cmd *cmd; bt_dev_dbg(hdev, "status 0x%02x", status); hci_dev_lock(hdev); cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev); if (!cmd) goto unlock; if (status) { u8 mgmt_err = mgmt_status(status); mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); goto remove_cmd; } send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev); new_settings(hdev, cmd->sk); remove_cmd: mgmt_pending_remove(cmd); unlock: hci_dev_unlock(hdev); } static int set_connectable_update_settings(struct hci_dev *hdev, struct sock *sk, u8 val) { bool changed = false; int err; if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE)) changed = true; if (val) { hci_dev_set_flag(hdev, HCI_CONNECTABLE); } else { hci_dev_clear_flag(hdev, HCI_CONNECTABLE); hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); } err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev); if (err < 0) return err; if (changed) { hci_req_update_scan(hdev); hci_update_background_scan(hdev); return new_settings(hdev, sk); } return 0; } static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; struct mgmt_pending_cmd *cmd; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) && !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, MGMT_STATUS_REJECTED); if (cp->val != 0x00 && cp->val != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { err = set_connectable_update_settings(hdev, sk, cp->val); goto failed; } if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, MGMT_STATUS_BUSY); goto failed; } cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; } if (cp->val) { hci_dev_set_flag(hdev, HCI_CONNECTABLE); } else { if (hdev->discov_timeout > 0) cancel_delayed_work(&hdev->discov_off); hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); hci_dev_clear_flag(hdev, HCI_CONNECTABLE); } queue_work(hdev->req_workqueue, &hdev->connectable_update); err = 0; failed: hci_dev_unlock(hdev); return err; } static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; bool changed; int err; bt_dev_dbg(hdev, "sock %p", sk); if (cp->val != 0x00 && cp->val != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (cp->val) changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE); else changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE); err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev); if (err < 0) goto unlock; if (changed) { /* In limited privacy mode the change of bondable mode * may affect the local advertising address. 
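 * The discoverable update is therefore re-queued below so that the * advertising parameters are regenerated with the correct address.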
*/ if (hdev_is_powered(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING) && hci_dev_test_flag(hdev, HCI_DISCOVERABLE) && hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) queue_work(hdev->req_workqueue, &hdev->discoverable_update); err = new_settings(hdev, sk); } unlock: hci_dev_unlock(hdev); return err; } static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; struct mgmt_pending_cmd *cmd; u8 val, status; int err; bt_dev_dbg(hdev, "sock %p", sk); status = mgmt_bredr_support(hdev); if (status) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY, status); if (cp->val != 0x00 && cp->val != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { bool changed = false; if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) { hci_dev_change_flag(hdev, HCI_LINK_SECURITY); changed = true; } err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev); if (err < 0) goto failed; if (changed) err = new_settings(hdev, sk); goto failed; } if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY, MGMT_STATUS_BUSY); goto failed; } val = !!cp->val; if (test_bit(HCI_AUTH, &hdev->flags) == val) { err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev); goto failed; } cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; } err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val); if (err < 0) { mgmt_pending_remove(cmd); goto failed; } failed: hci_dev_unlock(hdev); return err; } static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; struct mgmt_pending_cmd *cmd; u8 status; int err; bt_dev_dbg(hdev, "sock %p", sk); status = mgmt_bredr_support(hdev); if (status) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status); if (!lmp_ssp_capable(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, MGMT_STATUS_NOT_SUPPORTED); if (cp->val != 0x00 && cp->val != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { bool changed; if (cp->val) { changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED); } else { changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED); if (!changed) changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED); else hci_dev_clear_flag(hdev, HCI_HS_ENABLED); } err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev); if (err < 0) goto failed; if (changed) err = new_settings(hdev, sk); goto failed; } if (pending_find(MGMT_OP_SET_SSP, hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, MGMT_STATUS_BUSY); goto failed; } if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) { err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev); goto failed; } cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; } if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(cp->val), &cp->val); err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val); if (err < 0) { mgmt_pending_remove(cmd); goto failed; } failed: hci_dev_unlock(hdev); return err; } static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; bool changed; u8 status; int err; bt_dev_dbg(hdev, "sock %p", sk); if 
(!IS_ENABLED(CONFIG_BT_HS)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, MGMT_STATUS_NOT_SUPPORTED); status = mgmt_bredr_support(hdev); if (status) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status); if (!lmp_ssp_capable(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, MGMT_STATUS_NOT_SUPPORTED); if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, MGMT_STATUS_REJECTED); if (cp->val != 0x00 && cp->val != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (pending_find(MGMT_OP_SET_SSP, hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, MGMT_STATUS_BUSY); goto unlock; } if (cp->val) { changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED); } else { if (hdev_is_powered(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, MGMT_STATUS_REJECTED); goto unlock; } changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED); } err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev); if (err < 0) goto unlock; if (changed) err = new_settings(hdev, sk); unlock: hci_dev_unlock(hdev); return err; } static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode) { struct cmd_lookup match = { NULL, hdev }; hci_dev_lock(hdev); if (status) { u8 mgmt_err = mgmt_status(status); mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp, &mgmt_err); goto unlock; } mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match); new_settings(hdev, match.sk); if (match.sk) sock_put(match.sk); /* Make sure the controller has a good default for * advertising data. Restrict the update to when LE * has actually been enabled. During power on, the * update in powered_update_hci will take care of it. */ if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { struct hci_request req; hci_req_init(&req, hdev); if (ext_adv_capable(hdev)) { int err; err = __hci_req_setup_ext_adv_instance(&req, 0x00); if (!err) __hci_req_update_scan_rsp_data(&req, 0x00); } else { __hci_req_update_adv_data(&req, 0x00); __hci_req_update_scan_rsp_data(&req, 0x00); } hci_req_run(&req, NULL); hci_update_background_scan(hdev); } unlock: hci_dev_unlock(hdev); } static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; struct hci_cp_write_le_host_supported hci_cp; struct mgmt_pending_cmd *cmd; struct hci_request req; int err; u8 val, enabled; bt_dev_dbg(hdev, "sock %p", sk); if (!lmp_le_capable(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE, MGMT_STATUS_NOT_SUPPORTED); if (cp->val != 0x00 && cp->val != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE, MGMT_STATUS_INVALID_PARAMS); /* Bluetooth single mode LE only controllers, or dual-mode * controllers configured as LE only devices, do not allow * switching LE off. These either have LE enabled explicitly * or have had BR/EDR switched off previously. * * When trying to enable an already enabled LE, gracefully * send a positive response. Trying to disable it, however, * will result in rejection.
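 * When BR/EDR support is already disabled, the check below therefore * accepts val 0x01 with a settings response and rejects val 0x00.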
*/ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { if (cp->val == 0x01) return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev); return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE, MGMT_STATUS_REJECTED); } hci_dev_lock(hdev); val = !!cp->val; enabled = lmp_host_le_capable(hdev); if (!val) hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true); if (!hdev_is_powered(hdev) || val == enabled) { bool changed = false; if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { hci_dev_change_flag(hdev, HCI_LE_ENABLED); changed = true; } if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) { hci_dev_clear_flag(hdev, HCI_ADVERTISING); changed = true; } err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev); if (err < 0) goto unlock; if (changed) err = new_settings(hdev, sk); goto unlock; } if (pending_find(MGMT_OP_SET_LE, hdev) || pending_find(MGMT_OP_SET_ADVERTISING, hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE, MGMT_STATUS_BUSY); goto unlock; } cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len); if (!cmd) { err = -ENOMEM; goto unlock; } hci_req_init(&req, hdev); memset(&hci_cp, 0, sizeof(hci_cp)); if (val) { hci_cp.le = val; hci_cp.simul = 0x00; } else { if (hci_dev_test_flag(hdev, HCI_LE_ADV)) __hci_req_disable_advertising(&req); if (ext_adv_capable(hdev)) __hci_req_clear_ext_adv_sets(&req); } hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp), &hci_cp); err = hci_req_run(&req, le_enable_complete); if (err < 0) mgmt_pending_remove(cmd); unlock: hci_dev_unlock(hdev); return err; } /* This is a helper function to test for pending mgmt commands that can * cause CoD or EIR HCI commands. We can only allow one such pending * mgmt command at a time since otherwise we cannot easily track what * the current values are, will be, and based on that calculate if a new * HCI command needs to be sent and if yes with what value. 
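 * The opcodes matched below (Add/Remove UUID, Set Device Class and * Set Powered) are the commands that can trigger such updates.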
*/ static bool pending_eir_or_class(struct hci_dev *hdev) { struct mgmt_pending_cmd *cmd; list_for_each_entry(cmd, &hdev->mgmt_pending, list) { switch (cmd->opcode) { case MGMT_OP_ADD_UUID: case MGMT_OP_REMOVE_UUID: case MGMT_OP_SET_DEV_CLASS: case MGMT_OP_SET_POWERED: return true; } } return false; } static const u8 bluetooth_base_uuid[] = { 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; static u8 get_uuid_size(const u8 *uuid) { u32 val; if (memcmp(uuid, bluetooth_base_uuid, 12)) return 128; val = get_unaligned_le32(&uuid[12]); if (val > 0xffff) return 32; return 16; } static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status) { struct mgmt_pending_cmd *cmd; hci_dev_lock(hdev); cmd = pending_find(mgmt_op, hdev); if (!cmd) goto unlock; mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status), hdev->dev_class, 3); mgmt_pending_remove(cmd); unlock: hci_dev_unlock(hdev); } static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode) { bt_dev_dbg(hdev, "status 0x%02x", status); mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status); } static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_add_uuid *cp = data; struct mgmt_pending_cmd *cmd; struct hci_request req; struct bt_uuid *uuid; int err; bt_dev_dbg(hdev, "sock %p", sk); hci_dev_lock(hdev); if (pending_eir_or_class(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID, MGMT_STATUS_BUSY); goto failed; } uuid = kmalloc(sizeof(*uuid), GFP_KERNEL); if (!uuid) { err = -ENOMEM; goto failed; } memcpy(uuid->uuid, cp->uuid, 16); uuid->svc_hint = cp->svc_hint; uuid->size = get_uuid_size(cp->uuid); list_add_tail(&uuid->list, &hdev->uuids); hci_req_init(&req, hdev); __hci_req_update_class(&req); __hci_req_update_eir(&req); err = hci_req_run(&req, add_uuid_complete); if (err < 0) { if (err != -ENODATA) goto failed; err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0, hdev->dev_class, 3); goto failed; } cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; } err = 0; failed: hci_dev_unlock(hdev); return err; } static bool enable_service_cache(struct hci_dev *hdev) { if (!hdev_is_powered(hdev)) return false; if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) { queue_delayed_work(hdev->workqueue, &hdev->service_cache, CACHE_TIMEOUT); return true; } return false; } static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode) { bt_dev_dbg(hdev, "status 0x%02x", status); mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status); } static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_remove_uuid *cp = data; struct mgmt_pending_cmd *cmd; struct bt_uuid *match, *tmp; u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; struct hci_request req; int err, found; bt_dev_dbg(hdev, "sock %p", sk); hci_dev_lock(hdev); if (pending_eir_or_class(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID, MGMT_STATUS_BUSY); goto unlock; } if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) { hci_uuids_clear(hdev); if (enable_service_cache(hdev)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0, hdev->dev_class, 3); goto unlock; } goto update_class; } found = 0; list_for_each_entry_safe(match, tmp, &hdev->uuids, list) { if (memcmp(match->uuid, cp->uuid, 16) != 0) continue; list_del(&match->list); kfree(match); found++; } if (found == 0) { err = mgmt_cmd_status(sk, hdev->id, 
MGMT_OP_REMOVE_UUID, MGMT_STATUS_INVALID_PARAMS); goto unlock; } update_class: hci_req_init(&req, hdev); __hci_req_update_class(&req); __hci_req_update_eir(&req); err = hci_req_run(&req, remove_uuid_complete); if (err < 0) { if (err != -ENODATA) goto unlock; err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0, hdev->dev_class, 3); goto unlock; } cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len); if (!cmd) { err = -ENOMEM; goto unlock; } err = 0; unlock: hci_dev_unlock(hdev); return err; } static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode) { bt_dev_dbg(hdev, "status 0x%02x", status); mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status); } static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_dev_class *cp = data; struct mgmt_pending_cmd *cmd; struct hci_request req; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!lmp_bredr_capable(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, MGMT_STATUS_NOT_SUPPORTED); hci_dev_lock(hdev); if (pending_eir_or_class(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, MGMT_STATUS_BUSY); goto unlock; } if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, MGMT_STATUS_INVALID_PARAMS); goto unlock; } hdev->major_class = cp->major; hdev->minor_class = cp->minor; if (!hdev_is_powered(hdev)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0, hdev->dev_class, 3); goto unlock; } hci_req_init(&req, hdev); if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) { hci_dev_unlock(hdev); cancel_delayed_work_sync(&hdev->service_cache); hci_dev_lock(hdev); __hci_req_update_eir(&req); } __hci_req_update_class(&req); err = hci_req_run(&req, set_class_complete); if (err < 0) { if (err != -ENODATA) goto unlock; err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0, hdev->dev_class, 3); goto unlock; } cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len); if (!cmd) { err = -ENOMEM; goto unlock; } err = 0; unlock: hci_dev_unlock(hdev); return err; } static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_load_link_keys *cp = data; const u16 max_key_count = ((U16_MAX - sizeof(*cp)) / sizeof(struct mgmt_link_key_info)); u16 key_count, expected_len; bool changed; int i; bt_dev_dbg(hdev, "sock %p", sk); if (!lmp_bredr_capable(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, MGMT_STATUS_NOT_SUPPORTED); key_count = __le16_to_cpu(cp->key_count); if (key_count > max_key_count) { bt_dev_err(hdev, "load_link_keys: too big key_count value %u", key_count); return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, MGMT_STATUS_INVALID_PARAMS); } expected_len = struct_size(cp, keys, key_count); if (expected_len != len) { bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes", expected_len, len); return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, MGMT_STATUS_INVALID_PARAMS); } if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, MGMT_STATUS_INVALID_PARAMS); bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys, key_count); hci_dev_lock(hdev); hci_link_keys_clear(hdev); if (cp->debug_keys) changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS); else changed = hci_dev_test_and_clear_flag(hdev, HCI_KEEP_DEBUG_KEYS); if (changed) new_settings(hdev, NULL); for (i = 0; i < key_count; i++) { struct 
mgmt_link_key_info *key = &cp->keys[i]; if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LINKKEY, key->val)) { bt_dev_warn(hdev, "Skipping blocked link key for %pMR", &key->addr.bdaddr); continue; } if (key->addr.type != BDADDR_BREDR) { bt_dev_warn(hdev, "Invalid link address type %u for %pMR", key->addr.type, &key->addr.bdaddr); continue; } if (key->type > 0x08) { bt_dev_warn(hdev, "Invalid link key type %u for %pMR", key->type, &key->addr.bdaddr); continue; } /* Always ignore debug keys and require a new pairing if * the user wants to use them. */ if (key->type == HCI_LK_DEBUG_COMBINATION) continue; hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val, key->type, key->pin_len, NULL); } mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0); hci_dev_unlock(hdev); return 0; } static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, struct sock *skip_sk) { struct mgmt_ev_device_unpaired ev; bacpy(&ev.addr.bdaddr, bdaddr); ev.addr.type = addr_type; return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev), skip_sk); } static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_unpair_device *cp = data; struct mgmt_rp_unpair_device rp; struct hci_conn_params *params; struct mgmt_pending_cmd *cmd; struct hci_conn *conn; u8 addr_type; int err; memset(&rp, 0, sizeof(rp)); bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); rp.addr.type = cp->addr.type; if (!bdaddr_type_is_valid(cp->addr.type)) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, MGMT_STATUS_INVALID_PARAMS, &rp, sizeof(rp)); if (cp->disconnect != 0x00 && cp->disconnect != 0x01) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, MGMT_STATUS_INVALID_PARAMS, &rp, sizeof(rp)); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp)); goto unlock; } if (cp->addr.type == BDADDR_BREDR) { /* If disconnection is requested, then look up the * connection. If the remote device is connected, it * will be later used to terminate the link. * * Setting it to NULL explicitly will cause no * termination of the link. */ if (cp->disconnect) conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); else conn = NULL; err = hci_remove_link_key(hdev, &cp->addr.bdaddr); if (err < 0) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp)); goto unlock; } goto done; } /* LE address type */ addr_type = le_addr_type(cp->addr.type); /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */ err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type); if (err < 0) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp)); goto unlock; } conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type); if (!conn) { hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type); goto done; } /* Defer clearing up the connection parameters until closing to * give a chance of keeping them if a repairing happens. */ set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags); /* Disable auto-connection parameters if present */ params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type); if (params) { if (params->explicit_connect) params->auto_connect = HCI_AUTO_CONN_EXPLICIT; else params->auto_connect = HCI_AUTO_CONN_DISABLED; } /* If disconnection is not requested, then clear the connection * variable so that the link is not terminated. 
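 * The !conn check at the done label then sends an immediate command * complete and emits the Device Unpaired event instead of waiting * for a disconnection.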
*/ if (!cp->disconnect) conn = NULL; done: /* If the connection variable is set, then termination of the * link is requested. */ if (!conn) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0, &rp, sizeof(rp)); device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk); goto unlock; } cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp, sizeof(*cp)); if (!cmd) { err = -ENOMEM; goto unlock; } cmd->cmd_complete = addr_cmd_complete; err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM); if (err < 0) mgmt_pending_remove(cmd); unlock: hci_dev_unlock(hdev); return err; } static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_disconnect *cp = data; struct mgmt_rp_disconnect rp; struct mgmt_pending_cmd *cmd; struct hci_conn *conn; int err; bt_dev_dbg(hdev, "sock %p", sk); memset(&rp, 0, sizeof(rp)); bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); rp.addr.type = cp->addr.type; if (!bdaddr_type_is_valid(cp->addr.type)) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, MGMT_STATUS_INVALID_PARAMS, &rp, sizeof(rp)); hci_dev_lock(hdev); if (!test_bit(HCI_UP, &hdev->flags)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp)); goto failed; } if (pending_find(MGMT_OP_DISCONNECT, hdev)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, MGMT_STATUS_BUSY, &rp, sizeof(rp)); goto failed; } if (cp->addr.type == BDADDR_BREDR) conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); else conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, le_addr_type(cp->addr.type)); if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp)); goto failed; } cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; } cmd->cmd_complete = generic_cmd_complete; err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM); if (err < 0) mgmt_pending_remove(cmd); failed: hci_dev_unlock(hdev); return err; } static u8 link_to_bdaddr(u8 link_type, u8 addr_type) { switch (link_type) { case LE_LINK: switch (addr_type) { case ADDR_LE_DEV_PUBLIC: return BDADDR_LE_PUBLIC; default: /* Fallback to LE Random address type */ return BDADDR_LE_RANDOM; } default: /* Fallback to BR/EDR type */ return BDADDR_BREDR; } } static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_rp_get_connections *rp; struct hci_conn *c; int err; u16 i; bt_dev_dbg(hdev, "sock %p", sk); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, MGMT_STATUS_NOT_POWERED); goto unlock; } i = 0; list_for_each_entry(c, &hdev->conn_hash.list, list) { if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags)) i++; } rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL); if (!rp) { err = -ENOMEM; goto unlock; } i = 0; list_for_each_entry(c, &hdev->conn_hash.list, list) { if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags)) continue; bacpy(&rp->addr[i].bdaddr, &c->dst); rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type); if (c->type == SCO_LINK || c->type == ESCO_LINK) continue; i++; } rp->conn_count = cpu_to_le16(i); /* Recalculate length in case of filtered SCO connections, etc */ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp, struct_size(rp, addr, i)); kfree(rp); unlock: hci_dev_unlock(hdev); return err; } static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev, 
struct mgmt_cp_pin_code_neg_reply *cp) { struct mgmt_pending_cmd *cmd; int err; cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp, sizeof(*cp)); if (!cmd) return -ENOMEM; cmd->cmd_complete = addr_cmd_complete; err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(cp->addr.bdaddr), &cp->addr.bdaddr); if (err < 0) mgmt_pending_remove(cmd); return err; } static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct hci_conn *conn; struct mgmt_cp_pin_code_reply *cp = data; struct hci_cp_pin_code_reply reply; struct mgmt_pending_cmd *cmd; int err; bt_dev_dbg(hdev, "sock %p", sk); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, MGMT_STATUS_NOT_POWERED); goto failed; } conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); if (!conn) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, MGMT_STATUS_NOT_CONNECTED); goto failed; } if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) { struct mgmt_cp_pin_code_neg_reply ncp; memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr)); bt_dev_err(hdev, "PIN code is not 16 bytes long"); err = send_pin_code_neg_reply(sk, hdev, &ncp); if (err >= 0) err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, MGMT_STATUS_INVALID_PARAMS); goto failed; } cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; } cmd->cmd_complete = addr_cmd_complete; bacpy(&reply.bdaddr, &cp->addr.bdaddr); reply.pin_len = cp->pin_len; memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code)); err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply); if (err < 0) mgmt_pending_remove(cmd); failed: hci_dev_unlock(hdev); return err; } static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_io_capability *cp = data; bt_dev_dbg(hdev, "sock %p", sk); if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); hdev->io_capability = cp->io_capability; bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability); hci_dev_unlock(hdev); return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL, 0); } static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; struct mgmt_pending_cmd *cmd; list_for_each_entry(cmd, &hdev->mgmt_pending, list) { if (cmd->opcode != MGMT_OP_PAIR_DEVICE) continue; if (cmd->user_data != conn) continue; return cmd; } return NULL; } static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status) { struct mgmt_rp_pair_device rp; struct hci_conn *conn = cmd->user_data; int err; bacpy(&rp.addr.bdaddr, &conn->dst); rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type); err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status, &rp, sizeof(rp)); /* So we don't get further callbacks for this connection */ conn->connect_cfm_cb = NULL; conn->security_cfm_cb = NULL; conn->disconn_cfm_cb = NULL; hci_conn_drop(conn); /* The device is paired so there is no need to remove * its connection parameters anymore. */ clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags); hci_conn_put(conn); return err; } void mgmt_smp_complete(struct hci_conn *conn, bool complete) { u8 status = complete ? 
MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED; struct mgmt_pending_cmd *cmd; cmd = find_pairing(conn); if (cmd) { cmd->cmd_complete(cmd, status); mgmt_pending_remove(cmd); } } static void pairing_complete_cb(struct hci_conn *conn, u8 status) { struct mgmt_pending_cmd *cmd; BT_DBG("status %u", status); cmd = find_pairing(conn); if (!cmd) { BT_DBG("Unable to find a pending command"); return; } cmd->cmd_complete(cmd, mgmt_status(status)); mgmt_pending_remove(cmd); } static void le_pairing_complete_cb(struct hci_conn *conn, u8 status) { struct mgmt_pending_cmd *cmd; BT_DBG("status %u", status); if (!status) return; cmd = find_pairing(conn); if (!cmd) { BT_DBG("Unable to find a pending command"); return; } cmd->cmd_complete(cmd, mgmt_status(status)); mgmt_pending_remove(cmd); } static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_pair_device *cp = data; struct mgmt_rp_pair_device rp; struct mgmt_pending_cmd *cmd; u8 sec_level, auth_type; struct hci_conn *conn; int err; bt_dev_dbg(hdev, "sock %p", sk); memset(&rp, 0, sizeof(rp)); bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); rp.addr.type = cp->addr.type; if (!bdaddr_type_is_valid(cp->addr.type)) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, MGMT_STATUS_INVALID_PARAMS, &rp, sizeof(rp)); if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, MGMT_STATUS_INVALID_PARAMS, &rp, sizeof(rp)); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp)); goto unlock; } if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, MGMT_STATUS_ALREADY_PAIRED, &rp, sizeof(rp)); goto unlock; } sec_level = BT_SECURITY_MEDIUM; auth_type = HCI_AT_DEDICATED_BONDING; if (cp->addr.type == BDADDR_BREDR) { conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level, auth_type, CONN_REASON_PAIR_DEVICE); } else { u8 addr_type = le_addr_type(cp->addr.type); struct hci_conn_params *p; /* When pairing a new device, it is expected to remember * this device for future connections. Adding the connection * parameter information ahead of time allows tracking * of the peripheral preferred values and will speed up any * further connection establishment. * * If connection parameters already exist, then they * will be kept and this function does nothing. 
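 * (hci_conn_params_add() simply returns the already existing entry * in that case).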
*/ p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type); if (!p) { err = -EIO; goto unlock; } if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT) p->auto_connect = HCI_AUTO_CONN_DISABLED; conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type, sec_level, HCI_LE_CONN_TIMEOUT, CONN_REASON_PAIR_DEVICE); } if (IS_ERR(conn)) { int status; if (PTR_ERR(conn) == -EBUSY) status = MGMT_STATUS_BUSY; else if (PTR_ERR(conn) == -EOPNOTSUPP) status = MGMT_STATUS_NOT_SUPPORTED; else if (PTR_ERR(conn) == -ECONNREFUSED) status = MGMT_STATUS_REJECTED; else status = MGMT_STATUS_CONNECT_FAILED; err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, status, &rp, sizeof(rp)); goto unlock; } if (conn->connect_cfm_cb) { hci_conn_drop(conn); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, MGMT_STATUS_BUSY, &rp, sizeof(rp)); goto unlock; } cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len); if (!cmd) { err = -ENOMEM; hci_conn_drop(conn); goto unlock; } cmd->cmd_complete = pairing_complete; /* For LE, just connecting isn't a proof that the pairing finished */ if (cp->addr.type == BDADDR_BREDR) { conn->connect_cfm_cb = pairing_complete_cb; conn->security_cfm_cb = pairing_complete_cb; conn->disconn_cfm_cb = pairing_complete_cb; } else { conn->connect_cfm_cb = le_pairing_complete_cb; conn->security_cfm_cb = le_pairing_complete_cb; conn->disconn_cfm_cb = le_pairing_complete_cb; } conn->io_capability = cp->io_cap; cmd->user_data = hci_conn_get(conn); if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) && hci_conn_security(conn, sec_level, auth_type, true)) { cmd->cmd_complete(cmd, 0); mgmt_pending_remove(cmd); } err = 0; unlock: hci_dev_unlock(hdev); return err; } static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_addr_info *addr = data; struct mgmt_pending_cmd *cmd; struct hci_conn *conn; int err; bt_dev_dbg(hdev, "sock %p", sk); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, MGMT_STATUS_NOT_POWERED); goto unlock; } cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev); if (!cmd) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, MGMT_STATUS_INVALID_PARAMS); goto unlock; } conn = cmd->user_data; if (bacmp(&addr->bdaddr, &conn->dst) != 0) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, MGMT_STATUS_INVALID_PARAMS); goto unlock; } cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED); mgmt_pending_remove(cmd); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0, addr, sizeof(*addr)); /* Since user doesn't want to proceed with the connection, abort any * ongoing pairing and then terminate the link if it was created * because of the pair device action. 
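 * A link that was not created by the Pair Device command * (conn_reason != CONN_REASON_PAIR_DEVICE) is left connected.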
*/ if (addr->type == BDADDR_BREDR) hci_remove_link_key(hdev, &addr->bdaddr); else smp_cancel_and_remove_pairing(hdev, &addr->bdaddr, le_addr_type(addr->type)); if (conn->conn_reason == CONN_REASON_PAIR_DEVICE) hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM); unlock: hci_dev_unlock(hdev); return err; } static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev, struct mgmt_addr_info *addr, u16 mgmt_op, u16 hci_op, __le32 passkey) { struct mgmt_pending_cmd *cmd; struct hci_conn *conn; int err; hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { err = mgmt_cmd_complete(sk, hdev->id, mgmt_op, MGMT_STATUS_NOT_POWERED, addr, sizeof(*addr)); goto done; } if (addr->type == BDADDR_BREDR) conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr); else conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr, le_addr_type(addr->type)); if (!conn) { err = mgmt_cmd_complete(sk, hdev->id, mgmt_op, MGMT_STATUS_NOT_CONNECTED, addr, sizeof(*addr)); goto done; } if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) { err = smp_user_confirm_reply(conn, mgmt_op, passkey); if (!err) err = mgmt_cmd_complete(sk, hdev->id, mgmt_op, MGMT_STATUS_SUCCESS, addr, sizeof(*addr)); else err = mgmt_cmd_complete(sk, hdev->id, mgmt_op, MGMT_STATUS_FAILED, addr, sizeof(*addr)); goto done; } cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr)); if (!cmd) { err = -ENOMEM; goto done; } cmd->cmd_complete = addr_cmd_complete; /* Continue with pairing via HCI */ if (hci_op == HCI_OP_USER_PASSKEY_REPLY) { struct hci_cp_user_passkey_reply cp; bacpy(&cp.bdaddr, &addr->bdaddr); cp.passkey = passkey; err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp); } else err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr), &addr->bdaddr); if (err < 0) mgmt_pending_remove(cmd); done: hci_dev_unlock(hdev); return err; } static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_pin_code_neg_reply *cp = data; bt_dev_dbg(hdev, "sock %p", sk); return user_pairing_resp(sk, hdev, &cp->addr, MGMT_OP_PIN_CODE_NEG_REPLY, HCI_OP_PIN_CODE_NEG_REPLY, 0); } static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_user_confirm_reply *cp = data; bt_dev_dbg(hdev, "sock %p", sk); if (len != sizeof(*cp)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY, MGMT_STATUS_INVALID_PARAMS); return user_pairing_resp(sk, hdev, &cp->addr, MGMT_OP_USER_CONFIRM_REPLY, HCI_OP_USER_CONFIRM_REPLY, 0); } static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_user_confirm_neg_reply *cp = data; bt_dev_dbg(hdev, "sock %p", sk); return user_pairing_resp(sk, hdev, &cp->addr, MGMT_OP_USER_CONFIRM_NEG_REPLY, HCI_OP_USER_CONFIRM_NEG_REPLY, 0); } static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_user_passkey_reply *cp = data; bt_dev_dbg(hdev, "sock %p", sk); return user_pairing_resp(sk, hdev, &cp->addr, MGMT_OP_USER_PASSKEY_REPLY, HCI_OP_USER_PASSKEY_REPLY, cp->passkey); } static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_user_passkey_neg_reply *cp = data; bt_dev_dbg(hdev, "sock %p", sk); return user_pairing_resp(sk, hdev, &cp->addr, MGMT_OP_USER_PASSKEY_NEG_REPLY, HCI_OP_USER_PASSKEY_NEG_REPLY, 0); } static void adv_expire(struct hci_dev *hdev, u32 flags) { struct adv_info *adv_instance; struct hci_request req; int err; adv_instance = hci_find_adv_instance(hdev, 
hdev->cur_adv_instance); if (!adv_instance) return; /* stop if current instance doesn't need to be changed */ if (!(adv_instance->flags & flags)) return; cancel_adv_timeout(hdev); adv_instance = hci_get_next_instance(hdev, adv_instance->instance); if (!adv_instance) return; hci_req_init(&req, hdev); err = __hci_req_schedule_adv_instance(&req, adv_instance->instance, true); if (err) return; hci_req_run(&req, NULL); } static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode) { struct mgmt_cp_set_local_name *cp; struct mgmt_pending_cmd *cmd; bt_dev_dbg(hdev, "status 0x%02x", status); hci_dev_lock(hdev); cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev); if (!cmd) goto unlock; cp = cmd->param; if (status) { mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, mgmt_status(status)); } else { mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, cp, sizeof(*cp)); if (hci_dev_test_flag(hdev, HCI_LE_ADV)) adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME); } mgmt_pending_remove(cmd); unlock: hci_dev_unlock(hdev); } static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_local_name *cp = data; struct mgmt_pending_cmd *cmd; struct hci_request req; int err; bt_dev_dbg(hdev, "sock %p", sk); hci_dev_lock(hdev); /* If the old values are the same as the new ones just return a * direct command complete event. */ if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) && !memcmp(hdev->short_name, cp->short_name, sizeof(hdev->short_name))) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, data, len); goto failed; } memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name)); if (!hdev_is_powered(hdev)) { memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name)); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, data, len); if (err < 0) goto failed; err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len, HCI_MGMT_LOCAL_NAME_EVENTS, sk); ext_info_changed(hdev, sk); goto failed; } cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; } memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name)); hci_req_init(&req, hdev); if (lmp_bredr_capable(hdev)) { __hci_req_update_name(&req); __hci_req_update_eir(&req); } /* The name is stored in the scan response data and so * no need to update the advertising data here. 
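 * Only the scan response of the current advertising instance is * refreshed, and only while LE advertising is actually enabled.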
*/ if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING)) __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance); err = hci_req_run(&req, set_name_complete); if (err < 0) mgmt_pending_remove(cmd); failed: hci_dev_unlock(hdev); return err; } static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_appearance *cp = data; u16 appearance; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!lmp_le_capable(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE, MGMT_STATUS_NOT_SUPPORTED); appearance = le16_to_cpu(cp->appearance); hci_dev_lock(hdev); if (hdev->appearance != appearance) { hdev->appearance = appearance; if (hci_dev_test_flag(hdev, HCI_LE_ADV)) adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE); ext_info_changed(hdev, sk); } err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL, 0); hci_dev_unlock(hdev); return err; } static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_rp_get_phy_configuration rp; bt_dev_dbg(hdev, "sock %p", sk); hci_dev_lock(hdev); memset(&rp, 0, sizeof(rp)); rp.supported_phys = cpu_to_le32(get_supported_phys(hdev)); rp.selected_phys = cpu_to_le32(get_selected_phys(hdev)); rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev)); hci_dev_unlock(hdev); return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0, &rp, sizeof(rp)); } int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip) { struct mgmt_ev_phy_configuration_changed ev; memset(&ev, 0, sizeof(ev)); ev.selected_phys = cpu_to_le32(get_selected_phys(hdev)); return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev, sizeof(ev), skip); } static void set_default_phy_complete(struct hci_dev *hdev, u8 status, u16 opcode, struct sk_buff *skb) { struct mgmt_pending_cmd *cmd; bt_dev_dbg(hdev, "status 0x%02x", status); hci_dev_lock(hdev); cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev); if (!cmd) goto unlock; if (status) { mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_PHY_CONFIGURATION, mgmt_status(status)); } else { mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_PHY_CONFIGURATION, 0, NULL, 0); mgmt_phy_configuration_changed(hdev, cmd->sk); } mgmt_pending_remove(cmd); unlock: hci_dev_unlock(hdev); } static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_phy_configuration *cp = data; struct hci_cp_le_set_default_phy cp_phy; struct mgmt_pending_cmd *cmd; struct hci_request req; u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys; u16 pkt_type = (HCI_DH1 | HCI_DM1); bool changed = false; int err; bt_dev_dbg(hdev, "sock %p", sk); configurable_phys = get_configurable_phys(hdev); supported_phys = get_supported_phys(hdev); selected_phys = __le32_to_cpu(cp->selected_phys); if (selected_phys & ~supported_phys) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PHY_CONFIGURATION, MGMT_STATUS_INVALID_PARAMS); unconfigure_phys = supported_phys & ~configurable_phys; if ((selected_phys & unconfigure_phys) != unconfigure_phys) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PHY_CONFIGURATION, MGMT_STATUS_INVALID_PARAMS); if (selected_phys == get_selected_phys(hdev)) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_PHY_CONFIGURATION, 0, NULL, 0); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PHY_CONFIGURATION, MGMT_STATUS_REJECTED); goto unlock; } if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) { err = 
mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PHY_CONFIGURATION, MGMT_STATUS_BUSY); goto unlock; } if (selected_phys & MGMT_PHY_BR_1M_3SLOT) pkt_type |= (HCI_DH3 | HCI_DM3); else pkt_type &= ~(HCI_DH3 | HCI_DM3); if (selected_phys & MGMT_PHY_BR_1M_5SLOT) pkt_type |= (HCI_DH5 | HCI_DM5); else pkt_type &= ~(HCI_DH5 | HCI_DM5); if (selected_phys & MGMT_PHY_EDR_2M_1SLOT) pkt_type &= ~HCI_2DH1; else pkt_type |= HCI_2DH1; if (selected_phys & MGMT_PHY_EDR_2M_3SLOT) pkt_type &= ~HCI_2DH3; else pkt_type |= HCI_2DH3; if (selected_phys & MGMT_PHY_EDR_2M_5SLOT) pkt_type &= ~HCI_2DH5; else pkt_type |= HCI_2DH5; if (selected_phys & MGMT_PHY_EDR_3M_1SLOT) pkt_type &= ~HCI_3DH1; else pkt_type |= HCI_3DH1; if (selected_phys & MGMT_PHY_EDR_3M_3SLOT) pkt_type &= ~HCI_3DH3; else pkt_type |= HCI_3DH3; if (selected_phys & MGMT_PHY_EDR_3M_5SLOT) pkt_type &= ~HCI_3DH5; else pkt_type |= HCI_3DH5; if (pkt_type != hdev->pkt_type) { hdev->pkt_type = pkt_type; changed = true; } if ((selected_phys & MGMT_PHY_LE_MASK) == (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) { if (changed) mgmt_phy_configuration_changed(hdev, sk); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_PHY_CONFIGURATION, 0, NULL, 0); goto unlock; } cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data, len); if (!cmd) { err = -ENOMEM; goto unlock; } hci_req_init(&req, hdev); memset(&cp_phy, 0, sizeof(cp_phy)); if (!(selected_phys & MGMT_PHY_LE_TX_MASK)) cp_phy.all_phys |= 0x01; if (!(selected_phys & MGMT_PHY_LE_RX_MASK)) cp_phy.all_phys |= 0x02; if (selected_phys & MGMT_PHY_LE_1M_TX) cp_phy.tx_phys |= HCI_LE_SET_PHY_1M; if (selected_phys & MGMT_PHY_LE_2M_TX) cp_phy.tx_phys |= HCI_LE_SET_PHY_2M; if (selected_phys & MGMT_PHY_LE_CODED_TX) cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED; if (selected_phys & MGMT_PHY_LE_1M_RX) cp_phy.rx_phys |= HCI_LE_SET_PHY_1M; if (selected_phys & MGMT_PHY_LE_2M_RX) cp_phy.rx_phys |= HCI_LE_SET_PHY_2M; if (selected_phys & MGMT_PHY_LE_CODED_RX) cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED; hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy); err = hci_req_run_skb(&req, set_default_phy_complete); if (err < 0) mgmt_pending_remove(cmd); unlock: hci_dev_unlock(hdev); return err; } static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { int err = MGMT_STATUS_SUCCESS; struct mgmt_cp_set_blocked_keys *keys = data; const u16 max_key_count = ((U16_MAX - sizeof(*keys)) / sizeof(struct mgmt_blocked_key_info)); u16 key_count, expected_len; int i; bt_dev_dbg(hdev, "sock %p", sk); key_count = __le16_to_cpu(keys->key_count); if (key_count > max_key_count) { bt_dev_err(hdev, "too big key_count value %u", key_count); return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS, MGMT_STATUS_INVALID_PARAMS); } expected_len = struct_size(keys, keys, key_count); if (expected_len != len) { bt_dev_err(hdev, "expected %u bytes, got %u bytes", expected_len, len); return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS, MGMT_STATUS_INVALID_PARAMS); } hci_dev_lock(hdev); hci_blocked_keys_clear(hdev); for (i = 0; i < keys->key_count; ++i) { struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL); if (!b) { err = MGMT_STATUS_NO_RESOURCES; break; } b->type = keys->keys[i].type; memcpy(b->val, keys->keys[i].val, sizeof(b->val)); list_add_rcu(&b->list, &hdev->blocked_keys); } hci_dev_unlock(hdev); return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS, err, NULL, 0); } static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; 
int err; bool changed = false; bt_dev_dbg(hdev, "sock %p", sk); if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_WIDEBAND_SPEECH, MGMT_STATUS_NOT_SUPPORTED); if (cp->val != 0x00 && cp->val != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_WIDEBAND_SPEECH, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_WIDEBAND_SPEECH, MGMT_STATUS_BUSY); goto unlock; } if (hdev_is_powered(hdev) && !!cp->val != hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_WIDEBAND_SPEECH, MGMT_STATUS_REJECTED); goto unlock; } if (cp->val) changed = !hci_dev_test_and_set_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED); else changed = hci_dev_test_and_clear_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED); err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev); if (err < 0) goto unlock; if (changed) err = new_settings(hdev, sk); unlock: hci_dev_unlock(hdev); return err; } static int read_controller_cap(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { char buf[20]; struct mgmt_rp_read_controller_cap *rp = (void *)buf; u16 cap_len = 0; u8 flags = 0; u8 tx_power_range[2]; bt_dev_dbg(hdev, "sock %p", sk); memset(&buf, 0, sizeof(buf)); hci_dev_lock(hdev); /* When the Read Simple Pairing Options command is supported, then * the remote public key validation is supported. * * Alternatively, when Microsoft extensions are available, they can * indicate support for public key validation as well. */ if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev)) flags |= 0x01; /* Remote public key validation (BR/EDR) */ flags |= 0x02; /* Remote public key validation (LE) */ /* When the Read Encryption Key Size command is supported, then the * encryption key size is enforced. */ if (hdev->commands[20] & 0x10) flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */ flags |= 0x08; /* Encryption key size enforcement (LE) */ cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS, &flags, 1); /* When the Read Simple Pairing Options command is supported, then * also max encryption key size information is provided. 
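 * Otherwise only the SMP maximum encryption key size is reported below.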
*/ if (hdev->commands[41] & 0x08) cap_len = eir_append_le16(rp->cap, cap_len, MGMT_CAP_MAX_ENC_KEY_SIZE, hdev->max_enc_key_size); cap_len = eir_append_le16(rp->cap, cap_len, MGMT_CAP_SMP_MAX_ENC_KEY_SIZE, SMP_MAX_ENC_KEY_SIZE); /* Append the min/max LE tx power parameters if we were able to fetch * it from the controller */ if (hdev->commands[38] & 0x80) { memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1); memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1); cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR, tx_power_range, 2); } rp->cap_len = cpu_to_le16(cap_len); hci_dev_unlock(hdev); return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0, rp, sizeof(*rp) + cap_len); } #ifdef CONFIG_BT_FEATURE_DEBUG /* d4992530-b9ec-469f-ab01-6c481c47da1c */ static const u8 debug_uuid[16] = { 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab, 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4, }; #endif /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */ static const u8 simult_central_periph_uuid[16] = { 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92, 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67, }; /* 15c0a148-c273-11ea-b3de-0242ac130004 */ static const u8 rpa_resolution_uuid[16] = { 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3, 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15, }; static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { char buf[62]; /* Enough space for 3 features */ struct mgmt_rp_read_exp_features_info *rp = (void *)buf; u16 idx = 0; u32 flags; bt_dev_dbg(hdev, "sock %p", sk); memset(&buf, 0, sizeof(buf)); #ifdef CONFIG_BT_FEATURE_DEBUG if (!hdev) { flags = bt_dbg_get() ? BIT(0) : 0; memcpy(rp->features[idx].uuid, debug_uuid, 16); rp->features[idx].flags = cpu_to_le32(flags); idx++; } #endif if (hdev) { if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) && (hdev->le_states[4] & 0x08) && /* Central */ (hdev->le_states[4] & 0x40) && /* Peripheral */ (hdev->le_states[3] & 0x10)) /* Simultaneous */ flags = BIT(0); else flags = 0; memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16); rp->features[idx].flags = cpu_to_le32(flags); idx++; } if (hdev && use_ll_privacy(hdev)) { if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) flags = BIT(0) | BIT(1); else flags = BIT(1); memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16); rp->features[idx].flags = cpu_to_le32(flags); idx++; } rp->feature_count = cpu_to_le16(idx); /* After reading the experimental features information, enable * the events to update client on any future change. */ hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE, MGMT_OP_READ_EXP_FEATURES_INFO, 0, rp, sizeof(*rp) + (20 * idx)); } static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev, struct sock *skip) { struct mgmt_ev_exp_feature_changed ev; memset(&ev, 0, sizeof(ev)); memcpy(ev.uuid, rpa_resolution_uuid, 16); ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1)); return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev, &ev, sizeof(ev), HCI_MGMT_EXP_FEATURE_EVENTS, skip); } #ifdef CONFIG_BT_FEATURE_DEBUG static int exp_debug_feature_changed(bool enabled, struct sock *skip) { struct mgmt_ev_exp_feature_changed ev; memset(&ev, 0, sizeof(ev)); memcpy(ev.uuid, debug_uuid, 16); ev.flags = cpu_to_le32(enabled ? 
BIT(0) : 0); return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL, &ev, sizeof(ev), HCI_MGMT_EXP_FEATURE_EVENTS, skip); } #endif #define EXP_FEAT(_uuid, _set_func) \ { \ .uuid = _uuid, \ .set_func = _set_func, \ } /* The zero key uuid is special. Multiple exp features are set through it. */ static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev, struct mgmt_cp_set_exp_feature *cp, u16 data_len) { struct mgmt_rp_set_exp_feature rp; memset(rp.uuid, 0, 16); rp.flags = cpu_to_le32(0); #ifdef CONFIG_BT_FEATURE_DEBUG if (!hdev) { bool changed = bt_dbg_get(); bt_dbg_set(false); if (changed) exp_debug_feature_changed(false, sk); } #endif if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) { bool changed; changed = hci_dev_test_and_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY); if (changed) exp_ll_privacy_feature_changed(false, hdev, sk); } hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE, MGMT_OP_SET_EXP_FEATURE, 0, &rp, sizeof(rp)); } #ifdef CONFIG_BT_FEATURE_DEBUG static int set_debug_func(struct sock *sk, struct hci_dev *hdev, struct mgmt_cp_set_exp_feature *cp, u16 data_len) { struct mgmt_rp_set_exp_feature rp; bool val, changed; int err; /* Command requires to use the non-controller index */ if (hdev) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_INVALID_INDEX); /* Parameters are limited to a single octet */ if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) return mgmt_cmd_status(sk, MGMT_INDEX_NONE, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_INVALID_PARAMS); /* Only boolean on/off is supported */ if (cp->param[0] != 0x00 && cp->param[0] != 0x01) return mgmt_cmd_status(sk, MGMT_INDEX_NONE, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_INVALID_PARAMS); val = !!cp->param[0]; changed = val ? !bt_dbg_get() : bt_dbg_get(); bt_dbg_set(val); memcpy(rp.uuid, debug_uuid, 16); rp.flags = cpu_to_le32(val ? 
BIT(0) : 0); hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_SET_EXP_FEATURE, 0, &rp, sizeof(rp)); if (changed) exp_debug_feature_changed(val, sk); return err; } #endif static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev, struct mgmt_cp_set_exp_feature *cp, u16 data_len) { struct mgmt_rp_set_exp_feature rp; bool val, changed; int err; u32 flags; /* Command requires to use the controller index */ if (!hdev) return mgmt_cmd_status(sk, MGMT_INDEX_NONE, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_INVALID_INDEX); /* Changes can only be made when controller is powered down */ if (hdev_is_powered(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_REJECTED); /* Parameters are limited to a single octet */ if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_INVALID_PARAMS); /* Only boolean on/off is supported */ if (cp->param[0] != 0x00 && cp->param[0] != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_INVALID_PARAMS); val = !!cp->param[0]; if (val) { changed = !hci_dev_test_and_set_flag(hdev, HCI_ENABLE_LL_PRIVACY); hci_dev_clear_flag(hdev, HCI_ADVERTISING); /* Enable LL privacy + supported settings changed */ flags = BIT(0) | BIT(1); } else { changed = hci_dev_test_and_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY); /* Disable LL privacy + supported settings changed */ flags = BIT(1); } memcpy(rp.uuid, rpa_resolution_uuid, 16); rp.flags = cpu_to_le32(flags); hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0, &rp, sizeof(rp)); if (changed) exp_ll_privacy_feature_changed(val, hdev, sk); return err; } static const struct mgmt_exp_feature { const u8 *uuid; int (*set_func)(struct sock *sk, struct hci_dev *hdev, struct mgmt_cp_set_exp_feature *cp, u16 data_len); } exp_features[] = { EXP_FEAT(ZERO_KEY, set_zero_key_func), #ifdef CONFIG_BT_FEATURE_DEBUG EXP_FEAT(debug_uuid, set_debug_func), #endif EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func), /* end with a null feature */ EXP_FEAT(NULL, NULL) }; static int set_exp_feature(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_cp_set_exp_feature *cp = data; size_t i = 0; bt_dev_dbg(hdev, "sock %p", sk); for (i = 0; exp_features[i].uuid; i++) { if (!memcmp(cp->uuid, exp_features[i].uuid, 16)) return exp_features[i].set_func(sk, hdev, cp, data_len); } return mgmt_cmd_status(sk, hdev ? 
hdev->id : MGMT_INDEX_NONE, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_NOT_SUPPORTED); } #define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1) static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_cp_get_device_flags *cp = data; struct mgmt_rp_get_device_flags rp; struct bdaddr_list_with_flags *br_params; struct hci_conn_params *params; u32 supported_flags = SUPPORTED_DEVICE_FLAGS(); u32 current_flags = 0; u8 status = MGMT_STATUS_INVALID_PARAMS; bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n", &cp->addr.bdaddr, cp->addr.type); hci_dev_lock(hdev); memset(&rp, 0, sizeof(rp)); if (cp->addr.type == BDADDR_BREDR) { br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &cp->addr.bdaddr, cp->addr.type); if (!br_params) goto done; current_flags = br_params->current_flags; } else { params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, le_addr_type(cp->addr.type)); if (!params) goto done; current_flags = params->current_flags; } bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); rp.addr.type = cp->addr.type; rp.supported_flags = cpu_to_le32(supported_flags); rp.current_flags = cpu_to_le32(current_flags); status = MGMT_STATUS_SUCCESS; done: hci_dev_unlock(hdev); return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status, &rp, sizeof(rp)); } static void device_flags_changed(struct sock *sk, struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type, u32 supported_flags, u32 current_flags) { struct mgmt_ev_device_flags_changed ev; bacpy(&ev.addr.bdaddr, bdaddr); ev.addr.type = bdaddr_type; ev.supported_flags = cpu_to_le32(supported_flags); ev.current_flags = cpu_to_le32(current_flags); mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk); } static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_device_flags *cp = data; struct bdaddr_list_with_flags *br_params; struct hci_conn_params *params; u8 status = MGMT_STATUS_INVALID_PARAMS; u32 supported_flags = SUPPORTED_DEVICE_FLAGS(); u32 current_flags = __le32_to_cpu(cp->current_flags); bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x", &cp->addr.bdaddr, cp->addr.type, __le32_to_cpu(current_flags)); if ((supported_flags | current_flags) != supported_flags) { bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)", current_flags, supported_flags); goto done; } hci_dev_lock(hdev); if (cp->addr.type == BDADDR_BREDR) { br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &cp->addr.bdaddr, cp->addr.type); if (br_params) { br_params->current_flags = current_flags; status = MGMT_STATUS_SUCCESS; } else { bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)", &cp->addr.bdaddr, cp->addr.type); } } else { params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, le_addr_type(cp->addr.type)); if (params) { params->current_flags = current_flags; status = MGMT_STATUS_SUCCESS; } else { bt_dev_warn(hdev, "No such LE device %pMR (0x%x)", &cp->addr.bdaddr, le_addr_type(cp->addr.type)); } } done: hci_dev_unlock(hdev); if (status == MGMT_STATUS_SUCCESS) device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type, supported_flags, current_flags); return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status, &cp->addr, sizeof(cp->addr)); } static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev, u16 handle) { struct mgmt_ev_adv_monitor_added ev; ev.monitor_handle = cpu_to_le16(handle); mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk); } void 
mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle) { struct mgmt_ev_adv_monitor_removed ev; struct mgmt_pending_cmd *cmd; struct sock *sk_skip = NULL; struct mgmt_cp_remove_adv_monitor *cp; cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev); if (cmd) { cp = cmd->param; if (cp->monitor_handle) sk_skip = cmd->sk; } ev.monitor_handle = cpu_to_le16(handle); mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip); } static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct adv_monitor *monitor = NULL; struct mgmt_rp_read_adv_monitor_features *rp = NULL; int handle, err; size_t rp_size = 0; __u32 supported = 0; __u32 enabled = 0; __u16 num_handles = 0; __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES]; BT_DBG("request for %s", hdev->name); hci_dev_lock(hdev); if (msft_monitor_supported(hdev)) supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS; idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) handles[num_handles++] = monitor->handle; hci_dev_unlock(hdev); rp_size = sizeof(*rp) + (num_handles * sizeof(u16)); rp = kmalloc(rp_size, GFP_KERNEL); if (!rp) return -ENOMEM; /* All supported features are currently enabled */ enabled = supported; rp->supported_features = cpu_to_le32(supported); rp->enabled_features = cpu_to_le32(enabled); rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES); rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS; rp->num_handles = cpu_to_le16(num_handles); if (num_handles) memcpy(&rp->handles, &handles, (num_handles * sizeof(u16))); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_MONITOR_FEATURES, MGMT_STATUS_SUCCESS, rp, rp_size); kfree(rp); return err; } int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status) { struct mgmt_rp_add_adv_patterns_monitor rp; struct mgmt_pending_cmd *cmd; struct adv_monitor *monitor; int err = 0; hci_dev_lock(hdev); cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev); if (!cmd) { cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev); if (!cmd) goto done; } monitor = cmd->user_data; rp.monitor_handle = cpu_to_le16(monitor->handle); if (!status) { mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle); hdev->adv_monitors_cnt++; if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED) monitor->state = ADV_MONITOR_STATE_REGISTERED; hci_update_background_scan(hdev); } err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status), &rp, sizeof(rp)); mgmt_pending_remove(cmd); done: hci_dev_unlock(hdev); bt_dev_dbg(hdev, "add monitor %d complete, status %u", rp.monitor_handle, status); return err; } static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev, struct adv_monitor *m, u8 status, void *data, u16 len, u16 op) { struct mgmt_rp_add_adv_patterns_monitor rp; struct mgmt_pending_cmd *cmd; int err; bool pending; hci_dev_lock(hdev); if (status) goto unlock; if (pending_find(MGMT_OP_SET_LE, hdev) || pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) || pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) || pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) { status = MGMT_STATUS_BUSY; goto unlock; } cmd = mgmt_pending_add(sk, op, hdev, data, len); if (!cmd) { status = MGMT_STATUS_NO_RESOURCES; goto unlock; } cmd->user_data = m; pending = hci_add_adv_monitor(hdev, m, &err); if (err) { if (err == -ENOSPC || err == -ENOMEM) status = MGMT_STATUS_NO_RESOURCES; else if (err == -EINVAL) status = MGMT_STATUS_INVALID_PARAMS; else status = MGMT_STATUS_FAILED; 
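/* Adding the monitor failed: drop the pending command and report the
 * mapped status through the unlock path below.
 */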
mgmt_pending_remove(cmd); goto unlock; } if (!pending) { mgmt_pending_remove(cmd); rp.monitor_handle = cpu_to_le16(m->handle); mgmt_adv_monitor_added(sk, hdev, m->handle); m->state = ADV_MONITOR_STATE_REGISTERED; hdev->adv_monitors_cnt++; hci_dev_unlock(hdev); return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); } hci_dev_unlock(hdev); return 0; unlock: hci_free_adv_monitor(hdev, m); hci_dev_unlock(hdev); return mgmt_cmd_status(sk, hdev->id, op, status); } static void parse_adv_monitor_rssi(struct adv_monitor *m, struct mgmt_adv_rssi_thresholds *rssi) { if (rssi) { m->rssi.low_threshold = rssi->low_threshold; m->rssi.low_threshold_timeout = __le16_to_cpu(rssi->low_threshold_timeout); m->rssi.high_threshold = rssi->high_threshold; m->rssi.high_threshold_timeout = __le16_to_cpu(rssi->high_threshold_timeout); m->rssi.sampling_period = rssi->sampling_period; } else { /* Default values. These numbers are the least constricting * parameters for MSFT API to work, so it behaves as if there * are no rssi parameter to consider. May need to be changed * if other API are to be supported. */ m->rssi.low_threshold = -127; m->rssi.low_threshold_timeout = 60; m->rssi.high_threshold = -127; m->rssi.high_threshold_timeout = 0; m->rssi.sampling_period = 0; } } static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count, struct mgmt_adv_pattern *patterns) { u8 offset = 0, length = 0; struct adv_pattern *p = NULL; int i; for (i = 0; i < pattern_count; i++) { offset = patterns[i].offset; length = patterns[i].length; if (offset >= HCI_MAX_AD_LENGTH || length > HCI_MAX_AD_LENGTH || (offset + length) > HCI_MAX_AD_LENGTH) return MGMT_STATUS_INVALID_PARAMS; p = kmalloc(sizeof(*p), GFP_KERNEL); if (!p) return MGMT_STATUS_NO_RESOURCES; p->ad_type = patterns[i].ad_type; p->offset = patterns[i].offset; p->length = patterns[i].length; memcpy(p->value, patterns[i].value, p->length); INIT_LIST_HEAD(&p->list); list_add(&p->list, &m->patterns); } return MGMT_STATUS_SUCCESS; } static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_add_adv_patterns_monitor *cp = data; struct adv_monitor *m = NULL; u8 status = MGMT_STATUS_SUCCESS; size_t expected_size = sizeof(*cp); BT_DBG("request for %s", hdev->name); if (len <= sizeof(*cp)) { status = MGMT_STATUS_INVALID_PARAMS; goto done; } expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern); if (len != expected_size) { status = MGMT_STATUS_INVALID_PARAMS; goto done; } m = kzalloc(sizeof(*m), GFP_KERNEL); if (!m) { status = MGMT_STATUS_NO_RESOURCES; goto done; } INIT_LIST_HEAD(&m->patterns); parse_adv_monitor_rssi(m, NULL); status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns); done: return __add_adv_patterns_monitor(sk, hdev, m, status, data, len, MGMT_OP_ADD_ADV_PATTERNS_MONITOR); } static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data; struct adv_monitor *m = NULL; u8 status = MGMT_STATUS_SUCCESS; size_t expected_size = sizeof(*cp); BT_DBG("request for %s", hdev->name); if (len <= sizeof(*cp)) { status = MGMT_STATUS_INVALID_PARAMS; goto done; } expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern); if (len != expected_size) { status = MGMT_STATUS_INVALID_PARAMS; goto done; } m = kzalloc(sizeof(*m), GFP_KERNEL); if (!m) { status = MGMT_STATUS_NO_RESOURCES; goto done; } INIT_LIST_HEAD(&m->patterns); parse_adv_monitor_rssi(m, &cp->rssi); 
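/* The RSSI thresholds come straight from the request; the patterns are
 * still validated below before the monitor is registered.
 */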
status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns); done: return __add_adv_patterns_monitor(sk, hdev, m, status, data, len, MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI); } int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status) { struct mgmt_rp_remove_adv_monitor rp; struct mgmt_cp_remove_adv_monitor *cp; struct mgmt_pending_cmd *cmd; int err = 0; hci_dev_lock(hdev); cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev); if (!cmd) goto done; cp = cmd->param; rp.monitor_handle = cp->monitor_handle; if (!status) hci_update_background_scan(hdev); err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status), &rp, sizeof(rp)); mgmt_pending_remove(cmd); done: hci_dev_unlock(hdev); bt_dev_dbg(hdev, "remove monitor %d complete, status %u", rp.monitor_handle, status); return err; } static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_remove_adv_monitor *cp = data; struct mgmt_rp_remove_adv_monitor rp; struct mgmt_pending_cmd *cmd; u16 handle = __le16_to_cpu(cp->monitor_handle); int err, status; bool pending; BT_DBG("request for %s", hdev->name); rp.monitor_handle = cp->monitor_handle; hci_dev_lock(hdev); if (pending_find(MGMT_OP_SET_LE, hdev) || pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) || pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) || pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) { status = MGMT_STATUS_BUSY; goto unlock; } cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len); if (!cmd) { status = MGMT_STATUS_NO_RESOURCES; goto unlock; } if (handle) pending = hci_remove_single_adv_monitor(hdev, handle, &err); else pending = hci_remove_all_adv_monitor(hdev, &err); if (err) { mgmt_pending_remove(cmd); if (err == -ENOENT) status = MGMT_STATUS_INVALID_INDEX; else status = MGMT_STATUS_FAILED; goto unlock; } /* monitor can be removed without forwarding request to controller */ if (!pending) { mgmt_pending_remove(cmd); hci_dev_unlock(hdev); return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR, MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); } hci_dev_unlock(hdev); return 0; unlock: hci_dev_unlock(hdev); return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR, status); } static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status, u16 opcode, struct sk_buff *skb) { struct mgmt_rp_read_local_oob_data mgmt_rp; size_t rp_size = sizeof(mgmt_rp); struct mgmt_pending_cmd *cmd; bt_dev_dbg(hdev, "status %u", status); cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev); if (!cmd) return; if (status || !skb) { mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status ? 
mgmt_status(status) : MGMT_STATUS_FAILED); goto remove; } memset(&mgmt_rp, 0, sizeof(mgmt_rp)); if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) { struct hci_rp_read_local_oob_data *rp = (void *) skb->data; if (skb->len < sizeof(*rp)) { mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, MGMT_STATUS_FAILED); goto remove; } memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash)); memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand)); rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256); } else { struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data; if (skb->len < sizeof(*rp)) { mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, MGMT_STATUS_FAILED); goto remove; } memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192)); memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192)); memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256)); memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256)); } mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size); remove: mgmt_pending_remove(cmd); } static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_pending_cmd *cmd; struct hci_request req; int err; bt_dev_dbg(hdev, "sock %p", sk); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, MGMT_STATUS_NOT_POWERED); goto unlock; } if (!lmp_ssp_capable(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, MGMT_STATUS_NOT_SUPPORTED); goto unlock; } if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, MGMT_STATUS_BUSY); goto unlock; } cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0); if (!cmd) { err = -ENOMEM; goto unlock; } hci_req_init(&req, hdev); if (bredr_sc_enabled(hdev)) hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL); else hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL); err = hci_req_run_skb(&req, read_local_oob_data_complete); if (err < 0) mgmt_pending_remove(cmd); unlock: hci_dev_unlock(hdev); return err; } static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_addr_info *addr = data; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!bdaddr_type_is_valid(addr->type)) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, MGMT_STATUS_INVALID_PARAMS, addr, sizeof(*addr)); hci_dev_lock(hdev); if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) { struct mgmt_cp_add_remote_oob_data *cp = data; u8 status; if (cp->addr.type != BDADDR_BREDR) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); goto unlock; } err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type, cp->hash, cp->rand, NULL, NULL); if (err < 0) status = MGMT_STATUS_FAILED; else status = MGMT_STATUS_SUCCESS; err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status, &cp->addr, sizeof(cp->addr)); } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) { struct mgmt_cp_add_remote_oob_ext_data *cp = data; u8 *rand192, *hash192, *rand256, *hash256; u8 status; if (bdaddr_type_is_le(cp->addr.type)) { /* Enforce zero-valued 192-bit parameters as * long as legacy SMP OOB isn't implemented. 
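 * Only the P-256 hash and randomizer are stored for LE addresses below.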
*/ if (memcmp(cp->rand192, ZERO_KEY, 16) || memcmp(cp->hash192, ZERO_KEY, 16)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, MGMT_STATUS_INVALID_PARAMS, addr, sizeof(*addr)); goto unlock; } rand192 = NULL; hash192 = NULL; } else { /* In case one of the P-192 values is set to zero, * then just disable OOB data for P-192. */ if (!memcmp(cp->rand192, ZERO_KEY, 16) || !memcmp(cp->hash192, ZERO_KEY, 16)) { rand192 = NULL; hash192 = NULL; } else { rand192 = cp->rand192; hash192 = cp->hash192; } } /* In case one of the P-256 values is set to zero, then just * disable OOB data for P-256. */ if (!memcmp(cp->rand256, ZERO_KEY, 16) || !memcmp(cp->hash256, ZERO_KEY, 16)) { rand256 = NULL; hash256 = NULL; } else { rand256 = cp->rand256; hash256 = cp->hash256; } err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type, hash192, rand192, hash256, rand256); if (err < 0) status = MGMT_STATUS_FAILED; else status = MGMT_STATUS_SUCCESS; err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status, &cp->addr, sizeof(cp->addr)); } else { bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes", len); err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, MGMT_STATUS_INVALID_PARAMS); } unlock: hci_dev_unlock(hdev); return err; } static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_remove_remote_oob_data *cp = data; u8 status; int err; bt_dev_dbg(hdev, "sock %p", sk); if (cp->addr.type != BDADDR_BREDR) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); hci_dev_lock(hdev); if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) { hci_remote_oob_data_clear(hdev); status = MGMT_STATUS_SUCCESS; goto done; } err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type); if (err < 0) status = MGMT_STATUS_INVALID_PARAMS; else status = MGMT_STATUS_SUCCESS; done: err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA, status, &cp->addr, sizeof(cp->addr)); hci_dev_unlock(hdev); return err; } void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status) { struct mgmt_pending_cmd *cmd; bt_dev_dbg(hdev, "status %u", status); hci_dev_lock(hdev); cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev); if (!cmd) cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev); if (!cmd) cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev); if (cmd) { cmd->cmd_complete(cmd, mgmt_status(status)); mgmt_pending_remove(cmd); } hci_dev_unlock(hdev); /* Handle suspend notifier */ if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks)) { bt_dev_dbg(hdev, "Unpaused discovery"); wake_up(&hdev->suspend_wait_q); } } static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type, uint8_t *mgmt_status) { switch (type) { case DISCOV_TYPE_LE: *mgmt_status = mgmt_le_support(hdev); if (*mgmt_status) return false; break; case DISCOV_TYPE_INTERLEAVED: *mgmt_status = mgmt_le_support(hdev); if (*mgmt_status) return false; fallthrough; case DISCOV_TYPE_BREDR: *mgmt_status = mgmt_bredr_support(hdev); if (*mgmt_status) return false; break; default: *mgmt_status = MGMT_STATUS_INVALID_PARAMS; return false; } return true; } static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev, u16 op, void *data, u16 len) { struct mgmt_cp_start_discovery *cp = data; struct mgmt_pending_cmd *cmd; u8 status; int err; bt_dev_dbg(hdev, "sock %p", sk); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { err = mgmt_cmd_complete(sk, 
hdev->id, op, MGMT_STATUS_NOT_POWERED, &cp->type, sizeof(cp->type)); goto failed; } if (hdev->discovery.state != DISCOVERY_STOPPED || hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) { err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY, &cp->type, sizeof(cp->type)); goto failed; } if (!discovery_type_is_valid(hdev, cp->type, &status)) { err = mgmt_cmd_complete(sk, hdev->id, op, status, &cp->type, sizeof(cp->type)); goto failed; } /* Can't start discovery when it is paused */ if (hdev->discovery_paused) { err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY, &cp->type, sizeof(cp->type)); goto failed; } /* Clear the discovery filter first to free any previously * allocated memory for the UUID list. */ hci_discovery_filter_clear(hdev); hdev->discovery.type = cp->type; hdev->discovery.report_invalid_rssi = false; if (op == MGMT_OP_START_LIMITED_DISCOVERY) hdev->discovery.limited = true; else hdev->discovery.limited = false; cmd = mgmt_pending_add(sk, op, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; } cmd->cmd_complete = generic_cmd_complete; hci_discovery_set_state(hdev, DISCOVERY_STARTING); queue_work(hdev->req_workqueue, &hdev->discov_update); err = 0; failed: hci_dev_unlock(hdev); return err; } static int start_discovery(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY, data, len); } static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { return start_discovery_internal(sk, hdev, MGMT_OP_START_LIMITED_DISCOVERY, data, len); } static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status) { return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param, 1); } static int start_service_discovery(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_start_service_discovery *cp = data; struct mgmt_pending_cmd *cmd; const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16); u16 uuid_count, expected_len; u8 status; int err; bt_dev_dbg(hdev, "sock %p", sk); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_SERVICE_DISCOVERY, MGMT_STATUS_NOT_POWERED, &cp->type, sizeof(cp->type)); goto failed; } if (hdev->discovery.state != DISCOVERY_STOPPED || hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_SERVICE_DISCOVERY, MGMT_STATUS_BUSY, &cp->type, sizeof(cp->type)); goto failed; } if (hdev->discovery_paused) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_SERVICE_DISCOVERY, MGMT_STATUS_BUSY, &cp->type, sizeof(cp->type)); goto failed; } uuid_count = __le16_to_cpu(cp->uuid_count); if (uuid_count > max_uuid_count) { bt_dev_err(hdev, "service_discovery: too big uuid_count value %u", uuid_count); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_SERVICE_DISCOVERY, MGMT_STATUS_INVALID_PARAMS, &cp->type, sizeof(cp->type)); goto failed; } expected_len = sizeof(*cp) + uuid_count * 16; if (expected_len != len) { bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes", expected_len, len); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_SERVICE_DISCOVERY, MGMT_STATUS_INVALID_PARAMS, &cp->type, sizeof(cp->type)); goto failed; } if (!discovery_type_is_valid(hdev, cp->type, &status)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_SERVICE_DISCOVERY, status, &cp->type, sizeof(cp->type)); goto failed; } cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY, hdev, data, len); if (!cmd) { err = 
-ENOMEM; goto failed; } cmd->cmd_complete = service_discovery_cmd_complete; /* Clear the discovery filter first to free any previously * allocated memory for the UUID list. */ hci_discovery_filter_clear(hdev); hdev->discovery.result_filtering = true; hdev->discovery.type = cp->type; hdev->discovery.rssi = cp->rssi; hdev->discovery.uuid_count = uuid_count; if (uuid_count > 0) { hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16, GFP_KERNEL); if (!hdev->discovery.uuids) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_SERVICE_DISCOVERY, MGMT_STATUS_FAILED, &cp->type, sizeof(cp->type)); mgmt_pending_remove(cmd); goto failed; } } hci_discovery_set_state(hdev, DISCOVERY_STARTING); queue_work(hdev->req_workqueue, &hdev->discov_update); err = 0; failed: hci_dev_unlock(hdev); return err; } void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status) { struct mgmt_pending_cmd *cmd; bt_dev_dbg(hdev, "status %u", status); hci_dev_lock(hdev); cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev); if (cmd) { cmd->cmd_complete(cmd, mgmt_status(status)); mgmt_pending_remove(cmd); } hci_dev_unlock(hdev); /* Handle suspend notifier */ if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) { bt_dev_dbg(hdev, "Paused discovery"); wake_up(&hdev->suspend_wait_q); } } static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_stop_discovery *mgmt_cp = data; struct mgmt_pending_cmd *cmd; int err; bt_dev_dbg(hdev, "sock %p", sk); hci_dev_lock(hdev); if (!hci_discovery_active(hdev)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, MGMT_STATUS_REJECTED, &mgmt_cp->type, sizeof(mgmt_cp->type)); goto unlock; } if (hdev->discovery.type != mgmt_cp->type) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type, sizeof(mgmt_cp->type)); goto unlock; } cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len); if (!cmd) { err = -ENOMEM; goto unlock; } cmd->cmd_complete = generic_cmd_complete; hci_discovery_set_state(hdev, DISCOVERY_STOPPING); queue_work(hdev->req_workqueue, &hdev->discov_update); err = 0; unlock: hci_dev_unlock(hdev); return err; } static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_confirm_name *cp = data; struct inquiry_entry *e; int err; bt_dev_dbg(hdev, "sock %p", sk); hci_dev_lock(hdev); if (!hci_discovery_active(hdev)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, MGMT_STATUS_FAILED, &cp->addr, sizeof(cp->addr)); goto failed; } e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr); if (!e) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); goto failed; } if (cp->name_known) { e->name_state = NAME_KNOWN; list_del(&e->list); } else { e->name_state = NAME_NEEDED; hci_inquiry_cache_update_resolve(hdev, e); } err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr, sizeof(cp->addr)); failed: hci_dev_unlock(hdev); return err; } static int block_device(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_block_device *cp = data; u8 status; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!bdaddr_type_is_valid(cp->addr.type)) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); hci_dev_lock(hdev); err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr, cp->addr.type); if (err < 0) { status = MGMT_STATUS_FAILED; 
goto done; } mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr), sk); status = MGMT_STATUS_SUCCESS; done: err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status, &cp->addr, sizeof(cp->addr)); hci_dev_unlock(hdev); return err; } static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_unblock_device *cp = data; u8 status; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!bdaddr_type_is_valid(cp->addr.type)) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); hci_dev_lock(hdev); err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr, cp->addr.type); if (err < 0) { status = MGMT_STATUS_INVALID_PARAMS; goto done; } mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr), sk); status = MGMT_STATUS_SUCCESS; done: err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status, &cp->addr, sizeof(cp->addr)); hci_dev_unlock(hdev); return err; } static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_device_id *cp = data; struct hci_request req; int err; __u16 source; bt_dev_dbg(hdev, "sock %p", sk); source = __le16_to_cpu(cp->source); if (source > 0x0002) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); hdev->devid_source = source; hdev->devid_vendor = __le16_to_cpu(cp->vendor); hdev->devid_product = __le16_to_cpu(cp->product); hdev->devid_version = __le16_to_cpu(cp->version); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0); hci_req_init(&req, hdev); __hci_req_update_eir(&req); hci_req_run(&req, NULL); hci_dev_unlock(hdev); return err; } static void enable_advertising_instance(struct hci_dev *hdev, u8 status, u16 opcode) { bt_dev_dbg(hdev, "status %u", status); } static void set_advertising_complete(struct hci_dev *hdev, u8 status, u16 opcode) { struct cmd_lookup match = { NULL, hdev }; struct hci_request req; u8 instance; struct adv_info *adv_instance; int err; hci_dev_lock(hdev); if (status) { u8 mgmt_err = mgmt_status(status); mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, cmd_status_rsp, &mgmt_err); goto unlock; } if (hci_dev_test_flag(hdev, HCI_LE_ADV)) hci_dev_set_flag(hdev, HCI_ADVERTISING); else hci_dev_clear_flag(hdev, HCI_ADVERTISING); mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp, &match); new_settings(hdev, match.sk); if (match.sk) sock_put(match.sk); /* Handle suspend notifier */ if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks)) { bt_dev_dbg(hdev, "Paused advertising"); wake_up(&hdev->suspend_wait_q); } else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING, hdev->suspend_tasks)) { bt_dev_dbg(hdev, "Unpaused advertising"); wake_up(&hdev->suspend_wait_q); } /* If "Set Advertising" was just disabled and instance advertising was * set up earlier, then re-enable multi-instance advertising. 
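 * When no current instance is set, the first registered instance is used.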
*/ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || list_empty(&hdev->adv_instances)) goto unlock; instance = hdev->cur_adv_instance; if (!instance) { adv_instance = list_first_entry_or_null(&hdev->adv_instances, struct adv_info, list); if (!adv_instance) goto unlock; instance = adv_instance->instance; } hci_req_init(&req, hdev); err = __hci_req_schedule_adv_instance(&req, instance, true); if (!err) err = hci_req_run(&req, enable_advertising_instance); if (err) bt_dev_err(hdev, "failed to re-configure advertising"); unlock: hci_dev_unlock(hdev); } static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; struct mgmt_pending_cmd *cmd; struct hci_request req; u8 val, status; int err; bt_dev_dbg(hdev, "sock %p", sk); status = mgmt_le_support(hdev); if (status) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, status); /* Enabling the experimental LL Privacy support disables support for * advertising. */ if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, MGMT_STATUS_NOT_SUPPORTED); if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, MGMT_STATUS_INVALID_PARAMS); if (hdev->advertising_paused) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, MGMT_STATUS_BUSY); hci_dev_lock(hdev); val = !!cp->val; /* The following conditions are ones which mean that we should * not do any HCI communication but directly send a mgmt * response to user space (after toggling the flag if * necessary). */ if (!hdev_is_powered(hdev) || (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) && (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) || hci_conn_num(hdev, LE_LINK) > 0 || (hci_dev_test_flag(hdev, HCI_LE_SCAN) && hdev->le_scan_type == LE_SCAN_ACTIVE)) { bool changed; if (cp->val) { hdev->cur_adv_instance = 0x00; changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING); if (cp->val == 0x02) hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE); else hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE); } else { changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING); hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE); } err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev); if (err < 0) goto unlock; if (changed) err = new_settings(hdev, sk); goto unlock; } if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) || pending_find(MGMT_OP_SET_LE, hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, MGMT_STATUS_BUSY); goto unlock; } cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len); if (!cmd) { err = -ENOMEM; goto unlock; } hci_req_init(&req, hdev); if (cp->val == 0x02) hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE); else hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE); cancel_adv_timeout(hdev); if (val) { /* Switch to instance "0" for the Set Advertising setting. * We cannot use update_[adv|scan_rsp]_data() here as the * HCI_ADVERTISING flag is not yet set. 
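 * The flag is only updated in set_advertising_complete() once the request has finished.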
*/ hdev->cur_adv_instance = 0x00; if (ext_adv_capable(hdev)) { __hci_req_start_ext_adv(&req, 0x00); } else { __hci_req_update_adv_data(&req, 0x00); __hci_req_update_scan_rsp_data(&req, 0x00); __hci_req_enable_advertising(&req); } } else { __hci_req_disable_advertising(&req); } err = hci_req_run(&req, set_advertising_complete); if (err < 0) mgmt_pending_remove(cmd); unlock: hci_dev_unlock(hdev); return err; } static int set_static_address(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_static_address *cp = data; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!lmp_le_capable(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, MGMT_STATUS_NOT_SUPPORTED); if (hdev_is_powered(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, MGMT_STATUS_REJECTED); if (bacmp(&cp->bdaddr, BDADDR_ANY)) { if (!bacmp(&cp->bdaddr, BDADDR_NONE)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, MGMT_STATUS_INVALID_PARAMS); /* Two most significant bits shall be set */ if ((cp->bdaddr.b[5] & 0xc0) != 0xc0) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, MGMT_STATUS_INVALID_PARAMS); } hci_dev_lock(hdev); bacpy(&hdev->static_addr, &cp->bdaddr); err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev); if (err < 0) goto unlock; err = new_settings(hdev, sk); unlock: hci_dev_unlock(hdev); return err; } static int set_scan_params(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_scan_params *cp = data; __u16 interval, window; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!lmp_le_capable(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, MGMT_STATUS_NOT_SUPPORTED); interval = __le16_to_cpu(cp->interval); if (interval < 0x0004 || interval > 0x4000) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, MGMT_STATUS_INVALID_PARAMS); window = __le16_to_cpu(cp->window); if (window < 0x0004 || window > 0x4000) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, MGMT_STATUS_INVALID_PARAMS); if (window > interval) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); hdev->le_scan_interval = interval; hdev->le_scan_window = window; err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0); /* If background scan is running, restart it so new parameters are * loaded. 
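 * Only the passive background scan is restarted here; an active discovery is left untouched.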
*/ if (hci_dev_test_flag(hdev, HCI_LE_SCAN) && hdev->discovery.state == DISCOVERY_STOPPED) { struct hci_request req; hci_req_init(&req, hdev); hci_req_add_le_scan_disable(&req, false); hci_req_add_le_passive_scan(&req); hci_req_run(&req, NULL); } hci_dev_unlock(hdev); return err; } static void fast_connectable_complete(struct hci_dev *hdev, u8 status, u16 opcode) { struct mgmt_pending_cmd *cmd; bt_dev_dbg(hdev, "status 0x%02x", status); hci_dev_lock(hdev); cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev); if (!cmd) goto unlock; if (status) { mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, mgmt_status(status)); } else { struct mgmt_mode *cp = cmd->param; if (cp->val) hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE); else hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE); send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev); new_settings(hdev, cmd->sk); } mgmt_pending_remove(cmd); unlock: hci_dev_unlock(hdev); } static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; struct mgmt_pending_cmd *cmd; struct hci_request req; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) || hdev->hci_ver < BLUETOOTH_VER_1_2) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, MGMT_STATUS_NOT_SUPPORTED); if (cp->val != 0x00 && cp->val != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, MGMT_STATUS_BUSY); goto unlock; } if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) { err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev); goto unlock; } if (!hdev_is_powered(hdev)) { hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE); err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev); new_settings(hdev, sk); goto unlock; } cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data, len); if (!cmd) { err = -ENOMEM; goto unlock; } hci_req_init(&req, hdev); __hci_req_write_fast_connectable(&req, cp->val); err = hci_req_run(&req, fast_connectable_complete); if (err < 0) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, MGMT_STATUS_FAILED); mgmt_pending_remove(cmd); } unlock: hci_dev_unlock(hdev); return err; } static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode) { struct mgmt_pending_cmd *cmd; bt_dev_dbg(hdev, "status 0x%02x", status); hci_dev_lock(hdev); cmd = pending_find(MGMT_OP_SET_BREDR, hdev); if (!cmd) goto unlock; if (status) { u8 mgmt_err = mgmt_status(status); /* We need to restore the flag if related HCI commands * failed. 
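 * The flag was set optimistically in set_bredr() before the request was sent.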
*/ hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED); mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); } else { send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev); new_settings(hdev, cmd->sk); } mgmt_pending_remove(cmd); unlock: hci_dev_unlock(hdev); } static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; struct mgmt_pending_cmd *cmd; struct hci_request req; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, MGMT_STATUS_NOT_SUPPORTED); if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, MGMT_STATUS_REJECTED); if (cp->val != 0x00 && cp->val != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev); goto unlock; } if (!hdev_is_powered(hdev)) { if (!cp->val) { hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); hci_dev_clear_flag(hdev, HCI_SSP_ENABLED); hci_dev_clear_flag(hdev, HCI_LINK_SECURITY); hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE); hci_dev_clear_flag(hdev, HCI_HS_ENABLED); } hci_dev_change_flag(hdev, HCI_BREDR_ENABLED); err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev); if (err < 0) goto unlock; err = new_settings(hdev, sk); goto unlock; } /* Reject disabling when powered on */ if (!cp->val) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, MGMT_STATUS_REJECTED); goto unlock; } else { /* When a dual-mode controller is configured to operate with LE only * and uses a static address, switching BR/EDR back on is not allowed. * * Dual-mode controllers shall operate with the public address as their * identity address for both BR/EDR and LE. So reject the attempt to * create an invalid configuration. * * The same restriction applies when Secure Connections has been * enabled. For BR/EDR this is a controller feature while for LE it is * a host stack feature. This means that switching BR/EDR back on while * Secure Connections is enabled is not a supported transaction. */ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && (bacmp(&hdev->static_addr, BDADDR_ANY) || hci_dev_test_flag(hdev, HCI_SC_ENABLED))) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, MGMT_STATUS_REJECTED); goto unlock; } } if (pending_find(MGMT_OP_SET_BREDR, hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, MGMT_STATUS_BUSY); goto unlock; } cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len); if (!cmd) { err = -ENOMEM; goto unlock; } /* We need to flip the bit already here so that * hci_req_update_adv_data generates the correct flags. */ hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); hci_req_init(&req, hdev); __hci_req_write_fast_connectable(&req, false); __hci_req_update_scan(&req); /* Since only the advertising data flags will change, there * is no need to update the scan response data. 
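 * (The AD Flags field encodes BR/EDR support, which is what changed here.)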
*/ __hci_req_update_adv_data(&req, hdev->cur_adv_instance); err = hci_req_run(&req, set_bredr_complete); if (err < 0) mgmt_pending_remove(cmd); unlock: hci_dev_unlock(hdev); return err; } static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode) { struct mgmt_pending_cmd *cmd; struct mgmt_mode *cp; bt_dev_dbg(hdev, "status %u", status); hci_dev_lock(hdev); cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev); if (!cmd) goto unlock; if (status) { mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status)); goto remove; } cp = cmd->param; switch (cp->val) { case 0x00: hci_dev_clear_flag(hdev, HCI_SC_ENABLED); hci_dev_clear_flag(hdev, HCI_SC_ONLY); break; case 0x01: hci_dev_set_flag(hdev, HCI_SC_ENABLED); hci_dev_clear_flag(hdev, HCI_SC_ONLY); break; case 0x02: hci_dev_set_flag(hdev, HCI_SC_ENABLED); hci_dev_set_flag(hdev, HCI_SC_ONLY); break; } send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev); new_settings(hdev, cmd->sk); remove: mgmt_pending_remove(cmd); unlock: hci_dev_unlock(hdev); } static int set_secure_conn(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; struct mgmt_pending_cmd *cmd; struct hci_request req; u8 val; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!lmp_sc_capable(hdev) && !hci_dev_test_flag(hdev, HCI_LE_ENABLED)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, MGMT_STATUS_NOT_SUPPORTED); if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && lmp_sc_capable(hdev) && !hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, MGMT_STATUS_REJECTED); if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) || !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { bool changed; if (cp->val) { changed = !hci_dev_test_and_set_flag(hdev, HCI_SC_ENABLED); if (cp->val == 0x02) hci_dev_set_flag(hdev, HCI_SC_ONLY); else hci_dev_clear_flag(hdev, HCI_SC_ONLY); } else { changed = hci_dev_test_and_clear_flag(hdev, HCI_SC_ENABLED); hci_dev_clear_flag(hdev, HCI_SC_ONLY); } err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev); if (err < 0) goto failed; if (changed) err = new_settings(hdev, sk); goto failed; } if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, MGMT_STATUS_BUSY); goto failed; } val = !!cp->val; if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) && (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) { err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev); goto failed; } cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len); if (!cmd) { err = -ENOMEM; goto failed; } hci_req_init(&req, hdev); hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val); err = hci_req_run(&req, sc_enable_complete); if (err < 0) { mgmt_pending_remove(cmd); goto failed; } failed: hci_dev_unlock(hdev); return err; } static int set_debug_keys(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_mode *cp = data; bool changed, use_changed; int err; bt_dev_dbg(hdev, "sock %p", sk); if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (cp->val) changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS); else changed = hci_dev_test_and_clear_flag(hdev, HCI_KEEP_DEBUG_KEYS); if (cp->val == 0x02) 
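/* A value of 0x02 requests that debug keys are actually used for pairing, not just retained. */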
use_changed = !hci_dev_test_and_set_flag(hdev, HCI_USE_DEBUG_KEYS); else use_changed = hci_dev_test_and_clear_flag(hdev, HCI_USE_DEBUG_KEYS); if (hdev_is_powered(hdev) && use_changed && hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) { u8 mode = (cp->val == 0x02) ? 0x01 : 0x00; hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode), &mode); } err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev); if (err < 0) goto unlock; if (changed) err = new_settings(hdev, sk); unlock: hci_dev_unlock(hdev); return err; } static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data, u16 len) { struct mgmt_cp_set_privacy *cp = cp_data; bool changed; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!lmp_le_capable(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY, MGMT_STATUS_NOT_SUPPORTED); if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY, MGMT_STATUS_INVALID_PARAMS); if (hdev_is_powered(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY, MGMT_STATUS_REJECTED); hci_dev_lock(hdev); /* If user space supports this command it is also expected to * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag. */ hci_dev_set_flag(hdev, HCI_RPA_RESOLVING); if (cp->privacy) { changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY); memcpy(hdev->irk, cp->irk, sizeof(hdev->irk)); hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); hci_adv_instances_set_rpa_expired(hdev, true); if (cp->privacy == 0x02) hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY); else hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY); } else { changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY); memset(hdev->irk, 0, sizeof(hdev->irk)); hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED); hci_adv_instances_set_rpa_expired(hdev, false); hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY); } err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev); if (err < 0) goto unlock; if (changed) err = new_settings(hdev, sk); unlock: hci_dev_unlock(hdev); return err; } static bool irk_is_valid(struct mgmt_irk_info *irk) { switch (irk->addr.type) { case BDADDR_LE_PUBLIC: return true; case BDADDR_LE_RANDOM: /* Two most significant bits shall be set */ if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0) return false; return true; } return false; } static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data, u16 len) { struct mgmt_cp_load_irks *cp = cp_data; const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) / sizeof(struct mgmt_irk_info)); u16 irk_count, expected_len; int i, err; bt_dev_dbg(hdev, "sock %p", sk); if (!lmp_le_capable(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS, MGMT_STATUS_NOT_SUPPORTED); irk_count = __le16_to_cpu(cp->irk_count); if (irk_count > max_irk_count) { bt_dev_err(hdev, "load_irks: too big irk_count value %u", irk_count); return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS, MGMT_STATUS_INVALID_PARAMS); } expected_len = struct_size(cp, irks, irk_count); if (expected_len != len) { bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes", expected_len, len); return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS, MGMT_STATUS_INVALID_PARAMS); } bt_dev_dbg(hdev, "irk_count %u", irk_count); for (i = 0; i < irk_count; i++) { struct mgmt_irk_info *key = &cp->irks[i]; if (!irk_is_valid(key)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS, MGMT_STATUS_INVALID_PARAMS); } hci_dev_lock(hdev); hci_smp_irks_clear(hdev); for (i = 0; i < irk_count; i++) { struct mgmt_irk_info *irk = &cp->irks[i]; if (hci_is_blocked_key(hdev, 
HCI_BLOCKED_KEY_TYPE_IRK, irk->val)) { bt_dev_warn(hdev, "Skipping blocked IRK for %pMR", &irk->addr.bdaddr); continue; } hci_add_irk(hdev, &irk->addr.bdaddr, le_addr_type(irk->addr.type), irk->val, BDADDR_ANY); } hci_dev_set_flag(hdev, HCI_RPA_RESOLVING); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0); hci_dev_unlock(hdev); return err; } static bool ltk_is_valid(struct mgmt_ltk_info *key) { if (key->initiator != 0x00 && key->initiator != 0x01) return false; switch (key->addr.type) { case BDADDR_LE_PUBLIC: return true; case BDADDR_LE_RANDOM: /* Two most significant bits shall be set */ if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0) return false; return true; } return false; } static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, void *cp_data, u16 len) { struct mgmt_cp_load_long_term_keys *cp = cp_data; const u16 max_key_count = ((U16_MAX - sizeof(*cp)) / sizeof(struct mgmt_ltk_info)); u16 key_count, expected_len; int i, err; bt_dev_dbg(hdev, "sock %p", sk); if (!lmp_le_capable(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, MGMT_STATUS_NOT_SUPPORTED); key_count = __le16_to_cpu(cp->key_count); if (key_count > max_key_count) { bt_dev_err(hdev, "load_ltks: too big key_count value %u", key_count); return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, MGMT_STATUS_INVALID_PARAMS); } expected_len = struct_size(cp, keys, key_count); if (expected_len != len) { bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes", expected_len, len); return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, MGMT_STATUS_INVALID_PARAMS); } bt_dev_dbg(hdev, "key_count %u", key_count); hci_dev_lock(hdev); hci_smp_ltks_clear(hdev); for (i = 0; i < key_count; i++) { struct mgmt_ltk_info *key = &cp->keys[i]; u8 type, authenticated; if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK, key->val)) { bt_dev_warn(hdev, "Skipping blocked LTK for %pMR", &key->addr.bdaddr); continue; } if (!ltk_is_valid(key)) { bt_dev_warn(hdev, "Invalid LTK for %pMR", &key->addr.bdaddr); continue; } switch (key->type) { case MGMT_LTK_UNAUTHENTICATED: authenticated = 0x00; type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER; break; case MGMT_LTK_AUTHENTICATED: authenticated = 0x01; type = key->initiator ? 
SMP_LTK : SMP_LTK_RESPONDER; break; case MGMT_LTK_P256_UNAUTH: authenticated = 0x00; type = SMP_LTK_P256; break; case MGMT_LTK_P256_AUTH: authenticated = 0x01; type = SMP_LTK_P256; break; case MGMT_LTK_P256_DEBUG: authenticated = 0x00; type = SMP_LTK_P256_DEBUG; fallthrough; default: continue; } hci_add_ltk(hdev, &key->addr.bdaddr, le_addr_type(key->addr.type), type, authenticated, key->val, key->enc_size, key->ediv, key->rand); } err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0, NULL, 0); hci_dev_unlock(hdev); return err; } static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status) { struct hci_conn *conn = cmd->user_data; struct mgmt_rp_get_conn_info rp; int err; memcpy(&rp.addr, cmd->param, sizeof(rp.addr)); if (status == MGMT_STATUS_SUCCESS) { rp.rssi = conn->rssi; rp.tx_power = conn->tx_power; rp.max_tx_power = conn->max_tx_power; } else { rp.rssi = HCI_RSSI_INVALID; rp.tx_power = HCI_TX_POWER_INVALID; rp.max_tx_power = HCI_TX_POWER_INVALID; } err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status, &rp, sizeof(rp)); hci_conn_drop(conn); hci_conn_put(conn); return err; } static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status, u16 opcode) { struct hci_cp_read_rssi *cp; struct mgmt_pending_cmd *cmd; struct hci_conn *conn; u16 handle; u8 status; bt_dev_dbg(hdev, "status 0x%02x", hci_status); hci_dev_lock(hdev); /* Commands sent in request are either Read RSSI or Read Transmit Power * Level so we check which one was last sent to retrieve connection * handle. Both commands have handle as first parameter so it's safe to * cast data on the same command struct. * * First command sent is always Read RSSI and we fail only if it fails. * In other case we simply override error to indicate success as we * already remembered if TX power value is actually valid. 
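	 * If only the TX power read failed, conn->tx_power simply keeps its
	 * previous value (HCI_TX_POWER_INVALID when it was never read) and
	 * is reported back as such.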
*/ cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI); if (!cp) { cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER); status = MGMT_STATUS_SUCCESS; } else { status = mgmt_status(hci_status); } if (!cp) { bt_dev_err(hdev, "invalid sent_cmd in conn_info response"); goto unlock; } handle = __le16_to_cpu(cp->handle); conn = hci_conn_hash_lookup_handle(hdev, handle); if (!conn) { bt_dev_err(hdev, "unknown handle (%u) in conn_info response", handle); goto unlock; } cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn); if (!cmd) goto unlock; cmd->cmd_complete(cmd, status); mgmt_pending_remove(cmd); unlock: hci_dev_unlock(hdev); } static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_get_conn_info *cp = data; struct mgmt_rp_get_conn_info rp; struct hci_conn *conn; unsigned long conn_info_age; int err = 0; bt_dev_dbg(hdev, "sock %p", sk); memset(&rp, 0, sizeof(rp)); bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); rp.addr.type = cp->addr.type; if (!bdaddr_type_is_valid(cp->addr.type)) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, MGMT_STATUS_INVALID_PARAMS, &rp, sizeof(rp)); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp)); goto unlock; } if (cp->addr.type == BDADDR_BREDR) conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); else conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); if (!conn || conn->state != BT_CONNECTED) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp)); goto unlock; } if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, MGMT_STATUS_BUSY, &rp, sizeof(rp)); goto unlock; } /* To avoid client trying to guess when to poll again for information we * calculate conn info age as random value between min/max set in hdev. */ conn_info_age = hdev->conn_info_min_age + prandom_u32_max(hdev->conn_info_max_age - hdev->conn_info_min_age); /* Query controller to refresh cached values if they are too old or were * never read. */ if (time_after(jiffies, conn->conn_info_timestamp + msecs_to_jiffies(conn_info_age)) || !conn->conn_info_timestamp) { struct hci_request req; struct hci_cp_read_tx_power req_txp_cp; struct hci_cp_read_rssi req_rssi_cp; struct mgmt_pending_cmd *cmd; hci_req_init(&req, hdev); req_rssi_cp.handle = cpu_to_le16(conn->handle); hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp), &req_rssi_cp); /* For LE links TX power does not change thus we don't need to * query for it once value is known. 
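	 * The type field of the Read Transmit Power Level command selects
	 * which value is returned: 0x00 for the current level and 0x01 for
	 * the maximum level (read separately below).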
*/ if (!bdaddr_type_is_le(cp->addr.type) || conn->tx_power == HCI_TX_POWER_INVALID) { req_txp_cp.handle = cpu_to_le16(conn->handle); req_txp_cp.type = 0x00; hci_req_add(&req, HCI_OP_READ_TX_POWER, sizeof(req_txp_cp), &req_txp_cp); } /* Max TX power needs to be read only once per connection */ if (conn->max_tx_power == HCI_TX_POWER_INVALID) { req_txp_cp.handle = cpu_to_le16(conn->handle); req_txp_cp.type = 0x01; hci_req_add(&req, HCI_OP_READ_TX_POWER, sizeof(req_txp_cp), &req_txp_cp); } err = hci_req_run(&req, conn_info_refresh_complete); if (err < 0) goto unlock; cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev, data, len); if (!cmd) { err = -ENOMEM; goto unlock; } hci_conn_hold(conn); cmd->user_data = hci_conn_get(conn); cmd->cmd_complete = conn_info_cmd_complete; conn->conn_info_timestamp = jiffies; } else { /* Cache is valid, just reply with values cached in hci_conn */ rp.rssi = conn->rssi; rp.tx_power = conn->tx_power; rp.max_tx_power = conn->max_tx_power; err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); } unlock: hci_dev_unlock(hdev); return err; } static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status) { struct hci_conn *conn = cmd->user_data; struct mgmt_rp_get_clock_info rp; struct hci_dev *hdev; int err; memset(&rp, 0, sizeof(rp)); memcpy(&rp.addr, cmd->param, sizeof(rp.addr)); if (status) goto complete; hdev = hci_dev_get(cmd->index); if (hdev) { rp.local_clock = cpu_to_le32(hdev->clock); hci_dev_put(hdev); } if (conn) { rp.piconet_clock = cpu_to_le32(conn->clock); rp.accuracy = cpu_to_le16(conn->clock_accuracy); } complete: err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp, sizeof(rp)); if (conn) { hci_conn_drop(conn); hci_conn_put(conn); } return err; } static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode) { struct hci_cp_read_clock *hci_cp; struct mgmt_pending_cmd *cmd; struct hci_conn *conn; bt_dev_dbg(hdev, "status %u", status); hci_dev_lock(hdev); hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK); if (!hci_cp) goto unlock; if (hci_cp->which) { u16 handle = __le16_to_cpu(hci_cp->handle); conn = hci_conn_hash_lookup_handle(hdev, handle); } else { conn = NULL; } cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn); if (!cmd) goto unlock; cmd->cmd_complete(cmd, mgmt_status(status)); mgmt_pending_remove(cmd); unlock: hci_dev_unlock(hdev); } static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_get_clock_info *cp = data; struct mgmt_rp_get_clock_info rp; struct hci_cp_read_clock hci_cp; struct mgmt_pending_cmd *cmd; struct hci_request req; struct hci_conn *conn; int err; bt_dev_dbg(hdev, "sock %p", sk); memset(&rp, 0, sizeof(rp)); bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); rp.addr.type = cp->addr.type; if (cp->addr.type != BDADDR_BREDR) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO, MGMT_STATUS_INVALID_PARAMS, &rp, sizeof(rp)); hci_dev_lock(hdev); if (!hdev_is_powered(hdev)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO, MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp)); goto unlock; } if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) { conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); if (!conn || conn->state != BT_CONNECTED) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO, MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp)); goto unlock; } } else { conn = NULL; } cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len); if (!cmd) { err = -ENOMEM; 
goto unlock; } cmd->cmd_complete = clock_info_cmd_complete; hci_req_init(&req, hdev); memset(&hci_cp, 0, sizeof(hci_cp)); hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp); if (conn) { hci_conn_hold(conn); cmd->user_data = hci_conn_get(conn); hci_cp.handle = cpu_to_le16(conn->handle); hci_cp.which = 0x01; /* Piconet clock */ hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp); } err = hci_req_run(&req, get_clock_info_complete); if (err < 0) mgmt_pending_remove(cmd); unlock: hci_dev_unlock(hdev); return err; } static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type) { struct hci_conn *conn; conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr); if (!conn) return false; if (conn->dst_type != type) return false; if (conn->state != BT_CONNECTED) return false; return true; } /* This function requires the caller holds hdev->lock */ static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type, u8 auto_connect) { struct hci_conn_params *params; params = hci_conn_params_add(hdev, addr, addr_type); if (!params) return -EIO; if (params->auto_connect == auto_connect) return 0; list_del_init(¶ms->action); switch (auto_connect) { case HCI_AUTO_CONN_DISABLED: case HCI_AUTO_CONN_LINK_LOSS: /* If auto connect is being disabled when we're trying to * connect to device, keep connecting. */ if (params->explicit_connect) list_add(¶ms->action, &hdev->pend_le_conns); break; case HCI_AUTO_CONN_REPORT: if (params->explicit_connect) list_add(¶ms->action, &hdev->pend_le_conns); else list_add(¶ms->action, &hdev->pend_le_reports); break; case HCI_AUTO_CONN_DIRECT: case HCI_AUTO_CONN_ALWAYS: if (!is_connected(hdev, addr, addr_type)) list_add(¶ms->action, &hdev->pend_le_conns); break; } params->auto_connect = auto_connect; bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u", addr, addr_type, auto_connect); return 0; } static void device_added(struct sock *sk, struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type, u8 action) { struct mgmt_ev_device_added ev; bacpy(&ev.addr.bdaddr, bdaddr); ev.addr.type = type; ev.action = action; mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk); } static int add_device(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_add_device *cp = data; u8 auto_conn, addr_type; struct hci_conn_params *params; int err; u32 current_flags = 0; bt_dev_dbg(hdev, "sock %p", sk); if (!bdaddr_type_is_valid(cp->addr.type) || !bacmp(&cp->addr.bdaddr, BDADDR_ANY)) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02) return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); hci_dev_lock(hdev); if (cp->addr.type == BDADDR_BREDR) { /* Only incoming connections action is supported for now */ if (cp->action != 0x01) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); goto unlock; } err = hci_bdaddr_list_add_with_flags(&hdev->accept_list, &cp->addr.bdaddr, cp->addr.type, 0); if (err) goto unlock; hci_req_update_scan(hdev); goto added; } addr_type = le_addr_type(cp->addr.type); if (cp->action == 0x02) auto_conn = HCI_AUTO_CONN_ALWAYS; else if (cp->action == 0x01) auto_conn = HCI_AUTO_CONN_DIRECT; else auto_conn = HCI_AUTO_CONN_REPORT; /* Kernel internally uses conn_params with resolvable private * address, but Add Device allows only identity addresses. 
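	 * (An identity address is either a public address or a static
	 * random address, i.e. one with its two most significant bits set.)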
* Make sure it is enforced before calling * hci_conn_params_lookup. */ if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); goto unlock; } /* If the connection parameters don't exist for this device, * they will be created and configured with defaults. */ if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type, auto_conn) < 0) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, MGMT_STATUS_FAILED, &cp->addr, sizeof(cp->addr)); goto unlock; } else { params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type); if (params) current_flags = params->current_flags; } hci_update_background_scan(hdev); added: device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action); device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type, SUPPORTED_DEVICE_FLAGS(), current_flags); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr)); unlock: hci_dev_unlock(hdev); return err; } static void device_removed(struct sock *sk, struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) { struct mgmt_ev_device_removed ev; bacpy(&ev.addr.bdaddr, bdaddr); ev.addr.type = type; mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk); } static int remove_device(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_remove_device *cp = data; int err; bt_dev_dbg(hdev, "sock %p", sk); hci_dev_lock(hdev); if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) { struct hci_conn_params *params; u8 addr_type; if (!bdaddr_type_is_valid(cp->addr.type)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); goto unlock; } if (cp->addr.type == BDADDR_BREDR) { err = hci_bdaddr_list_del(&hdev->accept_list, &cp->addr.bdaddr, cp->addr.type); if (err) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); goto unlock; } hci_req_update_scan(hdev); device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type); goto complete; } addr_type = le_addr_type(cp->addr.type); /* Kernel internally uses conn_params with resolvable private * address, but Remove Device allows only identity addresses. * Make sure it is enforced before calling * hci_conn_params_lookup. 
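	 * (Same identity address rule as in add_device() above.)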
*/ if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); goto unlock; } params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type); if (!params) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); goto unlock; } if (params->auto_connect == HCI_AUTO_CONN_DISABLED || params->auto_connect == HCI_AUTO_CONN_EXPLICIT) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); goto unlock; } list_del(¶ms->action); list_del(¶ms->list); kfree(params); hci_update_background_scan(hdev); device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type); } else { struct hci_conn_params *p, *tmp; struct bdaddr_list *b, *btmp; if (cp->addr.type) { err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE, MGMT_STATUS_INVALID_PARAMS, &cp->addr, sizeof(cp->addr)); goto unlock; } list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) { device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type); list_del(&b->list); kfree(b); } hci_req_update_scan(hdev); list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) { if (p->auto_connect == HCI_AUTO_CONN_DISABLED) continue; device_removed(sk, hdev, &p->addr, p->addr_type); if (p->explicit_connect) { p->auto_connect = HCI_AUTO_CONN_EXPLICIT; continue; } list_del(&p->action); list_del(&p->list); kfree(p); } bt_dev_dbg(hdev, "All LE connection parameters were removed"); hci_update_background_scan(hdev); } complete: err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE, MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr)); unlock: hci_dev_unlock(hdev); return err; } static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_load_conn_param *cp = data; const u16 max_param_count = ((U16_MAX - sizeof(*cp)) / sizeof(struct mgmt_conn_param)); u16 param_count, expected_len; int i; if (!lmp_le_capable(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, MGMT_STATUS_NOT_SUPPORTED); param_count = __le16_to_cpu(cp->param_count); if (param_count > max_param_count) { bt_dev_err(hdev, "load_conn_param: too big param_count value %u", param_count); return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, MGMT_STATUS_INVALID_PARAMS); } expected_len = struct_size(cp, params, param_count); if (expected_len != len) { bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes", expected_len, len); return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, MGMT_STATUS_INVALID_PARAMS); } bt_dev_dbg(hdev, "param_count %u", param_count); hci_dev_lock(hdev); hci_conn_params_clear_disabled(hdev); for (i = 0; i < param_count; i++) { struct mgmt_conn_param *param = &cp->params[i]; struct hci_conn_params *hci_param; u16 min, max, latency, timeout; u8 addr_type; bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr, param->addr.type); if (param->addr.type == BDADDR_LE_PUBLIC) { addr_type = ADDR_LE_DEV_PUBLIC; } else if (param->addr.type == BDADDR_LE_RANDOM) { addr_type = ADDR_LE_DEV_RANDOM; } else { bt_dev_err(hdev, "ignoring invalid connection parameters"); continue; } min = le16_to_cpu(param->min_interval); max = le16_to_cpu(param->max_interval); latency = le16_to_cpu(param->latency); timeout = le16_to_cpu(param->timeout); bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x", min, max, latency, timeout); if (hci_check_conn_params(min, 
max, latency, timeout) < 0) { bt_dev_err(hdev, "ignoring invalid connection parameters"); continue; } hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr, addr_type); if (!hci_param) { bt_dev_err(hdev, "failed to add connection parameters"); continue; } hci_param->conn_min_interval = min; hci_param->conn_max_interval = max; hci_param->conn_latency = latency; hci_param->supervision_timeout = timeout; } hci_dev_unlock(hdev); return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0); } static int set_external_config(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_external_config *cp = data; bool changed; int err; bt_dev_dbg(hdev, "sock %p", sk); if (hdev_is_powered(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG, MGMT_STATUS_REJECTED); if (cp->config != 0x00 && cp->config != 0x01) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG, MGMT_STATUS_INVALID_PARAMS); if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG, MGMT_STATUS_NOT_SUPPORTED); hci_dev_lock(hdev); if (cp->config) changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED); else changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED); err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev); if (err < 0) goto unlock; if (!changed) goto unlock; err = new_options(hdev, sk); if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) { mgmt_index_removed(hdev); if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) { hci_dev_set_flag(hdev, HCI_CONFIG); hci_dev_set_flag(hdev, HCI_AUTO_OFF); queue_work(hdev->req_workqueue, &hdev->power_on); } else { set_bit(HCI_RAW, &hdev->flags); mgmt_index_added(hdev); } } unlock: hci_dev_unlock(hdev); return err; } static int set_public_address(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { struct mgmt_cp_set_public_address *cp = data; bool changed; int err; bt_dev_dbg(hdev, "sock %p", sk); if (hdev_is_powered(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS, MGMT_STATUS_REJECTED); if (!bacmp(&cp->bdaddr, BDADDR_ANY)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS, MGMT_STATUS_INVALID_PARAMS); if (!hdev->set_bdaddr) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS, MGMT_STATUS_NOT_SUPPORTED); hci_dev_lock(hdev); changed = !!bacmp(&hdev->public_addr, &cp->bdaddr); bacpy(&hdev->public_addr, &cp->bdaddr); err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev); if (err < 0) goto unlock; if (!changed) goto unlock; if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) err = new_options(hdev, sk); if (is_configured(hdev)) { mgmt_index_removed(hdev); hci_dev_clear_flag(hdev, HCI_UNCONFIGURED); hci_dev_set_flag(hdev, HCI_CONFIG); hci_dev_set_flag(hdev, HCI_AUTO_OFF); queue_work(hdev->req_workqueue, &hdev->power_on); } unlock: hci_dev_unlock(hdev); return err; } static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status, u16 opcode, struct sk_buff *skb) { const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp; struct mgmt_rp_read_local_oob_ext_data *mgmt_rp; u8 *h192, *r192, *h256, *r256; struct mgmt_pending_cmd *cmd; u16 eir_len; int err; bt_dev_dbg(hdev, "status %u", status); cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev); if (!cmd) return; mgmt_cp = cmd->param; if (status) { status = mgmt_status(status); eir_len = 0; h192 = NULL; r192 = NULL; h256 = NULL; r256 = NULL; } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) { struct 
hci_rp_read_local_oob_data *rp; if (skb->len != sizeof(*rp)) { status = MGMT_STATUS_FAILED; eir_len = 0; } else { status = MGMT_STATUS_SUCCESS; rp = (void *)skb->data; eir_len = 5 + 18 + 18; h192 = rp->hash; r192 = rp->rand; h256 = NULL; r256 = NULL; } } else { struct hci_rp_read_local_oob_ext_data *rp; if (skb->len != sizeof(*rp)) { status = MGMT_STATUS_FAILED; eir_len = 0; } else { status = MGMT_STATUS_SUCCESS; rp = (void *)skb->data; if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) { eir_len = 5 + 18 + 18; h192 = NULL; r192 = NULL; } else { eir_len = 5 + 18 + 18 + 18 + 18; h192 = rp->hash192; r192 = rp->rand192; } h256 = rp->hash256; r256 = rp->rand256; } } mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL); if (!mgmt_rp) goto done; if (eir_len == 0) goto send_rsp; eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV, hdev->dev_class, 3); if (h192 && r192) { eir_len = eir_append_data(mgmt_rp->eir, eir_len, EIR_SSP_HASH_C192, h192, 16); eir_len = eir_append_data(mgmt_rp->eir, eir_len, EIR_SSP_RAND_R192, r192, 16); } if (h256 && r256) { eir_len = eir_append_data(mgmt_rp->eir, eir_len, EIR_SSP_HASH_C256, h256, 16); eir_len = eir_append_data(mgmt_rp->eir, eir_len, EIR_SSP_RAND_R256, r256, 16); } send_rsp: mgmt_rp->type = mgmt_cp->type; mgmt_rp->eir_len = cpu_to_le16(eir_len); err = mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status, mgmt_rp, sizeof(*mgmt_rp) + eir_len); if (err < 0 || status) goto done; hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS); err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev, mgmt_rp, sizeof(*mgmt_rp) + eir_len, HCI_MGMT_OOB_DATA_EVENTS, cmd->sk); done: kfree(mgmt_rp); mgmt_pending_remove(cmd); } static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk, struct mgmt_cp_read_local_oob_ext_data *cp) { struct mgmt_pending_cmd *cmd; struct hci_request req; int err; cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev, cp, sizeof(*cp)); if (!cmd) return -ENOMEM; hci_req_init(&req, hdev); if (bredr_sc_enabled(hdev)) hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL); else hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL); err = hci_req_run_skb(&req, read_local_oob_ext_data_complete); if (err < 0) { mgmt_pending_remove(cmd); return err; } return 0; } static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_cp_read_local_oob_ext_data *cp = data; struct mgmt_rp_read_local_oob_ext_data *rp; size_t rp_len; u16 eir_len; u8 status, flags, role, addr[7], hash[16], rand[16]; int err; bt_dev_dbg(hdev, "sock %p", sk); if (hdev_is_powered(hdev)) { switch (cp->type) { case BIT(BDADDR_BREDR): status = mgmt_bredr_support(hdev); if (status) eir_len = 0; else eir_len = 5; break; case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)): status = mgmt_le_support(hdev); if (status) eir_len = 0; else eir_len = 9 + 3 + 18 + 18 + 3; break; default: status = MGMT_STATUS_INVALID_PARAMS; eir_len = 0; break; } } else { status = MGMT_STATUS_NOT_POWERED; eir_len = 0; } rp_len = sizeof(*rp) + eir_len; rp = kmalloc(rp_len, GFP_ATOMIC); if (!rp) return -ENOMEM; if (status) goto complete; hci_dev_lock(hdev); eir_len = 0; switch (cp->type) { case BIT(BDADDR_BREDR): if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) { err = read_local_ssp_oob_req(hdev, sk, cp); hci_dev_unlock(hdev); if (!err) goto done; status = MGMT_STATUS_FAILED; goto complete; } else { eir_len = eir_append_data(rp->eir, eir_len, EIR_CLASS_OF_DEV, hdev->dev_class, 3); } break; case 
(BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)): if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) && smp_generate_oob(hdev, hash, rand) < 0) { hci_dev_unlock(hdev); status = MGMT_STATUS_FAILED; goto complete; } /* This should return the active RPA, but since the RPA * is only programmed on demand, it is really hard to fill * this in at the moment. For now disallow retrieving * local out-of-band data when privacy is in use. * * Returning the identity address will not help here since * pairing happens before the identity resolving key is * known and thus the connection establishment happens * based on the RPA and not the identity address. */ if (hci_dev_test_flag(hdev, HCI_PRIVACY)) { hci_dev_unlock(hdev); status = MGMT_STATUS_REJECTED; goto complete; } if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || !bacmp(&hdev->bdaddr, BDADDR_ANY) || (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && bacmp(&hdev->static_addr, BDADDR_ANY))) { memcpy(addr, &hdev->static_addr, 6); addr[6] = 0x01; } else { memcpy(addr, &hdev->bdaddr, 6); addr[6] = 0x00; } eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR, addr, sizeof(addr)); if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) role = 0x02; else role = 0x01; eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE, &role, sizeof(role)); if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) { eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_SC_CONFIRM, hash, sizeof(hash)); eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_SC_RANDOM, rand, sizeof(rand)); } flags = mgmt_get_adv_discov_flags(hdev); if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) flags |= LE_AD_NO_BREDR; eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS, &flags, sizeof(flags)); break; } hci_dev_unlock(hdev); hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS); status = MGMT_STATUS_SUCCESS; complete: rp->type = cp->type; rp->eir_len = cpu_to_le16(eir_len); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status, rp, sizeof(*rp) + eir_len); if (err < 0 || status) goto done; err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev, rp, sizeof(*rp) + eir_len, HCI_MGMT_OOB_DATA_EVENTS, sk); done: kfree(rp); return err; } static u32 get_supported_adv_flags(struct hci_dev *hdev) { u32 flags = 0; flags |= MGMT_ADV_FLAG_CONNECTABLE; flags |= MGMT_ADV_FLAG_DISCOV; flags |= MGMT_ADV_FLAG_LIMITED_DISCOV; flags |= MGMT_ADV_FLAG_MANAGED_FLAGS; flags |= MGMT_ADV_FLAG_APPEARANCE; flags |= MGMT_ADV_FLAG_LOCAL_NAME; flags |= MGMT_ADV_PARAM_DURATION; flags |= MGMT_ADV_PARAM_TIMEOUT; flags |= MGMT_ADV_PARAM_INTERVALS; flags |= MGMT_ADV_PARAM_TX_POWER; flags |= MGMT_ADV_PARAM_SCAN_RSP; /* In extended adv TX_POWER returned from Set Adv Param * will be always valid. 
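	 * With legacy advertising the controller's advertising TX power is
	 * only known once it has been read during init (hdev->adv_tx_power),
	 * while the extended advertising parameter command reports the
	 * selected TX power itself, hence the two conditions below.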
 */
	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
	    ext_adv_capable(hdev))
		flags |= MGMT_ADV_FLAG_TX_POWER;

	if (ext_adv_capable(hdev)) {
		flags |= MGMT_ADV_FLAG_SEC_1M;
		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;

		if (hdev->le_features[1] & HCI_LE_PHY_2M)
			flags |= MGMT_ADV_FLAG_SEC_2M;

		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
			flags |= MGMT_ADV_FLAG_SEC_CODED;
	}

	return flags;
}

static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		*instance = adv_instance->instance;
		instance++;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}

static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];

	return append_local_name(hdev, buf, 0);
}

static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
			   bool is_adv_data)
{
	u8 max_len = HCI_MAX_AD_LENGTH;

	if (is_adv_data) {
		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
				 MGMT_ADV_FLAG_LIMITED_DISCOV |
				 MGMT_ADV_FLAG_MANAGED_FLAGS))
			max_len -= 3;

		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
			max_len -= 3;
	} else {
		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
			max_len -= calculate_name_len(hdev);

		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
			max_len -= 4;
	}

	return max_len;
}

static bool flags_managed(u32 adv_flags)
{
	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
			    MGMT_ADV_FLAG_LIMITED_DISCOV |
			    MGMT_ADV_FLAG_MANAGED_FLAGS);
}

static bool tx_power_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
}

static bool name_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
}

static bool appearance_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
}

static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted.
*/ for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) { cur_len = data[i]; if (!cur_len) continue; if (data[i + 1] == EIR_FLAGS && (!is_adv_data || flags_managed(adv_flags))) return false; if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags)) return false; if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags)) return false; if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags)) return false; if (data[i + 1] == EIR_APPEARANCE && appearance_managed(adv_flags)) return false; /* If the current field length would exceed the total data * length, then it's invalid. */ if (i + cur_len >= len) return false; } return true; } static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags) { u32 supported_flags, phy_flags; /* The current implementation only supports a subset of the specified * flags. Also need to check mutual exclusiveness of sec flags. */ supported_flags = get_supported_adv_flags(hdev); phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK; if (adv_flags & ~supported_flags || ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags))))) return false; return true; } static bool adv_busy(struct hci_dev *hdev) { return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) || pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) || pending_find(MGMT_OP_SET_LE, hdev) || pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) || pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev)); } static void add_advertising_complete(struct hci_dev *hdev, u8 status, u16 opcode) { struct mgmt_pending_cmd *cmd; struct mgmt_cp_add_advertising *cp; struct mgmt_rp_add_advertising rp; struct adv_info *adv_instance, *n; u8 instance; bt_dev_dbg(hdev, "status %u", status); hci_dev_lock(hdev); cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev); if (!cmd) cmd = pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev); list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) { if (!adv_instance->pending) continue; if (!status) { adv_instance->pending = false; continue; } instance = adv_instance->instance; if (hdev->cur_adv_instance == instance) cancel_adv_timeout(hdev); hci_remove_adv_instance(hdev, instance); mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance); } if (!cmd) goto unlock; cp = cmd->param; rp.instance = cp->instance; if (status) mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status)); else mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status), &rp, sizeof(rp)); mgmt_pending_remove(cmd); unlock: hci_dev_unlock(hdev); } static int add_advertising(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_cp_add_advertising *cp = data; struct mgmt_rp_add_advertising rp; u32 flags; u8 status; u16 timeout, duration; unsigned int prev_instance_cnt = hdev->adv_instance_cnt; u8 schedule_instance = 0; struct adv_info *next_instance; int err; struct mgmt_pending_cmd *cmd; struct hci_request req; bt_dev_dbg(hdev, "sock %p", sk); status = mgmt_le_support(hdev); if (status) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, status); /* Enabling the experimental LL Privay support disables support for * advertising. 
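	 * (The same restriction is enforced in read_adv_features() and
	 * remove_advertising().)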
*/ if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, MGMT_STATUS_NOT_SUPPORTED); if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, MGMT_STATUS_INVALID_PARAMS); if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, MGMT_STATUS_INVALID_PARAMS); flags = __le32_to_cpu(cp->flags); timeout = __le16_to_cpu(cp->timeout); duration = __le16_to_cpu(cp->duration); if (!requested_adv_flags_are_valid(hdev, flags)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); if (timeout && !hdev_is_powered(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, MGMT_STATUS_REJECTED); goto unlock; } if (adv_busy(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, MGMT_STATUS_BUSY); goto unlock; } if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) || !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len, cp->scan_rsp_len, false)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, MGMT_STATUS_INVALID_PARAMS); goto unlock; } err = hci_add_adv_instance(hdev, cp->instance, flags, cp->adv_data_len, cp->data, cp->scan_rsp_len, cp->data + cp->adv_data_len, timeout, duration, HCI_ADV_TX_POWER_NO_PREFERENCE, hdev->le_adv_min_interval, hdev->le_adv_max_interval); if (err < 0) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, MGMT_STATUS_FAILED); goto unlock; } /* Only trigger an advertising added event if a new instance was * actually added. */ if (hdev->adv_instance_cnt > prev_instance_cnt) mgmt_advertising_added(sk, hdev, cp->instance); if (hdev->cur_adv_instance == cp->instance) { /* If the currently advertised instance is being changed then * cancel the current advertising and schedule the next * instance. If there is only one instance then the overridden * advertising data will be visible right away. */ cancel_adv_timeout(hdev); next_instance = hci_get_next_instance(hdev, cp->instance); if (next_instance) schedule_instance = next_instance->instance; } else if (!hdev->adv_instance_timeout) { /* Immediately advertise the new instance if no other * instance is currently being advertised. */ schedule_instance = cp->instance; } /* If the HCI_ADVERTISING flag is set or the device isn't powered or * there is no instance to be advertised then we have no HCI * communication to make. Simply return. */ if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) { rp.instance = cp->instance; err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); goto unlock; } /* We're good to go, update advertising data, parameters, and start * advertising. 
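	 * The response is sent asynchronously from add_advertising_complete()
	 * once the queued HCI request has finished.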
*/ cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data, data_len); if (!cmd) { err = -ENOMEM; goto unlock; } hci_req_init(&req, hdev); err = __hci_req_schedule_adv_instance(&req, schedule_instance, true); if (!err) err = hci_req_run(&req, add_advertising_complete); if (err < 0) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, MGMT_STATUS_FAILED); mgmt_pending_remove(cmd); } unlock: hci_dev_unlock(hdev); return err; } static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status, u16 opcode) { struct mgmt_pending_cmd *cmd; struct mgmt_cp_add_ext_adv_params *cp; struct mgmt_rp_add_ext_adv_params rp; struct adv_info *adv_instance; u32 flags; BT_DBG("%s", hdev->name); hci_dev_lock(hdev); cmd = pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev); if (!cmd) goto unlock; cp = cmd->param; adv_instance = hci_find_adv_instance(hdev, cp->instance); if (!adv_instance) goto unlock; rp.instance = cp->instance; rp.tx_power = adv_instance->tx_power; /* While we're at it, inform userspace of the available space for this * advertisement, given the flags that will be used. */ flags = __le32_to_cpu(cp->flags); rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true); rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false); if (status) { /* If this advertisement was previously advertising and we * failed to update it, we signal that it has been removed and * delete its structure */ if (!adv_instance->pending) mgmt_advertising_removed(cmd->sk, hdev, cp->instance); hci_remove_adv_instance(hdev, cp->instance); mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status)); } else { mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status), &rp, sizeof(rp)); } unlock: if (cmd) mgmt_pending_remove(cmd); hci_dev_unlock(hdev); } static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_cp_add_ext_adv_params *cp = data; struct mgmt_rp_add_ext_adv_params rp; struct mgmt_pending_cmd *cmd = NULL; struct adv_info *adv_instance; struct hci_request req; u32 flags, min_interval, max_interval; u16 timeout, duration; u8 status; s8 tx_power; int err; BT_DBG("%s", hdev->name); status = mgmt_le_support(hdev); if (status) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, status); if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, MGMT_STATUS_INVALID_PARAMS); /* The purpose of breaking add_advertising into two separate MGMT calls * for params and data is to allow more parameters to be added to this * structure in the future. For this reason, we verify that we have the * bare minimum structure we know of when the interface was defined. Any * extra parameters we don't know about will be ignored in this request. 
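	 * Note that the duration, timeout, interval and TX power fields are
	 * only honoured when the corresponding MGMT_ADV_PARAM_* bit is set
	 * in flags; otherwise the kernel defaults below are used.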
*/ if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, MGMT_STATUS_INVALID_PARAMS); flags = __le32_to_cpu(cp->flags); if (!requested_adv_flags_are_valid(hdev, flags)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, MGMT_STATUS_INVALID_PARAMS); hci_dev_lock(hdev); /* In new interface, we require that we are powered to register */ if (!hdev_is_powered(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, MGMT_STATUS_REJECTED); goto unlock; } if (adv_busy(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, MGMT_STATUS_BUSY); goto unlock; } /* Parse defined parameters from request, use defaults otherwise */ timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ? __le16_to_cpu(cp->timeout) : 0; duration = (flags & MGMT_ADV_PARAM_DURATION) ? __le16_to_cpu(cp->duration) : hdev->def_multi_adv_rotation_duration; min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ? __le32_to_cpu(cp->min_interval) : hdev->le_adv_min_interval; max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ? __le32_to_cpu(cp->max_interval) : hdev->le_adv_max_interval; tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ? cp->tx_power : HCI_ADV_TX_POWER_NO_PREFERENCE; /* Create advertising instance with no advertising or response data */ err = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL, timeout, duration, tx_power, min_interval, max_interval); if (err < 0) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, MGMT_STATUS_FAILED); goto unlock; } /* Submit request for advertising params if ext adv available */ if (ext_adv_capable(hdev)) { hci_req_init(&req, hdev); adv_instance = hci_find_adv_instance(hdev, cp->instance); /* Updating parameters of an active instance will return a * Command Disallowed error, so we must first disable the * instance if it is active. 
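	 * An instance that is still marked as pending has not been
	 * programmed into the controller yet, so no disable is needed
	 * in that case.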
*/ if (!adv_instance->pending) __hci_req_disable_ext_adv_instance(&req, cp->instance); __hci_req_setup_ext_adv_instance(&req, cp->instance); err = hci_req_run(&req, add_ext_adv_params_complete); if (!err) cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev, data, data_len); if (!cmd) { err = -ENOMEM; hci_remove_adv_instance(hdev, cp->instance); goto unlock; } } else { rp.instance = cp->instance; rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE; rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true); rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); } unlock: hci_dev_unlock(hdev); return err; } static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_cp_add_ext_adv_data *cp = data; struct mgmt_rp_add_ext_adv_data rp; u8 schedule_instance = 0; struct adv_info *next_instance; struct adv_info *adv_instance; int err = 0; struct mgmt_pending_cmd *cmd; struct hci_request req; BT_DBG("%s", hdev->name); hci_dev_lock(hdev); adv_instance = hci_find_adv_instance(hdev, cp->instance); if (!adv_instance) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA, MGMT_STATUS_INVALID_PARAMS); goto unlock; } /* In new interface, we require that we are powered to register */ if (!hdev_is_powered(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA, MGMT_STATUS_REJECTED); goto clear_new_instance; } if (adv_busy(hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA, MGMT_STATUS_BUSY); goto clear_new_instance; } /* Validate new data */ if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data, cp->adv_data_len, true) || !tlv_data_is_valid(hdev, adv_instance->flags, cp->data + cp->adv_data_len, cp->scan_rsp_len, false)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA, MGMT_STATUS_INVALID_PARAMS); goto clear_new_instance; } /* Set the data in the advertising instance */ hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len, cp->data, cp->scan_rsp_len, cp->data + cp->adv_data_len); /* We're good to go, update advertising data, parameters, and start * advertising. */ hci_req_init(&req, hdev); hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL); if (ext_adv_capable(hdev)) { __hci_req_update_adv_data(&req, cp->instance); __hci_req_update_scan_rsp_data(&req, cp->instance); __hci_req_enable_ext_advertising(&req, cp->instance); } else { /* If using software rotation, determine next instance to use */ if (hdev->cur_adv_instance == cp->instance) { /* If the currently advertised instance is being changed * then cancel the current advertising and schedule the * next instance. If there is only one instance then the * overridden advertising data will be visible right * away */ cancel_adv_timeout(hdev); next_instance = hci_get_next_instance(hdev, cp->instance); if (next_instance) schedule_instance = next_instance->instance; } else if (!hdev->adv_instance_timeout) { /* Immediately advertise the new instance if no other * instance is currently being advertised. */ schedule_instance = cp->instance; } /* If the HCI_ADVERTISING flag is set or there is no instance to * be advertised then we have no HCI communication to make. * Simply return. 
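	 * The advertising data itself has already been stored in the
	 * instance above, so the command still completes successfully.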
*/ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) { if (adv_instance->pending) { mgmt_advertising_added(sk, hdev, cp->instance); adv_instance->pending = false; } rp.instance = cp->instance; err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA, MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); goto unlock; } err = __hci_req_schedule_adv_instance(&req, schedule_instance, true); } cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data, data_len); if (!cmd) { err = -ENOMEM; goto clear_new_instance; } if (!err) err = hci_req_run(&req, add_advertising_complete); if (err < 0) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA, MGMT_STATUS_FAILED); mgmt_pending_remove(cmd); goto clear_new_instance; } /* We were successful in updating data, so trigger advertising_added * event if this is an instance that wasn't previously advertising. If * a failure occurs in the requests we initiated, we will remove the * instance again in add_advertising_complete */ if (adv_instance->pending) mgmt_advertising_added(sk, hdev, cp->instance); goto unlock; clear_new_instance: hci_remove_adv_instance(hdev, cp->instance); unlock: hci_dev_unlock(hdev); return err; } static void remove_advertising_complete(struct hci_dev *hdev, u8 status, u16 opcode) { struct mgmt_pending_cmd *cmd; struct mgmt_cp_remove_advertising *cp; struct mgmt_rp_remove_advertising rp; bt_dev_dbg(hdev, "status %u", status); hci_dev_lock(hdev); /* A failure status here only means that we failed to disable * advertising. Otherwise, the advertising instance has been removed, * so report success. */ cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev); if (!cmd) goto unlock; cp = cmd->param; rp.instance = cp->instance; mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); mgmt_pending_remove(cmd); unlock: hci_dev_unlock(hdev); } static int remove_advertising(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_cp_remove_advertising *cp = data; struct mgmt_rp_remove_advertising rp; struct mgmt_pending_cmd *cmd; struct hci_request req; int err; bt_dev_dbg(hdev, "sock %p", sk); /* Enabling the experimental LL Privay support disables support for * advertising. */ if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, MGMT_STATUS_NOT_SUPPORTED); hci_dev_lock(hdev); if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING, MGMT_STATUS_INVALID_PARAMS); goto unlock; } if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) || pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) || pending_find(MGMT_OP_SET_LE, hdev)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING, MGMT_STATUS_BUSY); goto unlock; } if (list_empty(&hdev->adv_instances)) { err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING, MGMT_STATUS_INVALID_PARAMS); goto unlock; } hci_req_init(&req, hdev); /* If we use extended advertising, instance is disabled and removed */ if (ext_adv_capable(hdev)) { __hci_req_disable_ext_adv_instance(&req, cp->instance); __hci_req_remove_ext_adv_instance(&req, cp->instance); } hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true); if (list_empty(&hdev->adv_instances)) __hci_req_disable_advertising(&req); /* If no HCI commands have been collected so far or the HCI_ADVERTISING * flag is set or the device isn't powered then we have no HCI * communication to make. Simply return. 
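	 * The instance has already been removed from the internal list by
	 * hci_req_clear_adv_instance(), so success can be reported right
	 * away and any collected but unsent commands are simply purged.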
*/ if (skb_queue_empty(&req.cmd_q) || !hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING)) { hci_req_purge(&req); rp.instance = cp->instance; err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING, MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); goto unlock; } cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data, data_len); if (!cmd) { err = -ENOMEM; goto unlock; } err = hci_req_run(&req, remove_advertising_complete); if (err < 0) mgmt_pending_remove(cmd); unlock: hci_dev_unlock(hdev); return err; } static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { struct mgmt_cp_get_adv_size_info *cp = data; struct mgmt_rp_get_adv_size_info rp; u32 flags, supported_flags; int err; bt_dev_dbg(hdev, "sock %p", sk); if (!lmp_le_capable(hdev)) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO, MGMT_STATUS_REJECTED); if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO, MGMT_STATUS_INVALID_PARAMS); flags = __le32_to_cpu(cp->flags); /* The current implementation only supports a subset of the specified * flags. */ supported_flags = get_supported_adv_flags(hdev); if (flags & ~supported_flags) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO, MGMT_STATUS_INVALID_PARAMS); rp.instance = cp->instance; rp.flags = cp->flags; rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true); rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO, MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); return err; } static const struct hci_mgmt_handler mgmt_handlers[] = { { NULL }, /* 0x0000 (no command) */ { read_version, MGMT_READ_VERSION_SIZE, HCI_MGMT_NO_HDEV | HCI_MGMT_UNTRUSTED }, { read_commands, MGMT_READ_COMMANDS_SIZE, HCI_MGMT_NO_HDEV | HCI_MGMT_UNTRUSTED }, { read_index_list, MGMT_READ_INDEX_LIST_SIZE, HCI_MGMT_NO_HDEV | HCI_MGMT_UNTRUSTED }, { read_controller_info, MGMT_READ_INFO_SIZE, HCI_MGMT_UNTRUSTED }, { set_powered, MGMT_SETTING_SIZE }, { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE }, { set_connectable, MGMT_SETTING_SIZE }, { set_fast_connectable, MGMT_SETTING_SIZE }, { set_bondable, MGMT_SETTING_SIZE }, { set_link_security, MGMT_SETTING_SIZE }, { set_ssp, MGMT_SETTING_SIZE }, { set_hs, MGMT_SETTING_SIZE }, { set_le, MGMT_SETTING_SIZE }, { set_dev_class, MGMT_SET_DEV_CLASS_SIZE }, { set_local_name, MGMT_SET_LOCAL_NAME_SIZE }, { add_uuid, MGMT_ADD_UUID_SIZE }, { remove_uuid, MGMT_REMOVE_UUID_SIZE }, { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE, HCI_MGMT_VAR_LEN }, { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE, HCI_MGMT_VAR_LEN }, { disconnect, MGMT_DISCONNECT_SIZE }, { get_connections, MGMT_GET_CONNECTIONS_SIZE }, { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE }, { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE }, { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE }, { pair_device, MGMT_PAIR_DEVICE_SIZE }, { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE }, { unpair_device, MGMT_UNPAIR_DEVICE_SIZE }, { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE }, { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE }, { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE }, { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE }, { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE }, { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE, HCI_MGMT_VAR_LEN }, { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE }, { start_discovery, MGMT_START_DISCOVERY_SIZE }, { stop_discovery, 
MGMT_STOP_DISCOVERY_SIZE }, { confirm_name, MGMT_CONFIRM_NAME_SIZE }, { block_device, MGMT_BLOCK_DEVICE_SIZE }, { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE }, { set_device_id, MGMT_SET_DEVICE_ID_SIZE }, { set_advertising, MGMT_SETTING_SIZE }, { set_bredr, MGMT_SETTING_SIZE }, { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE }, { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE }, { set_secure_conn, MGMT_SETTING_SIZE }, { set_debug_keys, MGMT_SETTING_SIZE }, { set_privacy, MGMT_SET_PRIVACY_SIZE }, { load_irks, MGMT_LOAD_IRKS_SIZE, HCI_MGMT_VAR_LEN }, { get_conn_info, MGMT_GET_CONN_INFO_SIZE }, { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE }, { add_device, MGMT_ADD_DEVICE_SIZE }, { remove_device, MGMT_REMOVE_DEVICE_SIZE }, { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE, HCI_MGMT_VAR_LEN }, { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE, HCI_MGMT_NO_HDEV | HCI_MGMT_UNTRUSTED }, { read_config_info, MGMT_READ_CONFIG_INFO_SIZE, HCI_MGMT_UNCONFIGURED | HCI_MGMT_UNTRUSTED }, { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE, HCI_MGMT_UNCONFIGURED }, { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE, HCI_MGMT_UNCONFIGURED }, { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE, HCI_MGMT_VAR_LEN }, { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE }, { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE, HCI_MGMT_NO_HDEV | HCI_MGMT_UNTRUSTED }, { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE }, { add_advertising, MGMT_ADD_ADVERTISING_SIZE, HCI_MGMT_VAR_LEN }, { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE }, { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE }, { start_limited_discovery, MGMT_START_DISCOVERY_SIZE }, { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE, HCI_MGMT_UNTRUSTED }, { set_appearance, MGMT_SET_APPEARANCE_SIZE }, { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE }, { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE }, { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE, HCI_MGMT_VAR_LEN }, { set_wideband_speech, MGMT_SETTING_SIZE }, { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE, HCI_MGMT_UNTRUSTED }, { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE, HCI_MGMT_UNTRUSTED | HCI_MGMT_HDEV_OPTIONAL }, { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE, HCI_MGMT_VAR_LEN | HCI_MGMT_HDEV_OPTIONAL }, { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE, HCI_MGMT_UNTRUSTED }, { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE, HCI_MGMT_VAR_LEN }, { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE, HCI_MGMT_UNTRUSTED }, { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE, HCI_MGMT_VAR_LEN }, { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE }, { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE }, { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE }, { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE, HCI_MGMT_VAR_LEN }, { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE }, { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE, HCI_MGMT_VAR_LEN }, { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE, HCI_MGMT_VAR_LEN }, { add_adv_patterns_monitor_rssi, MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE, HCI_MGMT_VAR_LEN }, }; void mgmt_index_added(struct hci_dev *hdev) { struct mgmt_ev_ext_index ev; if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) return; switch (hdev->dev_type) { case HCI_PRIMARY: if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS); ev.type = 0x01; } else { mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, 
HCI_MGMT_INDEX_EVENTS); ev.type = 0x00; } break; case HCI_AMP: ev.type = 0x02; break; default: return; } ev.bus = hdev->bus; mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev), HCI_MGMT_EXT_INDEX_EVENTS); } void mgmt_index_removed(struct hci_dev *hdev) { struct mgmt_ev_ext_index ev; u8 status = MGMT_STATUS_INVALID_INDEX; if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) return; switch (hdev->dev_type) { case HCI_PRIMARY: mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status); if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS); ev.type = 0x01; } else { mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, HCI_MGMT_INDEX_EVENTS); ev.type = 0x00; } break; case HCI_AMP: ev.type = 0x02; break; default: return; } ev.bus = hdev->bus; mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev), HCI_MGMT_EXT_INDEX_EVENTS); } /* This function requires the caller holds hdev->lock */ static void restart_le_actions(struct hci_dev *hdev) { struct hci_conn_params *p; list_for_each_entry(p, &hdev->le_conn_params, list) { /* Needed for AUTO_OFF case where might not "really" * have been powered off. */ list_del_init(&p->action); switch (p->auto_connect) { case HCI_AUTO_CONN_DIRECT: case HCI_AUTO_CONN_ALWAYS: list_add(&p->action, &hdev->pend_le_conns); break; case HCI_AUTO_CONN_REPORT: list_add(&p->action, &hdev->pend_le_reports); break; default: break; } } } void mgmt_power_on(struct hci_dev *hdev, int err) { struct cmd_lookup match = { NULL, hdev }; bt_dev_dbg(hdev, "err %d", err); hci_dev_lock(hdev); if (!err) { restart_le_actions(hdev); hci_update_background_scan(hdev); } mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); new_settings(hdev, match.sk); if (match.sk) sock_put(match.sk); hci_dev_unlock(hdev); } void __mgmt_power_off(struct hci_dev *hdev) { struct cmd_lookup match = { NULL, hdev }; u8 status, zero_cod[] = { 0, 0, 0 }; mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); /* If the power off is because of hdev unregistration let * use the appropriate INVALID_INDEX status. Otherwise use * NOT_POWERED. We cover both scenarios here since later in * mgmt_index_removed() any hci_conn callbacks will have already * been triggered, potentially causing misleading DISCONNECTED * status responses. 
*/ if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) status = MGMT_STATUS_INVALID_INDEX; else status = MGMT_STATUS_NOT_POWERED; mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status); if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) { mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, zero_cod, sizeof(zero_cod), HCI_MGMT_DEV_CLASS_EVENTS, NULL); ext_info_changed(hdev, NULL); } new_settings(hdev, match.sk); if (match.sk) sock_put(match.sk); } void mgmt_set_powered_failed(struct hci_dev *hdev, int err) { struct mgmt_pending_cmd *cmd; u8 status; cmd = pending_find(MGMT_OP_SET_POWERED, hdev); if (!cmd) return; if (err == -ERFKILL) status = MGMT_STATUS_RFKILLED; else status = MGMT_STATUS_FAILED; mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status); mgmt_pending_remove(cmd); } void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persistent) { struct mgmt_ev_new_link_key ev; memset(&ev, 0, sizeof(ev)); ev.store_hint = persistent; bacpy(&ev.key.addr.bdaddr, &key->bdaddr); ev.key.addr.type = BDADDR_BREDR; ev.key.type = key->type; memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE); ev.key.pin_len = key->pin_len; mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL); } static u8 mgmt_ltk_type(struct smp_ltk *ltk) { switch (ltk->type) { case SMP_LTK: case SMP_LTK_RESPONDER: if (ltk->authenticated) return MGMT_LTK_AUTHENTICATED; return MGMT_LTK_UNAUTHENTICATED; case SMP_LTK_P256: if (ltk->authenticated) return MGMT_LTK_P256_AUTH; return MGMT_LTK_P256_UNAUTH; case SMP_LTK_P256_DEBUG: return MGMT_LTK_P256_DEBUG; } return MGMT_LTK_UNAUTHENTICATED; } void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent) { struct mgmt_ev_new_long_term_key ev; memset(&ev, 0, sizeof(ev)); /* Devices using resolvable or non-resolvable random addresses * without providing an identity resolving key don't require * to store long term keys. Their addresses will change the * next time around. * * Only when a remote device provides an identity address * make sure the long term key is stored. If the remote * identity is known, the long term keys are internally * mapped to the identity address. So allow static random * and public addresses here. */ if (key->bdaddr_type == ADDR_LE_DEV_RANDOM && (key->bdaddr.b[5] & 0xc0) != 0xc0) ev.store_hint = 0x00; else ev.store_hint = persistent; bacpy(&ev.key.addr.bdaddr, &key->bdaddr); ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type); ev.key.type = mgmt_ltk_type(key); ev.key.enc_size = key->enc_size; ev.key.ediv = key->ediv; ev.key.rand = key->rand; if (key->type == SMP_LTK) ev.key.initiator = 1; /* Make sure we copy only the significant bytes based on the * encryption key size, and set the rest of the value to zeroes. 
*/ memcpy(ev.key.val, key->val, key->enc_size); memset(ev.key.val + key->enc_size, 0, sizeof(ev.key.val) - key->enc_size); mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL); } void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent) { struct mgmt_ev_new_irk ev; memset(&ev, 0, sizeof(ev)); ev.store_hint = persistent; bacpy(&ev.rpa, &irk->rpa); bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr); ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type); memcpy(ev.irk.val, irk->val, sizeof(irk->val)); mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL); } void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk, bool persistent) { struct mgmt_ev_new_csrk ev; memset(&ev, 0, sizeof(ev)); /* Devices using resolvable or non-resolvable random addresses * without providing an identity resolving key don't require * to store signature resolving keys. Their addresses will change * the next time around. * * Only when a remote device provides an identity address * make sure the signature resolving key is stored. So allow * static random and public addresses here. */ if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM && (csrk->bdaddr.b[5] & 0xc0) != 0xc0) ev.store_hint = 0x00; else ev.store_hint = persistent; bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr); ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type); ev.key.type = csrk->type; memcpy(ev.key.val, csrk->val, sizeof(csrk->val)); mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL); } void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type, u8 store_hint, u16 min_interval, u16 max_interval, u16 latency, u16 timeout) { struct mgmt_ev_new_conn_param ev; if (!hci_is_identity_address(bdaddr, bdaddr_type)) return; memset(&ev, 0, sizeof(ev)); bacpy(&ev.addr.bdaddr, bdaddr); ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type); ev.store_hint = store_hint; ev.min_interval = cpu_to_le16(min_interval); ev.max_interval = cpu_to_le16(max_interval); ev.latency = cpu_to_le16(latency); ev.timeout = cpu_to_le16(timeout); mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL); } void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, u8 *name, u8 name_len) { char buf[512]; struct mgmt_ev_device_connected *ev = (void *) buf; u16 eir_len = 0; u32 flags = 0; bacpy(&ev->addr.bdaddr, &conn->dst); ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type); if (conn->out) flags |= MGMT_DEV_FOUND_INITIATED_CONN; ev->flags = __cpu_to_le32(flags); /* We must ensure that the EIR Data fields are ordered and * unique. Keep it simple for now and avoid the problem by not * adding any BR/EDR data to the LE adv. 
*/ if (conn->le_adv_data_len > 0) { memcpy(&ev->eir[eir_len], conn->le_adv_data, conn->le_adv_data_len); eir_len = conn->le_adv_data_len; } else { if (name_len > 0) eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name, name_len); if (memcmp(conn->dev_class, "\0\0\0", 3) != 0) eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, conn->dev_class, 3); } ev->eir_len = cpu_to_le16(eir_len); mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf, sizeof(*ev) + eir_len, NULL); } static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data) { struct sock **sk = data; cmd->cmd_complete(cmd, 0); *sk = cmd->sk; sock_hold(*sk); mgmt_pending_remove(cmd); } static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data) { struct hci_dev *hdev = data; struct mgmt_cp_unpair_device *cp = cmd->param; device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk); cmd->cmd_complete(cmd, 0); mgmt_pending_remove(cmd); } bool mgmt_powering_down(struct hci_dev *hdev) { struct mgmt_pending_cmd *cmd; struct mgmt_mode *cp; cmd = pending_find(MGMT_OP_SET_POWERED, hdev); if (!cmd) return false; cp = cmd->param; if (!cp->val) return true; return false; } void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 reason, bool mgmt_connected) { struct mgmt_ev_device_disconnected ev; struct sock *sk = NULL; /* The connection is still in hci_conn_hash so test for 1 * instead of 0 to know if this is the last one. */ if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) { cancel_delayed_work(&hdev->power_off); queue_work(hdev->req_workqueue, &hdev->power_off.work); } if (!mgmt_connected) return; if (link_type != ACL_LINK && link_type != LE_LINK) return; mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk); bacpy(&ev.addr.bdaddr, bdaddr); ev.addr.type = link_to_bdaddr(link_type, addr_type); ev.reason = reason; /* Report disconnects due to suspend */ if (hdev->suspended) ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND; mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk); if (sk) sock_put(sk); mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp, hdev); } void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status) { u8 bdaddr_type = link_to_bdaddr(link_type, addr_type); struct mgmt_cp_disconnect *cp; struct mgmt_pending_cmd *cmd; mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp, hdev); cmd = pending_find(MGMT_OP_DISCONNECT, hdev); if (!cmd) return; cp = cmd->param; if (bacmp(bdaddr, &cp->addr.bdaddr)) return; if (cp->addr.type != bdaddr_type) return; cmd->cmd_complete(cmd, mgmt_status(status)); mgmt_pending_remove(cmd); } void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status) { struct mgmt_ev_connect_failed ev; /* The connection is still in hci_conn_hash so test for 1 * instead of 0 to know if this is the last one. 
*/ if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) { cancel_delayed_work(&hdev->power_off); queue_work(hdev->req_workqueue, &hdev->power_off.work); } bacpy(&ev.addr.bdaddr, bdaddr); ev.addr.type = link_to_bdaddr(link_type, addr_type); ev.status = mgmt_status(status); mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL); } void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure) { struct mgmt_ev_pin_code_request ev; bacpy(&ev.addr.bdaddr, bdaddr); ev.addr.type = BDADDR_BREDR; ev.secure = secure; mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL); } void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status) { struct mgmt_pending_cmd *cmd; cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev); if (!cmd) return; cmd->cmd_complete(cmd, mgmt_status(status)); mgmt_pending_remove(cmd); } void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 status) { struct mgmt_pending_cmd *cmd; cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev); if (!cmd) return; cmd->cmd_complete(cmd, mgmt_status(status)); mgmt_pending_remove(cmd); } int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u32 value, u8 confirm_hint) { struct mgmt_ev_user_confirm_request ev; bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr); bacpy(&ev.addr.bdaddr, bdaddr); ev.addr.type = link_to_bdaddr(link_type, addr_type); ev.confirm_hint = confirm_hint; ev.value = cpu_to_le32(value); return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev), NULL); } int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type) { struct mgmt_ev_user_passkey_request ev; bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr); bacpy(&ev.addr.bdaddr, bdaddr); ev.addr.type = link_to_bdaddr(link_type, addr_type); return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev), NULL); } static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status, u8 opcode) { struct mgmt_pending_cmd *cmd; cmd = pending_find(opcode, hdev); if (!cmd) return -ENOENT; cmd->cmd_complete(cmd, mgmt_status(status)); mgmt_pending_remove(cmd); return 0; } int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status) { return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type, status, MGMT_OP_USER_CONFIRM_REPLY); } int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status) { return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type, status, MGMT_OP_USER_CONFIRM_NEG_REPLY); } int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status) { return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type, status, MGMT_OP_USER_PASSKEY_REPLY); } int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status) { return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type, status, MGMT_OP_USER_PASSKEY_NEG_REPLY); } int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u32 passkey, u8 entered) { struct mgmt_ev_passkey_notify ev; bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr); bacpy(&ev.addr.bdaddr, bdaddr); ev.addr.type = link_to_bdaddr(link_type, addr_type); ev.passkey = __cpu_to_le32(passkey); ev.entered = entered; return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), 
NULL); } void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status) { struct mgmt_ev_auth_failed ev; struct mgmt_pending_cmd *cmd; u8 status = mgmt_status(hci_status); bacpy(&ev.addr.bdaddr, &conn->dst); ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type); ev.status = status; cmd = find_pairing(conn); mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev), cmd ? cmd->sk : NULL); if (cmd) { cmd->cmd_complete(cmd, status); mgmt_pending_remove(cmd); } } void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status) { struct cmd_lookup match = { NULL, hdev }; bool changed; if (status) { u8 mgmt_err = mgmt_status(status); mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, cmd_status_rsp, &mgmt_err); return; } if (test_bit(HCI_AUTH, &hdev->flags)) changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY); else changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY); mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp, &match); if (changed) new_settings(hdev, match.sk); if (match.sk) sock_put(match.sk); } static void clear_eir(struct hci_request *req) { struct hci_dev *hdev = req->hdev; struct hci_cp_write_eir cp; if (!lmp_ext_inq_capable(hdev)) return; memset(hdev->eir, 0, sizeof(hdev->eir)); memset(&cp, 0, sizeof(cp)); hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp); } void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status) { struct cmd_lookup match = { NULL, hdev }; struct hci_request req; bool changed = false; if (status) { u8 mgmt_err = mgmt_status(status); if (enable && hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED)) { hci_dev_clear_flag(hdev, HCI_HS_ENABLED); new_settings(hdev, NULL); } mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp, &mgmt_err); return; } if (enable) { changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED); } else { changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED); if (!changed) changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED); else hci_dev_clear_flag(hdev, HCI_HS_ENABLED); } mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match); if (changed) new_settings(hdev, match.sk); if (match.sk) sock_put(match.sk); hci_req_init(&req, hdev); if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) { if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(enable), &enable); __hci_req_update_eir(&req); } else { clear_eir(&req); } hci_req_run(&req, NULL); } static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data) { struct cmd_lookup *match = data; if (match->sk == NULL) { match->sk = cmd->sk; sock_hold(match->sk); } } void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, u8 status) { struct cmd_lookup match = { NULL, hdev, mgmt_status(status) }; mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match); mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match); mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match); if (!status) { mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL); ext_info_changed(hdev, NULL); } if (match.sk) sock_put(match.sk); } void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status) { struct mgmt_cp_set_local_name ev; struct mgmt_pending_cmd *cmd; if (status) return; memset(&ev, 0, sizeof(ev)); memcpy(ev.name, name, HCI_MAX_NAME_LENGTH); memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH); cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev); if (!cmd) { memcpy(hdev->dev_name, 
name, sizeof(hdev->dev_name)); /* If this is a HCI command related to powering on the * HCI dev don't send any mgmt signals. */ if (pending_find(MGMT_OP_SET_POWERED, hdev)) return; } mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev), HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL); ext_info_changed(hdev, cmd ? cmd->sk : NULL); } static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16]) { int i; for (i = 0; i < uuid_count; i++) { if (!memcmp(uuid, uuids[i], 16)) return true; } return false; } static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16]) { u16 parsed = 0; while (parsed < eir_len) { u8 field_len = eir[0]; u8 uuid[16]; int i; if (field_len == 0) break; if (eir_len - parsed < field_len + 1) break; switch (eir[1]) { case EIR_UUID16_ALL: case EIR_UUID16_SOME: for (i = 0; i + 3 <= field_len; i += 2) { memcpy(uuid, bluetooth_base_uuid, 16); uuid[13] = eir[i + 3]; uuid[12] = eir[i + 2]; if (has_uuid(uuid, uuid_count, uuids)) return true; } break; case EIR_UUID32_ALL: case EIR_UUID32_SOME: for (i = 0; i + 5 <= field_len; i += 4) { memcpy(uuid, bluetooth_base_uuid, 16); uuid[15] = eir[i + 5]; uuid[14] = eir[i + 4]; uuid[13] = eir[i + 3]; uuid[12] = eir[i + 2]; if (has_uuid(uuid, uuid_count, uuids)) return true; } break; case EIR_UUID128_ALL: case EIR_UUID128_SOME: for (i = 0; i + 17 <= field_len; i += 16) { memcpy(uuid, eir + i + 2, 16); if (has_uuid(uuid, uuid_count, uuids)) return true; } break; } parsed += field_len + 1; eir += field_len + 1; } return false; } static void restart_le_scan(struct hci_dev *hdev) { /* If controller is not scanning we are done. */ if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) return; if (time_after(jiffies + DISCOV_LE_RESTART_DELAY, hdev->discovery.scan_start + hdev->discovery.scan_duration)) return; queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart, DISCOV_LE_RESTART_DELAY); } static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len) { /* If a RSSI threshold has been specified, and * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with * a RSSI smaller than the RSSI threshold will be dropped. If the quirk * is set, let it through for further processing, as we might need to * restart the scan. * * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry, * the results are also dropped. */ if (hdev->discovery.rssi != HCI_RSSI_INVALID && (rssi == HCI_RSSI_INVALID || (rssi < hdev->discovery.rssi && !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)))) return false; if (hdev->discovery.uuid_count != 0) { /* If a list of UUIDs is provided in filter, results with no * matching UUID should be dropped. */ if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count, hdev->discovery.uuids) && !eir_has_uuids(scan_rsp, scan_rsp_len, hdev->discovery.uuid_count, hdev->discovery.uuids)) return false; } /* If duplicate filtering does not report RSSI changes, then restart * scanning to ensure updated result with updated RSSI values. */ if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) { restart_le_scan(hdev); /* Validate RSSI value against the RSSI threshold once more. 
*/ if (hdev->discovery.rssi != HCI_RSSI_INVALID && rssi < hdev->discovery.rssi) return false; } return true; } void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len) { char buf[512]; struct mgmt_ev_device_found *ev = (void *)buf; size_t ev_size; /* Don't send events for a non-kernel initiated discovery. With * LE one exception is if we have pend_le_reports > 0 in which * case we're doing passive scanning and want these events. */ if (!hci_discovery_active(hdev)) { if (link_type == ACL_LINK) return; if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports) && !hci_is_adv_monitoring(hdev)) { return; } } if (hdev->discovery.result_filtering) { /* We are using service discovery */ if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp, scan_rsp_len)) return; } if (hdev->discovery.limited) { /* Check for limited discoverable bit */ if (dev_class) { if (!(dev_class[1] & 0x20)) return; } else { u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL); if (!flags || !(flags[0] & LE_AD_LIMITED)) return; } } /* Make sure that the buffer is big enough. The 5 extra bytes * are for the potential CoD field. */ if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf)) return; memset(buf, 0, sizeof(buf)); /* In case of device discovery with BR/EDR devices (pre 1.2), the * RSSI value was reported as 0 when not available. This behavior * is kept when using device discovery. This is required for full * backwards compatibility with the API. * * However when using service discovery, the value 127 will be * returned when the RSSI is not available. */ if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi && link_type == ACL_LINK) rssi = 0; bacpy(&ev->addr.bdaddr, bdaddr); ev->addr.type = link_to_bdaddr(link_type, addr_type); ev->rssi = rssi; ev->flags = cpu_to_le32(flags); if (eir_len > 0) /* Copy EIR or advertising data into event */ memcpy(ev->eir, eir, eir_len); if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, NULL)) eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, dev_class, 3); if (scan_rsp_len > 0) /* Append scan response data to event */ memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len); ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len); ev_size = sizeof(*ev) + eir_len + scan_rsp_len; mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL); } void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, s8 rssi, u8 *name, u8 name_len) { struct mgmt_ev_device_found *ev; char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2]; u16 eir_len; ev = (struct mgmt_ev_device_found *) buf; memset(buf, 0, sizeof(buf)); bacpy(&ev->addr.bdaddr, bdaddr); ev->addr.type = link_to_bdaddr(link_type, addr_type); ev->rssi = rssi; eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name, name_len); ev->eir_len = cpu_to_le16(eir_len); mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL); } void mgmt_discovering(struct hci_dev *hdev, u8 discovering) { struct mgmt_ev_discovering ev; bt_dev_dbg(hdev, "discovering %u", discovering); memset(&ev, 0, sizeof(ev)); ev.type = hdev->discovery.type; ev.discovering = discovering; mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL); } void mgmt_suspending(struct hci_dev *hdev, u8 state) { struct mgmt_ev_controller_suspend ev; ev.suspend_state = state; mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL); } void mgmt_resuming(struct hci_dev *hdev, u8 
       reason, bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	} else {
		memset(&ev.addr, 0, sizeof(ev.addr));
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}

static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};

int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
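/*
 * Illustrative sketch (editorial, not part of mgmt.c): how an opcode-indexed
 * handler table such as mgmt_handlers[] above is typically consumed. The
 * types, flag value and dispatch routine below are simplified stand-ins
 * written only for this example -- the real dispatch lives elsewhere in the
 * Bluetooth core and is not reproduced here.
 */
#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_VAR_LEN	0x01	/* stand-in for a "variable length" flag */

struct example_handler {
	int (*func)(void *sk, void *hdev, void *data, uint16_t data_len);
	uint16_t data_len;		/* fixed or minimum parameter length */
	unsigned long flags;
};

static int example_dispatch(const struct example_handler *table, size_t count,
			    uint16_t opcode, void *sk, void *hdev,
			    void *data, uint16_t data_len)
{
	const struct example_handler *h;

	/* Opcode 0 is reserved; unknown opcodes are rejected. */
	if (opcode == 0 || opcode >= count || !table[opcode].func)
		return -1;

	h = &table[opcode];

	/*
	 * Fixed-size commands must match their declared size exactly;
	 * variable-length commands only need to carry at least the fixed
	 * part of their parameters.
	 */
	if (h->flags & EXAMPLE_VAR_LEN) {
		if (data_len < h->data_len)
			return -1;
	} else if (data_len != h->data_len) {
		return -1;
	}

	return h->func(sk, hdev, data, data_len);
}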
// SPDX-License-Identifier: GPL-2.0-only
/*
 * File: sysctl.c
 *
 * Phonet /proc/sys/net/phonet interface implementation
 *
 * Copyright (C) 2008 Nokia Corporation.
 *
 * Author: Rémi Denis-Courmont
 */

#include <linux/seqlock.h>
#include <linux/sysctl.h>
#include <linux/errno.h>
#include <linux/init.h>

#include <net/sock.h>
#include <linux/phonet.h>
#include <net/phonet/phonet.h>

#define DYNAMIC_PORT_MIN	0x40
#define DYNAMIC_PORT_MAX	0x7f

static DEFINE_SEQLOCK(local_port_range_lock);
static int local_port_range_min[2] = {0, 0};
static int local_port_range_max[2] = {1023, 1023};
static int local_port_range[2] = {DYNAMIC_PORT_MIN, DYNAMIC_PORT_MAX};
static struct ctl_table_header *phonet_table_hrd;

static void set_local_port_range(int range[2])
{
	write_seqlock(&local_port_range_lock);
	local_port_range[0] = range[0];
	local_port_range[1] = range[1];
	write_sequnlock(&local_port_range_lock);
}

void phonet_get_local_port_range(int *min, int *max)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&local_port_range_lock);
		if (min)
			*min = local_port_range[0];
		if (max)
			*max = local_port_range[1];
	} while (read_seqretry(&local_port_range_lock, seq));
}

static int proc_local_port_range(struct ctl_table *table, int write,
				 void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;
	int range[2] = {local_port_range[0], local_port_range[1]};
	struct ctl_table tmp = {
		.data = &range,
		.maxlen = sizeof(range),
		.mode = table->mode,
		.extra1 = &local_port_range_min,
		.extra2 = &local_port_range_max,
	};

	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && ret == 0) {
		if (range[1] < range[0])
			ret = -EINVAL;
		else
			set_local_port_range(range);
	}

	return ret;
}

static struct ctl_table phonet_table[] = {
	{
		.procname	= "local_port_range",
		.data		= &local_port_range,
		.maxlen		= sizeof(local_port_range),
		.mode		= 0644,
		.proc_handler	= proc_local_port_range,
	},
	{ }
};

int __init phonet_sysctl_init(void)
{
	phonet_table_hrd = register_net_sysctl(&init_net, "net/phonet",
					       phonet_table);
	return phonet_table_hrd == NULL ? -ENOMEM : 0;
}

void phonet_sysctl_exit(void)
{
	unregister_net_sysctl_table(phonet_table_hrd);
}
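/*
 * Illustrative sketch (editorial, not part of sysctl.c): how a caller can
 * consume the seqlock-protected range via phonet_get_local_port_range().
 * The read side above retries until it observes a consistent pair, so the
 * caller simply receives a coherent [min, max] snapshot. pn_port_in_use()
 * is a hypothetical predicate standing in for a real socket-table lookup.
 */
static int pn_port_in_use(int port)
{
	(void)port;		/* placeholder: always report the port as free */
	return 0;
}

static int example_pick_dynamic_port(void)
{
	int port, pmin, pmax;

	phonet_get_local_port_range(&pmin, &pmax);

	for (port = pmin; port <= pmax; port++) {
		if (!pn_port_in_use(port))
			return port;
	}

	return -EADDRINUSE;	/* dynamic range exhausted */
}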
// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
 *
 */

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/nls.h>
#include <linux/uio.h>
#include <linux/writeback.h>

#include "debug.h"
#include "ntfs.h"
#include "ntfs_fs.h"

/*
 * ntfs_read_mft - Read a record and parse the MFT.
*/ static struct inode *ntfs_read_mft(struct inode *inode, const struct cpu_str *name, const struct MFT_REF *ref) { int err = 0; struct ntfs_inode *ni = ntfs_i(inode); struct super_block *sb = inode->i_sb; struct ntfs_sb_info *sbi = sb->s_fs_info; mode_t mode = 0; struct ATTR_STD_INFO5 *std5 = NULL; struct ATTR_LIST_ENTRY *le; struct ATTRIB *attr; bool is_match = false; bool is_root = false; bool is_dir; unsigned long ino = inode->i_ino; u32 rp_fa = 0, asize, t32; u16 roff, rsize, names = 0, links = 0; const struct ATTR_FILE_NAME *fname = NULL; const struct INDEX_ROOT *root; struct REPARSE_DATA_BUFFER rp; // 0x18 bytes u64 t64; struct MFT_REC *rec; struct runs_tree *run; inode->i_op = NULL; /* Setup 'uid' and 'gid' */ inode->i_uid = sbi->options->fs_uid; inode->i_gid = sbi->options->fs_gid; err = mi_init(&ni->mi, sbi, ino); if (err) goto out; if (!sbi->mft.ni && ino == MFT_REC_MFT && !sb->s_root) { t64 = sbi->mft.lbo >> sbi->cluster_bits; t32 = bytes_to_cluster(sbi, MFT_REC_VOL * sbi->record_size); sbi->mft.ni = ni; init_rwsem(&ni->file.run_lock); if (!run_add_entry(&ni->file.run, 0, t64, t32, true)) { err = -ENOMEM; goto out; } } err = mi_read(&ni->mi, ino == MFT_REC_MFT); if (err) goto out; rec = ni->mi.mrec; if (sbi->flags & NTFS_FLAGS_LOG_REPLAYING) { ; } else if (ref->seq != rec->seq) { err = -EINVAL; ntfs_err(sb, "MFT: r=%lx, expect seq=%x instead of %x!", ino, le16_to_cpu(ref->seq), le16_to_cpu(rec->seq)); goto out; } else if (!is_rec_inuse(rec)) { err = -ESTALE; ntfs_err(sb, "Inode r=%x is not in use!", (u32)ino); goto out; } if (le32_to_cpu(rec->total) != sbi->record_size) { /* Bad inode? */ err = -EINVAL; goto out; } if (!is_rec_base(rec)) { err = -EINVAL; goto out; } /* Record should contain $I30 root. */ is_dir = rec->flags & RECORD_FLAG_DIR; /* MFT_REC_MFT is not a dir */ if (is_dir && ino == MFT_REC_MFT) { err = -EINVAL; goto out; } inode->i_generation = le16_to_cpu(rec->seq); /* Enumerate all struct Attributes MFT. */ le = NULL; attr = NULL; /* * To reduce tab pressure use goto instead of * while( (attr = ni_enum_attr_ex(ni, attr, &le, NULL) )) */ next_attr: run = NULL; err = -EINVAL; attr = ni_enum_attr_ex(ni, attr, &le, NULL); if (!attr) goto end_enum; if (le && le->vcn) { /* This is non primary attribute segment. Ignore if not MFT. */ if (ino != MFT_REC_MFT || attr->type != ATTR_DATA) goto next_attr; run = &ni->file.run; asize = le32_to_cpu(attr->size); goto attr_unpack_run; } roff = attr->non_res ? 0 : le16_to_cpu(attr->res.data_off); rsize = attr->non_res ? 
0 : le32_to_cpu(attr->res.data_size); asize = le32_to_cpu(attr->size); if (le16_to_cpu(attr->name_off) + attr->name_len > asize) goto out; if (attr->non_res) { t64 = le64_to_cpu(attr->nres.alloc_size); if (le64_to_cpu(attr->nres.data_size) > t64 || le64_to_cpu(attr->nres.valid_size) > t64) goto out; } switch (attr->type) { case ATTR_STD: if (attr->non_res || asize < sizeof(struct ATTR_STD_INFO) + roff || rsize < sizeof(struct ATTR_STD_INFO)) goto out; if (std5) goto next_attr; std5 = Add2Ptr(attr, roff); #ifdef STATX_BTIME nt2kernel(std5->cr_time, &ni->i_crtime); #endif nt2kernel(std5->a_time, &inode->i_atime); nt2kernel(std5->c_time, &inode->i_ctime); nt2kernel(std5->m_time, &inode->i_mtime); ni->std_fa = std5->fa; if (asize >= sizeof(struct ATTR_STD_INFO5) + roff && rsize >= sizeof(struct ATTR_STD_INFO5)) ni->std_security_id = std5->security_id; goto next_attr; case ATTR_LIST: if (attr->name_len || le || ino == MFT_REC_LOG) goto out; err = ntfs_load_attr_list(ni, attr); if (err) goto out; le = NULL; attr = NULL; goto next_attr; case ATTR_NAME: if (attr->non_res || asize < SIZEOF_ATTRIBUTE_FILENAME + roff || rsize < SIZEOF_ATTRIBUTE_FILENAME) goto out; names += 1; fname = Add2Ptr(attr, roff); if (fname->type == FILE_NAME_DOS) goto next_attr; links += 1; if (name && name->len == fname->name_len && !ntfs_cmp_names_cpu(name, (struct le_str *)&fname->name_len, NULL, false)) is_match = true; goto next_attr; case ATTR_DATA: if (is_dir) { /* Ignore data attribute in dir record. */ goto next_attr; } if (ino == MFT_REC_BADCLUST && !attr->non_res) goto next_attr; if (attr->name_len && ((ino != MFT_REC_BADCLUST || !attr->non_res || attr->name_len != ARRAY_SIZE(BAD_NAME) || memcmp(attr_name(attr), BAD_NAME, sizeof(BAD_NAME))) && (ino != MFT_REC_SECURE || !attr->non_res || attr->name_len != ARRAY_SIZE(SDS_NAME) || memcmp(attr_name(attr), SDS_NAME, sizeof(SDS_NAME))))) { /* File contains stream attribute. Ignore it. */ goto next_attr; } if (is_attr_sparsed(attr)) ni->std_fa |= FILE_ATTRIBUTE_SPARSE_FILE; else ni->std_fa &= ~FILE_ATTRIBUTE_SPARSE_FILE; if (is_attr_compressed(attr)) ni->std_fa |= FILE_ATTRIBUTE_COMPRESSED; else ni->std_fa &= ~FILE_ATTRIBUTE_COMPRESSED; if (is_attr_encrypted(attr)) ni->std_fa |= FILE_ATTRIBUTE_ENCRYPTED; else ni->std_fa &= ~FILE_ATTRIBUTE_ENCRYPTED; if (!attr->non_res) { ni->i_valid = inode->i_size = rsize; inode_set_bytes(inode, rsize); } mode = S_IFREG | (0777 & sbi->options->fs_fmask_inv); if (!attr->non_res) { ni->ni_flags |= NI_FLAG_RESIDENT; goto next_attr; } inode_set_bytes(inode, attr_ondisk_size(attr)); ni->i_valid = le64_to_cpu(attr->nres.valid_size); inode->i_size = le64_to_cpu(attr->nres.data_size); if (!attr->nres.alloc_size) goto next_attr; run = ino == MFT_REC_BITMAP ? &sbi->used.bitmap.run : &ni->file.run; break; case ATTR_ROOT: if (attr->non_res) goto out; root = Add2Ptr(attr, roff); if (attr->name_len != ARRAY_SIZE(I30_NAME) || memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME))) goto next_attr; if (root->type != ATTR_NAME || root->rule != NTFS_COLLATION_TYPE_FILENAME) goto out; if (!is_dir) goto next_attr; is_root = true; ni->ni_flags |= NI_FLAG_DIR; err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30); if (err) goto out; mode = sb->s_root ? 
(S_IFDIR | (0777 & sbi->options->fs_dmask_inv)) : (S_IFDIR | 0777); goto next_attr; case ATTR_ALLOC: if (!is_root || attr->name_len != ARRAY_SIZE(I30_NAME) || memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME))) goto next_attr; inode->i_size = le64_to_cpu(attr->nres.data_size); ni->i_valid = le64_to_cpu(attr->nres.valid_size); inode_set_bytes(inode, le64_to_cpu(attr->nres.alloc_size)); run = &ni->dir.alloc_run; break; case ATTR_BITMAP: if (ino == MFT_REC_MFT) { if (!attr->non_res) goto out; #ifndef CONFIG_NTFS3_64BIT_CLUSTER /* 0x20000000 = 2^32 / 8 */ if (le64_to_cpu(attr->nres.alloc_size) >= 0x20000000) goto out; #endif run = &sbi->mft.bitmap.run; break; } else if (is_dir && attr->name_len == ARRAY_SIZE(I30_NAME) && !memcmp(attr_name(attr), I30_NAME, sizeof(I30_NAME)) && attr->non_res) { run = &ni->dir.bitmap_run; break; } goto next_attr; case ATTR_REPARSE: if (attr->name_len) goto next_attr; rp_fa = ni_parse_reparse(ni, attr, &rp); switch (rp_fa) { case REPARSE_LINK: /* * Normal symlink. * Assume one unicode symbol == one utf8. */ inode->i_size = le16_to_cpu(rp.SymbolicLinkReparseBuffer .PrintNameLength) / sizeof(u16); ni->i_valid = inode->i_size; /* Clear directory bit. */ if (ni->ni_flags & NI_FLAG_DIR) { indx_clear(&ni->dir); memset(&ni->dir, 0, sizeof(ni->dir)); ni->ni_flags &= ~NI_FLAG_DIR; } else { run_close(&ni->file.run); } mode = S_IFLNK | 0777; is_dir = false; if (attr->non_res) { run = &ni->file.run; goto attr_unpack_run; // Double break. } break; case REPARSE_COMPRESSED: break; case REPARSE_DEDUPLICATED: break; } goto next_attr; case ATTR_EA_INFO: if (!attr->name_len && resident_data_ex(attr, sizeof(struct EA_INFO))) { ni->ni_flags |= NI_FLAG_EA; /* * ntfs_get_wsl_perm updates inode->i_uid, inode->i_gid, inode->i_mode */ inode->i_mode = mode; ntfs_get_wsl_perm(inode); mode = inode->i_mode; } goto next_attr; default: goto next_attr; } attr_unpack_run: roff = le16_to_cpu(attr->nres.run_off); if (roff > asize) { err = -EINVAL; goto out; } t64 = le64_to_cpu(attr->nres.svcn); err = run_unpack_ex(run, sbi, ino, t64, le64_to_cpu(attr->nres.evcn), t64, Add2Ptr(attr, roff), asize - roff); if (err < 0) goto out; err = 0; goto next_attr; end_enum: if (!std5) goto out; if (!is_match && name) { err = -ENOENT; goto out; } if (std5->fa & FILE_ATTRIBUTE_READONLY) mode &= ~0222; if (!names) { err = -EINVAL; goto out; } if (names != le16_to_cpu(rec->hard_links)) { /* Correct minor error on the fly. Do not mark inode as dirty. */ ntfs_inode_warn(inode, "Correct links count -> %u.", names); rec->hard_links = cpu_to_le16(names); ni->mi.dirty = true; } set_nlink(inode, links); if (S_ISDIR(mode)) { ni->std_fa |= FILE_ATTRIBUTE_DIRECTORY; /* * Dot and dot-dot should be included in count but was not * included in enumeration. * Usually a hard links to directories are disabled. */ inode->i_op = &ntfs_dir_inode_operations; inode->i_fop = &ntfs_dir_operations; ni->i_valid = 0; } else if (S_ISLNK(mode)) { ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY; inode->i_op = &ntfs_link_inode_operations; inode->i_fop = NULL; inode_nohighmem(inode); } else if (S_ISREG(mode)) { ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY; inode->i_op = &ntfs_file_inode_operations; inode->i_fop = &ntfs_file_operations; inode->i_mapping->a_ops = is_compressed(ni) ? 
&ntfs_aops_cmpr : &ntfs_aops; if (ino != MFT_REC_MFT) init_rwsem(&ni->file.run_lock); } else if (S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) { inode->i_op = &ntfs_special_inode_operations; init_special_inode(inode, mode, inode->i_rdev); } else if (fname && fname->home.low == cpu_to_le32(MFT_REC_EXTEND) && fname->home.seq == cpu_to_le16(MFT_REC_EXTEND)) { /* Records in $Extend are not a files or general directories. */ inode->i_op = &ntfs_file_inode_operations; } else { err = -EINVAL; goto out; } if ((sbi->options->sys_immutable && (std5->fa & FILE_ATTRIBUTE_SYSTEM)) && !S_ISFIFO(mode) && !S_ISSOCK(mode) && !S_ISLNK(mode)) { inode->i_flags |= S_IMMUTABLE; } else { inode->i_flags &= ~S_IMMUTABLE; } inode->i_mode = mode; if (!(ni->ni_flags & NI_FLAG_EA)) { /* If no xattr then no security (stored in xattr). */ inode->i_flags |= S_NOSEC; } if (ino == MFT_REC_MFT && !sb->s_root) sbi->mft.ni = NULL; unlock_new_inode(inode); return inode; out: if (ino == MFT_REC_MFT && !sb->s_root) sbi->mft.ni = NULL; iget_failed(inode); return ERR_PTR(err); } /* * ntfs_test_inode * * Return: 1 if match. */ static int ntfs_test_inode(struct inode *inode, void *data) { struct MFT_REF *ref = data; return ino_get(ref) == inode->i_ino; } static int ntfs_set_inode(struct inode *inode, void *data) { const struct MFT_REF *ref = data; inode->i_ino = ino_get(ref); return 0; } struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref, const struct cpu_str *name) { struct inode *inode; inode = iget5_locked(sb, ino_get(ref), ntfs_test_inode, ntfs_set_inode, (void *)ref); if (unlikely(!inode)) return ERR_PTR(-ENOMEM); /* If this is a freshly allocated inode, need to read it now. */ if (inode->i_state & I_NEW) inode = ntfs_read_mft(inode, name, ref); else if (ref->seq != ntfs_i(inode)->mi.mrec->seq) { /* Inode overlaps? */ make_bad_inode(inode); } if (IS_ERR(inode) && name) ntfs_set_state(sb->s_fs_info, NTFS_DIRTY_ERROR); return inode; } enum get_block_ctx { GET_BLOCK_GENERAL = 0, GET_BLOCK_WRITE_BEGIN = 1, GET_BLOCK_DIRECT_IO_R = 2, GET_BLOCK_DIRECT_IO_W = 3, GET_BLOCK_BMAP = 4, }; static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo, struct buffer_head *bh, int create, enum get_block_ctx ctx) { struct super_block *sb = inode->i_sb; struct ntfs_sb_info *sbi = sb->s_fs_info; struct ntfs_inode *ni = ntfs_i(inode); struct page *page = bh->b_page; u8 cluster_bits = sbi->cluster_bits; u32 block_size = sb->s_blocksize; u64 bytes, lbo, valid; u32 off; int err; CLST vcn, lcn, len; bool new; /* Clear previous state. */ clear_buffer_new(bh); clear_buffer_uptodate(bh); /* Direct write uses 'create=0'. */ if (!create && vbo >= ni->i_valid) { /* Out of valid. */ return 0; } if (vbo >= inode->i_size) { /* Out of size. */ return 0; } if (is_resident(ni)) { ni_lock(ni); err = attr_data_read_resident(ni, page); ni_unlock(ni); if (!err) set_buffer_uptodate(bh); bh->b_size = block_size; return err; } vcn = vbo >> cluster_bits; off = vbo & sbi->cluster_mask; new = false; err = attr_data_get_block(ni, vcn, 1, &lcn, &len, create ? 
&new : NULL); if (err) goto out; if (!len) return 0; bytes = ((u64)len << cluster_bits) - off; if (lcn == SPARSE_LCN) { if (!create) { if (bh->b_size > bytes) bh->b_size = bytes; return 0; } WARN_ON(1); } if (new) { set_buffer_new(bh); if ((len << cluster_bits) > block_size) ntfs_sparse_cluster(inode, page, vcn, len); } lbo = ((u64)lcn << cluster_bits) + off; set_buffer_mapped(bh); bh->b_bdev = sb->s_bdev; bh->b_blocknr = lbo >> sb->s_blocksize_bits; valid = ni->i_valid; if (ctx == GET_BLOCK_DIRECT_IO_W) { /* ntfs_direct_IO will update ni->i_valid. */ if (vbo >= valid) set_buffer_new(bh); } else if (create) { /* Normal write. */ if (bytes > bh->b_size) bytes = bh->b_size; if (vbo >= valid) set_buffer_new(bh); if (vbo + bytes > valid) { ni->i_valid = vbo + bytes; mark_inode_dirty(inode); } } else if (vbo >= valid) { /* Read out of valid data. */ /* Should never be here 'cause already checked. */ clear_buffer_mapped(bh); } else if (vbo + bytes <= valid) { /* Normal read. */ } else if (vbo + block_size <= valid) { /* Normal short read. */ bytes = block_size; } else { /* * Read across valid size: vbo < valid && valid < vbo + block_size */ bytes = block_size; if (page) { u32 voff = valid - vbo; bh->b_size = block_size; off = vbo & (PAGE_SIZE - 1); set_bh_page(bh, page, off); ll_rw_block(REQ_OP_READ, 0, 1, &bh); wait_on_buffer(bh); if (!buffer_uptodate(bh)) { err = -EIO; goto out; } zero_user_segment(page, off + voff, off + block_size); } } if (bh->b_size > bytes) bh->b_size = bytes; #ifndef __LP64__ if (ctx == GET_BLOCK_DIRECT_IO_W || ctx == GET_BLOCK_DIRECT_IO_R) { static_assert(sizeof(size_t) < sizeof(loff_t)); if (bytes > 0x40000000u) bh->b_size = 0x40000000u; } #endif return 0; out: return err; } int ntfs_get_block(struct inode *inode, sector_t vbn, struct buffer_head *bh_result, int create) { return ntfs_get_block_vbo(inode, (u64)vbn << inode->i_blkbits, bh_result, create, GET_BLOCK_GENERAL); } static int ntfs_get_block_bmap(struct inode *inode, sector_t vsn, struct buffer_head *bh_result, int create) { return ntfs_get_block_vbo(inode, (u64)vsn << inode->i_sb->s_blocksize_bits, bh_result, create, GET_BLOCK_BMAP); } static sector_t ntfs_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping, block, ntfs_get_block_bmap); } static int ntfs_readpage(struct file *file, struct page *page) { int err; struct address_space *mapping = page->mapping; struct inode *inode = mapping->host; struct ntfs_inode *ni = ntfs_i(inode); if (is_resident(ni)) { ni_lock(ni); err = attr_data_read_resident(ni, page); ni_unlock(ni); if (err != E_NTFS_NONRESIDENT) { unlock_page(page); return err; } } if (is_compressed(ni)) { ni_lock(ni); err = ni_readpage_cmpr(ni, page); ni_unlock(ni); return err; } /* Normal + sparse files. */ return mpage_readpage(page, ntfs_get_block); } static void ntfs_readahead(struct readahead_control *rac) { struct address_space *mapping = rac->mapping; struct inode *inode = mapping->host; struct ntfs_inode *ni = ntfs_i(inode); u64 valid; loff_t pos; if (is_resident(ni)) { /* No readahead for resident. */ return; } if (is_compressed(ni)) { /* No readahead for compressed. */ return; } valid = ni->i_valid; pos = readahead_pos(rac); if (valid < i_size_read(inode) && pos <= valid && valid < pos + readahead_length(rac)) { /* Range cross 'valid'. Read it page by page. 
*/ return; } mpage_readahead(rac, ntfs_get_block); } static int ntfs_get_block_direct_IO_R(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits, bh_result, create, GET_BLOCK_DIRECT_IO_R); } static int ntfs_get_block_direct_IO_W(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits, bh_result, create, GET_BLOCK_DIRECT_IO_W); } static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; struct ntfs_inode *ni = ntfs_i(inode); loff_t vbo = iocb->ki_pos; loff_t end; int wr = iov_iter_rw(iter) & WRITE; size_t iter_count = iov_iter_count(iter); loff_t valid; ssize_t ret; if (is_resident(ni)) { /* Switch to buffered write. */ ret = 0; goto out; } ret = blockdev_direct_IO(iocb, inode, iter, wr ? ntfs_get_block_direct_IO_W : ntfs_get_block_direct_IO_R); if (ret > 0) end = vbo + ret; else if (wr && ret == -EIOCBQUEUED) end = vbo + iter_count; else goto out; valid = ni->i_valid; if (wr) { if (end > valid && !S_ISBLK(inode->i_mode)) { ni->i_valid = end; mark_inode_dirty(inode); } } else if (vbo < valid && valid < end) { /* Fix page. */ iov_iter_revert(iter, end - valid); iov_iter_zero(end - valid, iter); } out: return ret; } int ntfs_set_size(struct inode *inode, u64 new_size) { struct super_block *sb = inode->i_sb; struct ntfs_sb_info *sbi = sb->s_fs_info; struct ntfs_inode *ni = ntfs_i(inode); int err; /* Check for maximum file size. */ if (is_sparsed(ni) || is_compressed(ni)) { if (new_size > sbi->maxbytes_sparse) { err = -EFBIG; goto out; } } else if (new_size > sbi->maxbytes) { err = -EFBIG; goto out; } ni_lock(ni); down_write(&ni->file.run_lock); err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size, &ni->i_valid, true, NULL); up_write(&ni->file.run_lock); ni_unlock(ni); mark_inode_dirty(inode); out: return err; } static int ntfs_writepage(struct page *page, struct writeback_control *wbc) { struct address_space *mapping = page->mapping; struct inode *inode = mapping->host; struct ntfs_inode *ni = ntfs_i(inode); int err; if (is_resident(ni)) { ni_lock(ni); err = attr_data_write_resident(ni, page); ni_unlock(ni); if (err != E_NTFS_NONRESIDENT) { unlock_page(page); return err; } } return block_write_full_page(page, ntfs_get_block, wbc); } static int ntfs_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct inode *inode = mapping->host; struct ntfs_inode *ni = ntfs_i(inode); /* Redirect call to 'ntfs_writepage' for resident files. */ get_block_t *get_block = is_resident(ni) ? 
NULL : &ntfs_get_block; return mpage_writepages(mapping, wbc, get_block); } static int ntfs_get_block_write_begin(struct inode *inode, sector_t vbn, struct buffer_head *bh_result, int create) { return ntfs_get_block_vbo(inode, (u64)vbn << inode->i_blkbits, bh_result, create, GET_BLOCK_WRITE_BEGIN); } static int ntfs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, u32 len, u32 flags, struct page **pagep, void **fsdata) { int err; struct inode *inode = mapping->host; struct ntfs_inode *ni = ntfs_i(inode); *pagep = NULL; if (is_resident(ni)) { struct page *page = grab_cache_page_write_begin( mapping, pos >> PAGE_SHIFT, flags); if (!page) { err = -ENOMEM; goto out; } ni_lock(ni); err = attr_data_read_resident(ni, page); ni_unlock(ni); if (!err) { *pagep = page; goto out; } unlock_page(page); put_page(page); if (err != E_NTFS_NONRESIDENT) goto out; } err = block_write_begin(mapping, pos, len, flags, pagep, ntfs_get_block_write_begin); out: return err; } /* * ntfs_write_end - Address_space_operations::write_end. */ static int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos, u32 len, u32 copied, struct page *page, void *fsdata) { struct inode *inode = mapping->host; struct ntfs_inode *ni = ntfs_i(inode); u64 valid = ni->i_valid; bool dirty = false; int err; if (is_resident(ni)) { ni_lock(ni); err = attr_data_write_resident(ni, page); ni_unlock(ni); if (!err) { dirty = true; /* Clear any buffers in page. */ if (page_has_buffers(page)) { struct buffer_head *head, *bh; bh = head = page_buffers(page); do { clear_buffer_dirty(bh); clear_buffer_mapped(bh); set_buffer_uptodate(bh); } while (head != (bh = bh->b_this_page)); } SetPageUptodate(page); err = copied; } unlock_page(page); put_page(page); } else { err = generic_write_end(file, mapping, pos, len, copied, page, fsdata); } if (err >= 0) { if (!(ni->std_fa & FILE_ATTRIBUTE_ARCHIVE)) { inode->i_ctime = inode->i_mtime = current_time(inode); ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE; dirty = true; } if (valid != ni->i_valid) { /* ni->i_valid is changed in ntfs_get_block_vbo. */ dirty = true; } if (dirty) mark_inode_dirty(inode); } return err; } int reset_log_file(struct inode *inode) { int err; loff_t pos = 0; u32 log_size = inode->i_size; struct address_space *mapping = inode->i_mapping; for (;;) { u32 len; void *kaddr; struct page *page; len = pos + PAGE_SIZE > log_size ? (log_size - pos) : PAGE_SIZE; err = block_write_begin(mapping, pos, len, 0, &page, ntfs_get_block_write_begin); if (err) goto out; kaddr = kmap_atomic(page); memset(kaddr, -1, len); kunmap_atomic(kaddr); flush_dcache_page(page); err = block_write_end(NULL, mapping, pos, len, len, page, NULL); if (err < 0) goto out; pos += len; if (pos >= log_size) break; balance_dirty_pages_ratelimited(mapping); } out: mark_inode_dirty_sync(inode); return err; } int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc) { return _ni_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL); } int ntfs_sync_inode(struct inode *inode) { return _ni_write_inode(inode, 1); } /* * writeback_inode - Helper function for ntfs_flush_inodes(). * * This writes both the inode and the file data blocks, waiting * for in flight data blocks before the start of the call. It * does not wait for any io started during the call. */ static int writeback_inode(struct inode *inode) { int ret = sync_inode_metadata(inode, 0); if (!ret) ret = filemap_fdatawrite(inode->i_mapping); return ret; } /* * ntfs_flush_inodes * * Write data and metadata corresponding to i1 and i2. 
The io is * started but we do not wait for any of it to finish. * * filemap_flush() is used for the block device, so if there is a dirty * page for a block already in flight, we will not wait and start the * io over again. */ int ntfs_flush_inodes(struct super_block *sb, struct inode *i1, struct inode *i2) { int ret = 0; if (i1) ret = writeback_inode(i1); if (!ret && i2) ret = writeback_inode(i2); if (!ret) ret = filemap_flush(sb->s_bdev->bd_inode->i_mapping); return ret; } int inode_write_data(struct inode *inode, const void *data, size_t bytes) { pgoff_t idx; /* Write non resident data. */ for (idx = 0; bytes; idx++) { size_t op = bytes > PAGE_SIZE ? PAGE_SIZE : bytes; struct page *page = ntfs_map_page(inode->i_mapping, idx); if (IS_ERR(page)) return PTR_ERR(page); lock_page(page); WARN_ON(!PageUptodate(page)); ClearPageUptodate(page); memcpy(page_address(page), data, op); flush_dcache_page(page); SetPageUptodate(page); unlock_page(page); ntfs_unmap_page(page); bytes -= op; data = Add2Ptr(data, PAGE_SIZE); } return 0; } /* * ntfs_reparse_bytes * * Number of bytes for REPARSE_DATA_BUFFER(IO_REPARSE_TAG_SYMLINK) * for unicode string of @uni_len length. */ static inline u32 ntfs_reparse_bytes(u32 uni_len) { /* Header + unicode string + decorated unicode string. */ return sizeof(short) * (2 * uni_len + 4) + offsetof(struct REPARSE_DATA_BUFFER, SymbolicLinkReparseBuffer.PathBuffer); } static struct REPARSE_DATA_BUFFER * ntfs_create_reparse_buffer(struct ntfs_sb_info *sbi, const char *symname, u32 size, u16 *nsize) { int i, err; struct REPARSE_DATA_BUFFER *rp; __le16 *rp_name; typeof(rp->SymbolicLinkReparseBuffer) *rs; rp = kzalloc(ntfs_reparse_bytes(2 * size + 2), GFP_NOFS); if (!rp) return ERR_PTR(-ENOMEM); rs = &rp->SymbolicLinkReparseBuffer; rp_name = rs->PathBuffer; /* Convert link name to UTF-16. */ err = ntfs_nls_to_utf16(sbi, symname, size, (struct cpu_str *)(rp_name - 1), 2 * size, UTF16_LITTLE_ENDIAN); if (err < 0) goto out; /* err = the length of unicode name of symlink. */ *nsize = ntfs_reparse_bytes(err); if (*nsize > sbi->reparse.max_size) { err = -EFBIG; goto out; } /* Translate Linux '/' into Windows '\'. */ for (i = 0; i < err; i++) { if (rp_name[i] == cpu_to_le16('/')) rp_name[i] = cpu_to_le16('\\'); } rp->ReparseTag = IO_REPARSE_TAG_SYMLINK; rp->ReparseDataLength = cpu_to_le16(*nsize - offsetof(struct REPARSE_DATA_BUFFER, SymbolicLinkReparseBuffer)); /* PrintName + SubstituteName. */ rs->SubstituteNameOffset = cpu_to_le16(sizeof(short) * err); rs->SubstituteNameLength = cpu_to_le16(sizeof(short) * err + 8); rs->PrintNameLength = rs->SubstituteNameOffset; /* * TODO: Use relative path if possible to allow Windows to * parse this path. * 0-absolute path 1- relative path (SYMLINK_FLAG_RELATIVE). */ rs->Flags = 0; memmove(rp_name + err + 4, rp_name, sizeof(short) * err); /* Decorate SubstituteName. 
*/ rp_name += err; rp_name[0] = cpu_to_le16('\\'); rp_name[1] = cpu_to_le16('?'); rp_name[2] = cpu_to_le16('?'); rp_name[3] = cpu_to_le16('\\'); return rp; out: kfree(rp); return ERR_PTR(err); } struct inode *ntfs_create_inode(struct user_namespace *mnt_userns, struct inode *dir, struct dentry *dentry, const struct cpu_str *uni, umode_t mode, dev_t dev, const char *symname, u32 size, struct ntfs_fnd *fnd) { int err; struct super_block *sb = dir->i_sb; struct ntfs_sb_info *sbi = sb->s_fs_info; const struct qstr *name = &dentry->d_name; CLST ino = 0; struct ntfs_inode *dir_ni = ntfs_i(dir); struct ntfs_inode *ni = NULL; struct inode *inode = NULL; struct ATTRIB *attr; struct ATTR_STD_INFO5 *std5; struct ATTR_FILE_NAME *fname; struct MFT_REC *rec; u32 asize, dsize, sd_size; enum FILE_ATTRIBUTE fa; __le32 security_id = SECURITY_ID_INVALID; CLST vcn; const void *sd; u16 t16, nsize = 0, aid = 0; struct INDEX_ROOT *root, *dir_root; struct NTFS_DE *e, *new_de = NULL; struct REPARSE_DATA_BUFFER *rp = NULL; bool rp_inserted = false; ni_lock_dir(dir_ni); dir_root = indx_get_root(&dir_ni->dir, dir_ni, NULL, NULL); if (!dir_root) { err = -EINVAL; goto out1; } if (S_ISDIR(mode)) { /* Use parent's directory attributes. */ fa = dir_ni->std_fa | FILE_ATTRIBUTE_DIRECTORY | FILE_ATTRIBUTE_ARCHIVE; /* * By default child directory inherits parent attributes. * Root directory is hidden + system. * Make an exception for children in root. */ if (dir->i_ino == MFT_REC_ROOT) fa &= ~(FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM); } else if (S_ISLNK(mode)) { /* It is good idea that link should be the same type (file/dir) as target */ fa = FILE_ATTRIBUTE_REPARSE_POINT; /* * Linux: there are dir/file/symlink and so on. * NTFS: symlinks are "dir + reparse" or "file + reparse" * It is good idea to create: * dir + reparse if 'symname' points to directory * or * file + reparse if 'symname' points to file * Unfortunately kern_path hangs if symname contains 'dir'. */ /* * struct path path; * * if (!kern_path(symname, LOOKUP_FOLLOW, &path)){ * struct inode *target = d_inode(path.dentry); * * if (S_ISDIR(target->i_mode)) * fa |= FILE_ATTRIBUTE_DIRECTORY; * // if ( target->i_sb == sb ){ * // use relative path? * // } * path_put(&path); * } */ } else if (S_ISREG(mode)) { if (sbi->options->sparse) { /* Sparsed regular file, cause option 'sparse'. */ fa = FILE_ATTRIBUTE_SPARSE_FILE | FILE_ATTRIBUTE_ARCHIVE; } else if (dir_ni->std_fa & FILE_ATTRIBUTE_COMPRESSED) { /* Compressed regular file, if parent is compressed. */ fa = FILE_ATTRIBUTE_COMPRESSED | FILE_ATTRIBUTE_ARCHIVE; } else { /* Regular file, default attributes. */ fa = FILE_ATTRIBUTE_ARCHIVE; } } else { fa = FILE_ATTRIBUTE_ARCHIVE; } if (!(mode & 0222)) fa |= FILE_ATTRIBUTE_READONLY; /* Allocate PATH_MAX bytes. */ new_de = __getname(); if (!new_de) { err = -ENOMEM; goto out1; } /* Mark rw ntfs as dirty. it will be cleared at umount. */ ntfs_set_state(sbi, NTFS_DIRTY_DIRTY); /* Step 1: allocate and fill new mft record. */ err = ntfs_look_free_mft(sbi, &ino, false, NULL, NULL); if (err) goto out2; ni = ntfs_new_inode(sbi, ino, fa & FILE_ATTRIBUTE_DIRECTORY); if (IS_ERR(ni)) { err = PTR_ERR(ni); ni = NULL; goto out3; } inode = &ni->vfs_inode; inode_init_owner(mnt_userns, inode, dir, mode); mode = inode->i_mode; inode->i_atime = inode->i_mtime = inode->i_ctime = ni->i_crtime = current_time(inode); rec = ni->mi.mrec; rec->hard_links = cpu_to_le16(1); attr = Add2Ptr(rec, le16_to_cpu(rec->attr_off)); /* Get default security id. 
*/ sd = s_default_security; sd_size = sizeof(s_default_security); if (is_ntfs3(sbi)) { security_id = dir_ni->std_security_id; if (le32_to_cpu(security_id) < SECURITY_ID_FIRST) { security_id = sbi->security.def_security_id; if (security_id == SECURITY_ID_INVALID && !ntfs_insert_security(sbi, sd, sd_size, &security_id, NULL)) sbi->security.def_security_id = security_id; } } /* Insert standard info. */ std5 = Add2Ptr(attr, SIZEOF_RESIDENT); if (security_id == SECURITY_ID_INVALID) { dsize = sizeof(struct ATTR_STD_INFO); } else { dsize = sizeof(struct ATTR_STD_INFO5); std5->security_id = security_id; ni->std_security_id = security_id; } asize = SIZEOF_RESIDENT + dsize; attr->type = ATTR_STD; attr->size = cpu_to_le32(asize); attr->id = cpu_to_le16(aid++); attr->res.data_off = SIZEOF_RESIDENT_LE; attr->res.data_size = cpu_to_le32(dsize); std5->cr_time = std5->m_time = std5->c_time = std5->a_time = kernel2nt(&inode->i_atime); ni->std_fa = fa; std5->fa = fa; attr = Add2Ptr(attr, asize); /* Insert file name. */ err = fill_name_de(sbi, new_de, name, uni); if (err) goto out4; mi_get_ref(&ni->mi, &new_de->ref); fname = (struct ATTR_FILE_NAME *)(new_de + 1); mi_get_ref(&dir_ni->mi, &fname->home); fname->dup.cr_time = fname->dup.m_time = fname->dup.c_time = fname->dup.a_time = std5->cr_time; fname->dup.alloc_size = fname->dup.data_size = 0; fname->dup.fa = std5->fa; fname->dup.ea_size = fname->dup.reparse = 0; dsize = le16_to_cpu(new_de->key_size); asize = ALIGN(SIZEOF_RESIDENT + dsize, 8); attr->type = ATTR_NAME; attr->size = cpu_to_le32(asize); attr->res.data_off = SIZEOF_RESIDENT_LE; attr->res.flags = RESIDENT_FLAG_INDEXED; attr->id = cpu_to_le16(aid++); attr->res.data_size = cpu_to_le32(dsize); memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), fname, dsize); attr = Add2Ptr(attr, asize); if (security_id == SECURITY_ID_INVALID) { /* Insert security attribute. */ asize = SIZEOF_RESIDENT + ALIGN(sd_size, 8); attr->type = ATTR_SECURE; attr->size = cpu_to_le32(asize); attr->id = cpu_to_le16(aid++); attr->res.data_off = SIZEOF_RESIDENT_LE; attr->res.data_size = cpu_to_le32(sd_size); memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), sd, sd_size); attr = Add2Ptr(attr, asize); } attr->id = cpu_to_le16(aid++); if (fa & FILE_ATTRIBUTE_DIRECTORY) { /* * Regular directory or symlink to directory. * Create root attribute. */ dsize = sizeof(struct INDEX_ROOT) + sizeof(struct NTFS_DE); asize = sizeof(I30_NAME) + SIZEOF_RESIDENT + dsize; attr->type = ATTR_ROOT; attr->size = cpu_to_le32(asize); attr->name_len = ARRAY_SIZE(I30_NAME); attr->name_off = SIZEOF_RESIDENT_LE; attr->res.data_off = cpu_to_le16(sizeof(I30_NAME) + SIZEOF_RESIDENT); attr->res.data_size = cpu_to_le32(dsize); memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), I30_NAME, sizeof(I30_NAME)); root = Add2Ptr(attr, sizeof(I30_NAME) + SIZEOF_RESIDENT); memcpy(root, dir_root, offsetof(struct INDEX_ROOT, ihdr)); root->ihdr.de_off = cpu_to_le32(sizeof(struct INDEX_HDR)); // 0x10 root->ihdr.used = cpu_to_le32(sizeof(struct INDEX_HDR) + sizeof(struct NTFS_DE)); root->ihdr.total = root->ihdr.used; e = Add2Ptr(root, sizeof(struct INDEX_ROOT)); e->size = cpu_to_le16(sizeof(struct NTFS_DE)); e->flags = NTFS_IE_LAST; } else if (S_ISLNK(mode)) { /* * Symlink to file. * Create empty resident data attribute. */ asize = SIZEOF_RESIDENT; /* Insert empty ATTR_DATA */ attr->type = ATTR_DATA; attr->size = cpu_to_le32(SIZEOF_RESIDENT); attr->name_off = SIZEOF_RESIDENT_LE; attr->res.data_off = SIZEOF_RESIDENT_LE; } else if (S_ISREG(mode)) { /* * Regular file. Create empty non resident data attribute. 
*/ attr->type = ATTR_DATA; attr->non_res = 1; attr->nres.evcn = cpu_to_le64(-1ll); if (fa & FILE_ATTRIBUTE_SPARSE_FILE) { attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8); attr->name_off = SIZEOF_NONRESIDENT_EX_LE; attr->flags = ATTR_FLAG_SPARSED; asize = SIZEOF_NONRESIDENT_EX + 8; } else if (fa & FILE_ATTRIBUTE_COMPRESSED) { attr->size = cpu_to_le32(SIZEOF_NONRESIDENT_EX + 8); attr->name_off = SIZEOF_NONRESIDENT_EX_LE; attr->flags = ATTR_FLAG_COMPRESSED; attr->nres.c_unit = NTFS_LZNT_CUNIT; asize = SIZEOF_NONRESIDENT_EX + 8; } else { attr->size = cpu_to_le32(SIZEOF_NONRESIDENT + 8); attr->name_off = SIZEOF_NONRESIDENT_LE; asize = SIZEOF_NONRESIDENT + 8; } attr->nres.run_off = attr->name_off; } else { /* * Node. Create empty resident data attribute. */ attr->type = ATTR_DATA; attr->size = cpu_to_le32(SIZEOF_RESIDENT); attr->name_off = SIZEOF_RESIDENT_LE; if (fa & FILE_ATTRIBUTE_SPARSE_FILE) attr->flags = ATTR_FLAG_SPARSED; else if (fa & FILE_ATTRIBUTE_COMPRESSED) attr->flags = ATTR_FLAG_COMPRESSED; attr->res.data_off = SIZEOF_RESIDENT_LE; asize = SIZEOF_RESIDENT; ni->ni_flags |= NI_FLAG_RESIDENT; } if (S_ISDIR(mode)) { ni->ni_flags |= NI_FLAG_DIR; err = indx_init(&ni->dir, sbi, attr, INDEX_MUTEX_I30); if (err) goto out4; } else if (S_ISLNK(mode)) { rp = ntfs_create_reparse_buffer(sbi, symname, size, &nsize); if (IS_ERR(rp)) { err = PTR_ERR(rp); rp = NULL; goto out4; } /* * Insert ATTR_REPARSE. */ attr = Add2Ptr(attr, asize); attr->type = ATTR_REPARSE; attr->id = cpu_to_le16(aid++); /* Resident or non resident? */ asize = ALIGN(SIZEOF_RESIDENT + nsize, 8); t16 = PtrOffset(rec, attr); /* * Below function 'ntfs_save_wsl_perm' requires 0x78 bytes. * It is good idea to keep extened attributes resident. */ if (asize + t16 + 0x78 + 8 > sbi->record_size) { CLST alen; CLST clst = bytes_to_cluster(sbi, nsize); /* Bytes per runs. */ t16 = sbi->record_size - t16 - SIZEOF_NONRESIDENT; attr->non_res = 1; attr->nres.evcn = cpu_to_le64(clst - 1); attr->name_off = SIZEOF_NONRESIDENT_LE; attr->nres.run_off = attr->name_off; attr->nres.data_size = cpu_to_le64(nsize); attr->nres.valid_size = attr->nres.data_size; attr->nres.alloc_size = cpu_to_le64(ntfs_up_cluster(sbi, nsize)); err = attr_allocate_clusters(sbi, &ni->file.run, 0, 0, clst, NULL, 0, &alen, 0, NULL); if (err) goto out5; err = run_pack(&ni->file.run, 0, clst, Add2Ptr(attr, SIZEOF_NONRESIDENT), t16, &vcn); if (err < 0) goto out5; if (vcn != clst) { err = -EINVAL; goto out5; } asize = SIZEOF_NONRESIDENT + ALIGN(err, 8); } else { attr->res.data_off = SIZEOF_RESIDENT_LE; attr->res.data_size = cpu_to_le32(nsize); memcpy(Add2Ptr(attr, SIZEOF_RESIDENT), rp, nsize); nsize = 0; } /* Size of symlink equals the length of input string. */ inode->i_size = size; attr->size = cpu_to_le32(asize); err = ntfs_insert_reparse(sbi, IO_REPARSE_TAG_SYMLINK, &new_de->ref); if (err) goto out5; rp_inserted = true; } attr = Add2Ptr(attr, asize); attr->type = ATTR_END; rec->used = cpu_to_le32(PtrOffset(rec, attr) + 8); rec->next_attr_id = cpu_to_le16(aid); /* Step 2: Add new name in index. */ err = indx_insert_entry(&dir_ni->dir, dir_ni, new_de, sbi, fnd, 0); if (err) goto out6; /* Unlock parent directory before ntfs_init_acl. 
*/ ni_unlock(dir_ni); inode->i_generation = le16_to_cpu(rec->seq); dir->i_mtime = dir->i_ctime = inode->i_atime; if (S_ISDIR(mode)) { inode->i_op = &ntfs_dir_inode_operations; inode->i_fop = &ntfs_dir_operations; } else if (S_ISLNK(mode)) { inode->i_op = &ntfs_link_inode_operations; inode->i_fop = NULL; inode->i_mapping->a_ops = &ntfs_aops; inode->i_size = size; inode_nohighmem(inode); } else if (S_ISREG(mode)) { inode->i_op = &ntfs_file_inode_operations; inode->i_fop = &ntfs_file_operations; inode->i_mapping->a_ops = is_compressed(ni) ? &ntfs_aops_cmpr : &ntfs_aops; init_rwsem(&ni->file.run_lock); } else { inode->i_op = &ntfs_special_inode_operations; init_special_inode(inode, mode, dev); } #ifdef CONFIG_NTFS3_FS_POSIX_ACL if (!S_ISLNK(mode) && (sb->s_flags & SB_POSIXACL)) { err = ntfs_init_acl(mnt_userns, inode, dir); if (err) goto out7; } else #endif { inode->i_flags |= S_NOSEC; } /* Write non resident data. */ if (nsize) { err = ntfs_sb_write_run(sbi, &ni->file.run, 0, rp, nsize, 0); if (err) goto out7; } /* * Call 'd_instantiate' after inode->i_op is set * but before finish_open. */ d_instantiate(dentry, inode); ntfs_save_wsl_perm(inode); mark_inode_dirty(dir); mark_inode_dirty(inode); /* Normal exit. */ goto out2; out7: /* Undo 'indx_insert_entry'. */ ni_lock_dir(dir_ni); indx_delete_entry(&dir_ni->dir, dir_ni, new_de + 1, le16_to_cpu(new_de->key_size), sbi); /* ni_unlock(dir_ni); will be called later. */ out6: if (rp_inserted) ntfs_remove_reparse(sbi, IO_REPARSE_TAG_SYMLINK, &new_de->ref); out5: if (!S_ISDIR(mode)) run_deallocate(sbi, &ni->file.run, false); out4: clear_rec_inuse(rec); clear_nlink(inode); ni->mi.dirty = false; discard_new_inode(inode); out3: ntfs_mark_rec_free(sbi, ino); out2: __putname(new_de); kfree(rp); out1: if (err) { ni_unlock(dir_ni); return ERR_PTR(err); } unlock_new_inode(inode); return inode; } int ntfs_link_inode(struct inode *inode, struct dentry *dentry) { int err; struct ntfs_inode *ni = ntfs_i(inode); struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info; struct NTFS_DE *de; struct ATTR_FILE_NAME *de_name; /* Allocate PATH_MAX bytes. */ de = __getname(); if (!de) return -ENOMEM; /* Mark rw ntfs as dirty. It will be cleared at umount. */ ntfs_set_state(sbi, NTFS_DIRTY_DIRTY); /* Construct 'de'. */ err = fill_name_de(sbi, de, &dentry->d_name, NULL); if (err) goto out; de_name = (struct ATTR_FILE_NAME *)(de + 1); /* Fill duplicate info. */ de_name->dup.cr_time = de_name->dup.m_time = de_name->dup.c_time = de_name->dup.a_time = kernel2nt(&inode->i_ctime); de_name->dup.alloc_size = de_name->dup.data_size = cpu_to_le64(inode->i_size); de_name->dup.fa = ni->std_fa; de_name->dup.ea_size = de_name->dup.reparse = 0; err = ni_add_name(ntfs_i(d_inode(dentry->d_parent)), ni, de); out: __putname(de); return err; } /* * ntfs_unlink_inode * * inode_operations::unlink * inode_operations::rmdir */ int ntfs_unlink_inode(struct inode *dir, const struct dentry *dentry) { int err; struct ntfs_sb_info *sbi = dir->i_sb->s_fs_info; struct inode *inode = d_inode(dentry); struct ntfs_inode *ni = ntfs_i(inode); struct ntfs_inode *dir_ni = ntfs_i(dir); struct NTFS_DE *de, *de2 = NULL; int undo_remove; if (ntfs_is_meta_file(sbi, ni->mi.rno)) return -EINVAL; /* Allocate PATH_MAX bytes. 
*/ de = __getname(); if (!de) return -ENOMEM; ni_lock(ni); if (S_ISDIR(inode->i_mode) && !dir_is_empty(inode)) { err = -ENOTEMPTY; goto out; } err = fill_name_de(sbi, de, &dentry->d_name, NULL); if (err < 0) goto out; undo_remove = 0; err = ni_remove_name(dir_ni, ni, de, &de2, &undo_remove); if (!err) { drop_nlink(inode); dir->i_mtime = dir->i_ctime = current_time(dir); mark_inode_dirty(dir); inode->i_ctime = dir->i_ctime; if (inode->i_nlink) mark_inode_dirty(inode); } else if (!ni_remove_name_undo(dir_ni, ni, de, de2, undo_remove)) { make_bad_inode(inode); ntfs_inode_err(inode, "failed to undo unlink"); ntfs_set_state(sbi, NTFS_DIRTY_ERROR); } else { if (ni_is_dirty(dir)) mark_inode_dirty(dir); if (ni_is_dirty(inode)) mark_inode_dirty(inode); } out: ni_unlock(ni); __putname(de); return err; } void ntfs_evict_inode(struct inode *inode) { truncate_inode_pages_final(&inode->i_data); if (inode->i_nlink) _ni_write_inode(inode, inode_needs_sync(inode)); invalidate_inode_buffers(inode); clear_inode(inode); ni_clear(ntfs_i(inode)); } static noinline int ntfs_readlink_hlp(struct inode *inode, char *buffer, int buflen) { int i, err = -EINVAL; struct ntfs_inode *ni = ntfs_i(inode); struct super_block *sb = inode->i_sb; struct ntfs_sb_info *sbi = sb->s_fs_info; u64 size; u16 ulen = 0; void *to_free = NULL; struct REPARSE_DATA_BUFFER *rp; const __le16 *uname; struct ATTRIB *attr; /* Reparse data present. Try to parse it. */ static_assert(!offsetof(struct REPARSE_DATA_BUFFER, ReparseTag)); static_assert(sizeof(u32) == sizeof(rp->ReparseTag)); *buffer = 0; attr = ni_find_attr(ni, NULL, NULL, ATTR_REPARSE, NULL, 0, NULL, NULL); if (!attr) goto out; if (!attr->non_res) { rp = resident_data_ex(attr, sizeof(struct REPARSE_DATA_BUFFER)); if (!rp) goto out; size = le32_to_cpu(attr->res.data_size); } else { size = le64_to_cpu(attr->nres.data_size); rp = NULL; } if (size > sbi->reparse.max_size || size <= sizeof(u32)) goto out; if (!rp) { rp = kmalloc(size, GFP_NOFS); if (!rp) { err = -ENOMEM; goto out; } to_free = rp; /* Read into temporal buffer. */ err = ntfs_read_run_nb(sbi, &ni->file.run, 0, rp, size, NULL); if (err) goto out; } /* Microsoft Tag. */ switch (rp->ReparseTag) { case IO_REPARSE_TAG_MOUNT_POINT: /* Mount points and junctions. */ /* Can we use 'Rp->MountPointReparseBuffer.PrintNameLength'? */ if (size <= offsetof(struct REPARSE_DATA_BUFFER, MountPointReparseBuffer.PathBuffer)) goto out; uname = Add2Ptr(rp, offsetof(struct REPARSE_DATA_BUFFER, MountPointReparseBuffer.PathBuffer) + le16_to_cpu(rp->MountPointReparseBuffer .PrintNameOffset)); ulen = le16_to_cpu(rp->MountPointReparseBuffer.PrintNameLength); break; case IO_REPARSE_TAG_SYMLINK: /* FolderSymbolicLink */ /* Can we use 'Rp->SymbolicLinkReparseBuffer.PrintNameLength'? 
*/ if (size <= offsetof(struct REPARSE_DATA_BUFFER, SymbolicLinkReparseBuffer.PathBuffer)) goto out; uname = Add2Ptr( rp, offsetof(struct REPARSE_DATA_BUFFER, SymbolicLinkReparseBuffer.PathBuffer) + le16_to_cpu(rp->SymbolicLinkReparseBuffer .PrintNameOffset)); ulen = le16_to_cpu( rp->SymbolicLinkReparseBuffer.PrintNameLength); break; case IO_REPARSE_TAG_CLOUD: case IO_REPARSE_TAG_CLOUD_1: case IO_REPARSE_TAG_CLOUD_2: case IO_REPARSE_TAG_CLOUD_3: case IO_REPARSE_TAG_CLOUD_4: case IO_REPARSE_TAG_CLOUD_5: case IO_REPARSE_TAG_CLOUD_6: case IO_REPARSE_TAG_CLOUD_7: case IO_REPARSE_TAG_CLOUD_8: case IO_REPARSE_TAG_CLOUD_9: case IO_REPARSE_TAG_CLOUD_A: case IO_REPARSE_TAG_CLOUD_B: case IO_REPARSE_TAG_CLOUD_C: case IO_REPARSE_TAG_CLOUD_D: case IO_REPARSE_TAG_CLOUD_E: case IO_REPARSE_TAG_CLOUD_F: err = sizeof("OneDrive") - 1; if (err > buflen) err = buflen; memcpy(buffer, "OneDrive", err); goto out; default: if (IsReparseTagMicrosoft(rp->ReparseTag)) { /* Unknown Microsoft Tag. */ goto out; } if (!IsReparseTagNameSurrogate(rp->ReparseTag) || size <= sizeof(struct REPARSE_POINT)) { goto out; } /* Users tag. */ uname = Add2Ptr(rp, sizeof(struct REPARSE_POINT)); ulen = le16_to_cpu(rp->ReparseDataLength) - sizeof(struct REPARSE_POINT); } /* Convert nlen from bytes to UNICODE chars. */ ulen >>= 1; /* Check that name is available. */ if (!ulen || uname + ulen > (__le16 *)Add2Ptr(rp, size)) goto out; /* If name is already zero terminated then truncate it now. */ if (!uname[ulen - 1]) ulen -= 1; err = ntfs_utf16_to_nls(sbi, uname, ulen, buffer, buflen); if (err < 0) goto out; /* Translate Windows '\' into Linux '/'. */ for (i = 0; i < err; i++) { if (buffer[i] == '\\') buffer[i] = '/'; } /* Always set last zero. */ buffer[err] = 0; out: kfree(to_free); return err; } static const char *ntfs_get_link(struct dentry *de, struct inode *inode, struct delayed_call *done) { int err; char *ret; if (!de) return ERR_PTR(-ECHILD); ret = kmalloc(PAGE_SIZE, GFP_NOFS); if (!ret) return ERR_PTR(-ENOMEM); err = ntfs_readlink_hlp(inode, ret, PAGE_SIZE); if (err < 0) { kfree(ret); return ERR_PTR(err); } set_delayed_call(done, kfree_link, ret); return ret; } // clang-format off const struct inode_operations ntfs_link_inode_operations = { .get_link = ntfs_get_link, .setattr = ntfs3_setattr, .listxattr = ntfs_listxattr, .permission = ntfs_permission, }; const struct address_space_operations ntfs_aops = { .readpage = ntfs_readpage, .readahead = ntfs_readahead, .writepage = ntfs_writepage, .writepages = ntfs_writepages, .write_begin = ntfs_write_begin, .write_end = ntfs_write_end, .direct_IO = ntfs_direct_IO, .bmap = ntfs_bmap, .set_page_dirty = __set_page_dirty_buffers, }; const struct address_space_operations ntfs_aops_cmpr = { .readpage = ntfs_readpage, .readahead = ntfs_readahead, }; // clang-format on |
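/*
 * Illustrative userspace sketch (not part of the filesystem code above):
 * it only mirrors the size arithmetic of ntfs_reparse_bytes() and the
 * PathBuffer layout built by ntfs_create_reparse_buffer() -- PrintName
 * first, then a SubstituteName that repeats the name behind a "\??\"
 * prefix. The struct and helper names below are simplified stand-ins
 * assumed for the example, not the kernel's REPARSE_DATA_BUFFER.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct symlink_reparse_hdr {
	uint32_t reparse_tag;
	uint16_t reparse_data_length;
	uint16_t reserved;
	uint16_t substitute_name_offset;
	uint16_t substitute_name_length;
	uint16_t print_name_offset;
	uint16_t print_name_length;
	uint32_t flags;
	uint16_t path_buffer[];	/* PrintName, then "\??\" + PrintName */
};

/*
 * Same formula as ntfs_reparse_bytes(): two UTF-16 copies of the name
 * plus four extra units for the "\??\" decoration, after the header.
 */
static size_t reparse_bytes(size_t uni_len)
{
	return sizeof(uint16_t) * (2 * uni_len + 4) +
	       offsetof(struct symlink_reparse_hdr, path_buffer);
}

int main(void)
{
	size_t uni_len = 3;	/* e.g. a 3-character link target */

	printf("header         : %zu bytes\n",
	       offsetof(struct symlink_reparse_hdr, path_buffer));
	printf("PrintName      : %zu bytes at PathBuffer offset 0\n",
	       2 * uni_len);
	printf("SubstituteName : %zu bytes at PathBuffer offset %zu\n",
	       2 * uni_len + 8, 2 * uni_len);
	printf("total          : %zu bytes\n", reparse_bytes(uni_len));
	return 0;
}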
// SPDX-License-Identifier: GPL-2.0-only /* * MAC commands interface * * Copyright 2007-2012 Siemens AG * * Written by: * Sergey Lapin <slapin@ossfans.org> * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> */ #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/ieee802154.h> #include <net/ieee802154_netdev.h> #include <net/cfg802154.h> #include <net/mac802154.h> #include "ieee802154_i.h" #include "driver-ops.h" static int mac802154_mlme_start_req(struct net_device *dev, struct ieee802154_addr *addr, u8 channel, u8 page, u8 bcn_ord, u8 sf_ord, u8 pan_coord, u8 blx, u8 coord_realign) { struct ieee802154_llsec_params params; int changed = 0; ASSERT_RTNL(); BUG_ON(addr->mode != IEEE802154_ADDR_SHORT); dev->ieee802154_ptr->pan_id = addr->pan_id; dev->ieee802154_ptr->short_addr = addr->short_addr; mac802154_dev_set_page_channel(dev, page, channel); params.pan_id = addr->pan_id; changed |= IEEE802154_LLSEC_PARAM_PAN_ID; params.hwaddr = ieee802154_devaddr_from_raw(dev->dev_addr); changed |= IEEE802154_LLSEC_PARAM_HWADDR; params.coord_hwaddr = params.hwaddr; changed |= IEEE802154_LLSEC_PARAM_COORD_HWADDR; params.coord_shortaddr = addr->short_addr; changed |= IEEE802154_LLSEC_PARAM_COORD_SHORTADDR; return mac802154_set_params(dev, &params, changed); } static int mac802154_set_mac_params(struct net_device *dev, const struct ieee802154_mac_params *params) { struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); struct ieee802154_local *local = sdata->local; struct wpan_dev *wpan_dev = &sdata->wpan_dev; int ret; ASSERT_RTNL(); /* PHY */ wpan_dev->wpan_phy->transmit_power = params->transmit_power; wpan_dev->wpan_phy->cca = params->cca; wpan_dev->wpan_phy->cca_ed_level = params->cca_ed_level; /* MAC */ wpan_dev->min_be = params->min_be; wpan_dev->max_be = params->max_be; wpan_dev->csma_retries = params->csma_retries; wpan_dev->frame_retries = params->frame_retries; wpan_dev->lbt = params->lbt; if (local->hw.phy->flags & WPAN_PHY_FLAG_TXPOWER) { ret = drv_set_tx_power(local, params->transmit_power); if (ret < 0) return ret; } if (local->hw.phy->flags & WPAN_PHY_FLAG_CCA_MODE) { ret = drv_set_cca_mode(local, &params->cca); if (ret < 0) return ret; } if (local->hw.phy->flags & WPAN_PHY_FLAG_CCA_ED_LEVEL) { ret = drv_set_cca_ed_level(local, params->cca_ed_level); if (ret < 0) return ret; } return 0; } static void mac802154_get_mac_params(struct net_device *dev, struct ieee802154_mac_params *params) { struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); struct wpan_dev *wpan_dev = &sdata->wpan_dev; ASSERT_RTNL(); /* PHY */ params->transmit_power = wpan_dev->wpan_phy->transmit_power; params->cca = wpan_dev->wpan_phy->cca; params->cca_ed_level = wpan_dev->wpan_phy->cca_ed_level; /* MAC */ params->min_be = wpan_dev->min_be; params->max_be = wpan_dev->max_be; params->csma_retries = wpan_dev->csma_retries; params->frame_retries = wpan_dev->frame_retries; params->lbt = wpan_dev->lbt; } static const struct ieee802154_llsec_ops mac802154_llsec_ops = {
.get_params = mac802154_get_params, .set_params = mac802154_set_params, .add_key = mac802154_add_key, .del_key = mac802154_del_key, .add_dev = mac802154_add_dev, .del_dev = mac802154_del_dev, .add_devkey = mac802154_add_devkey, .del_devkey = mac802154_del_devkey, .add_seclevel = mac802154_add_seclevel, .del_seclevel = mac802154_del_seclevel, .lock_table = mac802154_lock_table, .get_table = mac802154_get_table, .unlock_table = mac802154_unlock_table, }; struct ieee802154_mlme_ops mac802154_mlme_wpan = { .start_req = mac802154_mlme_start_req, .llsec = &mac802154_llsec_ops, .set_mac_params = mac802154_set_mac_params, .get_mac_params = mac802154_get_mac_params, }; |
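/*
 * Illustrative sketch (hypothetical userspace code, not mac802154 API):
 * the "fill a params struct, OR a 'changed' bitmask, hand both to a
 * single setter" pattern that mac802154_mlme_start_req() uses with
 * mac802154_set_params(). All names below are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

enum {
	DEMO_PARAM_PAN_ID     = 1 << 0,
	DEMO_PARAM_SHORT_ADDR = 1 << 1,
};

struct demo_params {
	uint16_t pan_id;
	uint16_t short_addr;
};

/* Apply only the fields whose bit is set in @changed. */
static void demo_set_params(struct demo_params *cur,
			    const struct demo_params *req, uint32_t changed)
{
	if (changed & DEMO_PARAM_PAN_ID)
		cur->pan_id = req->pan_id;
	if (changed & DEMO_PARAM_SHORT_ADDR)
		cur->short_addr = req->short_addr;
}

int main(void)
{
	struct demo_params cur = { .pan_id = 0xffff, .short_addr = 0xffff };
	struct demo_params req = { 0 };
	uint32_t changed = 0;

	req.pan_id = 0x1234;
	changed |= DEMO_PARAM_PAN_ID;	/* short_addr deliberately untouched */

	demo_set_params(&cur, &req, changed);
	printf("pan_id=0x%04x short_addr=0x%04x\n", cur.pan_id, cur.short_addr);
	return 0;
}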
// SPDX-License-Identifier: GPL-2.0-only /* * Module for modifying the secmark field of the skb, for use by * security subsystems. * * Based on the nfmark match by: * (C) 1999-2001 Marc Boucher <marc@mbsi.ca> * * (C) 2006,2008 Red Hat, Inc., James Morris <jmorris@redhat.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/security.h> #include <linux/skbuff.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_SECMARK.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("James Morris <jmorris@redhat.com>"); MODULE_DESCRIPTION("Xtables: packet security mark modification"); MODULE_ALIAS("ipt_SECMARK"); MODULE_ALIAS("ip6t_SECMARK"); static u8 mode; static unsigned int secmark_tg(struct sk_buff *skb, const struct xt_secmark_target_info_v1 *info) { u32 secmark = 0; switch (mode) { case SECMARK_MODE_SEL: secmark = info->secid; break; default: BUG(); } skb->secmark = secmark; return XT_CONTINUE; } static int checkentry_lsm(struct xt_secmark_target_info_v1 *info) { int err; info->secctx[SECMARK_SECCTX_MAX - 1] = '\0'; info->secid = 0; err = security_secctx_to_secid(info->secctx, strlen(info->secctx), &info->secid); if (err) { if (err == -EINVAL) pr_info_ratelimited("invalid security context \'%s\'\n", info->secctx); return err; } if (!info->secid) { pr_info_ratelimited("unable to map security context \'%s\'\n", info->secctx); return -ENOENT; } err = security_secmark_relabel_packet(info->secid); if (err) { pr_info_ratelimited("unable to obtain relabeling permission\n"); return err; } security_secmark_refcount_inc(); return 0; } static int secmark_tg_check(const char *table, struct xt_secmark_target_info_v1 *info) { int err; if (strcmp(table, "mangle") != 0 && strcmp(table, "security") != 0) { pr_info_ratelimited("only valid in \'mangle\' or \'security\' table, not \'%s\'\n", table); return -EINVAL; } if (mode && mode != info->mode) { pr_info_ratelimited("mode already set to %hu cannot mix with rules for mode %hu\n", mode, info->mode); return -EINVAL; } switch (info->mode) { case SECMARK_MODE_SEL: break; default: pr_info_ratelimited("invalid mode: %hu\n", info->mode); return -EINVAL; } err = checkentry_lsm(info); if (err) return err; if (!mode) mode = info->mode; return 0; } static void secmark_tg_destroy(const struct xt_tgdtor_param *par) { switch (mode) { case SECMARK_MODE_SEL: security_secmark_refcount_dec(); } } static int secmark_tg_check_v0(const struct xt_tgchk_param *par) { struct xt_secmark_target_info *info = par->targinfo; struct xt_secmark_target_info_v1 newinfo = { .mode = info->mode, }; int ret; memcpy(newinfo.secctx, info->secctx, SECMARK_SECCTX_MAX); ret = secmark_tg_check(par->table, &newinfo); info->secid = newinfo.secid; return ret; } static unsigned int secmark_tg_v0(struct
sk_buff *skb, const struct xt_action_param *par) { const struct xt_secmark_target_info *info = par->targinfo; struct xt_secmark_target_info_v1 newinfo = { .secid = info->secid, }; return secmark_tg(skb, &newinfo); } static int secmark_tg_check_v1(const struct xt_tgchk_param *par) { return secmark_tg_check(par->table, par->targinfo); } static unsigned int secmark_tg_v1(struct sk_buff *skb, const struct xt_action_param *par) { return secmark_tg(skb, par->targinfo); } static struct xt_target secmark_tg_reg[] __read_mostly = { { .name = "SECMARK", .revision = 0, .family = NFPROTO_IPV4, .checkentry = secmark_tg_check_v0, .destroy = secmark_tg_destroy, .target = secmark_tg_v0, .targetsize = sizeof(struct xt_secmark_target_info), .me = THIS_MODULE, }, { .name = "SECMARK", .revision = 1, .family = NFPROTO_IPV4, .checkentry = secmark_tg_check_v1, .destroy = secmark_tg_destroy, .target = secmark_tg_v1, .targetsize = sizeof(struct xt_secmark_target_info_v1), .usersize = offsetof(struct xt_secmark_target_info_v1, secid), .me = THIS_MODULE, }, #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) { .name = "SECMARK", .revision = 0, .family = NFPROTO_IPV6, .checkentry = secmark_tg_check_v0, .destroy = secmark_tg_destroy, .target = secmark_tg_v0, .targetsize = sizeof(struct xt_secmark_target_info), .me = THIS_MODULE, }, { .name = "SECMARK", .revision = 1, .family = NFPROTO_IPV6, .checkentry = secmark_tg_check_v1, .destroy = secmark_tg_destroy, .target = secmark_tg_v1, .targetsize = sizeof(struct xt_secmark_target_info_v1), .usersize = offsetof(struct xt_secmark_target_info_v1, secid), .me = THIS_MODULE, }, #endif }; static int __init secmark_tg_init(void) { return xt_register_targets(secmark_tg_reg, ARRAY_SIZE(secmark_tg_reg)); } static void __exit secmark_tg_exit(void) { xt_unregister_targets(secmark_tg_reg, ARRAY_SIZE(secmark_tg_reg)); } module_init(secmark_tg_init); module_exit(secmark_tg_exit); |
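/*
 * Illustrative sketch (hypothetical userspace code): the revision-0 ->
 * revision-1 shim used by secmark_tg_check_v0()/secmark_tg_v0() above.
 * The old layout is copied into the newest layout, the shared helper
 * runs once, and the computed secid is copied back so old userspace
 * binaries keep working. Structs and helpers here are invented for the
 * example; demo_check_v1() only stands in for the real LSM lookup.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_CTX_MAX 64

struct demo_info_v0 {		/* old ABI: secid before the context string */
	uint8_t  mode;
	uint32_t secid;
	char     secctx[DEMO_CTX_MAX];
};

struct demo_info_v1 {		/* new ABI: secid moved to the end */
	uint8_t  mode;
	char     secctx[DEMO_CTX_MAX];
	uint32_t secid;
};

/* Shared checker that only understands the newest layout. */
static int demo_check_v1(struct demo_info_v1 *info)
{
	info->secctx[DEMO_CTX_MAX - 1] = '\0';
	info->secid = (uint32_t)strlen(info->secctx);	/* stand-in lookup */
	return info->secid ? 0 : -1;
}

/* v0 entry point: translate, run the shared code, copy results back. */
static int demo_check_v0(struct demo_info_v0 *info)
{
	struct demo_info_v1 newinfo = { .mode = info->mode };
	int ret;

	memcpy(newinfo.secctx, info->secctx, DEMO_CTX_MAX);
	ret = demo_check_v1(&newinfo);
	info->secid = newinfo.secid;
	return ret;
}

int main(void)
{
	struct demo_info_v0 old = { .mode = 1 };
	int ret;

	strncpy(old.secctx, "system_u:object_r:demo_t:s0", DEMO_CTX_MAX - 1);
	ret = demo_check_v0(&old);
	printf("check_v0 -> %d, secid=%u\n", ret, (unsigned int)old.secid);
	return 0;
}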
// SPDX-License-Identifier: GPL-2.0-only /* * fs/kernfs/dir.c - kernfs directory implementation * * Copyright (c) 2001-3 Patrick Mochel * Copyright (c) 2007 SUSE Linux Products GmbH * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org> */ #include <linux/sched.h> #include <linux/fs.h> #include <linux/namei.h> #include <linux/idr.h> #include <linux/slab.h> #include <linux/security.h> #include <linux/hash.h> #include "kernfs-internal.h" DECLARE_RWSEM(kernfs_rwsem); static DEFINE_SPINLOCK(kernfs_rename_lock); /* kn->parent and ->name */ /* * Don't use rename_lock to piggy back on pr_cont_buf. We don't want to * call pr_cont() while holding rename_lock. Because sometimes pr_cont() * will perform wakeups when releasing console_sem. Holding rename_lock * will introduce deadlock if the scheduler reads the kernfs_name in the * wakeup path. */ static DEFINE_SPINLOCK(kernfs_pr_cont_lock); static char kernfs_pr_cont_buf[PATH_MAX]; /* protected by pr_cont_lock */ static DEFINE_SPINLOCK(kernfs_idr_lock); /* root->ino_idr */ #define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb) static bool kernfs_active(struct kernfs_node *kn) { lockdep_assert_held(&kernfs_rwsem); return atomic_read(&kn->active) >= 0; } static bool kernfs_lockdep(struct kernfs_node *kn) { #ifdef CONFIG_DEBUG_LOCK_ALLOC return kn->flags & KERNFS_LOCKDEP; #else return false; #endif } static int kernfs_name_locked(struct kernfs_node *kn, char *buf, size_t buflen) { if (!kn) return strlcpy(buf, "(null)", buflen); return strlcpy(buf, kn->parent ?
kn->name : "/", buflen); } /* kernfs_node_depth - compute depth from @from to @to */ static size_t kernfs_depth(struct kernfs_node *from, struct kernfs_node *to) { size_t depth = 0; while (to->parent && to != from) { depth++; to = to->parent; } return depth; } static struct kernfs_node *kernfs_common_ancestor(struct kernfs_node *a, struct kernfs_node *b) { size_t da, db; struct kernfs_root *ra = kernfs_root(a), *rb = kernfs_root(b); if (ra != rb) return NULL; da = kernfs_depth(ra->kn, a); db = kernfs_depth(rb->kn, b); while (da > db) { a = a->parent; da--; } while (db > da) { b = b->parent; db--; } /* worst case b and a will be the same at root */ while (b != a) { b = b->parent; a = a->parent; } return a; } /** * kernfs_path_from_node_locked - find a pseudo-absolute path to @kn_to, * where kn_from is treated as root of the path. * @kn_from: kernfs node which should be treated as root for the path * @kn_to: kernfs node to which path is needed * @buf: buffer to copy the path into * @buflen: size of @buf * * We need to handle couple of scenarios here: * [1] when @kn_from is an ancestor of @kn_to at some level * kn_from: /n1/n2/n3 * kn_to: /n1/n2/n3/n4/n5 * result: /n4/n5 * * [2] when @kn_from is on a different hierarchy and we need to find common * ancestor between @kn_from and @kn_to. * kn_from: /n1/n2/n3/n4 * kn_to: /n1/n2/n5 * result: /../../n5 * OR * kn_from: /n1/n2/n3/n4/n5 [depth=5] * kn_to: /n1/n2/n3 [depth=3] * result: /../.. * * [3] when @kn_to is NULL result will be "(null)" * * Returns the length of the full path. If the full length is equal to or * greater than @buflen, @buf contains the truncated path with the trailing * '\0'. On error, -errno is returned. */ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to, struct kernfs_node *kn_from, char *buf, size_t buflen) { struct kernfs_node *kn, *common; const char parent_str[] = "/.."; size_t depth_from, depth_to, len = 0; int i, j; if (!kn_to) return strlcpy(buf, "(null)", buflen); if (!kn_from) kn_from = kernfs_root(kn_to)->kn; if (kn_from == kn_to) return strlcpy(buf, "/", buflen); if (!buf) return -EINVAL; common = kernfs_common_ancestor(kn_from, kn_to); if (WARN_ON(!common)) return -EINVAL; depth_to = kernfs_depth(common, kn_to); depth_from = kernfs_depth(common, kn_from); buf[0] = '\0'; for (i = 0; i < depth_from; i++) len += strlcpy(buf + len, parent_str, len < buflen ? buflen - len : 0); /* Calculate how many bytes we need for the rest */ for (i = depth_to - 1; i >= 0; i--) { for (kn = kn_to, j = 0; j < i; j++) kn = kn->parent; len += strlcpy(buf + len, "/", len < buflen ? buflen - len : 0); len += strlcpy(buf + len, kn->name, len < buflen ? buflen - len : 0); } return len; } /** * kernfs_name - obtain the name of a given node * @kn: kernfs_node of interest * @buf: buffer to copy @kn's name into * @buflen: size of @buf * * Copies the name of @kn into @buf of @buflen bytes. The behavior is * similar to strlcpy(). It returns the length of @kn's name and if @buf * isn't long enough, it's filled upto @buflen-1 and nul terminated. * * Fills buffer with "(null)" if @kn is NULL. * * This function can be called from any context. */ int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen) { unsigned long flags; int ret; spin_lock_irqsave(&kernfs_rename_lock, flags); ret = kernfs_name_locked(kn, buf, buflen); spin_unlock_irqrestore(&kernfs_rename_lock, flags); return ret; } /** * kernfs_path_from_node - build path of node @to relative to @from. 
* @from: parent kernfs_node relative to which we need to build the path * @to: kernfs_node of interest * @buf: buffer to copy @to's path into * @buflen: size of @buf * * Builds @to's path relative to @from in @buf. @from and @to must * be on the same kernfs-root. If @from is not parent of @to, then a relative * path (which includes '..'s) as needed to reach from @from to @to is * returned. * * Returns the length of the full path. If the full length is equal to or * greater than @buflen, @buf contains the truncated path with the trailing * '\0'. On error, -errno is returned. */ int kernfs_path_from_node(struct kernfs_node *to, struct kernfs_node *from, char *buf, size_t buflen) { unsigned long flags; int ret; spin_lock_irqsave(&kernfs_rename_lock, flags); ret = kernfs_path_from_node_locked(to, from, buf, buflen); spin_unlock_irqrestore(&kernfs_rename_lock, flags); return ret; } EXPORT_SYMBOL_GPL(kernfs_path_from_node); /** * pr_cont_kernfs_name - pr_cont name of a kernfs_node * @kn: kernfs_node of interest * * This function can be called from any context. */ void pr_cont_kernfs_name(struct kernfs_node *kn) { unsigned long flags; spin_lock_irqsave(&kernfs_pr_cont_lock, flags); kernfs_name(kn, kernfs_pr_cont_buf, sizeof(kernfs_pr_cont_buf)); pr_cont("%s", kernfs_pr_cont_buf); spin_unlock_irqrestore(&kernfs_pr_cont_lock, flags); } /** * pr_cont_kernfs_path - pr_cont path of a kernfs_node * @kn: kernfs_node of interest * * This function can be called from any context. */ void pr_cont_kernfs_path(struct kernfs_node *kn) { unsigned long flags; int sz; spin_lock_irqsave(&kernfs_pr_cont_lock, flags); sz = kernfs_path_from_node(kn, NULL, kernfs_pr_cont_buf, sizeof(kernfs_pr_cont_buf)); if (sz < 0) { pr_cont("(error)"); goto out; } if (sz >= sizeof(kernfs_pr_cont_buf)) { pr_cont("(name too long)"); goto out; } pr_cont("%s", kernfs_pr_cont_buf); out: spin_unlock_irqrestore(&kernfs_pr_cont_lock, flags); } /** * kernfs_get_parent - determine the parent node and pin it * @kn: kernfs_node of interest * * Determines @kn's parent, pins and returns it. This function can be * called from any context. 
*/ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn) { struct kernfs_node *parent; unsigned long flags; spin_lock_irqsave(&kernfs_rename_lock, flags); parent = kn->parent; kernfs_get(parent); spin_unlock_irqrestore(&kernfs_rename_lock, flags); return parent; } /** * kernfs_name_hash * @name: Null terminated string to hash * @ns: Namespace tag to hash * * Returns 31 bit hash of ns + name (so it fits in an off_t ) */ static unsigned int kernfs_name_hash(const char *name, const void *ns) { unsigned long hash = init_name_hash(ns); unsigned int len = strlen(name); while (len--) hash = partial_name_hash(*name++, hash); hash = end_name_hash(hash); hash &= 0x7fffffffU; /* Reserve hash numbers 0, 1 and INT_MAX for magic directory entries */ if (hash < 2) hash += 2; if (hash >= INT_MAX) hash = INT_MAX - 1; return hash; } static int kernfs_name_compare(unsigned int hash, const char *name, const void *ns, const struct kernfs_node *kn) { if (hash < kn->hash) return -1; if (hash > kn->hash) return 1; if (ns < kn->ns) return -1; if (ns > kn->ns) return 1; return strcmp(name, kn->name); } static int kernfs_sd_compare(const struct kernfs_node *left, const struct kernfs_node *right) { return kernfs_name_compare(left->hash, left->name, left->ns, right); } /** * kernfs_link_sibling - link kernfs_node into sibling rbtree * @kn: kernfs_node of interest * * Link @kn into its sibling rbtree which starts from * @kn->parent->dir.children. * * Locking: * kernfs_rwsem held exclusive * * RETURNS: * 0 on susccess -EEXIST on failure. */ static int kernfs_link_sibling(struct kernfs_node *kn) { struct rb_node **node = &kn->parent->dir.children.rb_node; struct rb_node *parent = NULL; while (*node) { struct kernfs_node *pos; int result; pos = rb_to_kn(*node); parent = *node; result = kernfs_sd_compare(kn, pos); if (result < 0) node = &pos->rb.rb_left; else if (result > 0) node = &pos->rb.rb_right; else return -EEXIST; } /* add new node and rebalance the tree */ rb_link_node(&kn->rb, parent, node); rb_insert_color(&kn->rb, &kn->parent->dir.children); /* successfully added, account subdir number */ if (kernfs_type(kn) == KERNFS_DIR) kn->parent->dir.subdirs++; kernfs_inc_rev(kn->parent); return 0; } /** * kernfs_unlink_sibling - unlink kernfs_node from sibling rbtree * @kn: kernfs_node of interest * * Try to unlink @kn from its sibling rbtree which starts from * kn->parent->dir.children. Returns %true if @kn was actually * removed, %false if @kn wasn't on the rbtree. * * Locking: * kernfs_rwsem held exclusive */ static bool kernfs_unlink_sibling(struct kernfs_node *kn) { if (RB_EMPTY_NODE(&kn->rb)) return false; if (kernfs_type(kn) == KERNFS_DIR) kn->parent->dir.subdirs--; kernfs_inc_rev(kn->parent); rb_erase(&kn->rb, &kn->parent->dir.children); RB_CLEAR_NODE(&kn->rb); return true; } /** * kernfs_get_active - get an active reference to kernfs_node * @kn: kernfs_node to get an active reference to * * Get an active reference of @kn. This function is noop if @kn * is NULL. * * RETURNS: * Pointer to @kn on success, NULL on failure. */ struct kernfs_node *kernfs_get_active(struct kernfs_node *kn) { if (unlikely(!kn)) return NULL; if (!atomic_inc_unless_negative(&kn->active)) return NULL; if (kernfs_lockdep(kn)) rwsem_acquire_read(&kn->dep_map, 0, 1, _RET_IP_); return kn; } /** * kernfs_put_active - put an active reference to kernfs_node * @kn: kernfs_node to put an active reference to * * Put an active reference to @kn. This function is noop if @kn * is NULL. 
*/ void kernfs_put_active(struct kernfs_node *kn) { int v; if (unlikely(!kn)) return; if (kernfs_lockdep(kn)) rwsem_release(&kn->dep_map, _RET_IP_); v = atomic_dec_return(&kn->active); if (likely(v != KN_DEACTIVATED_BIAS)) return; wake_up_all(&kernfs_root(kn)->deactivate_waitq); } /** * kernfs_drain - drain kernfs_node * @kn: kernfs_node to drain * * Drain existing usages and nuke all existing mmaps of @kn. Mutiple * removers may invoke this function concurrently on @kn and all will * return after draining is complete. */ static void kernfs_drain(struct kernfs_node *kn) __releases(&kernfs_rwsem) __acquires(&kernfs_rwsem) { struct kernfs_root *root = kernfs_root(kn); lockdep_assert_held_write(&kernfs_rwsem); WARN_ON_ONCE(kernfs_active(kn)); up_write(&kernfs_rwsem); if (kernfs_lockdep(kn)) { rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_); if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS) lock_contended(&kn->dep_map, _RET_IP_); } /* but everyone should wait for draining */ wait_event(root->deactivate_waitq, atomic_read(&kn->active) == KN_DEACTIVATED_BIAS); if (kernfs_lockdep(kn)) { lock_acquired(&kn->dep_map, _RET_IP_); rwsem_release(&kn->dep_map, _RET_IP_); } kernfs_drain_open_files(kn); down_write(&kernfs_rwsem); } /** * kernfs_get - get a reference count on a kernfs_node * @kn: the target kernfs_node */ void kernfs_get(struct kernfs_node *kn) { if (kn) { WARN_ON(!atomic_read(&kn->count)); atomic_inc(&kn->count); } } EXPORT_SYMBOL_GPL(kernfs_get); /** * kernfs_put - put a reference count on a kernfs_node * @kn: the target kernfs_node * * Put a reference count of @kn and destroy it if it reached zero. */ void kernfs_put(struct kernfs_node *kn) { struct kernfs_node *parent; struct kernfs_root *root; if (!kn || !atomic_dec_and_test(&kn->count)) return; root = kernfs_root(kn); repeat: /* * Moving/renaming is always done while holding reference. * kn->parent won't change beneath us. */ parent = kn->parent; WARN_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS, "kernfs_put: %s/%s: released with incorrect active_ref %d\n", parent ? parent->name : "", kn->name, atomic_read(&kn->active)); if (kernfs_type(kn) == KERNFS_LINK) kernfs_put(kn->symlink.target_kn); kfree_const(kn->name); if (kn->iattr) { simple_xattrs_free(&kn->iattr->xattrs); kmem_cache_free(kernfs_iattrs_cache, kn->iattr); } spin_lock(&kernfs_idr_lock); idr_remove(&root->ino_idr, (u32)kernfs_ino(kn)); spin_unlock(&kernfs_idr_lock); kmem_cache_free(kernfs_node_cache, kn); kn = parent; if (kn) { if (atomic_dec_and_test(&kn->count)) goto repeat; } else { /* just released the root kn, free @root too */ idr_destroy(&root->ino_idr); kfree(root); } } EXPORT_SYMBOL_GPL(kernfs_put); /** * kernfs_node_from_dentry - determine kernfs_node associated with a dentry * @dentry: the dentry in question * * Return the kernfs_node associated with @dentry. If @dentry is not a * kernfs one, %NULL is returned. * * While the returned kernfs_node will stay accessible as long as @dentry * is accessible, the returned node can be in any state and the caller is * fully responsible for determining what's accessible. 
*/ struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry) { if (dentry->d_sb->s_op == &kernfs_sops) return kernfs_dentry_node(dentry); return NULL; } static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root, struct kernfs_node *parent, const char *name, umode_t mode, kuid_t uid, kgid_t gid, unsigned flags) { struct kernfs_node *kn; u32 id_highbits; int ret; name = kstrdup_const(name, GFP_KERNEL); if (!name) return NULL; kn = kmem_cache_zalloc(kernfs_node_cache, GFP_KERNEL); if (!kn) goto err_out1; idr_preload(GFP_KERNEL); spin_lock(&kernfs_idr_lock); ret = idr_alloc_cyclic(&root->ino_idr, kn, 1, 0, GFP_ATOMIC); if (ret >= 0 && ret < root->last_id_lowbits) root->id_highbits++; id_highbits = root->id_highbits; root->last_id_lowbits = ret; spin_unlock(&kernfs_idr_lock); idr_preload_end(); if (ret < 0) goto err_out2; kn->id = (u64)id_highbits << 32 | ret; atomic_set(&kn->count, 1); atomic_set(&kn->active, KN_DEACTIVATED_BIAS); RB_CLEAR_NODE(&kn->rb); kn->name = name; kn->mode = mode; kn->flags = flags; if (!uid_eq(uid, GLOBAL_ROOT_UID) || !gid_eq(gid, GLOBAL_ROOT_GID)) { struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID, .ia_uid = uid, .ia_gid = gid, }; ret = __kernfs_setattr(kn, &iattr); if (ret < 0) goto err_out3; } if (parent) { ret = security_kernfs_init_security(parent, kn); if (ret) goto err_out3; } return kn; err_out3: spin_lock(&kernfs_idr_lock); idr_remove(&root->ino_idr, (u32)kernfs_ino(kn)); spin_unlock(&kernfs_idr_lock); err_out2: kmem_cache_free(kernfs_node_cache, kn); err_out1: kfree_const(name); return NULL; } struct kernfs_node *kernfs_new_node(struct kernfs_node *parent, const char *name, umode_t mode, kuid_t uid, kgid_t gid, unsigned flags) { struct kernfs_node *kn; if (parent->mode & S_ISGID) { /* this code block imitates inode_init_owner() for * kernfs */ if (parent->iattr) gid = parent->iattr->ia_gid; if (flags & KERNFS_DIR) mode |= S_ISGID; } kn = __kernfs_new_node(kernfs_root(parent), parent, name, mode, uid, gid, flags); if (kn) { kernfs_get(parent); kn->parent = parent; } return kn; } /* * kernfs_find_and_get_node_by_id - get kernfs_node from node id * @root: the kernfs root * @id: the target node id * * @id's lower 32bits encode ino and upper gen. If the gen portion is * zero, all generations are matched. * * RETURNS: * NULL on failure. Return a kernfs node with reference counter incremented */ struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root, u64 id) { struct kernfs_node *kn; ino_t ino = kernfs_id_ino(id); u32 gen = kernfs_id_gen(id); spin_lock(&kernfs_idr_lock); kn = idr_find(&root->ino_idr, (u32)ino); if (!kn) goto err_unlock; if (sizeof(ino_t) >= sizeof(u64)) { /* we looked up with the low 32bits, compare the whole */ if (kernfs_ino(kn) != ino) goto err_unlock; } else { /* 0 matches all generations */ if (unlikely(gen && kernfs_gen(kn) != gen)) goto err_unlock; } /* * ACTIVATED is protected with kernfs_mutex but it was clear when * @kn was added to idr and we just wanna see it set. No need to * grab kernfs_mutex. */ if (unlikely(!(kn->flags & KERNFS_ACTIVATED) || !atomic_inc_not_zero(&kn->count))) goto err_unlock; spin_unlock(&kernfs_idr_lock); return kn; err_unlock: spin_unlock(&kernfs_idr_lock); return NULL; } /** * kernfs_add_one - add kernfs_node to parent without warning * @kn: kernfs_node to be added * * The caller must already have initialized @kn->parent. This * function increments nlink of the parent's inode if @kn is a * directory and link into the children list of the parent. 
* * RETURNS: * 0 on success, -EEXIST if entry with the given name already * exists. */ int kernfs_add_one(struct kernfs_node *kn) { struct kernfs_node *parent = kn->parent; struct kernfs_iattrs *ps_iattr; bool has_ns; int ret; down_write(&kernfs_rwsem); ret = -EINVAL; has_ns = kernfs_ns_enabled(parent); if (WARN(has_ns != (bool)kn->ns, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n", has_ns ? "required" : "invalid", parent->name, kn->name)) goto out_unlock; if (kernfs_type(parent) != KERNFS_DIR) goto out_unlock; ret = -ENOENT; if (parent->flags & KERNFS_EMPTY_DIR) goto out_unlock; if ((parent->flags & KERNFS_ACTIVATED) && !kernfs_active(parent)) goto out_unlock; kn->hash = kernfs_name_hash(kn->name, kn->ns); ret = kernfs_link_sibling(kn); if (ret) goto out_unlock; /* Update timestamps on the parent */ ps_iattr = parent->iattr; if (ps_iattr) { ktime_get_real_ts64(&ps_iattr->ia_ctime); ps_iattr->ia_mtime = ps_iattr->ia_ctime; } up_write(&kernfs_rwsem); /* * Activate the new node unless CREATE_DEACTIVATED is requested. * If not activated here, the kernfs user is responsible for * activating the node with kernfs_activate(). A node which hasn't * been activated is not visible to userland and its removal won't * trigger deactivation. */ if (!(kernfs_root(kn)->flags & KERNFS_ROOT_CREATE_DEACTIVATED)) kernfs_activate(kn); return 0; out_unlock: up_write(&kernfs_rwsem); return ret; } /** * kernfs_find_ns - find kernfs_node with the given name * @parent: kernfs_node to search under * @name: name to look for * @ns: the namespace tag to use * * Look for kernfs_node with name @name under @parent. Returns pointer to * the found kernfs_node on success, %NULL on failure. */ static struct kernfs_node *kernfs_find_ns(struct kernfs_node *parent, const unsigned char *name, const void *ns) { struct rb_node *node = parent->dir.children.rb_node; bool has_ns = kernfs_ns_enabled(parent); unsigned int hash; lockdep_assert_held(&kernfs_rwsem); if (has_ns != (bool)ns) { WARN(1, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n", has_ns ? "required" : "invalid", parent->name, name); return NULL; } hash = kernfs_name_hash(name, ns); while (node) { struct kernfs_node *kn; int result; kn = rb_to_kn(node); result = kernfs_name_compare(hash, name, ns, kn); if (result < 0) node = node->rb_left; else if (result > 0) node = node->rb_right; else return kn; } return NULL; } static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent, const unsigned char *path, const void *ns) { size_t len; char *p, *name; lockdep_assert_held_read(&kernfs_rwsem); spin_lock_irq(&kernfs_pr_cont_lock); len = strlcpy(kernfs_pr_cont_buf, path, sizeof(kernfs_pr_cont_buf)); if (len >= sizeof(kernfs_pr_cont_buf)) { spin_unlock_irq(&kernfs_pr_cont_lock); return NULL; } p = kernfs_pr_cont_buf; while ((name = strsep(&p, "/")) && parent) { if (*name == '\0') continue; parent = kernfs_find_ns(parent, name, ns); } spin_unlock_irq(&kernfs_pr_cont_lock); return parent; } /** * kernfs_find_and_get_ns - find and get kernfs_node with the given name * @parent: kernfs_node to search under * @name: name to look for * @ns: the namespace tag to use * * Look for kernfs_node with name @name under @parent and get a reference * if found. This function may sleep and returns pointer to the found * kernfs_node on success, %NULL on failure. 
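 *
 * A minimal usage sketch (hypothetical caller; "foo" and
 * do_something_with() are placeholders, not kernfs interfaces):
 *
 *	kn = kernfs_find_and_get_ns(parent, "foo", NULL);
 *	if (kn) {
 *		do_something_with(kn);
 *		kernfs_put(kn);
 *	}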
 */
struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent,
					   const char *name, const void *ns)
{
	struct kernfs_node *kn;

	down_read(&kernfs_rwsem);
	kn = kernfs_find_ns(parent, name, ns);
	kernfs_get(kn);
	up_read(&kernfs_rwsem);

	return kn;
}
EXPORT_SYMBOL_GPL(kernfs_find_and_get_ns);

/**
 * kernfs_walk_and_get_ns - find and get kernfs_node with the given path
 * @parent: kernfs_node to search under
 * @path: path to look for
 * @ns: the namespace tag to use
 *
 * Look for kernfs_node with path @path under @parent and get a reference
 * if found.  This function may sleep and returns pointer to the found
 * kernfs_node on success, %NULL on failure.
 */
struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent,
					   const char *path, const void *ns)
{
	struct kernfs_node *kn;

	down_read(&kernfs_rwsem);
	kn = kernfs_walk_ns(parent, path, ns);
	kernfs_get(kn);
	up_read(&kernfs_rwsem);

	return kn;
}

/**
 * kernfs_create_root - create a new kernfs hierarchy
 * @scops: optional syscall operations for the hierarchy
 * @flags: KERNFS_ROOT_* flags
 * @priv: opaque data associated with the new directory
 *
 * Returns the root of the new hierarchy on success, ERR_PTR() value on
 * failure.
 */
struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
				       unsigned int flags, void *priv)
{
	struct kernfs_root *root;
	struct kernfs_node *kn;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	idr_init(&root->ino_idr);
	INIT_LIST_HEAD(&root->supers);

	/*
	 * On 64bit ino setups, id is ino.  On 32bit, low 32bits are ino and
	 * high bits are generation.  The starting value for both ino and
	 * generation is 1.  Initialize upper 32bit allocation accordingly.
	 */
	if (sizeof(ino_t) >= sizeof(u64))
		root->id_highbits = 0;
	else
		root->id_highbits = 1;

	kn = __kernfs_new_node(root, NULL, "", S_IFDIR | S_IRUGO | S_IXUGO,
			       GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
			       KERNFS_DIR);
	if (!kn) {
		idr_destroy(&root->ino_idr);
		kfree(root);
		return ERR_PTR(-ENOMEM);
	}

	kn->priv = priv;
	kn->dir.root = root;

	root->syscall_ops = scops;
	root->flags = flags;
	root->kn = kn;
	init_waitqueue_head(&root->deactivate_waitq);

	if (!(root->flags & KERNFS_ROOT_CREATE_DEACTIVATED))
		kernfs_activate(kn);

	return root;
}

/**
 * kernfs_destroy_root - destroy a kernfs hierarchy
 * @root: root of the hierarchy to destroy
 *
 * Destroy the hierarchy anchored at @root by removing all existing
 * directories and destroying @root.
 */
void kernfs_destroy_root(struct kernfs_root *root)
{
	kernfs_remove(root->kn);	/* will also free @root */
}

/**
 * kernfs_create_dir_ns - create a directory
 * @parent: parent in which to create a new directory
 * @name: name of the new directory
 * @mode: mode of the new directory
 * @uid: uid of the new directory
 * @gid: gid of the new directory
 * @priv: opaque data associated with the new directory
 * @ns: optional namespace tag of the directory
 *
 * Returns the created node on success, ERR_PTR() value on failure.
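 *
 * A minimal usage sketch (hypothetical caller; "widgets", @priv and @my_ns
 * are placeholders):
 *
 *	kn = kernfs_create_dir_ns(parent, "widgets", 0755,
 *				  GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
 *				  priv, my_ns);
 *	if (IS_ERR(kn))
 *		return PTR_ERR(kn);
 *	...
 *	kernfs_remove(kn);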
*/ struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent, const char *name, umode_t mode, kuid_t uid, kgid_t gid, void *priv, const void *ns) { struct kernfs_node *kn; int rc; /* allocate */ kn = kernfs_new_node(parent, name, mode | S_IFDIR, uid, gid, KERNFS_DIR); if (!kn) return ERR_PTR(-ENOMEM); kn->dir.root = parent->dir.root; kn->ns = ns; kn->priv = priv; /* link in */ rc = kernfs_add_one(kn); if (!rc) return kn; kernfs_put(kn); return ERR_PTR(rc); } /** * kernfs_create_empty_dir - create an always empty directory * @parent: parent in which to create a new directory * @name: name of the new directory * * Returns the created node on success, ERR_PTR() value on failure. */ struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent, const char *name) { struct kernfs_node *kn; int rc; /* allocate */ kn = kernfs_new_node(parent, name, S_IRUGO|S_IXUGO|S_IFDIR, GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, KERNFS_DIR); if (!kn) return ERR_PTR(-ENOMEM); kn->flags |= KERNFS_EMPTY_DIR; kn->dir.root = parent->dir.root; kn->ns = NULL; kn->priv = NULL; /* link in */ rc = kernfs_add_one(kn); if (!rc) return kn; kernfs_put(kn); return ERR_PTR(rc); } static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags) { struct kernfs_node *kn; if (flags & LOOKUP_RCU) return -ECHILD; /* Negative hashed dentry? */ if (d_really_is_negative(dentry)) { struct kernfs_node *parent; /* If the kernfs parent node has changed discard and * proceed to ->lookup. */ down_read(&kernfs_rwsem); spin_lock(&dentry->d_lock); parent = kernfs_dentry_node(dentry->d_parent); if (parent) { if (kernfs_dir_changed(parent, dentry)) { spin_unlock(&dentry->d_lock); up_read(&kernfs_rwsem); return 0; } } spin_unlock(&dentry->d_lock); up_read(&kernfs_rwsem); /* The kernfs parent node hasn't changed, leave the * dentry negative and return success. */ return 1; } kn = kernfs_dentry_node(dentry); down_read(&kernfs_rwsem); /* The kernfs node has been deactivated */ if (!kernfs_active(kn)) goto out_bad; /* The kernfs node has been moved? */ if (kernfs_dentry_node(dentry->d_parent) != kn->parent) goto out_bad; /* The kernfs node has been renamed */ if (strcmp(dentry->d_name.name, kn->name) != 0) goto out_bad; /* The kernfs node has been moved to a different namespace */ if (kn->parent && kernfs_ns_enabled(kn->parent) && kernfs_info(dentry->d_sb)->ns != kn->ns) goto out_bad; up_read(&kernfs_rwsem); return 1; out_bad: up_read(&kernfs_rwsem); return 0; } const struct dentry_operations kernfs_dops = { .d_revalidate = kernfs_dop_revalidate, }; static struct dentry *kernfs_iop_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct kernfs_node *parent = dir->i_private; struct kernfs_node *kn; struct inode *inode = NULL; const void *ns = NULL; down_read(&kernfs_rwsem); if (kernfs_ns_enabled(parent)) ns = kernfs_info(dir->i_sb)->ns; kn = kernfs_find_ns(parent, dentry->d_name.name, ns); /* attach dentry and inode */ if (kn) { /* Inactive nodes are invisible to the VFS so don't * create a negative. */ if (!kernfs_active(kn)) { up_read(&kernfs_rwsem); return NULL; } inode = kernfs_get_inode(dir->i_sb, kn); if (!inode) inode = ERR_PTR(-ENOMEM); } /* * Needed for negative dentry validation. * The negative dentry can be created in kernfs_iop_lookup() * or transforms from positive dentry in dentry_unlink_inode() * called from vfs_rmdir(). 
*/ if (!IS_ERR(inode)) kernfs_set_rev(parent, dentry); up_read(&kernfs_rwsem); /* instantiate and hash (possibly negative) dentry */ return d_splice_alias(inode, dentry); } static int kernfs_iop_mkdir(struct user_namespace *mnt_userns, struct inode *dir, struct dentry *dentry, umode_t mode) { struct kernfs_node *parent = dir->i_private; struct kernfs_syscall_ops *scops = kernfs_root(parent)->syscall_ops; int ret; if (!scops || !scops->mkdir) return -EPERM; if (!kernfs_get_active(parent)) return -ENODEV; ret = scops->mkdir(parent, dentry->d_name.name, mode); kernfs_put_active(parent); return ret; } static int kernfs_iop_rmdir(struct inode *dir, struct dentry *dentry) { struct kernfs_node *kn = kernfs_dentry_node(dentry); struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops; int ret; if (!scops || !scops->rmdir) return -EPERM; if (!kernfs_get_active(kn)) return -ENODEV; ret = scops->rmdir(kn); kernfs_put_active(kn); return ret; } static int kernfs_iop_rename(struct user_namespace *mnt_userns, struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { struct kernfs_node *kn = kernfs_dentry_node(old_dentry); struct kernfs_node *new_parent = new_dir->i_private; struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops; int ret; if (flags) return -EINVAL; if (!scops || !scops->rename) return -EPERM; if (!kernfs_get_active(kn)) return -ENODEV; if (!kernfs_get_active(new_parent)) { kernfs_put_active(kn); return -ENODEV; } ret = scops->rename(kn, new_parent, new_dentry->d_name.name); kernfs_put_active(new_parent); kernfs_put_active(kn); return ret; } const struct inode_operations kernfs_dir_iops = { .lookup = kernfs_iop_lookup, .permission = kernfs_iop_permission, .setattr = kernfs_iop_setattr, .getattr = kernfs_iop_getattr, .listxattr = kernfs_iop_listxattr, .mkdir = kernfs_iop_mkdir, .rmdir = kernfs_iop_rmdir, .rename = kernfs_iop_rename, }; static struct kernfs_node *kernfs_leftmost_descendant(struct kernfs_node *pos) { struct kernfs_node *last; while (true) { struct rb_node *rbn; last = pos; if (kernfs_type(pos) != KERNFS_DIR) break; rbn = rb_first(&pos->dir.children); if (!rbn) break; pos = rb_to_kn(rbn); } return last; } /** * kernfs_next_descendant_post - find the next descendant for post-order walk * @pos: the current position (%NULL to initiate traversal) * @root: kernfs_node whose descendants to walk * * Find the next descendant to visit for post-order traversal of @root's * descendants. @root is included in the iteration and the last node to be * visited. */ static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos, struct kernfs_node *root) { struct rb_node *rbn; lockdep_assert_held_write(&kernfs_rwsem); /* if first iteration, visit leftmost descendant which may be root */ if (!pos) return kernfs_leftmost_descendant(root); /* if we visited @root, we're done */ if (pos == root) return NULL; /* if there's an unvisited sibling, visit its leftmost descendant */ rbn = rb_next(&pos->rb); if (rbn) return kernfs_leftmost_descendant(rb_to_kn(rbn)); /* no sibling left, visit parent */ return pos->parent; } /** * kernfs_activate - activate a node which started deactivated * @kn: kernfs_node whose subtree is to be activated * * If the root has KERNFS_ROOT_CREATE_DEACTIVATED set, a newly created node * needs to be explicitly activated. A node which hasn't been activated * isn't visible to userland and deactivation is skipped during its * removal. 
This is useful to construct atomic init sequences where
 * creation of multiple nodes should either succeed or fail atomically.
 *
 * The caller is responsible for ensuring that this function is not called
 * after kernfs_remove*() is invoked on @kn.
 */
void kernfs_activate(struct kernfs_node *kn)
{
	struct kernfs_node *pos;

	down_write(&kernfs_rwsem);

	pos = NULL;
	while ((pos = kernfs_next_descendant_post(pos, kn))) {
		if (pos->flags & KERNFS_ACTIVATED)
			continue;

		WARN_ON_ONCE(pos->parent && RB_EMPTY_NODE(&pos->rb));
		WARN_ON_ONCE(atomic_read(&pos->active) != KN_DEACTIVATED_BIAS);

		atomic_sub(KN_DEACTIVATED_BIAS, &pos->active);
		pos->flags |= KERNFS_ACTIVATED;
	}

	up_write(&kernfs_rwsem);
}

static void __kernfs_remove(struct kernfs_node *kn)
{
	struct kernfs_node *pos;

	lockdep_assert_held_write(&kernfs_rwsem);

	/*
	 * Short-circuit if non-root @kn has already finished removal.
	 * This is for kernfs_remove_self() which plays with active ref
	 * after removal.
	 */
	if (!kn || (kn->parent && RB_EMPTY_NODE(&kn->rb)))
		return;

	pr_debug("kernfs %s: removing\n", kn->name);

	/* prevent any new usage under @kn by deactivating all nodes */
	pos = NULL;
	while ((pos = kernfs_next_descendant_post(pos, kn)))
		if (kernfs_active(pos))
			atomic_add(KN_DEACTIVATED_BIAS, &pos->active);

	/* deactivate and unlink the subtree node-by-node */
	do {
		pos = kernfs_leftmost_descendant(kn);

		/*
		 * kernfs_drain() drops kernfs_rwsem temporarily and @pos's
		 * base ref could have been put by someone else by the time
		 * the function returns.  Make sure it doesn't go away
		 * underneath us.
		 */
		kernfs_get(pos);

		/*
		 * Drain iff @kn was activated.  This avoids draining and
		 * its lockdep annotations for nodes which have never been
		 * activated and allows embedding kernfs_remove() in create
		 * error paths without worrying about draining.
		 */
		if (kn->flags & KERNFS_ACTIVATED)
			kernfs_drain(pos);
		else
			WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);

		/*
		 * kernfs_unlink_sibling() succeeds once per node.  Use it
		 * to decide who's responsible for cleanups.
		 */
		if (!pos->parent || kernfs_unlink_sibling(pos)) {
			struct kernfs_iattrs *ps_iattr =
				pos->parent ? pos->parent->iattr : NULL;

			/* update timestamps on the parent */
			if (ps_iattr) {
				ktime_get_real_ts64(&ps_iattr->ia_ctime);
				ps_iattr->ia_mtime = ps_iattr->ia_ctime;
			}

			kernfs_put(pos);
		}

		kernfs_put(pos);
	} while (pos != kn);
}

/**
 * kernfs_remove - remove a kernfs_node recursively
 * @kn: the kernfs_node to remove
 *
 * Remove @kn along with all its subdirectories and files.
 */
void kernfs_remove(struct kernfs_node *kn)
{
	down_write(&kernfs_rwsem);
	__kernfs_remove(kn);
	up_write(&kernfs_rwsem);
}

/**
 * kernfs_break_active_protection - break out of active protection
 * @kn: the self kernfs_node
 *
 * The caller must be running off of a kernfs operation which is invoked
 * with an active reference - e.g. one of kernfs_ops.  Each invocation of
 * this function must also be matched with an invocation of
 * kernfs_unbreak_active_protection().
 *
 * This function releases the active reference of @kn the caller is
 * holding.  Once this function is called, @kn may be removed at any point
 * and the caller is solely responsible for ensuring that the objects it
 * dereferences are accessible.
 */
void kernfs_break_active_protection(struct kernfs_node *kn)
{
	/*
	 * Take ourself out of the active ref dependency chain.  If
	 * we're called without an active ref, lockdep will complain.
	 */
	kernfs_put_active(kn);
}

/**
 * kernfs_unbreak_active_protection - undo kernfs_break_active_protection()
 * @kn: the self kernfs_node
 *
 * If kernfs_break_active_protection() was called, this function must be
 * invoked before finishing the kernfs operation.  Note that while this
 * function restores the active reference, it doesn't and can't actually
 * restore the active protection - @kn may already be removed or in the
 * process of being removed.  Once kernfs_break_active_protection() is
 * invoked, that protection is irreversibly gone for the kernfs operation
 * instance.
 *
 * While this function may be called at any point after
 * kernfs_break_active_protection() is invoked, its most useful location
 * would be right before the enclosing kernfs operation returns.
 */
void kernfs_unbreak_active_protection(struct kernfs_node *kn)
{
	/*
	 * @kn->active could be in any state; however, the increment we do
	 * here will be undone as soon as the enclosing kernfs operation
	 * finishes and this temporary bump can't break anything.  If @kn
	 * is alive, nothing changes.  If @kn is being deactivated, the
	 * soon-to-follow put will either finish deactivation or restore
	 * deactivated state.  If @kn is already removed, the temporary
	 * bump is guaranteed to be gone before @kn is released.
	 */
	atomic_inc(&kn->active);
	if (kernfs_lockdep(kn))
		rwsem_acquire(&kn->dep_map, 0, 1, _RET_IP_);
}

/**
 * kernfs_remove_self - remove a kernfs_node from its own method
 * @kn: the self kernfs_node to remove
 *
 * The caller must be running off of a kernfs operation which is invoked
 * with an active reference - e.g. one of kernfs_ops.  This can be used to
 * implement a file operation which deletes itself.
 *
 * For example, the "delete" file for a sysfs device directory can be
 * implemented by invoking kernfs_remove_self() on the "delete" file
 * itself.  This function breaks the circular dependency of trying to
 * deactivate self while holding an active ref itself.  It isn't necessary
 * to modify the usual removal path to use kernfs_remove_self().  The
 * "delete" implementation can simply invoke kernfs_remove_self() on self
 * before proceeding with the usual removal path.  kernfs will ignore later
 * kernfs_remove() on self.
 *
 * kernfs_remove_self() can be called multiple times concurrently on the
 * same kernfs_node.  Only the first one actually performs removal and
 * returns %true.  All others will wait until the kernfs operation which
 * won self-removal finishes and return %false.  Note that the losers wait
 * for the completion of not only the winning kernfs_remove_self() but also
 * the whole kernfs_ops which won the arbitration.  This can be used to
 * guarantee, for example, that all concurrent writes to a "delete" file
 * finish only after the whole operation is complete.
 */
bool kernfs_remove_self(struct kernfs_node *kn)
{
	bool ret;

	down_write(&kernfs_rwsem);
	kernfs_break_active_protection(kn);

	/*
	 * SUICIDAL is used to arbitrate among competing invocations.  Only
	 * the first one will actually perform removal.  When the removal
	 * is complete, SUICIDED is set and the active ref is restored
	 * while kernfs_rwsem is held exclusive.  The ones which lost
	 * arbitration wait for SUICIDED && drained which can happen only
	 * after the enclosing kernfs operation which executed the winning
	 * instance of kernfs_remove_self() finished.
	 */
	if (!(kn->flags & KERNFS_SUICIDAL)) {
		kn->flags |= KERNFS_SUICIDAL;
		__kernfs_remove(kn);
		kn->flags |= KERNFS_SUICIDED;
		ret = true;
	} else {
		wait_queue_head_t *waitq = &kernfs_root(kn)->deactivate_waitq;
		DEFINE_WAIT(wait);

		while (true) {
			prepare_to_wait(waitq, &wait, TASK_UNINTERRUPTIBLE);

			if ((kn->flags & KERNFS_SUICIDED) &&
			    atomic_read(&kn->active) == KN_DEACTIVATED_BIAS)
				break;

			up_write(&kernfs_rwsem);
			schedule();
			down_write(&kernfs_rwsem);
		}
		finish_wait(waitq, &wait);
		WARN_ON_ONCE(!RB_EMPTY_NODE(&kn->rb));
		ret = false;
	}

	/*
	 * This must be done while kernfs_rwsem is held exclusive; otherwise,
	 * waiting for SUICIDED && deactivated could finish prematurely.
	 */
	kernfs_unbreak_active_protection(kn);

	up_write(&kernfs_rwsem);
	return ret;
}

/**
 * kernfs_remove_by_name_ns - find a kernfs_node by name and remove it
 * @parent: parent of the target
 * @name: name of the kernfs_node to remove
 * @ns: namespace tag of the kernfs_node to remove
 *
 * Look for the kernfs_node with @name and @ns under @parent and remove it.
 * Returns 0 on success, -ENOENT if such entry doesn't exist.
 */
int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
			     const void *ns)
{
	struct kernfs_node *kn;

	if (!parent) {
		WARN(1, KERN_WARNING "kernfs: can not remove '%s', no directory\n",
		     name);
		return -ENOENT;
	}

	down_write(&kernfs_rwsem);

	kn = kernfs_find_ns(parent, name, ns);
	if (kn) {
		kernfs_get(kn);
		__kernfs_remove(kn);
		kernfs_put(kn);
	}

	up_write(&kernfs_rwsem);

	if (kn)
		return 0;
	else
		return -ENOENT;
}

/**
 * kernfs_rename_ns - move and rename a kernfs_node
 * @kn: target node
 * @new_parent: new parent to put @kn under
 * @new_name: new name
 * @new_ns: new namespace tag
 */
int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
		     const char *new_name, const void *new_ns)
{
	struct kernfs_node *old_parent;
	const char *old_name = NULL;
	int error;

	/* can't move or rename root */
	if (!kn->parent)
		return -EINVAL;

	down_write(&kernfs_rwsem);

	error = -ENOENT;
	if (!kernfs_active(kn) || !kernfs_active(new_parent) ||
	    (new_parent->flags & KERNFS_EMPTY_DIR))
		goto out;

	error = 0;
	if ((kn->parent == new_parent) && (kn->ns == new_ns) &&
	    (strcmp(kn->name, new_name) == 0))
		goto out;	/* nothing to rename */

	error = -EEXIST;
	if (kernfs_find_ns(new_parent, new_name, new_ns))
		goto out;

	/* rename kernfs_node */
	if (strcmp(kn->name, new_name) != 0) {
		error = -ENOMEM;
		new_name = kstrdup_const(new_name, GFP_KERNEL);
		if (!new_name)
			goto out;
	} else {
		new_name = NULL;
	}

	/*
	 * Move to the appropriate place in the appropriate directory's rbtree.
*/ kernfs_unlink_sibling(kn); kernfs_get(new_parent); /* rename_lock protects ->parent and ->name accessors */ spin_lock_irq(&kernfs_rename_lock); old_parent = kn->parent; kn->parent = new_parent; kn->ns = new_ns; if (new_name) { old_name = kn->name; kn->name = new_name; } spin_unlock_irq(&kernfs_rename_lock); kn->hash = kernfs_name_hash(kn->name, kn->ns); kernfs_link_sibling(kn); kernfs_put(old_parent); kfree_const(old_name); error = 0; out: up_write(&kernfs_rwsem); return error; } /* Relationship between mode and the DT_xxx types */ static inline unsigned char dt_type(struct kernfs_node *kn) { return (kn->mode >> 12) & 15; } static int kernfs_dir_fop_release(struct inode *inode, struct file *filp) { kernfs_put(filp->private_data); return 0; } static struct kernfs_node *kernfs_dir_pos(const void *ns, struct kernfs_node *parent, loff_t hash, struct kernfs_node *pos) { if (pos) { int valid = kernfs_active(pos) && pos->parent == parent && hash == pos->hash; kernfs_put(pos); if (!valid) pos = NULL; } if (!pos && (hash > 1) && (hash < INT_MAX)) { struct rb_node *node = parent->dir.children.rb_node; while (node) { pos = rb_to_kn(node); if (hash < pos->hash) node = node->rb_left; else if (hash > pos->hash) node = node->rb_right; else break; } } /* Skip over entries which are dying/dead or in the wrong namespace */ while (pos && (!kernfs_active(pos) || pos->ns != ns)) { struct rb_node *node = rb_next(&pos->rb); if (!node) pos = NULL; else pos = rb_to_kn(node); } return pos; } static struct kernfs_node *kernfs_dir_next_pos(const void *ns, struct kernfs_node *parent, ino_t ino, struct kernfs_node *pos) { pos = kernfs_dir_pos(ns, parent, ino, pos); if (pos) { do { struct rb_node *node = rb_next(&pos->rb); if (!node) pos = NULL; else pos = rb_to_kn(node); } while (pos && (!kernfs_active(pos) || pos->ns != ns)); } return pos; } static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx) { struct dentry *dentry = file->f_path.dentry; struct kernfs_node *parent = kernfs_dentry_node(dentry); struct kernfs_node *pos = file->private_data; const void *ns = NULL; if (!dir_emit_dots(file, ctx)) return 0; down_read(&kernfs_rwsem); if (kernfs_ns_enabled(parent)) ns = kernfs_info(dentry->d_sb)->ns; for (pos = kernfs_dir_pos(ns, parent, ctx->pos, pos); pos; pos = kernfs_dir_next_pos(ns, parent, ctx->pos, pos)) { const char *name = pos->name; unsigned int type = dt_type(pos); int len = strlen(name); ino_t ino = kernfs_ino(pos); ctx->pos = pos->hash; file->private_data = pos; kernfs_get(pos); up_read(&kernfs_rwsem); if (!dir_emit(ctx, name, len, ino, type)) return 0; down_read(&kernfs_rwsem); } up_read(&kernfs_rwsem); file->private_data = NULL; ctx->pos = INT_MAX; return 0; } const struct file_operations kernfs_dir_fops = { .read = generic_read_dir, .iterate_shared = kernfs_fop_readdir, .release = kernfs_dir_fop_release, .llseek = generic_file_llseek, }; |
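
/*
 * Usage sketch for the directory API above (illustrative only, not built as
 * part of kernfs; the "example" name and the NULL scops/priv arguments are
 * placeholders).  A kernfs user such as sysfs or cgroup typically creates a
 * root, populates it, and tears everything down with kernfs_destroy_root():
 *
 *	struct kernfs_root *root;
 *	struct kernfs_node *dir;
 *
 *	root = kernfs_create_root(NULL, 0, NULL);
 *	if (IS_ERR(root))
 *		return PTR_ERR(root);
 *
 *	dir = kernfs_create_dir_ns(root->kn, "example", 0755,
 *				   GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
 *				   NULL, NULL);
 *	if (IS_ERR(dir)) {
 *		kernfs_destroy_root(root);
 *		return PTR_ERR(dir);
 *	}
 *
 *	...
 *
 *	kernfs_remove(dir);
 *	kernfs_destroy_root(root);
 */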